Example no. 1
static __exit void modexit(void)
{
	kernel_sock_shutdown(client, 0);
	kernel_sock_shutdown(sock, 0);
	unregister_netdev(tcptun_netdev);
	free_netdev(tcptun_netdev);
}
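A note on the second argument: kernel_sock_shutdown() takes an enum sock_shutdown_cmd, not a bitmask. The literal 0 used above therefore means SHUT_RD (stop receives only), while every other example in this list passes SHUT_RDWR. For reference, the enum as defined in include/linux/net.h:

/* The "how" argument of kernel_sock_shutdown(), from include/linux/net.h. */
enum sock_shutdown_cmd {
	SHUT_RD,	/* = 0: disallow further receives */
	SHUT_WR,	/* = 1: disallow further sends */
	SHUT_RDWR,	/* = 2: disallow further receives and sends */
};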
Example no. 2
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}
Example no. 3
void sc_socket_close(void) {
    TRACEKMOD("### sc_socket_close\n");

    /* Close sockets */
    if (sc_sockets[SOCKET_UDP]) {
        kernel_sock_shutdown(sc_sockets[SOCKET_UDP], SHUT_RDWR);
        sock_release(sc_sockets[SOCKET_UDP]);
    }

    if (sc_sockets[SOCKET_UDPLITE]) {
        kernel_sock_shutdown(sc_sockets[SOCKET_UDPLITE], SHUT_RDWR);
        sock_release(sc_sockets[SOCKET_UDPLITE]);
    }

    memset(sc_sockets, 0, sizeof(sc_sockets));
}
Example no. 4
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(void *arg)
{
	struct rxe_qp *qp = arg;

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
}
Example no. 5
static void syslog_close(struct socket **socket)
{
    if (!*socket) return;
    kernel_sock_shutdown(*socket, SHUT_RDWR);
    sock_release(*socket);
    *socket = NULL;
}
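Example no. 5 is the cleanest statement of the close idiom running through this list: take a struct socket **, shut the socket down, release it, and NULL the caller's pointer so a repeated call is a harmless no-op. A minimal caller sketch (syslog_socket and example_teardown are hypothetical names, not part of the example above):

static struct socket *syslog_socket;	/* hypothetical module-level handle */

static void example_teardown(void)
{
	syslog_close(&syslog_socket);	/* shuts down, releases, NULLs */
	syslog_close(&syslog_socket);	/* second call returns immediately */
}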
Example no. 6
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        bool reserved)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
    struct nbd_device *nbd = cmd->nbd;
    struct socket *sock = NULL;

    spin_lock(&nbd->sock_lock);

    set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);

    if (nbd->sock) {
        sock = nbd->sock;
        get_file(sock->file);
    }

    spin_unlock(&nbd->sock_lock);
    if (sock) {
        kernel_sock_shutdown(sock, SHUT_RDWR);
        sockfd_put(sock);
    }

    req->errors++;
    dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
    return BLK_EH_HANDLED;
}
Example no. 7
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	if (!nbd->sock)
		return;

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	nbd->sock = NULL;
	del_timer_sync(&nbd->timeout_timer);
}
Example no. 8
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	if (lock)
		mutex_lock(&nbd->tx_lock);
	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
	}
	if (lock)
		mutex_unlock(&nbd->tx_lock);
}
Example no. 9
static void sock_shutdown(struct nbd_device *lo, int lock)
{
	if (lock)
		mutex_lock(&lo->tx_lock);
	if (lo->sock) {
		printk(KERN_WARNING "%s: shutting down socket\n",
			lo->disk->disk_name);
		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
		lo->sock = NULL;
	}
	if (lock)
		mutex_unlock(&lo->tx_lock);
}
Example no. 10
/**---------------------------------------------------------------------------
 * Close an existing upcall socket.
 * 
 * @param[in] info Pointer to the upcall information structure.
 *---------------------------------------------------------------------------*/ 
static void
vqec_dp_ipcserver_close_upcall_sock (vqec_dp_upcall_info_t *upcall_info)
{
    if (upcall_info->sock) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
        upcall_info->sock->ops->shutdown(upcall_info->sock, 0);
#else
        kernel_sock_shutdown(upcall_info->sock, SHUT_RDWR);
#endif
        sock_release(upcall_info->sock); 
        upcall_info->sock = NULL;
    }
}
Example no. 11
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp)
{
    struct sockaddr_in6 udp6_addr;
    int err;
    struct socket *sock = NULL;

    err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
    if (err < 0)
        goto error;

    sk_change_net(sock->sk, net);

    udp6_addr.sin6_family = AF_INET6;
    memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
           sizeof(udp6_addr.sin6_addr));
    udp6_addr.sin6_port = cfg->local_udp_port;
    err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
                      sizeof(udp6_addr));
    if (err < 0)
        goto error;

    if (cfg->peer_udp_port) {
        udp6_addr.sin6_family = AF_INET6;
        memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
               sizeof(udp6_addr.sin6_addr));
        udp6_addr.sin6_port = cfg->peer_udp_port;
        err = kernel_connect(sock,
                             (struct sockaddr *)&udp6_addr,
                             sizeof(udp6_addr), 0);
    }
    if (err < 0)
        goto error;

    udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
    udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);

    *sockp = sock;
    return 0;

error:
    if (sock) {
        kernel_sock_shutdown(sock, SHUT_RDWR);
        sk_release_kernel(sock->sk);
    }
    *sockp = NULL;
    return err;
}
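The sk_change_net()/sk_release_kernel() pairing above (also in Examples no. 15, 18 and 26) belongs to kernels older than v4.2, where sock_create_kern() had no struct net parameter. On newer kernels the namespace is passed in directly and the error path collapses to a plain sock_release(), as Example no. 24 below shows. A sketch against the newer API, assuming that kernel baseline (udp_sock_create6_modern is an illustrative name; the bind/connect steps are elided):

static int udp_sock_create6_modern(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	struct socket *sock = NULL;
	int err;

	/* v4.2+: the namespace goes straight into sock_create_kern() */
	err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		goto error;

	/* ... kernel_bind()/kernel_connect() as in the example above ... */

	*sockp = sock;
	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);	/* replaces sk_release_kernel(sock->sk) */
	}
	*sockp = NULL;
	return err;
}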
Example no. 12
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	spin_lock_irq(&nbd->sock_lock);

	if (!nbd->sock) {
		spin_unlock_irq(&nbd->sock_lock);
		return;
	}

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	sockfd_put(nbd->sock);
	nbd->sock = NULL;
	spin_unlock_irq(&nbd->sock_lock);

	del_timer(&nbd->timeout_timer);
}
Example no. 13
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
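Example no. 13 is the multi-connection variant: each nbd_sock carries its own tx_lock, and taking it around kernel_sock_shutdown() guarantees no request is half-transmitted when the socket goes away. A sketch of the sender side this pairs with (nbd_send_cmd_sketch and its arguments are illustrative, not taken from the driver):

static int nbd_send_cmd_sketch(struct nbd_sock *nsock, struct kvec *iov,
			       size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int ret;

	/* same lock sock_shutdown() takes above, so a shutdown cannot
	 * interleave with a partially written request */
	mutex_lock(&nsock->tx_lock);
	ret = kernel_sendmsg(nsock->sock, &msg, iov, 1, len);
	mutex_unlock(&nsock->tx_lock);
	return ret;
}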
Example no. 14
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	/* Forcibly shutdown the socket causing all listeners
	 * to error
	 *
	 * FIXME: This code is duplicated from sys_shutdown, but
	 * there should be a more generic interface rather than
	 * calling socket ops directly here */
	if (lock)
		mutex_lock(&nbd->tx_lock);
	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
	}
	if (lock)
		mutex_unlock(&nbd->tx_lock);
}
Example no. 15
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp)
{
    int err;
    struct socket *sock = NULL;
    struct sockaddr_in udp_addr;

    err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
    if (err < 0)
        goto error;

    sk_change_net(sock->sk, net);

    udp_addr.sin_family = AF_INET;
    udp_addr.sin_addr = cfg->local_ip;
    udp_addr.sin_port = cfg->local_udp_port;
    err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
                      sizeof(udp_addr));
    if (err < 0)
        goto error;

    if (cfg->peer_udp_port) {
        udp_addr.sin_family = AF_INET;
        udp_addr.sin_addr = cfg->peer_ip;
        udp_addr.sin_port = cfg->peer_udp_port;
        err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
                             sizeof(udp_addr), 0);
        if (err < 0)
            goto error;
    }

    sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

    *sockp = sock;
    return 0;

error:
    if (sock) {
        kernel_sock_shutdown(sock, SHUT_RDWR);
        sk_release_kernel(sock->sk);
    }
    *sockp = NULL;
    return err;
}
Example no. 16
static void sock_shutdown(struct nbd_device *lo, int lock)
{
	/* Forcibly shutdown the socket causing all listeners
	 * to error
	 *
	 * FIXME: This code is duplicated from sys_shutdown, but
	 * there should be a more generic interface rather than
	 * calling socket ops directly here */
	if (lock)
		mutex_lock(&lo->tx_lock);
	if (lo->sock) {
		printk(KERN_WARNING "%s: shutting down socket\n",
			lo->disk->disk_name);
		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
		lo->sock = NULL;
	}
	if (lock)
		mutex_unlock(&lo->tx_lock);
}
Example no. 17
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	spin_lock_irqsave(&nbd->sock_lock, flags);

	nbd->timedout = true;

	if (nbd->sock)
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);

	spin_unlock_irqrestore(&nbd->sock_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}
Example no. 18
int sclp_sock_create4(struct net *net, struct sclp_port_cfg *cfg, struct socket **sockp)
{
    int err;
    struct socket *sock = NULL;
    struct sockaddr_in sclp_addr;

    err = sock_create_kern(AF_INET, SOCK_SCLP, 0, &sock);
    if (err < 0)
        goto error;

    sk_change_net(sock->sk, net);

    sclp_addr.sin_family = AF_INET;
    sclp_addr.sin_addr = cfg->local_ip;
    sclp_addr.sin_port = cfg->local_sclp_port;

    err = kernel_bind(sock, (struct sockaddr*)&sclp_addr, sizeof(sclp_addr));
    if (err < 0)
        goto error;

    if (cfg->peer_sclp_port) {
        sclp_addr.sin_family = AF_INET;
        sclp_addr.sin_addr = cfg->peer_ip;
        sclp_addr.sin_port = cfg->peer_sclp_port;
        err = kernel_connect(sock, (struct sockaddr*)&sclp_addr, sizeof(sclp_addr), 0);
        if (err < 0)
            goto error;
    }

    *sockp = sock;
    return 0;

error:
    if (sock) {
        kernel_sock_shutdown(sock, SHUT_RDWR);
        sk_release_kernel(sock->sk);
    }
    *sockp = NULL;
    return err;
}
Example no. 19
File: rxrpc.c Project: krzk/linux
/*
 * close the RxRPC socket AFS was using
 */
void afs_close_socket(struct afs_net *net)
{
	_enter("");

	kernel_listen(net->socket, 0);
	flush_workqueue(afs_async_calls);

	if (net->spare_incoming_call) {
		afs_put_call(net->spare_incoming_call);
		net->spare_incoming_call = NULL;
	}

	_debug("outstanding %u", atomic_read(&net->nr_outstanding_calls));
	wait_var_event(&net->nr_outstanding_calls,
		       !atomic_read(&net->nr_outstanding_calls));
	_debug("no outstanding calls");

	kernel_sock_shutdown(net->socket, SHUT_RDWR);
	flush_workqueue(afs_async_calls);
	sock_release(net->socket);

	_debug("dework");
	_leave("");
}
Example no. 20
static int32_t
vqec_reader_loop (void *task_params) 
{
    int32_t bytes_read, i;
    vqec_iobuf_t *m_iobuf;
    start_params_t params;
    vqec_dp_error_t err;

    /* 
     * -- reparent task to init().
     * -- accept SIGKILL.
     * -- signal parent to notify of thread startup. 
     */
    daemonize("vqec_reader_loop");
    allow_signal(SIGKILL);
    __set_current_state(TASK_INTERRUPTIBLE);
    params =  *(start_params_t *)task_params;
    complete(params.completion);

    m_iobuf = kmalloc(sizeof(vqec_iobuf_t) * params.iobufs, GFP_KERNEL);
    if (!m_iobuf) {
        return (0);
    }

    for (i = 0; i < params.iobufs; i++) {
        m_iobuf[i].buf_ptr = kmalloc(params.iobuf_size, GFP_KERNEL);
        m_iobuf[i].buf_len = params.iobuf_size;
    }

    while (TRUE) {
        flush_signals(current);
        bytes_read = 0;
        err = vqec_ifclient_tuner_recvmsg(params.cp_tuner_id,
                                          m_iobuf,
                                          1,
                                          &bytes_read,
                                          params.timeout);
        if (!bytes_read) {
            msleep(20);  /* sleep for 20 msec */
        } else if (params.sk) {
            udp_tx_pak(params.sk, 
                       m_iobuf[0].buf_ptr,
                       m_iobuf[0].buf_wrlen);
        }
        if (s_reader_task_stop) {
            s_reader_task_stop = FALSE;
            break;
        }
    }

    if (params.sk) {
        kernel_sock_shutdown(params.sk, SHUT_RDWR);
        sock_release(params.sk);
    }
    for (i = 0; i < params.iobufs; i++) {
        kfree(m_iobuf[i].buf_ptr);
    }
    kfree(m_iobuf);
    s_reader_task_running = FALSE;
    return (0);
}
Example no. 21
int smc_close_active(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_INIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk); /* wake up accept */
		if (smc->clcsock && smc->clcsock->sk) {
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
			/* wake up kernel_accept of smc_tcp_listen_worker */
			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
		}
		smc_close_cleanup_listen(sk);
		release_sock(sk);
		flush_work(&smc->tcp_listen_work);
		lock_sock(sk);
		break;
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state == SMC_ACTIVE) {
			/* send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
			sk->sk_state = SMC_PEERCLOSEWAIT1;
		} else {
			/* peer event has changed the state */
			goto again;
		}
		break;
	case SMC_APPFINCLOSEWAIT:
		/* socket already shutdown wr or both (active close) */
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
		    sk->sk_state != SMC_APPCLOSEWAIT2)
			goto again;
		/* confirm close from peer */
		rc = smc_close_final(conn);
		if (rc)
			break;
		if (smc_cdc_rxed_any_close(conn)) {
			/* peer has closed the socket already */
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* postponed passive closing */
		} else {
			/* peer has just issued a shutdown write */
			sk->sk_state = SMC_PEERFINCLOSEWAIT;
		}
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PEERFINCLOSEWAIT:
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PROCESSABORT:
		smc_close_abort(conn);
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}
Example no. 22
static int CliRecvThread(void *data) {
    struct kvec recviov, recvdataiov, sendiov, senddataiov;
    struct client_host *clihost = (struct client_host *)data;
    struct msghdr recvmsg, sendmsg, senddatamsg, recvdatamsg;
	struct netmsg_req msg_req;
	struct netmsg_data *msg_wrdata = kmalloc(sizeof(struct netmsg_data), GFP_USER);
	struct netmsg_rpy msg_rpy;
	struct netmsg_data *msg_rddata = kmalloc(sizeof(struct netmsg_data), GFP_USER);
	int len = 0;

	/* both buffers are required; bail out if either allocation failed */
	if (!msg_wrdata || !msg_rddata) {
		kfree(msg_wrdata);
		kfree(msg_rddata);
		return -ENOMEM;
	}

	memset(&recvmsg, 0, sizeof(struct msghdr));
	memset(&recvdatamsg, 0, sizeof(struct msghdr));
	memset(&sendmsg, 0, sizeof(struct msghdr));
	memset(&senddatamsg, 0, sizeof(struct msghdr));

	sendmsg.msg_name = (void *)&clihost->host_addr;
	sendmsg.msg_namelen = sizeof(struct sockaddr_in);
	senddatamsg.msg_name = (void *)&clihost->host_data_addr;
	senddatamsg.msg_namelen = sizeof(struct sockaddr_in);

	memset(&recviov, 0, sizeof(struct kvec));
	memset(&recvdataiov, 0, sizeof(struct kvec));
	memset(&sendiov, 0, sizeof(struct kvec));
	memset(&senddataiov, 0, sizeof(struct kvec));
//  recviov.iov_base = (void *)&msg_req.info;
//  recviov.iov_len = sizeof(struct req_info);
//	sendiov.iov_base = (void *)&msg_rpy.info;
//	sendiov.iov_len = sizeof(struct rpy_info);
//	recvdataiov.iov_base = (void *)&msg_wrdata->info;
//	recvdataiov.iov_len = sizeof(struct data_info);
//	senddataiov.iov_base = (void *)&msg_rddata->info;
//	senddataiov.iov_len = sizeof(struct data_info);

    while (!kthread_should_stop()) {
        //schedule_timeout_interruptible(SCHEDULE_TIME * HZ);
		memset(&msg_req, 0, sizeof(struct netmsg_req));
		memset(&msg_rpy, 0, sizeof(struct netmsg_rpy));

		mutex_lock(&clihost->ptr_mutex);
		if(CLIHOST_STATE_CLOSED == clihost->state) {
			mutex_unlock(&clihost->ptr_mutex);
			continue;
		}
		mutex_unlock(&clihost->ptr_mutex);

		recviov.iov_base = (void *)&msg_req.info;
		recviov.iov_len = sizeof(struct req_info);
		len = kernel_recvmsg(clihost->sock, &recvmsg, &recviov, 1, 
					sizeof(struct req_info), 0);
        KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_recvmsg len=%d, ID=%d\n",len, msg_req.info.msgID);
        //close of client
		if(len == 0) {
			break;
		}
		if (len < 0 || len != sizeof(struct req_info)) {
            KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_recvmsg err, len=%d, buffer=%ld\n",
                    len, sizeof(struct req_info));
            if (len == -ECONNREFUSED) {
                KER_DEBUG(KERN_ALERT"mempool thread: Receive Port Unreachable packet!\n");
            }
			continue;
        }
		switch(msg_req.info.msgID) {
			//alloc block
			case NETMSG_CLI_REQUEST_ALLOC_BLK: {
				unsigned int nIndex = 0, count = 0;

				KER_PRT(KERN_INFO"begin to alloc\n");
				msg_rpy.info.msgID = NETMSG_SER_REPLY_ALLOC_BLK;

				mutex_lock(&Devices->blk_mutex);
				for(nIndex = 0, count = 0; nIndex < MAX_BLK_NUM_IN_MEMPOOL && count < BLK_MAX_PER_REQ &&
							count < msg_req.info.data.req_alloc_blk.blknum; nIndex++) {
					if(Devices->blk[nIndex].avail && !Devices->blk[nIndex].inuse) {
						msg_rpy.info.data.rpyblk.blkinfo[count].remoteIndex = nIndex;
						Devices->blk[nIndex].inuse = TRUE;
						count++;
					}
				}
				mutex_unlock(&Devices->blk_mutex);

				Devices->nblk_avail -= count; 
				clihost->block_inuse += count;
				msg_rpy.info.data.rpyblk.blk_alloc = count;
				msg_rpy.info.data.rpyblk.blk_rest_available = Devices->nblk_avail;

				KER_DEBUG(KERN_INFO"mempool thread: send alloc blk reply\n");
		
				break;
			}
			//write data
			case NETMSG_CLI_REQUEST_WRITE: {
				unsigned int nBlkIndex = 0, nPageIndex = 0;

				recvdataiov.iov_base = (void *)&msg_wrdata->info;
				recvdataiov.iov_len = sizeof(struct data_info);
				len = kernel_recvmsg(clihost->datasock, &recvmsg, &recvdataiov, 1, sizeof(struct data_info), 0);
				if (len < 0 || len != sizeof(struct data_info)) {
					KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_recvmsg err, len=%d, buffer=%ld\n",
					        len, sizeof(struct req_info));
				    if (len == -ECONNREFUSED) {
						KER_DEBUG(KERN_ALERT"mempool thread: Receive Port Unreachable packet!\n");
					}
				}

				KER_PRT(KERN_INFO"begin to write\n");

				nBlkIndex = msg_req.info.data.req_write.remoteIndex;
				nPageIndex = msg_req.info.data.req_write.pageIndex;

				KER_DEBUG(KERN_INFO"mempool CliSendThread: nBlkIndex %d, nPageIndex %d\n", nBlkIndex, nPageIndex);
				KER_DEBUG(KERN_INFO"mempool CliSendThread: data %s\n", msg_wrdata->info.data);
				mutex_lock(&Devices->blk_mutex);
				memcpy(Devices->blk[nBlkIndex].blk_addr + nPageIndex * VPAGE_SIZE,
							msg_wrdata->info.data, VPAGE_SIZE);
				mutex_unlock(&Devices->blk_mutex);

				msg_rpy.info.msgID = NETMSG_SER_REPLY_WRITE;

				KER_PRT(KERN_INFO"end to write\n");
				break;
			}
			//read data
			case NETMSG_CLI_REQUEST_READ: {
				unsigned int nBlkIndex = 0, nPageIndex = 0;

				KER_PRT(KERN_INFO"begin to read\n");
				msg_rpy.info.msgID = NETMSG_SER_REPLY_READ;
				msg_rpy.info.data.rpy_read.vpageaddr = msg_req.info.data.req_read.vpageaddr;
				msg_rpy.info.data.rpy_read.remoteIndex = msg_req.info.data.req_read.remoteIndex;
				msg_rpy.info.data.rpy_read.pageIndex = msg_req.info.data.req_read.pageIndex;

				nBlkIndex = msg_req.info.data.req_write.remoteIndex;
				nPageIndex = msg_req.info.data.req_write.pageIndex;

				memcpy(msg_rddata->info.data, Devices->blk[nBlkIndex].blk_addr + nPageIndex * VPAGE_SIZE,
							VPAGE_SIZE);
				KER_PRT(KERN_INFO"end to read\n");

				senddataiov.iov_base = (void *)&msg_rddata->info;
				senddataiov.iov_len = sizeof(struct data_info);
				len = kernel_sendmsg(clihost->datasock, &senddatamsg, &senddataiov, 1, sizeof(struct data_info));

				if (len < 0 || len != sizeof(struct data_info)) {
					KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_sendmsg err, len=%d, buffer=%ld\n",
					        len, sizeof(struct req_info));
				    if (len == -ECONNREFUSED) {
						KER_DEBUG(KERN_ALERT"mempool thread: Receive Port Unreachable packet!\n");
					}
				}
				break;
			}
			//heart beat
			case NETMSG_CLI_REQUEST_HEARTBEAT: {
				msg_rpy.info.msgID = NETMSG_SER_REPLY_HEARTBEAT;
				msg_rpy.info.data.rpy_heartbeat.blk_rest_available = Devices->nblk_avail;
				break;
			}
			default:
				continue;
		}

		sendiov.iov_base = (void *)&msg_rpy.info;
		sendiov.iov_len = sizeof(struct rpy_info);
		len = kernel_sendmsg(clihost->sock, &sendmsg, &sendiov, 1, sizeof(struct rpy_info));

		if(len != sizeof(struct rpy_info)) {
			KER_DEBUG(KERN_INFO"kernel_sendmsg err, len=%d, buffer=%ld\n",
						len, sizeof(struct rpy_info));
			if(len == -ECONNREFUSED) {
				KER_DEBUG(KERN_INFO"Receive Port Unreachable packet!\n");
			}
			//continue;
		}
		KER_PRT(KERN_INFO"end\n");

    }


	mutex_lock(&clihost->ptr_mutex);
	if(CLIHOST_STATE_CONNECTED == clihost->state) {
		clihost->state = CLIHOST_STATE_CLOSED;
		kernel_sock_shutdown(clihost->sock, SHUT_RDWR);
		kernel_sock_shutdown(clihost->datasock, SHUT_RDWR);
		//sock_release(clihost->sock);
		//sock_release(clihost->datasock);
		//clihost->sock = NULL;
	}
	mutex_unlock(&clihost->ptr_mutex);
	kfree(msg_wrdata);
	kfree(msg_rddata);
	while(!kthread_should_stop()) {
		schedule_timeout_interruptible(SCHEDULE_TIME * HZ);
	}
    return 0;
}
Example no. 23
int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp = NULL;
	int conn_state;
	struct rds_conn_path *cp;

	if (!sock) /* module unload or netns delete in progress */
		return -ENETUNREACH;

	ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
			       sock->sk->sk_type, sock->sk->sk_protocol,
			       &new_sock);
	if (ret)
		goto out;

	new_sock->type = sock->type;
	new_sock->ops = sock->ops;
	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
	if (ret < 0)
		goto out;

	ret = rds_tcp_keepalive(new_sock);
	if (ret < 0)
		goto out;

	rds_tcp_tune(new_sock);

	inet = inet_sk(new_sock->sk);

	rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
		 &inet->inet_saddr, ntohs(inet->inet_sport),
		 &inet->inet_daddr, ntohs(inet->inet_dport));

	conn = rds_conn_create(sock_net(sock->sk),
			       inet->inet_saddr, inet->inet_daddr,
			       &rds_tcp_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup
	 */
	rs_tcp = rds_tcp_accept_one_path(conn);
	if (!rs_tcp)
		goto rst_nsk;
	mutex_lock(&rs_tcp->t_conn_path_lock);
	cp = rs_tcp->t_cpath;
	conn_state = rds_conn_path_state(cp);
	WARN_ON(conn_state == RDS_CONN_UP);
	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
		goto rst_nsk;
	if (rs_tcp->t_sock) {
		/* Need to resolve a duelling SYN between peers.
		 * We have an outstanding SYN to this peer, which may
		 * potentially have transitioned to the RDS_CONN_UP state,
		 * so we must quiesce any send threads before resetting
		 * c_transport_data.
		 */
		if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) ||
		    !cp->cp_outgoing) {
			goto rst_nsk;
		} else {
			rds_tcp_reset_callbacks(new_sock, cp);
			cp->cp_outgoing = 0;
			/* rds_connect_path_complete() marks RDS_CONN_UP */
			rds_connect_path_complete(cp, RDS_CONN_RESETTING);
		}
	} else {
		rds_tcp_set_callbacks(new_sock, cp);
		rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
	}
	new_sock = NULL;
	ret = 0;
	goto out;
rst_nsk:
	/* reset the newly returned accept sock and bail */
	kernel_sock_shutdown(new_sock, SHUT_RDWR);
	ret = 0;
out:
	if (rs_tcp)
		mutex_unlock(&rs_tcp->t_conn_path_lock);
	if (new_sock)
		sock_release(new_sock);
	return ret;
}
Example no. 24
int rpl_udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
		        struct socket **sockp)
{
	int err;
	struct socket *sock = NULL;

#if IS_ENABLED(CONFIG_IPV6)
	if (cfg->family == AF_INET6) {
		struct sockaddr_in6 udp6_addr;

		err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, 0, &sock);
		if (err < 0)
			goto error;

		udp6_addr.sin6_family = AF_INET6;
		memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
		       sizeof(udp6_addr.sin6_addr));
		udp6_addr.sin6_port = cfg->local_udp_port;
		err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
				  sizeof(udp6_addr));
		if (err < 0)
			goto error;

		if (cfg->peer_udp_port) {
			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = cfg->peer_udp_port;
			err = kernel_connect(sock,
					     (struct sockaddr *)&udp6_addr,
					     sizeof(udp6_addr), 0);
		}
		if (err < 0)
			goto error;
	} else
#endif
	if (cfg->family == AF_INET) {
		struct sockaddr_in udp_addr;

		err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
		if (err < 0)
			goto error;

		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->local_ip;
		udp_addr.sin_port = cfg->local_udp_port;
		err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
				  sizeof(udp_addr));
		if (err < 0)
			goto error;

		if (cfg->peer_udp_port) {
			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->peer_ip;
			udp_addr.sin_port = cfg->peer_udp_port;
			err = kernel_connect(sock,
					     (struct sockaddr *)&udp_addr,
					     sizeof(udp_addr), 0);
			if (err < 0)
				goto error;
		}
	} else {
		return -EPFNOSUPPORT;
	}


	*sockp = sock;

	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
	}
	*sockp = NULL;
	return err;
}
Example no. 25
void rpl_udp_tunnel_sock_release(struct socket *sock)
{
	rcu_assign_sk_user_data(sock->sk, NULL);
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}
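Example no. 25 clears sk_user_data under RCU before shutting the socket down, so an encapsulation receive handler running concurrently can never pick up a stale tunnel pointer. A sketch of the receive side that this ordering protects (my_encap_rcv and struct my_tunnel are hypothetical; the bail-out relies on the rcu_assign_sk_user_data(sk, NULL) above):

static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct my_tunnel *tun = rcu_dereference_sk_user_data(sk);

	if (!tun) {		/* teardown already ran: drop the packet */
		kfree_skb(skb);
		return 0;
	}
	/* ... normal tunnel receive processing ... */
	return 0;
}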
Example no. 26
void sclp_tunnel_sock_release(struct socket *sock)
{
    kernel_sock_shutdown(sock, SHUT_RDWR);
    sk_release_kernel(sock->sk);
}
Example no. 27
static struct socket *
udp_tx_sock_create (struct in_addr if_address, 
                    uint16_t port,
                    struct in_addr dest_addr,
                    uint32_t bufsize)
{
    struct socket *sk = NULL;
    struct sockaddr_in saddr;
    char loop = 0;
    int err = 0;

    do {
        err = sock_create(PF_INET, SOCK_DGRAM, 0, &sk);
        if (err < 0) {
            break;
        }

        memset(&saddr, 0, sizeof(saddr));
        saddr.sin_addr = if_address;
        saddr.sin_family = AF_INET;
        saddr.sin_port = 0;    
        err = kernel_bind(sk, 
                          (struct sockaddr *) &saddr,
                          sizeof(saddr));
        if (err < 0) {
            break;
        }

        err = kernel_setsockopt(sk, 
                                IPPROTO_IP, 
                                IP_MULTICAST_LOOP, 
                                &loop, 
                                sizeof(loop));
        if (err < 0) {
            break;
        }

        memset(&saddr, 0, sizeof(saddr));
        saddr.sin_family = AF_INET;
        saddr.sin_addr = dest_addr;
        saddr.sin_port = port;
        err = kernel_connect(sk,
                             (struct sockaddr *)&saddr, 
                             sizeof(saddr), 
                             0);
        if (err < 0) {
            break;
        }

        if (bufsize) {
            err = kernel_setsockopt(sk, 
                                    SOL_SOCKET,
                                    SO_SNDBUF, 
                                    (char *)&bufsize,
                                    sizeof(bufsize));
            if (err < 0) {
                break;
            }
        }

    } while (0);

    /* sock_create() leaves sk untouched on failure, so guard against
     * tearing down a socket that was never created */
    if (err < 0 && sk) {
        kernel_sock_shutdown(sk, SHUT_RDWR);
        sock_release(sk);
        sk = NULL;
    }
    
    return (sk);
}