/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			   (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}
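/*
 * A minimal sketch of the sock_send_bvec() helper this path relies on,
 * assuming sock_xmit() here takes (nbd, send, buf, size, msg_flags) as in
 * the control transmission above: it maps the segment's page and forwards
 * the bytes over the socket, propagating MSG_MORE so the network stack can
 * coalesce the data with whatever follows.
 */
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
				 int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);

	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}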
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct socket *sock = NULL;

	spin_lock(&nbd->sock_lock);

	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);

	if (nbd->sock) {
		sock = nbd->sock;
		/* pin the socket's file so it can't be released under us */
		get_file(sock->file);
	}

	spin_unlock(&nbd->sock_lock);
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sockfd_put(sock);
	}

	req->errors++;
	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	return BLK_EH_HANDLED;
}
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}
static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
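/*
 * nbd_size_update() bails out early when no server is attached. A plausible
 * sketch of the nbd_is_connected() predicate it relies on, assuming the
 * device records the receiving task for as long as a connection is live:
 */
static inline bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}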
static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}
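/*
 * The blk-mq variants here recover their request with blk_mq_rq_from_pdu(),
 * i.e. the nbd_cmd lives in the per-request driver payload that blk-mq
 * allocates behind each request once the tag set's cmd_size is set to
 * sizeof(struct nbd_cmd). A minimal sketch of that structure; the single
 * field is an assumption, and it is all the functions shown here dereference:
 */
struct nbd_cmd {
	struct nbd_device *nbd;
};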
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	/* __blk_end_request_all() must be called with the queue lock held */
	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	spin_lock_irqsave(&nbd->sock_lock, flags);

	nbd->timedout = true;

	if (nbd->sock)
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);

	spin_unlock_irqrestore(&nbd->sock_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}
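/*
 * The (unsigned long arg) signature above matches the classic kernel timer
 * API, where the callback receives the value passed to setup_timer(). A
 * sketch of how such a timeout might be armed at submission time;
 * nbd_arm_timeout and the timeout_timer/xmit_timeout fields are illustrative
 * names, not necessarily the driver's own:
 */
static void nbd_arm_timeout(struct nbd_device *nbd)
{
	setup_timer(&nbd->timeout_timer, nbd_xmit_timeout, (unsigned long)nbd);
	if (nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
}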
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	/*
	 * If our disconnect packet times out then we're already holding the
	 * config_lock and could deadlock here, so just set an error and return,
	 * we'll handle shutting everything down later.
	 */
	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		return BLK_EH_HANDLED;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}
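/*
 * A plausible sketch of the sock_shutdown() invoked above for the
 * single-socket layout, assuming ->sock is guarded by sock_lock as in the
 * timeout handlers here: it shuts down both directions of the transport so
 * blocked sends and receives fail, then drops the driver's socket reference.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	spin_lock_irq(&nbd->sock_lock);

	if (!nbd->sock) {
		spin_unlock_irq(&nbd->sock_lock);
		return;
	}

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	sockfd_put(nbd->sock);
	nbd->sock = NULL;

	spin_unlock_irq(&nbd->sock_lock);
}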
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	nbd->disconnect = true;

	spin_lock_irqsave(&nbd->tasks_lock, flags);

	if (nbd->task_recv)
		force_sig(SIGKILL, nbd->task_recv);

	if (nbd->task_send)
		force_sig(SIGKILL, nbd->task_send);

	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);

			/* only ask for more when another segment follows */
			flags = is_last ? 0 : MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}
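/*
 * On the receive side, the 4-byte tag copied into request.handle above must
 * be mapped back to the originating request. A sketch of that reverse lookup
 * using the blk-mq unique-tag helpers; nbd_handle_to_req is an illustrative
 * name, and error handling (bad hardware-queue index, no matching request)
 * is elided:
 */
static struct request *nbd_handle_to_req(struct nbd_device *nbd, char *handle)
{
	u32 tag;
	u16 hwq;

	memcpy(&tag, handle, sizeof(tag));
	hwq = blk_mq_unique_tag_to_hwq(tag);
	return blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				blk_mq_unique_tag_to_tag(tag));
}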