/* Bulk-in completion handler: recycle the finished request onto the
 * tx_idle list and wake any writer blocked on write_wq. */
static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct acc_dev *dev = _acc_dev;

	/* any non-zero completion status is treated as a disconnect */
	if (req->status)
		acc_set_disconnected(dev);

	/* park the request before waking the writer so it is guaranteed
	 * to find an idle request available */
	req_put(dev, &dev->tx_idle, req);
	wake_up(&dev->write_wq);
}
/*
 * Return the message (request) to continue receiving into on a client
 * connection, or NULL when nothing should be read.
 *
 * On eof: any partially received request is discarded, and the
 * connection is marked done once it has no outstanding requests.
 * Otherwise the in-progress request (conn->rmsg) is returned, or a
 * fresh one is allocated when 'alloc' is true.
 */
struct msg *
req_recv_next(struct context *ctx, struct conn *conn, bool alloc)
{
    struct msg *msg;

    ASSERT(conn->client && !conn->proxy);

    if (conn->eof) {
        msg = conn->rmsg;

        /* client sent eof before sending the entire request */
        if (msg != NULL) {
            conn->rmsg = NULL;

            ASSERT(msg->peer == NULL);
            ASSERT(msg->request && !msg->done);

            log_error("eof c %d discarding incomplete req %"PRIu64" len "
                      "%"PRIu32"", conn->sd, msg->id, msg->mlen);

            req_put(msg);
        }

        /*
         * TCP half-close enables the client to terminate its half of the
         * connection (i.e. the client no longer sends data), but it still
         * is able to receive data from the proxy. The proxy closes its
         * half (by sending the second FIN) when the client has no
         * outstanding requests
         */
        if (!conn->active(conn)) {
            conn->done = 1;
            log_debug(LOG_INFO, "c %d is done", conn->sd);
        }
        return NULL;
    }

    /* resume the request currently being parsed, if any */
    msg = conn->rmsg;
    if (msg != NULL) {
        ASSERT(msg->request);
        return msg;
    }

    if (!alloc) {
        return NULL;
    }

    /* start a new request; it becomes the connection's rmsg */
    msg = req_get(conn);
    if (msg != NULL) {
        conn->rmsg = msg;
    }

    return msg;
}
static void mtp_tunnel_complete_in(struct usb_endpoint *ept, struct usb_request *req) { struct mtp_tunnel_context *ctxt = req->context; if (req->status != 0) ctxt->error = 1; req_put(ctxt, &ctxt->tx_idle, req); wake_up(&ctxt->write_wq); }
/* Bulk-in completion handler: recycle the request and wake writers.
 * Only an endpoint shutdown (-ESHUTDOWN) is treated as a disconnect;
 * other error statuses leave the connected state untouched. */
static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct acc_dev *acc = _acc_dev;

	if (req->status == -ESHUTDOWN) {
		pr_debug("acc_complete_in set disconnected");
		acc_set_disconnected(acc);
	}

	req_put(acc, &acc->tx_idle, req);
	wake_up(&acc->write_wq);
}
/*
 * Build an error response for a request that failed.
 *
 * For a fragmented request (frag_id != 0), all trailing fragments with
 * the same frag_id are dequeued from the client outq and released,
 * keeping the first non-zero fragment error. Any already-linked peer
 * response is unlinked and released before the error response is built.
 */
static struct msg *
rsp_make_error(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;        /* peer message (response) */
    struct msg *cmsg, *nmsg; /* current and next message (request) */
    uint64_t id;
    err_t err;

    ASSERT(conn->client && !conn->proxy);
    ASSERT(msg->request && req_error(conn, msg));
    ASSERT(msg->owner == conn);

    id = msg->frag_id;
    if (id != 0) {
        /* walk the sibling fragments that follow msg in the client queue */
        for (err = 0, cmsg = TAILQ_NEXT(msg, c_tqe);
             cmsg != NULL && cmsg->frag_id == id;
             cmsg = nmsg) {
            nmsg = TAILQ_NEXT(cmsg, c_tqe);

            /* dequeue request (error fragment) from client outq */
            conn->dequeue_outq(ctx, conn, cmsg);
            if (err == 0 && cmsg->err != 0) {
                err = cmsg->err;
            }

            req_put(cmsg);
        }
    } else {
        err = msg->err;
    }

    pmsg = msg->peer;
    if (pmsg != NULL) {
        ASSERT(!pmsg->request && pmsg->peer == msg);
        msg->peer = NULL;
        pmsg->peer = NULL;
        rsp_put(pmsg);
    }

#if 1 //shenzheng 2014-12-4 common
    /* attention: the new error macros defined by this fork are negative
     * numbers; non-negative err values use the stock error path */
    if (err >= 0)
    {
#endif
    return msg_get_error(conn->redis, err);
#if 1 //shenzheng 2014-12-4 common
    }
    else
    {
        return msg_get_error_other(conn->redis, err);
    }
#endif
}
/*
 * Bind callback for the adb gadget function: claim the two bulk
 * endpoints, pre-allocate the rx/tx request pools, and register the
 * /dev misc nodes. On allocation failure everything is torn down via
 * adb_unbind().
 */
static void adb_bind(struct usb_endpoint **ept, void *_ctxt)
{
	struct adb_context *ctxt = _ctxt;
	struct usb_request *req;
	int n;

	/* ept[0] is the OUT (host->device) endpoint, ept[1] the IN one */
	ctxt->out = ept[0];
	ctxt->in = ept[1];

	printk(KERN_INFO "adb_bind() %p, %p\n", ctxt->out, ctxt->in);

	for (n = 0; n < RX_REQ_MAX; n++) {
		req = usb_ept_alloc_req(ctxt->out, 4096);
		if (req == 0) goto fail;
		req->context = ctxt;
		req->complete = adb_complete_out;
		req_put(ctxt, &ctxt->rx_idle, req);
	}

	for (n = 0; n < TX_REQ_MAX; n++) {
		req = usb_ept_alloc_req(ctxt->in, 4096);
		if (req == 0) goto fail;
		req->context = ctxt;
		req->complete = adb_complete_in;
		req_put(ctxt, &ctxt->tx_idle, req);
	}

	printk(KERN_INFO
	       "adb_bind() allocated %d rx and %d tx requests\n",
	       RX_REQ_MAX, TX_REQ_MAX);

	/* NOTE(review): misc_register() can fail but the return values are
	 * ignored here — confirm whether failure is tolerable on this path */
	misc_register(&adb_device);
	misc_register(&adb_enable_device);
	return;

fail:
	printk(KERN_ERR "adb_bind() could not allocate requests\n");
	adb_unbind(ctxt);
}
/*
 * Decide whether a response read from a server connection should be
 * consumed here rather than forwarded. Returns true when the response
 * was filtered (and released), false when it should be forwarded.
 */
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;

    ASSERT(!conn->client && !conn->proxy);

    /* a connection that just issued SELECT swallows the first response
     * (the SELECT reply) instead of handing it to a client request */
    if (conn->is_Select_Msg) {
        conn->is_Select_Msg = 0;
        rsp_put(msg);
        log_debug(LOG_VERB," select success rsp %"PRIu64" len %"PRIu32" on s %d ",
                  msg->id, msg->mlen, conn->sd);
        //ignore first response
        return true;
    }

    if (msg_empty(msg)) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
                  conn->sd);
        rsp_put(msg);
        return true;
    }

    /* a response with no pending request is a protocol violation; drop
     * it and flag the connection as errored */
    pmsg = TAILQ_FIRST(&conn->omsg_q);
    if (pmsg == NULL) {
        log_error("filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
                  msg->id, msg->mlen, conn->sd);
        rsp_put(msg);
        errno = EINVAL;
        conn->err = errno;
        return true;
    }

    ASSERT(pmsg->peer == NULL);
    ASSERT(pmsg->request && !pmsg->done);

    /* swallowed requests never propagate a response to the client */
    if (pmsg->swallow) {
        conn->dequeue_outq(ctx, conn, pmsg);
        pmsg->done = 1;

        log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req "
                  "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
                  conn->sd);

        rsp_put(msg);
        req_put(pmsg);
        return true;
    }

    return false;
}
/*
 * Decide whether a request just parsed on a client connection should be
 * consumed locally instead of forwarded. Returns true when the request
 * was filtered, false when normal forwarding should continue.
 */
static bool
req_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
    ASSERT(conn->client && !conn->proxy);

    /* zero-length requests carry nothing to forward; drop them */
    if (msg_empty(msg)) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_VERB, "filter empty req %"PRIu64" from c %d", msg->id,
                  conn->sd);
        req_put(msg);
        return true;
    }

    /*
     * Handle "quit\r\n", which is the protocol way of doing a
     * passive close
     */
    if (msg->quit) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_INFO, "filter quit req %"PRIu64" from c %d", msg->id,
                  conn->sd);
        conn->eof = 1;
        conn->recv_ready = 0;
        req_put(msg);
        return true;
    }

    /*
     * if this conn is not authenticated, we will mark it as noforward,
     * and handle it in the redis_reply handler.
     */
    if (conn->need_auth) {
        msg->noforward = 1;
    }

    return false;
}
/*
 * Filter a response read from a server connection. Returns true when
 * the response was consumed here (empty, stray, noreply, or swallowed),
 * false when it should be forwarded to the owning client.
 */
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;

    ASSERT(!conn->client && !conn->proxy);

    if (msg_empty(msg)) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
                  conn->sd);
        rsp_put(msg);
        return true;
    }

    pmsg = TAILQ_FIRST(&conn->omsg_q);
    if (pmsg == NULL) {
        log_debug(LOG_VERB, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
                  msg->id, msg->mlen, conn->sd);
        rsp_put(msg);
        return true;
    }

    /* noreply requests get their response dropped along with the request.
     * NOTE(review): pmsg is a request, yet it is released with rsp_put()
     * here while the swallow path below uses req_put() — confirm this is
     * intentional and not a req/rsp pool mixup */
    if (pmsg->noreply) {
        conn->dequeue_outq(ctx, conn, pmsg);
        rsp_put(pmsg);
        rsp_put(msg);
        return true;
    }

    ASSERT(pmsg->peer == NULL);
    ASSERT(pmsg->request && !pmsg->done);

    if (pmsg->swallow) {
        conn->dequeue_outq(ctx, conn, pmsg);
        pmsg->done = 1;

        if (log_loggable(LOG_DEBUG)) {
            log_debug(LOG_DEBUG, "swallow rsp %"PRIu64" len %"PRIu32" of req "
                      "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
                      conn->sd);
        }

        rsp_put(msg);
        req_put(pmsg);
        return true;
    }

    return false;
}
static void dnode_rsp_swallow(struct context *ctx, struct conn *peer_conn, struct msg *req, struct msg *rsp) { peer_conn->dequeue_outq(ctx, peer_conn, req); req->done = 1; log_debug(LOG_VERB, "conn %p swallow %p", peer_conn, req); if (rsp) { log_debug(LOG_INFO, "dyn: swallow rsp %"PRIu64" len %"PRIu32" of req " "%"PRIu64" on s %d", rsp->id, rsp->mlen, req->id, peer_conn->sd); dnode_rsp_put(rsp); } req_put(req); }
static void mtp_in_complete(struct usb_ep *ep, struct usb_request *req) { mtp_debug("status is %d %p %d\n", req->status, req, req->actual); if (req->status == -ECONNRESET) usb_ep_fifo_flush(ep); if (req->status != 0) { g_usb_mtp_context.error = 1; mtp_err("status is %d %p len=%d\n", req->status, req, req->actual); } req_put(&g_usb_mtp_context.tx_reqs, req); wake_up(&g_usb_mtp_context.tx_wq); }
//发送完成 void rsp_send_done(struct context *ctx, struct conn *conn, struct msg *msg) { struct msg *pmsg; /* peer message (request) */ ASSERT(conn->client && !conn->proxy); ASSERT(conn->smsg == NULL); log_debug(LOG_VVERB, "send done rsp %"PRIu64" on c %d", msg->id, conn->sd); pmsg = msg->peer; ASSERT(!msg->request && pmsg->request); ASSERT(pmsg->peer == msg); ASSERT(pmsg->done && !pmsg->swallow); /* dequeue request from client outq */ conn->dequeue_outq(ctx, conn, pmsg); req_put(pmsg); }
static void start_out_receive(void) { struct usb_request *req; int ret; /* if we have idle read requests, get them queued */ while ((req = req_get(&g_usb_mtp_context.rx_reqs))) { req->length = BULK_BUFFER_SIZE; ret = usb_ep_queue(g_usb_mtp_context.bulk_out, req, GFP_ATOMIC); if (ret < 0) { mtp_err("error %d\n", ret); g_usb_mtp_context.error = 1; req_put(&g_usb_mtp_context.rx_reqs, req); } } }
/*
 * Filter a response read from a server connection. Unlike the plain
 * variant, this one links the response to its request before deciding,
 * and gives swallowed requests a pre_swallow() hook.
 * Returns true when the response was consumed here.
 */
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;

    ASSERT(!conn->client && !conn->proxy);

    if (msg_empty(msg)) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
                  conn->sd);
        rsp_put(msg);
        return true;
    }

    pmsg = TAILQ_FIRST(&conn->omsg_q);
    if (pmsg == NULL) {
        log_debug(LOG_ERR, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
                  msg->id, msg->mlen, conn->sd);
        rsp_put(msg);
        return true;
    }
    ASSERT(pmsg->peer == NULL);
    ASSERT(pmsg->request && !pmsg->done);

    /* establish msg <-> pmsg (response <-> request) link */
    msg->peer = pmsg;
    pmsg->peer = msg;

    if (pmsg->swallow) {
        if (pmsg->pre_swallow != NULL) {
            pmsg->pre_swallow(ctx, conn, msg);
        }

        conn->dequeue_outq(ctx, conn, pmsg);
        pmsg->done = 1;

        log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req "
                  "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
                  conn->sd);

        /* no rsp_put(msg) here: msg is already linked as pmsg's peer, so
         * req_put(pmsg) presumably releases it too — verify in req_put */
        req_put(pmsg);
        return true;
    }

    return false;
}
/*
 * Forward a client request to a server connection chosen by the
 * message's routing function, keyed on the request's first key.
 * The request is queued on the client outq first (when a response is
 * expected) so the reply can be matched back up later.
 */
static void
req_forward(struct context *ctx, struct conn *c_conn, struct msg *msg)
{
    rstatus_t status;
    struct conn *s_conn;
    struct server_pool *pool;
    uint8_t *key;
    uint32_t keylen;
    struct keypos *kpos;

    ASSERT(c_conn->client && !c_conn->proxy);

    /* enqueue message (request) into client outq, if response is expected */
    if (!msg->noreply) {
        c_conn->enqueue_outq(ctx, c_conn, msg);
    }

    pool = c_conn->owner;

    ASSERT(array_n(msg->keys) > 0);
    kpos = array_get(msg->keys, 0);
    key = kpos->start;
    keylen = (uint32_t)(kpos->end - kpos->start);

    s_conn = msg->routing(ctx, pool, msg, key, keylen);
    if (s_conn == NULL) {
        req_forward_error(ctx, c_conn, msg);
        return;
    }
    ASSERT(!s_conn->client && !s_conn->proxy);

    status = req_enqueue(ctx, s_conn, c_conn, msg);
    if (status != NC_OK) {
        /* NOTE(review): msg may still be on the client outq at this point
         * (enqueued above when !noreply) yet is released without a
         * matching dequeue — confirm req_put handles that */
        req_put(msg);
        return;
    }

    req_forward_stats(ctx, s_conn->owner, msg);

    log_debug(LOG_VERB, "forward from c %d to s %d req %"PRIu64" len %"PRIu32
              " type %d with key '%.*s'", c_conn->sd, s_conn->sd, msg->id,
              msg->mlen, msg->type, keylen, key);

    return;
}
/*
 * Build an error response for a failed request on a client (or dnode
 * peer client) connection. For fragmented requests all trailing sibling
 * fragments are dequeued and released, keeping the first non-zero
 * fragment error; any selected response already linked is released.
 */
static struct msg *
rsp_make_error(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;        /* peer message (response) */
    struct msg *cmsg, *nmsg; /* current and next message (request) */
    uint64_t id;
    err_t err;

    ASSERT((conn->type == CONN_CLIENT) ||
           (conn->type == CONN_DNODE_PEER_CLIENT));
    ASSERT(msg->request && req_error(conn, msg));
    ASSERT(msg->owner == conn);

    id = msg->frag_id;
    if (id != 0) {
        /* release every later fragment with the same frag_id */
        for (err = 0, cmsg = TAILQ_NEXT(msg, c_tqe);
             cmsg != NULL && cmsg->frag_id == id;
             cmsg = nmsg) {
            nmsg = TAILQ_NEXT(cmsg, c_tqe);

            /* dequeue request (error fragment) from client outq */
            conn_dequeue_outq(ctx, conn, cmsg);
            if (err == 0 && cmsg->err != 0) {
                err = cmsg->err;
            }

            req_put(cmsg);
        }
    } else {
        err = msg->err;
    }

    pmsg = msg->selected_rsp;
    if (pmsg != NULL) {
        ASSERT(!pmsg->request && pmsg->peer == msg);
        msg->selected_rsp = NULL;
        pmsg->peer = NULL;
        rsp_put(pmsg);
    }

    return msg_get_error(conn, msg->dyn_error, err);
}
/*
 * Build an error response for a failed client request: collapse any
 * sibling fragments into one error, drop the linked peer response, and
 * return a freshly built error message.
 */
static struct msg *
rsp_make_error(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;        /* peer message (response) */
    struct msg *cmsg, *nmsg; /* current and next message (request) */
    uint64_t id;
    err_t err;

    ASSERT(conn->client && !conn->proxy);
    ASSERT(msg->request && req_error(conn, msg));
    ASSERT(msg->owner == conn);

    id = msg->frag_id;
    /* discard every request fragment belonging to the same frag_id */
    if (id != 0) {
        for (err = 0, cmsg = TAILQ_NEXT(msg, c_tqe);
             cmsg != NULL && cmsg->frag_id == id;
             cmsg = nmsg) {
            nmsg = TAILQ_NEXT(cmsg, c_tqe);

            /* dequeue request (error fragment) from client outq */
            conn->dequeue_outq(ctx, conn, cmsg);
            if (err == 0 && cmsg->err != 0) {
                err = cmsg->err;
            }

            req_put(cmsg);
        }
    } else {
        err = msg->err;
    }

    pmsg = msg->peer;
    if (pmsg != NULL) {
        ASSERT(!pmsg->request && pmsg->peer == msg);
        msg->peer = NULL;
        pmsg->peer = NULL;
        rsp_put(pmsg);
    }

    return msg_get_error(conn->redis, err);
}
/*
 * Filter a response read from a dnode peer connection. Returns true
 * when the response was consumed here (empty, stray, or matched to a
 * swallowed request), false when it should be forwarded.
 */
static bool
dnode_rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;

    ASSERT(!conn->dnode_client && !conn->dnode_server);

    if (msg_empty(msg)) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_VERB, "dyn: filter empty rsp %"PRIu64" on s %d", msg->id,
                  conn->sd);
        dnode_rsp_put(msg);
        return true;
    }

    /* a response with no request waiting on the outq is stray; drop it */
    pmsg = TAILQ_FIRST(&conn->omsg_q);
    if (pmsg == NULL) {
        log_debug(LOG_INFO,
                  "dyn: filter stray rsp %"PRIu64" len %"PRIu32" on s %d noreply %d",
                  msg->id, msg->mlen, conn->sd, msg->noreply);
        dnode_rsp_put(msg);
        return true;
    }
    ASSERT(pmsg->peer == NULL);
    ASSERT(pmsg->request && !pmsg->done);

    if (pmsg->swallow) {
        conn->dequeue_outq(ctx, conn, pmsg);
        pmsg->done = 1;

        log_debug(LOG_INFO, "dyn: swallow rsp %"PRIu64" len %"PRIu32" of req "
                  "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
                  conn->sd);

        dnode_rsp_put(msg);
        req_put(pmsg);
        return true;
    }

    return false;
}
/*
 * Close a server connection: for every in-flight request either swallow
 * it or schedule an error reply on its owning client, discard any
 * partially-read response, record the failure against the server, and
 * finally close the socket and release the connection object.
 */
void
server_close(struct context *ctx, struct conn *conn)
{
    rstatus_t status;
    struct msg *msg, *nmsg; /* current and next message */
    struct conn *c_conn;    /* peer client connection */

    ASSERT(!conn->client && !conn->proxy);

    server_close_stats(ctx, conn->owner, conn->err, conn->eof,
                       conn->connected);

    /* socket never opened: just account the failure and free */
    if (conn->sd < 0) {
        server_failure(ctx, conn->owner);
        conn->unref(conn);
        conn_put(conn);
        return;
    }

    /* requests queued but not yet sent to the server */
    for (msg = TAILQ_FIRST(&conn->imsg_q); msg != NULL; msg = nmsg) {
        nmsg = TAILQ_NEXT(msg, s_tqe);

        /* dequeue the message (request) from server inq */
        conn->dequeue_inq(ctx, conn, msg);

        /*
         * Don't send any error response, if
         * 1. request is tagged as noreply or,
         * 2. client has already closed its connection
         */
        if (msg->swallow || msg->noreply) {
            log_debug(LOG_INFO, "close s %d swallow req %"PRIu64" len %"PRIu32
                      " type %d", conn->sd, msg->id, msg->mlen, msg->type);
            req_put(msg);
        } else {
            c_conn = msg->owner;
            //ASSERT(c_conn->client && !c_conn->proxy);

            /* mark the request errored; the client event loop builds the
             * actual error response when it next writes */
            msg->done = 1;
            msg->error = 1;
            msg->err = conn->err;
            msg->dyn_error = STORAGE_CONNECTION_REFUSE;

            if (req_done(c_conn, TAILQ_FIRST(&c_conn->omsg_q))) {
                event_add_out(ctx->evb, msg->owner);
            }

            log_debug(LOG_INFO, "close s %d schedule error for req %"PRIu64" "
                      "len %"PRIu32" type %d from c %d%c %s", conn->sd,
                      msg->id, msg->mlen, msg->type, c_conn->sd,
                      conn->err ? ':' : ' ',
                      conn->err ? strerror(conn->err): " ");
        }
    }
    ASSERT(TAILQ_EMPTY(&conn->imsg_q));

    /* requests already sent to the server, awaiting a response */
    for (msg = TAILQ_FIRST(&conn->omsg_q); msg != NULL; msg = nmsg) {
        nmsg = TAILQ_NEXT(msg, s_tqe);

        /* dequeue the message (request) from server outq */
        conn->dequeue_outq(ctx, conn, msg);

        if (msg->swallow) {
            log_debug(LOG_INFO, "close s %d swallow req %"PRIu64" len %"PRIu32
                      " type %d", conn->sd, msg->id, msg->mlen, msg->type);
            req_put(msg);
        } else {
            c_conn = msg->owner;
            //ASSERT(c_conn->client && !c_conn->proxy);

            msg->done = 1;
            msg->error = 1;
            msg->err = conn->err;

            if (req_done(c_conn, TAILQ_FIRST(&c_conn->omsg_q))) {
                event_add_out(ctx->evb, msg->owner);
            }

            log_debug(LOG_INFO, "close s %d schedule error for req %"PRIu64" "
                      "len %"PRIu32" type %d from c %d%c %s", conn->sd,
                      msg->id, msg->mlen, msg->type, c_conn->sd,
                      conn->err ? ':' : ' ',
                      conn->err ? strerror(conn->err): " ");
        }
    }
    ASSERT(TAILQ_EMPTY(&conn->omsg_q));

    /* discard any response being parsed when the connection died */
    msg = conn->rmsg;
    if (msg != NULL) {
        conn->rmsg = NULL;

        ASSERT(!msg->request);
        ASSERT(msg->peer == NULL);

        rsp_put(msg);

        log_debug(LOG_INFO, "close s %d discarding rsp %"PRIu64" len %"PRIu32" "
                  "in error", conn->sd, msg->id, msg->mlen);
    }

    ASSERT(conn->smsg == NULL);

    server_failure(ctx, conn->owner);

    conn->unref(conn);

    status = close(conn->sd);
    if (status < 0) {
        log_error("close s %d failed, ignored: %s", conn->sd, strerror(errno));
    }
    conn->sd = -1;

    conn_put(conn);
}
/*
 * Claim the bulk in/out endpoints for the accessory function and
 * pre-allocate the transfer requests: TX requests are pooled on
 * dev->tx_idle, RX requests are kept in the fixed dev->rx_req[] array.
 * Returns 0 on success, -ENODEV / -1 on failure (all allocations are
 * rolled back on the failure path).
 */
static int __init create_bulk_endpoints(struct acc_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;

	/* FIXME: why we need to apply out_ep twice? */
#if 0
	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;
#endif

	/* now allocate requests for our endpoints */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = acc_complete_in;
		req_put(dev, &dev->tx_idle, req);
	}
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = acc_complete_out;
		dev->rx_req[i] = req;
	}

	return 0;

fail:
	printk(KERN_ERR "acc_bind() could not allocate requests\n");
	/* free everything allocated so far; acc_request_free is assumed to
	 * tolerate the NULL rx_req slots not yet filled — TODO confirm */
	while ((req = req_get(dev, &dev->tx_idle)))
		acc_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		acc_request_free(dev->rx_req[i], dev->ep_out);
	return -1;
}
/*
 * Filter a response read from a server connection. A stray response
 * (no pending request) closes the server connection entirely — see the
 * inline rationale. Returns true when the response was consumed here.
 */
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct msg *pmsg;

    ASSERT(!conn->client && !conn->proxy);

    if (msg_empty(msg)) {
        ASSERT(conn->rmsg == NULL);
        log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
                  conn->sd);
        rsp_put(msg);
        return true;
    }

    pmsg = TAILQ_FIRST(&conn->omsg_q);
    if (pmsg == NULL) {
        log_debug(LOG_ERR, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
                  msg->id, msg->mlen, conn->sd);
        rsp_put(msg);

        /*
         * Memcached server can respond with an error response before it has
         * received the entire request. This is most commonly seen for set
         * requests that exceed item_size_max. IMO, this behavior of memcached
         * is incorrect. The right behavior for update requests that are over
         * item_size_max would be to either:
         * - close the connection Or,
         * - read the entire item_size_max data and then send CLIENT_ERROR
         *
         * We handle this stray packet scenario in nutcracker by closing the
         * server connection which would end up sending SERVER_ERROR to all
         * clients that have requests pending on this server connection. The
         * fix is aggressive, but not doing so would lead to clients getting
         * out of sync with the server and as a result clients end up getting
         * responses that don't correspond to the right request.
         *
         * See: https://github.com/twitter/twemproxy/issues/149
         */
        conn->err = EINVAL;
        conn->done = 1;
        return true;
    }
    ASSERT(pmsg->peer == NULL);
    ASSERT(pmsg->request && !pmsg->done);

    if (pmsg->swallow) {
        /* protocol-specific hook runs before the pair is torn down */
        conn->swallow_msg(conn, pmsg, msg);

        conn->dequeue_outq(ctx, conn, pmsg);
        pmsg->done = 1;

        log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req "
                  "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
                  conn->sd);

        rsp_put(msg);
        req_put(pmsg);
        return true;
    }

    return false;
}
/*
 * Read handler for the adb character device: keep the OUT endpoint fed
 * with idle requests, copy completed data to userspace, and block until
 * data arrives. Serialized by read_excl; blocks until the function is
 * online. Returns bytes read or a negative errno.
 *
 * Note the goto from the completion path back into the inner queueing
 * loop (requeue_req): a zero-length completion is re-armed immediately
 * rather than becoming the current read request.
 */
static ssize_t adb_read(struct file *fp, char __user *buf,
			size_t count, loff_t *pos)
{
	struct adb_context *ctxt = &_context;
	struct usb_request *req;
	int r = count, xfer;
	int ret;
	unsigned MaxPacketSize;

	DBG("adb_read(%d)\n", count);

	if (_lock(&ctxt->read_excl))
		return -EBUSY;

	/* we will block until we're online */
	while (!(ctxt->online || ctxt->error)) {
		DBG("adb_read: waiting for online state\n");
		ret = wait_event_interruptible(ctxt->read_wq,
				(ctxt->online || ctxt->error));
		if (ret < 0) {
			_unlock(&ctxt->read_excl);
			return ret;
		}
	}

	/* cap per-request transfer size at 512 bytes */
	MaxPacketSize = usb_ept_get_max_packet(ctxt->out);
	if (MaxPacketSize > 512)
		MaxPacketSize = 512;

	while (count > 0) {
		if (ctxt->error) {
			r = -EIO;
			break;
		}

		/* if we have idle read requests, get them queued */
		while ((req = req_get(ctxt, &ctxt->rx_idle))) {
requeue_req:
			req->length = MaxPacketSize;
			ret = usb_ept_queue_xfer(ctxt->out, req);
			if (ret < 0) {
				DBG("adb_read: failed to queue req %p (%d)\n",
				    req, ret);
				r = -EIO;
				ctxt->error = 1;
				req_put(ctxt, &ctxt->rx_idle, req);
				goto fail;
			} else {
				DBG("%s(): rx %p queue\n", __func__, req);
			}
		}

		/* if we have data pending, give it to userspace */
		if (ctxt->read_count > 0) {
			xfer = (ctxt->read_count < count) ?
					ctxt->read_count : count;
			if (copy_to_user(buf, ctxt->read_buf, xfer)) {
				r = -EFAULT;
				break;
			}
			ctxt->read_buf += xfer;
			ctxt->read_count -= xfer;
			buf += xfer;
			count -= xfer;

			/* if we've emptied the buffer, release the request */
			if (ctxt->read_count == 0) {
				req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
				ctxt->read_req = 0;
			}
			continue;
		}

		/* wait for a request to complete */
		req = 0;
		ret = wait_event_interruptible(ctxt->read_wq,
			((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));

		if (req != 0) {
			/* if we got a 0-len one we need to put it back into
			** service.  if we made it the current read req we'd
			** be stuck forever
			*/
			if (req->actual == 0)
				goto requeue_req;

			ctxt->read_req = req;
			ctxt->read_count = req->actual;
			ctxt->read_buf = req->buf;
			DBG("%s(): rx %p %d\n", __func__, req, req->actual);
		}

		if (ret < 0) {
			r = ret;
			break;
		}
	}

fail:
	_unlock(&ctxt->read_excl);
	return r;
}
/* Release a dnode request; thin public wrapper around req_put(). */
void
dnode_req_put(struct msg *msg)
{
    req_put(msg);
}
/*
 * Bind the MTP gadget function: claim the interface id, auto-configure
 * the bulk in/out and interrupt-in endpoints, pre-allocate the rx/tx
 * request pools, register the misc device and the /proc/mtpctl entry.
 * Returns 0 on success; on any failure everything allocated so far is
 * torn down via mtp_function_unbind() and -ENOTSUPP is returned.
 */
static int mtp_function_bind(struct usb_configuration *c,
				struct usb_function *f)
{
	int n, rc, id;
	struct usb_ep *ep;
	struct usb_request *req;
	struct proc_dir_entry *mtp_proc = NULL;

	spin_lock_init(&g_usb_mtp_context.lock);

	g_usb_mtp_context.cdev = c->cdev;
	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	intf_desc.bInterfaceNumber = id;

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(g_usb_mtp_context.cdev->gadget,
			       &fs_bulk_in_desc);
	if (!ep) {
		mtp_err("auto-configure hs_bulk_in_desc error\n");
		goto autoconf_fail;
	}
	ep->driver_data = &g_usb_mtp_context;
	g_usb_mtp_context.bulk_in = ep;

	ep = usb_ep_autoconfig(g_usb_mtp_context.cdev->gadget,
			       &fs_bulk_out_desc);
	if (!ep) {
		mtp_err("auto-configure hs_bulk_out_desc error\n");
		goto autoconf_fail;
	}
	ep->driver_data = &g_usb_mtp_context;
	g_usb_mtp_context.bulk_out = ep;

	ep = usb_ep_autoconfig(g_usb_mtp_context.cdev->gadget,
			       &fs_intr_in_desc);
	if (!ep) {
		mtp_err("auto-configure hs_intr_in_desc error\n");
		goto autoconf_fail;
	}
	ep->driver_data = &g_usb_mtp_context;
	g_usb_mtp_context.intr_in = ep;

	if (gadget_is_dualspeed(g_usb_mtp_context.cdev->gadget)) {
		/* Assume endpoint addresses are the same for both speeds */
		hs_bulk_in_desc.bEndpointAddress =
		    fs_bulk_in_desc.bEndpointAddress;
		hs_bulk_out_desc.bEndpointAddress =
		    fs_bulk_out_desc.bEndpointAddress;
		hs_intr_in_desc.bEndpointAddress =
		    fs_intr_in_desc.bEndpointAddress;
	}

	/* NOTE(review): rc = -ENOMEM is a dead store — the autoconf_fail
	 * label overwrites rc with -ENOTSUPP unconditionally; confirm the
	 * intended error code for allocation failures */
	rc = -ENOMEM;

	for (n = 0; n < MAX_BULK_RX_REQ_NUM; n++) {
		req = req_new(g_usb_mtp_context.bulk_out, BULK_BUFFER_SIZE);
		if (!req)
			goto autoconf_fail;
		pending_reqs[n] = req;
		req->complete = mtp_out_complete;
		req_put(&g_usb_mtp_context.rx_reqs, req);
	}
	for (n = 0; n < MAX_BULK_TX_REQ_NUM; n++) {
		req = req_new(g_usb_mtp_context.bulk_in, BULK_BUFFER_SIZE);
		if (!req)
			goto autoconf_fail;
		req->complete = mtp_in_complete;
		req_put(&g_usb_mtp_context.tx_reqs, req);
	}

	for (n = 0; n < MAX_CTL_RX_REQ_NUM; n++)
		ctl_req_put(&g_usb_mtp_context.ctl_rx_reqs, &ctl_reqs[n]);

	/* single interrupt-in request used for MTP events */
	g_usb_mtp_context.int_tx_req =
		req_new(g_usb_mtp_context.intr_in, BULK_BUFFER_SIZE);
	if (!g_usb_mtp_context.int_tx_req)
		goto autoconf_fail;
	g_usb_mtp_context.intr_in_busy = 0;
	g_usb_mtp_context.int_tx_req->complete = mtp_int_complete;

	g_usb_mtp_context.ctl_tx_req =
		req_new(g_usb_mtp_context.cdev->gadget->ep0, 512);
	if (!g_usb_mtp_context.ctl_tx_req)
		goto autoconf_fail;

	misc_register(&mtp_device);

	/* NOTE(review): world-writable proc entry (0666) — confirm this is
	 * the intended permission for the MTP control interface */
	mtp_proc = create_proc_entry("mtpctl", 0666, 0);
	if (!mtp_proc) {
		mtp_err("creating /proc/mtpctl failed\n");
		goto autoconf_fail;
	}
	mtp_proc->proc_fops = &mtp_ctl_fops;

	return 0;

autoconf_fail:
	rc = -ENOTSUPP;
	mtp_function_unbind(c, f);
	return rc;
}
/*
 * ioctl handler for the MTP device node.
 *
 * MTP_IOC_EVENT        - send an MTP event over the interrupt-in endpoint
 * MTP_IOC_SEND_ZLP     - queue a zero-length packet on bulk-in
 * MTP_IOC_GET_EP_SIZE_IN - report the bulk-in buffer size to userspace
 * MTP_IOC_CANCEL_IO    - abort pending bulk transfers and wake waiters
 * MTP_IOC_DEVICE_RESET - abort bulk and control transfers, wake waiters
 *
 * Returns 0 on success or -EINVAL/-EBUSY on failure; fails with
 * -EINVAL outright while the function is not online.
 */
static int mtp_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	int len, clen, count, n;
	struct usb_request *req;
	struct mtp_event_data event;

	if (!g_usb_mtp_context.online)
		return -EINVAL;

	switch (cmd) {
	case MTP_IOC_EVENT:
		if (g_usb_mtp_context.intr_in_busy) {
			mtp_err("interrupt in request busy\n");
			return -EBUSY;
		}

		count = MIN(_IOC_SIZE(cmd), MTP_EVENT_SIZE);
		if (copy_from_user(event.data, (void *)arg, count))
			return -EINVAL;

		/* length is in little endian */
		memcpy(&len, event.data, sizeof(len));
		clen = le32_to_cpu(len);
		mtp_debug("len=%d cpu len=%d\n", len, clen);
		/* send event through interrupt in */
		req = g_usb_mtp_context.int_tx_req;
		if (!req)
			return -EINVAL;
		count = MIN(MTP_EVENT_SIZE, clen);
		memcpy(req->buf, event.data, count);
		req->length = count;
		req->zero = 0;
		g_usb_mtp_context.intr_in_busy = 1;
		if (usb_ep_queue(g_usb_mtp_context.intr_in, req, GFP_ATOMIC)) {
			g_usb_mtp_context.intr_in_busy = 0;
			return -EINVAL;
		}
		break;
	case MTP_IOC_SEND_ZLP:
		req = req_get(&g_usb_mtp_context.tx_reqs);
		if (!req)
			return -EINVAL;
		req->length = 0;
		req->zero = 0;
		if (usb_ep_queue(g_usb_mtp_context.bulk_in, req, GFP_ATOMIC)) {
			/* queueing failed: return the request to the pool */
			req_put(&g_usb_mtp_context.tx_reqs, req);
			return -EINVAL;
		}
		break;
	case MTP_IOC_GET_EP_SIZE_IN:
		/* get endpoint buffer size for bulk in */
		len = BULK_BUFFER_SIZE;
		if (copy_to_user((void *)arg, &len, sizeof(int)))
			return -EINVAL;
		break;
	case MTP_IOC_CANCEL_IO:
		mtp_debug("MTP_IOC_CANCEL_IO:\n");
		g_usb_mtp_context.cancel = 1;
		/* drop any data already received on pending rx requests */
		for (n = 0; n < MAX_BULK_RX_REQ_NUM; n++) {
			req = pending_reqs[n];
			if (req && req->actual) {
				mtp_err("n=%d %p %d\n", n, req, req->actual);
				req->actual = 0;
			}
		}
		/* we've cancelled the recv urb, start new one */
		mtp_debug("MTP_IOC_CANCEL_IO end:\n");
		wake_up(&g_usb_mtp_context.rx_wq);
		wake_up(&g_usb_mtp_context.tx_wq);
		break;
	case MTP_IOC_DEVICE_RESET:
		g_usb_mtp_context.cancel = 1;
		g_usb_mtp_context.ctl_cancel = 1;
		wake_up(&g_usb_mtp_context.rx_wq);
		wake_up(&g_usb_mtp_context.tx_wq);
		wake_up(&g_usb_mtp_context.ctl_rx_wq);
		wake_up(&g_usb_mtp_context.ctl_tx_wq);
		break;
	}
	return 0;
}
/*
 * Write handler for the MTP device node: copy userspace data into idle
 * tx requests (BULK_BUFFER_SIZE at a time) and queue them on bulk-in.
 * Blocks waiting for the function to be online and for a free request;
 * a pending cancel aborts with -EINVAL and clears the cancel flag.
 * Returns the byte count written or a negative errno.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
				 size_t count, loff_t *pos)
{
	struct usb_request *req;
	int rc = count, xfer;
	int ret;

	while (count > 0) {
		mtp_debug("count=%d\n", count);
		if (g_usb_mtp_context.error)
			return -EIO;

		/* get an idle tx request to use */
		ret = wait_event_interruptible(g_usb_mtp_context.tx_wq,
			(g_usb_mtp_context.online || g_usb_mtp_context.cancel));

		if (g_usb_mtp_context.cancel) {
			mtp_debug("cancel return in mtp_write at beginning\n");
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible return %d\n", ret);
			rc = ret;
			break;
		}

		req = 0;
		mtp_debug("get tx req\n");
		ret = wait_event_interruptible(g_usb_mtp_context.tx_wq,
			((req = req_get(&g_usb_mtp_context.tx_reqs))
			 || g_usb_mtp_context.cancel));
		mtp_debug("got tx req\n");

		if (g_usb_mtp_context.cancel) {
			mtp_debug("cancel return in mtp_write get req\n");
			/* don't leak the request we may have just taken */
			if (req != 0)
				req_put(&g_usb_mtp_context.tx_reqs, req);
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible return(2) %d\n", ret);
			rc = ret;
			break;
		}

		if (req != 0) {
			if (count > BULK_BUFFER_SIZE)
				xfer = BULK_BUFFER_SIZE;
			else
				xfer = count;

			if (copy_from_user(req->buf, buf, xfer)) {
				req_put(&g_usb_mtp_context.tx_reqs, req);
				rc = -EFAULT;
				break;
			}

			req->length = xfer;
			ret = usb_ep_queue(g_usb_mtp_context.bulk_in,
					   req, GFP_ATOMIC);
			if (ret < 0) {
				mtp_err("error %d\n", ret);
				g_usb_mtp_context.error = 1;
				req_put(&g_usb_mtp_context.tx_reqs, req);
				rc = ret;
				break;
			}

			buf += xfer;
			count -= xfer;
			mtp_debug("xfer=%d\n", xfer);
		}
	}
	mtp_debug("mtp_write returning %d\n", rc);
	return rc;
}
/*
 * Read handler for the MTP device node: keep bulk-out armed with idle
 * requests, copy completed data to userspace, block until data arrives.
 * A pending cancel aborts with -EINVAL and clears the flag. Note the
 * goto from the completion path back into the queueing loop
 * (requeue_req): zero-length completions are re-armed immediately.
 * Returns bytes read or a negative errno.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	struct usb_request *req = 0;
	int xfer, rc = count;
	int ret;

	while (count > 0) {
		mtp_debug("count=%d\n", count);
		if (g_usb_mtp_context.error)
			return -EIO;

		/* we will block until we're online */
		ret = wait_event_interruptible(g_usb_mtp_context.rx_wq,
			(g_usb_mtp_context.online || g_usb_mtp_context.cancel));
		if (g_usb_mtp_context.cancel) {
			mtp_debug("cancel return in mtp_read at beginning\n");
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible return %d\n", ret);
			rc = ret;
			break;
		}

		/* if we have idle read requests, get them queued */
		while (1) {
			req = req_get(&g_usb_mtp_context.rx_reqs);
			if (!req)
				break;
requeue_req:
			req->length = BULK_BUFFER_SIZE;
			mtp_debug("rx %p queue\n", req);
			ret = usb_ep_queue(g_usb_mtp_context.bulk_out,
					   req, GFP_ATOMIC);
			if (ret < 0) {
				mtp_err("queue error %d\n", ret);
				g_usb_mtp_context.error = 1;
				req_put(&g_usb_mtp_context.rx_reqs, req);
				return ret;
			}
		}

		/* if we have data pending, give it to userspace */
		if (g_usb_mtp_context.data_len > 0) {
			if (g_usb_mtp_context.data_len < count)
				xfer = g_usb_mtp_context.data_len;
			else
				xfer = count;

			if (copy_to_user(buf, g_usb_mtp_context.read_buf,
					 xfer)) {
				rc = -EFAULT;
				break;
			}
			g_usb_mtp_context.read_buf += xfer;
			g_usb_mtp_context.data_len -= xfer;
			buf += xfer;
			count -= xfer;
			mtp_debug("xfer=%d\n", xfer);

			/* if we've emptied the buffer, release the request */
			if (g_usb_mtp_context.data_len == 0) {
				req_put(&g_usb_mtp_context.rx_reqs,
					g_usb_mtp_context.cur_read_req);
				g_usb_mtp_context.cur_read_req = 0;
			}
			continue;
		}

		/* wait for a request to complete */
		req = 0;
		mtp_debug("wait req finish\n");
		ret = wait_event_interruptible(g_usb_mtp_context.rx_wq,
			((req = req_get(&g_usb_mtp_context.rx_done_reqs))
			 || g_usb_mtp_context.cancel));
		mtp_debug("req finished\n");
		if (g_usb_mtp_context.cancel) {
			if (req != 0)
				req_put(&g_usb_mtp_context.rx_reqs, req);
			mtp_debug("cancel return in mtp_read at complete\n");
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible(2) return %d\n", ret);
			rc = ret;
			break;
		}

		if (req != 0) {
			/* if we got a 0-len one we need to put it back into
			** service. if we made it the current read req we'd
			** be stuck forever */
			if (req->actual == 0)
				goto requeue_req;

			g_usb_mtp_context.cur_read_req = req;
			g_usb_mtp_context.data_len = req->actual;
			g_usb_mtp_context.read_buf = req->buf;
			mtp_debug("rx %p done actual=%d\n", req, req->actual);
		}
	}
	mtp_debug("mtp_read returning %d\n", rc);
	return rc;
}
/* dnode sends a response back to a peer */
/*
 * Fetch the next response to send on a dnode peer connection and, when
 * the connection is in dyn mode, prepend the dnode protocol header
 * (optionally encrypting the payload first). Returns the response, or
 * NULL when nothing is ready or an mbuf/encryption failure occurred.
 */
struct msg *
dnode_rsp_send_next(struct context *ctx, struct conn *conn)
{
    rstatus_t status;

    ASSERT(conn->dnode_client && !conn->dnode_server);
    struct msg *rsp = rsp_send_next(ctx, conn);

    if (rsp != NULL && conn->dyn_mode) {
        struct msg *pmsg = rsp->peer;

        //need to deal with multi-block later
        uint64_t msg_id = pmsg->dmsg->id;

        struct mbuf *header_buf = mbuf_get();
        if (header_buf == NULL) {
            loga("Unable to obtain an mbuf for header!");
            return NULL; //need to address error here properly
        }
        dmsg_type_t msg_type = DMSG_RES;
        //TODOs: need to set the outcoming conn to be secured too if the incoming conn is secured
        if (pmsg->owner->dnode_secured || conn->dnode_secured) {
            if (log_loggable(LOG_VVERB)) {
                log_debug(LOG_VVERB, "Encrypting response ...");
                /* NOTE(review): this logs the raw AES key material —
                 * a security hazard even at VVERB; confirm removal */
                loga("AES encryption key: %s\n",
                     base64_encode(conn->aes_key, AES_KEYLEN));
            }

            if (ENCRYPTION) {
                status = dyn_aes_encrypt_msg(rsp, conn->aes_key);
                if (status == DN_ERROR) {
                    loga("OOM to obtain an mbuf for encryption!");
                    mbuf_put(header_buf);
                    /* NOTE(review): rsp is a response but is released with
                     * req_put() here — confirm against rsp_put() usage */
                    req_put(rsp);
                    return NULL;
                }

                if (log_loggable(LOG_VVERB)) {
                    log_debug(LOG_VERB, "#encrypted bytes : %d", status);
                }

                dmsg_write(header_buf, msg_id, msg_type, conn, msg_length(rsp));
            } else {
                if (log_loggable(LOG_VVERB)) {
                    log_debug(LOG_VERB, "no encryption on the rsp payload");
                }
                dmsg_write(header_buf, msg_id, msg_type, conn, msg_length(rsp));
            }

        } else {
            //write dnode header
            log_info("sending dnode response with msg_id %u", msg_id);
            dmsg_write(header_buf, msg_id, msg_type, conn, msg_length(rsp));
        }

        mbuf_insert_head(&rsp->mhdr, header_buf);

        if (log_loggable(LOG_VVERB)) {
            log_hexdump(LOG_VVERB, header_buf->pos, mbuf_length(header_buf),
                        "resp dyn message - header: ");
            msg_dump(rsp);
        }
    }

    return rsp;
}
/* Description: link data from a peer connection to a client-facing connection * peer_conn: a peer connection * msg : msg with data from the peer connection after parsing */ static void dnode_rsp_forward_match(struct context *ctx, struct conn *peer_conn, struct msg *rsp) { rstatus_t status; struct msg *req; struct conn *c_conn; req = TAILQ_FIRST(&peer_conn->omsg_q); c_conn = req->owner; /* if client consistency is dc_one forward the response from only the local node. Since dyn_dnode_peer is always a remote node, drop the rsp */ if (req->consistency == DC_ONE) { if (req->swallow) { dnode_rsp_swallow(ctx, peer_conn, req, rsp); return; } log_warn("req %d:%d with DC_ONE consistency is not being swallowed"); } /* if client consistency is dc_quorum, forward the response from only the local region/DC. */ if ((req->consistency == DC_QUORUM) && !peer_conn->same_dc) { if (req->swallow) { dnode_rsp_swallow(ctx, peer_conn, req, rsp); return; } log_warn("req %d:%d with DC_QUORUM consistency is not being swallowed"); } log_debug(LOG_DEBUG, "DNODE RSP RECEIVED %c %d dmsg->id %u req %u:%u rsp %u:%u, ", peer_conn->dnode_client ? 'c' : (peer_conn->dnode_server ? 
's' : 'p'), peer_conn->sd, rsp->dmsg->id, req->id, req->parent_id, rsp->id, rsp->parent_id); ASSERT(req != NULL && req->peer == NULL); ASSERT(req->request && !req->done); if (log_loggable(LOG_VVERB)) { loga("Dumping content for response: "); msg_dump(rsp); loga("rsp id %d", rsp->id); loga("Dumping content for request:"); msg_dump(req); loga("req id %d", req->id); } peer_conn->dequeue_outq(ctx, peer_conn, req); req->done = 1; log_debug(LOG_VERB, "%p <-> %p", req, rsp); /* establish rsp <-> req (response <-> request) link */ req->peer = rsp; rsp->peer = req; rsp->pre_coalesce(rsp); ASSERT((c_conn->client && !c_conn->proxy) || (c_conn->dnode_client && !c_conn->dnode_server)); dnode_rsp_forward_stats(ctx, peer_conn->owner, rsp); if (TAILQ_FIRST(&c_conn->omsg_q) != NULL && dnode_req_done(c_conn, req)) { log_debug(LOG_INFO, "handle rsp %d:%d for req %d:%d conn %p", rsp->id, rsp->parent_id, req->id, req->parent_id, c_conn); // c_conn owns respnse now rstatus_t status = conn_handle_response(c_conn, req->parent_id ? req->parent_id : req->id, rsp); if (req->swallow) { log_debug(LOG_INFO, "swallow request %d:%d", req->id, req->parent_id); req_put(req); } } }
/*
 * Bind callback for the MTP tunnel function: claim the bulk endpoints,
 * pre-allocate the rx/tx request pools (either RX/TX_REQ_MAX small
 * requests, or one TXN_MAX request each when ALLOCATE_16K_BUFF is set),
 * register the misc devices and the sysfs status attribute. On failure
 * everything is torn down via mtp_tunnel_unbind().
 */
static void mtp_tunnel_bind(struct usb_endpoint **ept, void *_ctxt)
{
	struct mtp_tunnel_context *ctxt = _ctxt;
	struct usb_request *req;
	int ret;
#ifndef ALLOCATE_16K_BUFF
	int n;
#endif
	ctxt->registered = 0;
	ctxt->out = ept[0];
	ctxt->in = ept[1];

	printk(KERN_DEBUG "mtp_tunnel_bind() %p, %p\n", ctxt->out, ctxt->in);

#ifndef ALLOCATE_16K_BUFF
	for (n = 0; n < RX_REQ_MAX; n++)
#endif
	{
		req = usb_ept_alloc_req(ctxt->out, TXN_MAX);
		if (req == 0)
			goto fail;
		req->context = ctxt;
		req->complete = mtp_tunnel_complete_out;
		req_put(ctxt, &ctxt->rx_idle, req);
	}

#ifndef ALLOCATE_16K_BUFF
	for (n = 0; n < TX_REQ_MAX; n++)
#endif
	{
		req = usb_ept_alloc_req(ctxt->in, TXN_MAX);
		if (req == 0)
			goto fail;
		req->context = ctxt;
		req->complete = mtp_tunnel_complete_in;
		req_put(ctxt, &ctxt->tx_idle, req);
	}

#ifndef ALLOCATE_16K_BUFF
	printk(KERN_DEBUG
	       "mtp_tunnel_bind() allocated %d rx and %d tx requests\n",
	       RX_REQ_MAX, TX_REQ_MAX);
#else
	printk(KERN_DEBUG "%s(): allocated buffer: %d\n", __func__, TXN_MAX);
#endif

	misc_register(&mtp_tunnel_device);
	misc_register(&mtp_tunnel_enable_device);

	mtp_tunnel_dev.release = mtp_tunnel_dev_release;
	mtp_tunnel_dev.parent = &ctxt->pdev->dev;
	strcpy(mtp_tunnel_dev.bus_id, "interface");

	ret = device_register(&mtp_tunnel_dev);
	if (ret != 0) {
		printk(KERN_WARNING
		       "mtp_tunnel_dev failed to register device: %d\n", ret);
		goto fail_dev_register_fail;
	}

	ret = device_create_file(&mtp_tunnel_dev,
				 &dev_attr_mtp_tunnel_status);
	if (ret != 0) {
		printk(KERN_WARNING
		       "mtp_tunnel_dev device_create_file failed: %d\n", ret);
		device_unregister(&mtp_tunnel_dev);
		goto fail_dev_register_fail;
	}
	ctxt->registered = 1;
	return;

fail_dev_register_fail:
	printk(KERN_ERR "%s() could not allocate requests\n", __func__);

fail:
	printk(KERN_WARNING "mtp_tunnel_bind() could not allocate requests\n");
	mtp_tunnel_unbind(ctxt);
}