static void smd_try_to_send(struct diag_context *ctxt) { if (ctxt->ch) { int r = smd_read_avail(ctxt->ch); if (r > RXN_MAX) { printk(KERN_ERR "The SMD data is too large to send (%d) !!\n", r); // return; r = RXN_MAX; } if (r > 0) { struct diag_request *req = get_req(ctxt, &ctxt->rx_arm9_idle); if (!req) { printk(KERN_ERR "There is no enough request to ARM11!!\n"); return; } smd_read(ctxt->ch, req->buf, r); smd_xfer_count_func(r, data_set_rx); //req->length = r; //printk(KERN_ERR "ARM9 data to ARM11 %s\n", (char *)req->buf); req->actual = r; put_req(ctxt, &ctxt->rx_arm9_done, req); wake_up(&ctxt->read_arm9_wq); } } }
static void diag_configure(int configured, void *_ctxt) { struct diag_context *ctxt = _ctxt; DBG("diag_configure() %d\n", configured); if (configured) { ctxt->online = 1; /* if we have a stale request being read, recycle it */ ctxt->read_arm9_buf = 0; ctxt->read_arm9_count = 0; if (ctxt->read_arm9_req) { put_req(ctxt, &ctxt->rx_arm9_idle, ctxt->read_arm9_req); ctxt->read_arm9_req = 0; } } else { ctxt->online = 0; ctxt->error = 1; } /* readers may be blocked waiting for us to go online */ wake_up(&ctxt->read_arm9_wq); }
void* thread_producer() { req_t req_d; while(1) { // wait for a free buffer slot sthread_monitor_enter(mon); while (available_reqs == RING_SIZE) sthread_monitor_wait(mon); sthread_monitor_exit(mon); // create and clean request req_d = (req_t) malloc(sizeof(struct _req)); memset(req_d,0,sizeof(struct _req)); if ((req_d->reqsz = srv_recv_request(&(req_d->req),&(req_d->cliaddr),&(req_d->clilen))) == 0) continue; sthread_monitor_enter(mon); // send to buffer put_req(req_d); available_reqs++; sthread_monitor_signalall(mon); sthread_monitor_exit(mon); sthread_yield(); } }
/*
 * Walk the sibling chain starting at @e, recursing into children, and
 * pass the value of every <a href="..."> attribute to put_req().
 * Propagates the first non-zero status from put_req() or a recursive
 * call; returns APR_SUCCESS when the whole subtree has been visited.
 */
static apr_status_t find_href(apr_xml_elem *e, const char *orig_path,
                              parser_baton_t *ctx, apr_pool_t *pool)
{
    apr_status_t status;

    for (; e != NULL; e = e->next) {
        /* element named exactly "a": look for its href attribute */
        if (e->name[0] == 'a' && e->name[1] == '\0') {
            apr_xml_attr *attr;

            for (attr = e->attr; attr != NULL; attr = attr->next) {
                if (strcasecmp(attr->name, "href") == 0)
                    break;
            }
            if (attr != NULL) {
                status = put_req(attr->value, orig_path, ctx, pool);
                if (status)
                    return status;
            }
        }

        /* depth-first: descend before moving to the next sibling */
        if (e->first_child) {
            status = find_href(e->first_child, orig_path, ctx, pool);
            if (status)
                return status;
        }
    }

    return APR_SUCCESS;
}
static void diag_bind(void *_ctxt) { struct diag_context *ctxt = _ctxt; struct diag_request *req; int n; printk(KERN_DEBUG "diag_bind()\n"); for (n = 0; n < RX_REQ_MAX; n++) { req = kmalloc(sizeof(struct diag_request), GFP_KERNEL); if (req == 0) { pr_err("%s: kmalloc out of memory\n", __func__); goto fail; } req->buf = kmalloc(RXN_MAX, GFP_KERNEL); if (!req->buf) { pr_err("%s: kmalloc out of memory\n", __func__); kfree(req); goto fail; } req->context = ctxt; put_req(ctxt, &ctxt->rx_arm9_idle, req); } printk(KERN_DEBUG "diag_bind() allocated %d rx requests\n", RX_REQ_MAX); smd_xfer_count_func(0,data_set_clear); return; fail: printk(KERN_WARNING "diag_bind() could not allocate requests\n"); diag_unbind(ctxt); }
/*
 * Release handler for the diag-to-ARM9 device: take the function
 * offline, close the SMD channel, return any completed-but-unread
 * requests to the idle list, wake blocked readers, and drop the
 * open-exclusion lock.  Always succeeds.
 */
static int diag2arm9_release(struct inode *ip, struct file *fp)
{
	struct diag_context *ctxt = &_context;
	struct diag_request *req;

	DBG_OP("+%s\n", __func__);

	diag_configure(0, ctxt);
	msm_diag_smd_close();

	/* recycle unhandled rx reqs to user if any */
	for (;;) {
		req = get_req(ctxt, &ctxt->rx_arm9_done);
		if (!req)
			break;
		put_req(ctxt, &ctxt->rx_arm9_idle, req);
	}

	/* Release readers that might be blocked */
	wake_up(&ctxt->read_arm9_wq);
	_unlock(&ctxt->open_arm9_excl);

	DBG_OP("-%s\n", __func__);
	return 0;
}
/*
 * Read data forwarded from ARM9 into userspace.
 *
 * Loop: while the caller still wants bytes, first drain the request
 * currently held in ctxt->read_arm9_buf/count; when that is empty,
 * sleep until a completed request appears on rx_arm9_done (or the
 * error flag is raised) and adopt it as the new current buffer.
 * Zero-length requests are recycled immediately so they can never
 * become the "current" request and stall the reader forever.
 *
 * Returns the number of bytes copied, -EBUSY if another reader holds
 * the exclusion lock, -EFAULT on copy_to_user failure, or the negative
 * value from an interrupted wait.
 *
 * NOTE(review): if the wait is interrupted (ret < 0) after some bytes
 * were already copied, r is overwritten with the error and those bytes
 * are lost to the caller — confirm whether returning the partial count
 * is preferable here.
 */
static ssize_t diag2arm9_read(struct file *fp, char __user *buf,
			size_t count, loff_t *pos)
{
	struct diag_context *ctxt = &_context;
	struct diag_request *req;
	int r = 0, xfer;	/* r: bytes delivered so far; xfer: per-pass copy size */
	int ret;

	DBG("diag2arm9_read(%d)\n", count);

	/* single reader at a time */
	if (_lock(&ctxt->read_arm9_excl))
		return -EBUSY;

	while (count > 0) {
		/* if we have data pending, give it to userspace */
		if (ctxt->read_arm9_count > 0) {
			xfer = (ctxt->read_arm9_count < count) ?
				ctxt->read_arm9_count : count;
			if (copy_to_user(buf, ctxt->read_arm9_buf, xfer)) {
				DBG("diag: copy_to_user fail\n");
				r = -EFAULT;
				break;
			}
			/* advance the current-request cursor and the user buffer */
			ctxt->read_arm9_buf += xfer;
			ctxt->read_arm9_count -= xfer;
			buf += xfer;
			count -= xfer;
			r += xfer;

			/* if we've emptied the buffer, release the request */
			if (ctxt->read_arm9_count == 0) {
				put_req(ctxt, &ctxt->rx_arm9_idle,
					ctxt->read_arm9_req);
				ctxt->read_arm9_req = 0;
			}
			continue;
		}
		/* wait for a request to complete */
		req = 0;
		ret = wait_event_interruptible(ctxt->read_arm9_wq,
			((req = get_req(ctxt, &ctxt->rx_arm9_done)) ||
			 ctxt->error));
		if (req != 0) {
			/* if we got a 0-len one we need to put it back into
			** service.  if we made it the current read req we'd
			** be stuck forever
			*/
			if (req->actual == 0) {
				put_req(ctxt, &ctxt->rx_arm9_idle, req);
				continue;
			}
			ctxt->read_arm9_req = req;
			ctxt->read_arm9_count = req->actual;
			ctxt->read_arm9_buf = req->buf;
			/* clamp count so we finish after draining this request */
			if (ctxt->read_arm9_count < count)
				count = ctxt->read_arm9_count;
			DBG("rx %p %d\n", req, req->actual);
		}
		if (ret < 0) {
			DBG_OP("%s: ret < 0\n", __func__);
			r = ret;
			break;
		}
	}
	_unlock(&ctxt->read_arm9_excl);
	return r;
}
/*
 * Read data forwarded from ARM9 into userspace.
 *
 * Loop: while the caller still wants bytes, first drain the request
 * currently held in ctxt->read_arm9_buf/count; when that is empty,
 * sleep until a completed request appears on rx_arm9_done (or the
 * error flag is raised) and adopt it as the new current buffer.
 * Zero-length requests are recycled immediately so they can never
 * become the "current" request and stall the reader forever.
 *
 * Returns the number of bytes copied, -EBUSY if another reader holds
 * the exclusion lock, -EFAULT on copy_to_user failure, or the negative
 * value from an interrupted wait when nothing was copied yet.
 *
 * Fixes vs. the original:
 *  - large tracts of commented-out dead code removed;
 *  - an interrupted wait no longer discards bytes already delivered:
 *    if r > 0 the partial count is returned (standard read() semantics)
 *    instead of overwriting it with the error.
 */
static ssize_t diag2arm9_read(struct file *fp, char __user *buf,
			size_t count, loff_t *pos)
{
	struct diag_context *ctxt = &_context;
	struct diag_request *req;
	int r = 0, xfer;	/* r: bytes delivered so far; xfer: per-pass copy size */
	int ret;

	DBG("diag2arm9_read(%d)\n", count);

	/* single reader at a time */
	if (_lock(&ctxt->read_arm9_excl))
		return -EBUSY;

	while (count > 0) {
		/* if we have data pending, give it to userspace */
		if (ctxt->read_arm9_count > 0) {
			xfer = (ctxt->read_arm9_count < count) ?
				ctxt->read_arm9_count : count;
			if (copy_to_user(buf, ctxt->read_arm9_buf, xfer)) {
				DBG("diag: copy_to_user fail\n");
				r = -EFAULT;
				break;
			}
			/* advance the current-request cursor and the user buffer */
			ctxt->read_arm9_buf += xfer;
			ctxt->read_arm9_count -= xfer;
			buf += xfer;
			count -= xfer;
			r += xfer;

			/* if we've emptied the buffer, release the request */
			if (ctxt->read_arm9_count == 0) {
				put_req(ctxt, &ctxt->rx_arm9_idle,
					ctxt->read_arm9_req);
				ctxt->read_arm9_req = 0;
			}
			continue;
		}
		/* wait for a request to complete */
		req = 0;
		ret = wait_event_interruptible(ctxt->read_arm9_wq,
			((req = get_req(ctxt, &ctxt->rx_arm9_done)) ||
			 ctxt->error));
		if (req != 0) {
			/* a 0-len request must go straight back into service;
			 * if we made it the current read req we'd be stuck
			 * forever */
			if (req->actual == 0) {
				put_req(ctxt, &ctxt->rx_arm9_idle, req);
				continue;
			}
			ctxt->read_arm9_req = req;
			ctxt->read_arm9_count = req->actual;
			ctxt->read_arm9_buf = req->buf;
			/* clamp count so we finish after draining this request */
			if (ctxt->read_arm9_count < count)
				count = ctxt->read_arm9_count;
			DBG("rx %p %d\n", req, req->actual);
		}
		if (ret < 0) {
			DBG_OP("%s: ret < 0\n", __func__);
			/* don't lose bytes already copied to userspace */
			if (r == 0)
				r = ret;
			break;
		}
	}
	_unlock(&ctxt->read_arm9_excl);
	return r;
}