int
twrite(spdid_t spdid, td_t td, int cbid, int sz)
{
	int ret = -1;
	struct channel_info *channel;
	struct torrent *t;
	char *buf;

	if (tor_isnull(td)) return -EINVAL;

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	assert(t->data);
	if (!(t->flags & TOR_WRITE)) ERR_THROW(-EACCES, done);

	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	channel = (struct channel_info *)t->data;
	/* produce into this torrent's own channel (was channels->rb, i.e. always channel 0) */
	ret = cringbuf_produce(&channel->rb, buf, sz);
	cos_trans_cntl(COS_TRANS_TRIGGER, 0, 0, 0);
	t->offset += ret;
done:
	UNLOCK();
	return ret;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	net_connection_t nc;
	struct torrent *t;
	char *buf;
	int ret;

	buf = cbuf2buf(cbid, sz);
	if (!buf) return -EINVAL;
	if (tor_isnull(td)) return -EINVAL;

	NET_LOCK_TAKE();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, done);
	assert(t->data);
	nc  = (net_connection_t)t->data;
	ret = net_recv(spdid, nc, buf, sz);
done:
	NET_LOCK_RELEASE();
	assert(lock_contested(&net_lock) != cos_get_thd_id());
	return ret;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	td_t ntd;
	struct torrent *t;
	char *buf, *nbuf;
	int ret = -1;
	cbuf_t ncbid;

	if (tor_isnull(td)) return -EINVAL;
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	/* reads require read permission (the original checked TOR_WRITE) */
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, done);
	assert(t->data);
	ntd = (td_t)t->data;

	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);
	nbuf = cbuf_alloc(sz, &ncbid);
	assert(nbuf);

	/* printc("tip_tif_tread (thd %d)\n", cos_get_thd_id()); */
	ret = server_tread(cos_spd_id(), ntd, ncbid, sz);
	if (ret < 0) goto free;
	/* ip_tread_cnt++; */
	memcpy(buf, nbuf, ret);
free:
	/* cbufp_deref(ncbid); */ /* should keep this cbufp alive in netif for FT purposes? Jiguo */
	cbuf_free(ncbid);
done:
	return ret;
}
int
twrite(spdid_t spdid, td_t td, int cbid, int sz)
{
	struct torrent *t;
	char *buf;
	int ret = -1;

	if (tor_isnull(td)) return -EINVAL;
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	if (!(t->flags & TOR_WRITE)) ERR_THROW(-EACCES, done);

	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	ret = netif_event_xmit(spdid, buf, sz);

	/* // debug only */
	/* cbuf_t debug_cb; */
	/* if (debug_first == 0) { */
	/* 	debug_first = 1; */
	/* 	if (!(debug_buf = cbuf_alloc(sz, &debug_cb))) BUG(); */
	/* 	memcpy(debug_buf, buf, sz); */
	/* 	debug_amnt = sz; */
	/* } */
done:
	return ret;
}
int
twrite(spdid_t spdid, td_t td, int cbid, int sz)
{
	struct connection *c = NULL;
	struct torrent *t;
	char *buf;
	int ret = -1;

	if (tor_isnull(td)) return -EINVAL;
	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, unlock);
	if (!(t->flags & TOR_WRITE)) ERR_THROW(-EACCES, unlock);
	c = t->data;
	assert(c);
	lock_connection(c);
	UNLOCK();

	if (connection_parse_requests(c, buf, sz)) ERR_THROW(-EINVAL, release);
	unlock_connection(c);
	ret = sz;
done:
	return ret;
unlock:
	UNLOCK();
	goto done; /* do not fall through: the connection is not locked on this path */
release:
	unlock_connection(c);
	goto done;
}
int
twrite(spdid_t spdid, td_t td, int cbid, int sz)
{
	td_t ntd;
	struct torrent *t;
	char *buf, *nbuf;
	int ret = -1;
	cbuf_t ncbid;

	if (tor_isnull(td)) return -EINVAL;
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	if (!(t->flags & TOR_WRITE)) ERR_THROW(-EACCES, done);
	assert(t->data);
	ntd = (td_t)t->data;

	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);
	nbuf = cbuf_alloc(sz, &ncbid);
	assert(nbuf);
	memcpy(nbuf, buf, sz);

	ret = parent_twrite(cos_spd_id(), ntd, ncbid, sz);
	cbuf_free(ncbid);
done:
	return ret;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	struct connection *c;
	struct torrent *t;
	char *buf;
	int ret;

	if (tor_isnull(td)) return -EINVAL;
	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, unlock);
	assert(!tor_is_usrdef(td) || t->data);
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, unlock);
	c = t->data;
	lock_connection(c);
	UNLOCK();

	ret = connection_get_reply(c, buf, sz);
	unlock_connection(c);
done:
	return ret;
unlock:
	UNLOCK();
	goto done;
}
td_t
tsplit(spdid_t spdid, td_t td, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -1;
	struct torrent *t, *nt;
	struct fsobj *fso, *fsc, *parent; /* obj, child, and parent */
	char *subpath;

	if (tor_isnull(td)) return -EINVAL;

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	fso = t->data;

	fsc = fsobj_path2obj(param, len, fso, &parent, &subpath);
	if (!fsc) ERR_THROW(-ENOENT, done); /* was a bare return that leaked the lock */
	fsobj_take(fsc);

	nt = tor_alloc(fsc, tflags);
	if (!nt) ERR_THROW(-ENOMEM, done);
	ret = nt->td;

	/* If we created the torrent, then trigger an event as we have data! */
	evt_trigger(cos_spd_id(), evtid);
done:
	UNLOCK();
	return ret;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	int ret = -1, left;
	struct torrent *t;
	struct fsobj *fso;
	char *buf;

	if (tor_isnull(td)) return -EINVAL;

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	assert(!tor_is_usrdef(td) || t->data);
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, done);
	fso = t->data;
	assert(fso->size <= fso->allocated);
	assert(t->offset <= fso->size);
	if (!fso->size) ERR_THROW(0, done);

	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	left = fso->size - t->offset;
	ret  = left > sz ? sz : left;
	assert(fso->data);
	memcpy(buf, fso->data + t->offset, ret);
	t->offset += ret;
	cbuf_free(cbid);
done:
	UNLOCK();
	return ret;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	int ret = -1;
	struct channel_info *channel;
	struct torrent *t;
	char *buf;

	if (tor_isnull(td)) return -EINVAL;

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	assert(!tor_is_usrdef(td) || t->data);
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, done);

	buf = cbuf2buf(cbid, sz);
	if (!buf) goto done;

	channel = (struct channel_info *)t->data;
	ret = cringbuf_consume(&channel->rb, buf, sz);
done:
	UNLOCK();
	return ret;
}
td_t
tsplit(spdid_t spdid, td_t tid, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -EINVAL;
	struct torrent *t;
	net_connection_t nc = 0;
	int accept = 0;

	if (tor_isnull(tid)) return -EINVAL;

	NET_LOCK_TAKE();
	/* creating a new connection */
	if (tid == td_root || len == 0 || strstr(param, "accept")) {
		if (tid == td_root) { /* new connection */
			nc = net_create_tcp_connection(spdid, cos_get_thd_id(), evtid);
			if (nc <= 0) ERR_THROW(-ENOMEM, done);
		} else { /* len == 0 || strstr(param, "accept"): accept on connection */
			t = tor_lookup(tid);
			if (!t) goto done;
			nc = net_accept(spdid, (net_connection_t)t->data);
			if (nc == -EAGAIN) {
				/* printc("net accept return EAGAIN\n"); */
				ERR_THROW(-EAGAIN, done);
			}
			if (nc < 0) ERR_THROW(-EINVAL, done);
			if (0 < net_accept_data(spdid, nc, evtid)) BUG();
			accept = 1;
		}
		t = tor_alloc((void *)nc, tflags);
		if (!t) ERR_THROW(-ENOMEM, free);
		ret = t->td;
	} else { /* modifying an existing connection */
		t = tor_lookup(tid);
		if (!t) goto done;
		nc = (net_connection_t)t->data;
	}
	if (!accept && len != 0) {
		int r;

		NET_LOCK_RELEASE();
		r = modify_connection(spdid, nc, param, len);
		if (r < 0) ret = r;
		NET_LOCK_TAKE();
	}
done:
	NET_LOCK_RELEASE();
	assert(lock_contested(&net_lock) != cos_get_thd_id());
	return ret;
free:
	net_close(spdid, nc);
	goto done;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	struct connection *c;
	struct torrent *t;
	char *buf;
	int ret;

	/* printc("connmgr reads https thd %d\n", cos_get_thd_id()); */
	if (tor_isnull(td)) return -EINVAL;
	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, unlock);
	assert(!tor_is_usrdef(td) || t->data);
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, unlock);
	c = t->data;
	lock_connection(c);
	UNLOCK();

	/* // debug only */
	/* if (debug_buf && debug_amnt > 0) { */
	/* 	printc("use saved cbuf\n"); */
	/* 	memcpy(buf, debug_buf, sz); */
	/* 	ret = debug_amnt; */
	/* 	unlock_connection(c); */
	/* 	goto done; */
	/* } */

	ret = connection_get_reply(c, buf, sz);

	/* // debug only */
	/* if (!debug_buf && debug_amnt == 0) { */
	/* 	if (!(debug_buf = cbuf_alloc(sz, &debug_cb))) BUG(); */
	/* 	printc("save the response cbuf\n"); */
	/* 	memcpy(debug_buf, buf, sz); */
	/* 	debug_amnt = ret; */
	/* } */

	unlock_connection(c);
done:
	return ret;
unlock:
	UNLOCK();
	goto done;
}
td_t
tsplit(spdid_t spdid, td_t tid, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -ENOMEM, ntd;
	struct torrent *t;

	if (tid != td_root) return -EINVAL;

	ntd = parent_tsplit(cos_spd_id(), tid, param, len, tflags, evtid);
	if (ntd <= 0) ERR_THROW(ntd, err);

	t = tor_alloc((void *)ntd, tflags);
	if (!t) ERR_THROW(-ENOMEM, err);
	ret = t->td;
err:
	return ret;
}
int
twrite(spdid_t spdid, td_t td, int cbid, int sz)
{
	td_t ntd;
	struct torrent *t;
	int ret = -1;

	if (tor_isnull(td)) return -EINVAL;
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	if (!(t->flags & TOR_WRITE)) ERR_THROW(-EACCES, done);
	assert(t->data);
	ntd = (td_t)t->data;

	ret = parent_twrite(cos_spd_id(), ntd, cbid, sz);
done:
	return ret;
}
int
tmerge(spdid_t spdid, td_t td, td_t td_into, char *param, int len)
{
	int ret = 0;

	/* currently only allow deletion */
	if (td_into != td_null) ERR_THROW(-EINVAL, done);
done:
	return ret;
}
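The tsplit/twrite/tread/tmerge calls above form the whole client-facing torrent lifecycle. The following is a minimal, hedged client-side sketch of that lifecycle (not taken from the source); the spd/event ids, the empty param string, and the helper name example_send are assumptions for illustration only.

/*
 * Hedged usage sketch: derive a torrent from td_root, stage a payload
 * in a cbuf, write it, and delete the torrent.  my_spd, my_evtid, and
 * the empty param string are illustrative assumptions.
 */
static int
example_send(spdid_t my_spd, long my_evtid, char *data, int len)
{
	td_t td;
	cbuf_t cb;
	char *buf;
	int ret;

	/* split a new torrent off the root torrent */
	td = tsplit(my_spd, td_root, "", 0, TOR_READ | TOR_WRITE, my_evtid);
	if (td < 0) return td;

	/* stage the payload in a shared cbuf and pass its id to twrite */
	buf = cbuf_alloc(len, &cb);
	if (!buf) { ret = -ENOMEM; goto out; }
	memcpy(buf, data, len);
	ret = twrite(my_spd, td, cb, len);
	cbuf_free(cb);
out:
	/* td_null as the destination means "delete", as the tmerge variants note */
	tmerge(my_spd, td, td_null, "", 0);
	return ret;
}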
int init_ioctl_fd(struct netcf *ncf) {
    int ioctl_fd;
    int flags;

    ioctl_fd = socket(AF_INET, SOCK_STREAM, 0);
    ERR_THROW(ioctl_fd < 0, ncf, EINTERNAL,
              "failed to open socket for interface ioctl");

    flags = fcntl(ioctl_fd, F_GETFD);
    ERR_THROW(flags < 0, ncf, EINTERNAL,
              "failed to get flags for ioctl socket");

    flags = fcntl(ioctl_fd, F_SETFD, flags | FD_CLOEXEC);
    ERR_THROW(flags < 0, ncf, EINTERNAL,
              "failed to set FD_CLOEXEC flag on ioctl socket");
    return ioctl_fd;

error:
    if (ioctl_fd >= 0)
        close(ioctl_fd);
    return -1;
}
int
tmerge(spdid_t spdid, td_t td, td_t td_into, char *param, int len)
{
	struct torrent *t;
	int ret = 0;

	LOCK();
	if (!tor_is_usrdef(td)) ERR_THROW(-EINVAL, done);
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	/* currently only allow deletion */
	if (td_into != td_null) ERR_THROW(-EINVAL, done);
	assert(t->data);
	((struct channel_info *)t->data)->t = NULL;
	tor_free(t);
done:
	UNLOCK();
	return ret;
}
int
tmerge(spdid_t spdid, td_t td, td_t td_into, char *param, int len)
{
	int ret = 0;

	/* currently only allow deletion */
	if (td_into != td_null) ERR_THROW(-EINVAL, done);
	trelease(spdid, td);
done:
	assert(lock_contested(&net_lock) != cos_get_thd_id());
	return ret;
}
unsigned long
cbuf_memory_target_get(spdid_t spdid)
{
	struct cbuf_comp_info *cci;
	int ret;

	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, done);
	ret = cci->target_size;
done:
	CBUF_RELEASE();
	return ret;
}
int
cbuf_unmap_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	struct cbuf_info *cbi;
	int ret = 0, err = 0;
	u32_t off;

	assert(d_addr);
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) ERR_THROW(-EINVAL, done);
	if (unlikely(cbi->owner.spdid != s_spd)) ERR_THROW(-EPERM, done);
	assert(cbi->size == round_to_page(cbi->size));

	/* unmap pages in only the d_spd client */
	for (off = 0 ; off < cbi->size ; off += PAGE_SIZE)
		err |= mman_release_page(d_spd, d_addr + off, 0);
	err |= valloc_free(s_spd, d_spd, (void *)d_addr, cbi->size/PAGE_SIZE);
	if (unlikely(err)) ERR_THROW(-EFAULT, done);
	assert(!err);
done:
	CBUF_RELEASE();
	return ret;
}
td_t
tsplit(spdid_t spdid, td_t td, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -1;
	struct torrent *t, *nt;
	int channel, direction;

	LOCK();
	if (tor_isnull(td)) ERR_THROW(-EINVAL, done);
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	if (len > 1) ERR_THROW(-EINVAL, done);

	channel = (int)(*param - '0');
	if (channel > 9 || channel < 0) ERR_THROW(-EINVAL, done);
	if (!channels[channel].exists) ERR_THROW(-ENOENT, done);

	nt = tor_alloc(&channels[channel], tflags);
	if (!nt) ERR_THROW(-ENOMEM, done);
	ret = nt->td;

	direction = channels[channel].direction;
	if (direction == COS_TRANS_DIR_LTOC) {
		if (tflags != TOR_READ)  ERR_THROW(-EINVAL, free);
		if (channels[channel].t) ERR_THROW(-EBUSY, free);
	}
	if (direction == COS_TRANS_DIR_CTOL && tflags != TOR_WRITE) ERR_THROW(-EINVAL, free);

	if (direction == COS_TRANS_DIR_LTOC) {
		nt->evtid           = evtid;
		channels[channel].t = nt;
	} else {
		nt->evtid = 0;
	}
done:
	UNLOCK();
	return ret;
free:
	tor_free(nt);
	goto done;
}
td_t
tsplit(spdid_t spdid, td_t tid, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -ENOMEM;
	struct torrent *t;

	if (tid != td_root) return -EINVAL;
	netif_event_create(spdid);

	t = tor_alloc((void *)1, tflags);
	if (!t) ERR_THROW(-ENOMEM, err);
	ret = t->td;
err:
	return ret;
}
int
tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	struct torrent *t;
	char *buf;
	int ret = -1;

	if (tor_isnull(td)) return -EINVAL;
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	/* reads require read permission (the original checked TOR_WRITE) */
	if (!(t->flags & TOR_READ)) ERR_THROW(-EACCES, done);

	buf = cbuf2buf(cbid, sz);
	if (!buf) ERR_THROW(-EINVAL, done);

	ret = netif_event_wait(spdid, buf, sz);

	/* // debug ? */
	/* if (debug_first == 1) { */
	/* 	ret = netif_event_xmit(spdid, debug_buf, debug_amnt); */
	/* } */
done:
	return ret;
}
td_t
tsplit(spdid_t spdid, td_t tid, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -1;
	struct torrent *t;
	struct connection *c;

	if (tor_isnull(tid)) return -EINVAL;

	LOCK();
	c = http_new_connection(0, evtid);
	if (!c) ERR_THROW(-ENOMEM, err);
	/* ignore the param for now */
	t = tor_alloc(c, tflags);
	if (!t) ERR_THROW(-ENOMEM, free);
	c->conn_id = ret = t->td;
err:
	UNLOCK();
	return ret;
free:
	http_free_connection(c);
	goto err;
}
/**
 * Run a command without using the shell.
 *
 * Return 0 if the command ran and exited with status 0; otherwise
 * return -1.
 */
int run_program(struct netcf *ncf, const char *const *argv, char **output) {
    pid_t childpid = -1;
    int exitstatus, waitret;
    char *argv_str;
    int ret = -1;
    char errbuf[128];
    char *outtext = NULL;
    int outfd = -1;
    FILE *outfile = NULL;
    size_t outlen;

    if (!output)
        output = &outtext;

    argv_str = argv_to_string(argv);
    ERR_NOMEM(argv_str == NULL, ncf);

    exec_program(ncf, argv, argv_str, &childpid, &outfd);
    ERR_BAIL(ncf);

    printf("Attempting to execute %s\n", argv_str);

    outfile = fdopen(outfd, "r");
    ERR_THROW_STRERROR(outfile == NULL, ncf, EEXEC,
                       "Failed to create file stream for output while executing '%s': %s",
                       argv_str, errbuf);

    *output = fread_file(outfile, &outlen);
    ERR_THROW_STRERROR(*output == NULL, ncf, EEXEC,
                       "Error while reading output from execution of '%s': %s",
                       argv_str, errbuf);

    /* finished with the stream; close it so the child can exit */
    fclose(outfile);
    outfile = NULL;

    /* parentheses fixed so waitret receives waitpid's return value,
     * not the result of the == comparison */
    while ((waitret = waitpid(childpid, &exitstatus, 0)) == -1 && errno == EINTR) {
        /* empty loop */
    }

    ERR_THROW_STRERROR(waitret == -1, ncf, EEXEC,
                       "Failed waiting for completion of '%s': %s",
                       argv_str, errbuf);
    ERR_THROW(!WIFEXITED(exitstatus) && WIFSIGNALED(exitstatus), ncf, EEXEC,
              "'%s' terminated by signal: %d", argv_str, WTERMSIG(exitstatus));
    ERR_THROW(!WIFEXITED(exitstatus), ncf, EEXEC,
              "'%s' terminated improperly", argv_str);
    ERR_THROW(WEXITSTATUS(exitstatus) == EXIT_ENOENT, ncf, EEXEC,
              "Running '%s' program not found", argv_str);
    ERR_THROW(WEXITSTATUS(exitstatus) == EXIT_CANNOT_INVOKE, ncf, EEXEC,
              "Running '%s' program located but not usable", argv_str);
    ERR_THROW(WEXITSTATUS(exitstatus) == EXIT_SIGMASK, ncf, EEXEC,
              "Running '%s' failed to reset child process signal mask", argv_str);
    ERR_THROW(WEXITSTATUS(exitstatus) == EXIT_DUP2, ncf, EEXEC,
              "Running '%s' failed to dup2 child process stdout/stderr", argv_str);
    ERR_THROW(WEXITSTATUS(exitstatus) == EXIT_INVALID_IN_THIS_STATE, ncf, EINVALIDOP,
              "Running '%s' operation is invalid in this state", argv_str);
    ERR_THROW(WEXITSTATUS(exitstatus) != 0, ncf, EEXEC,
              "Running '%s' failed with exit code %d: %s",
              argv_str, WEXITSTATUS(exitstatus), *output);
    ret = 0;

error:
    if (outfile)
        fclose(outfile);
    else if (outfd >= 0)
        close(outfd);
    FREE(outtext);
    FREE(argv_str);
    return ret;
}
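A minimal, hedged usage sketch of run_program() above (not taken from the source): it passes a NULL-terminated argv and captures the child's stdout. The chosen command ("ip link show"), the helper name example_run, and the already-initialized struct netcf *ncf are assumptions for illustration only.

/*
 * Hedged usage sketch: run "ip link show" without a shell and print its
 * captured output.  ncf is assumed to be an initialized netcf handle.
 */
static void example_run(struct netcf *ncf) {
    const char *const argv[] = { "ip", "link", "show", NULL };
    char *out = NULL;

    if (run_program(ncf, argv, &out) == 0) {
        /* on success, out holds the command's captured stdout */
        printf("%s", out);
    }
    /* the caller owns the output buffer */
    FREE(out);
}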
/*
 * For a certain principal, collect any unreferenced and
 * not-in-freelist cbufs so that they can be reused.  This is the
 * garbage-collection mechanism.
 *
 * Collect cbufs and add them onto the shared component's ring buffer.
 *
 * This function is semantically complicated.  It can return no cbufs
 * even if they are available, to force the pool of cbufs to be
 * expanded (the client will call cbuf_create in this case).  Or, the
 * common case: it can return a number of available cbufs.
 */
int
cbuf_collect(spdid_t spdid, unsigned long size)
{
	struct cbuf_info *cbi;
	struct cbuf_comp_info *cci;
	struct cbuf_shared_page *csp;
	struct cbuf_bin *bin;
	int ret = 0;

	printl("cbuf_collect\n");
	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	/* check cci before it is dereferenced by tracking_start/tracking_end */
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, release);
	tracking_start(&cci->track, CBUF_COLLECT);
	if (size + cci->allocated_size <= cci->target_size) goto done;

	csp = cci->csp;
	if (unlikely(!csp)) ERR_THROW(-EINVAL, done);
	assert(csp->ring.size == CSP_BUFFER_SIZE);
	ret = CK_RING_SIZE(cbuf_ring, &csp->ring);
	if (ret != 0) goto done;
	/*
	 * Go through all cbufs we own, and report all of them that
	 * have no current references to them.  Unfortunately, this is
	 * O(N*M), N = min(num cbufs, PAGE_SIZE/sizeof(int)), and M =
	 * num components.
	 */
	size = round_up_to_page(size);
	bin  = cbuf_comp_info_bin_get(cci, size);
	if (!bin) ERR_THROW(0, done);
	cbi = bin->c;
	do {
		if (!cbi) break;
		/*
		 * Skip cbufs which are in the freelist.  This coordinates with
		 * cbuf_free to detect such cbufs correctly.  We must check
		 * refcnt first and then the next pointer.
		 *
		 * If we do not check refcnt: the manager may check "next"
		 * before cbuf_free (when it is NULL), then switch to the
		 * client, which calls cbuf_free to set "next", decrease refcnt,
		 * and add the cbuf to the freelist.  When we switch back to
		 * the manager, it would collect this in-freelist cbuf.
		 *
		 * Furthermore, we must check refcnt before the "next" pointer:
		 * if not, similarly to the above case, the manager may be
		 * preempted by the client between checking "next" and refcnt.
		 * The manager would then find "next" NULL and refcnt 0, and
		 * collect this cbuf.  Short-circuit evaluation prevents
		 * reordering.
		 */
		assert(cbi->owner.m);
		if (!CBUF_REFCNT(cbi->owner.m) && !CBUF_IS_IN_FREELIST(cbi->owner.m)
		    && !cbuf_referenced(cbi)) {
			struct cbuf_ring_element el = { .cbid = cbi->cbid };

			cbuf_references_clear(cbi);
			if (!CK_RING_ENQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) break;
			/*
			 * Prevent other collections from collecting these cbufs.
			 * The manager checks whether the shared ring buffer is
			 * empty upon entry; if not, it just returns.  This is
			 * not enough to prevent double-collection.  The corner
			 * case is: after the last one in the ring buffer is
			 * dequeued and before it is added to the free-list, the
			 * manager appears.  It may collect the last one again.
			 */
			cbi->owner.m->next = (struct cbuf_meta *)1;
			if (++ret == CSP_BUFFER_SIZE) break;
		}
		cbi = FIRST_LIST(cbi, next, prev);
	} while (cbi != bin->c);
	if (ret) cbuf_thd_wake_up(cci, ret*size);
done:
	tracking_end(&cci->track, CBUF_COLLECT);
release:
	CBUF_RELEASE();
	return ret;
}

/*
 * Called by cbuf_deref.
 */
int
cbuf_delete(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	int ret = -EINVAL, sz;

	printl("cbuf_delete\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_DEL);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cbi = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) goto done;
	meta = cbuf_meta_lookup(cci, cbid);

	/*
	 * Other threads can access the meta data simultaneously.  For
	 * example, others call cbuf2buf, which increases the refcnt.
	 */
	CBUF_REFCNT_ATOMIC_DEC(meta);
	/* Find the owner of this cbuf */
	if (cbi->owner.spdid != spdid) {
		cci = cbuf_comp_info_get(cbi->owner.spdid);
		if (unlikely(!cci)) goto done;
	}
	if (cbuf_free_unmap(cci, cbi)) goto done;
	if (cci->allocated_size < cci->target_size) {
		cbuf_thd_wake_up(cci, cci->target_size - cci->allocated_size);
	}
	ret = 0;
done:
	tracking_end(NULL, CBUF_DEL);
	CBUF_RELEASE();
	return ret;
}

/*
 * Called by cbuf2buf to retrieve a given cbid.
 */
int
cbuf_retrieve(spdid_t spdid, unsigned int cbid, unsigned long size)
{
	struct cbuf_comp_info *cci, *own;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta, *own_meta;
	struct cbuf_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -EINVAL, off;

	printl("cbuf_retrieve\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_RETRV);

	cci = cbuf_comp_info_get(spdid);
	if (!cci) { printd("no cci\n"); goto done; }
	cbi = cmap_lookup(&cbufs, cbid);
	if (!cbi) { printd("no cbi\n"); goto done; }
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) { printd("owner\n"); goto done; }
	meta = cbuf_meta_lookup(cci, cbid);
	if (!meta) { printd("no meta\n"); goto done; }
	assert(!(meta->nfo & ~CBUF_INCONSISENT));

	map = malloc(sizeof(struct cbuf_maps));
	if (!map) { printd("no map\n"); ERR_THROW(-ENOMEM, done); }
	/* release the map on this error path instead of leaking it */
	if (size > cbi->size) { printd("too big\n"); goto free; }
	assert(round_to_page(cbi->size) == cbi->size);
	size = cbi->size;
	/* TODO: change to MAPPING_READ */
	if (cbuf_alloc_map(spdid, &map->addr, NULL, cbi->mem, size, MAPPING_RW)) {
		printc("cbuf mgr map fail spd %d mem %p sz %lu cbid %u\n",
		       spdid, cbi->mem, size, cbid);
		goto free;
	}
	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);
	CBUF_PTR_SET(meta, map->addr);
	map->spdid          = spdid;
	map->m              = meta;
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = cbid;

	own = cbuf_comp_info_get(cbi->owner.spdid);
	if (unlikely(!own)) goto done;
	/*
	 * We need to inherit the relinquish bit from the sender.
	 * Otherwise, this cbuf cannot be returned to the manager.
	 */
	own_meta = cbuf_meta_lookup(own, cbid);
	if (CBUF_RELINQ(own_meta)) CBUF_FLAG_ADD(meta, CBUF_RELINQ);
	ret = 0;
done:
	tracking_end(NULL, CBUF_RETRV);
	CBUF_RELEASE();
	return ret;
free:
	free(map);
	goto done;
}

vaddr_t
cbuf_register(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_meta_range *cmr;
	void *p;
	vaddr_t dest, ret = 0;

	printl("cbuf_register\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_REG);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cmr = cbuf_meta_lookup_cmr(cci, cbid);
	if (cmr) ERR_THROW(cmr->dest, done);

	/* Create the mapping into the client */
	if (cbuf_alloc_map(spdid, &dest, &p, NULL, PAGE_SIZE, MAPPING_RW)) goto done;
	assert((unsigned int)p == round_to_page(p));
	cmr = cbuf_meta_add(cci, cbid, p, dest);
	assert(cmr);
	ret = cmr->dest;
done:
	tracking_end(NULL, CBUF_REG);
	CBUF_RELEASE();
	return ret;
}

static void
cbuf_shrink(struct cbuf_comp_info *cci, int diff)
{
	int i, sz;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi, *next, *head;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz  = (int)bin->size;
		if (!bin->c) continue;
		cbi = FIRST_LIST(bin->c, next, prev);
		while (cbi != bin->c) {
			next = FIRST_LIST(cbi, next, prev);
			if (!cbuf_free_unmap(cci, cbi)) {
				diff -= sz;
				if (diff <= 0) return;
			}
			cbi = next;
		}
		if (!cbuf_free_unmap(cci, cbi)) {
			diff -= sz;
			if (diff <= 0) return;
		}
	}
	if (diff > 0) cbuf_mark_relinquish_all(cci);
}