/* Read handler for the device table: builds a "#<name>\n" line for every
 * entry in devtab into a scratch buffer, then copies the requested window
 * of that listing into the caller's buffer.
 *
 * c:   channel being read (only passed through to satisfy the interface)
 * buf: destination buffer of n bytes
 * off: byte offset into the generated listing
 * Returns the number of bytes copied (readstr's return). */
long devtabread(struct chan *c, void *buf, long n, int64_t off)
{
	ERRSTACK(1);
	int i;
	struct dev *dev;
	char *alloc, *e, *p;

	/* KMALLOC_WAIT should block rather than fail, but check anyway */
	alloc = kzmalloc(READSTR, KMALLOC_WAIT);
	if (alloc == NULL)
		error(ENOMEM, NULL);
	p = alloc;
	e = p + READSTR;	/* one past the end of the scratch buffer */
	for (i = 0; &devtab[i] < __devtabend; i++) {
		dev = &devtab[i];
		printd("p %p e %p e-p %d\n", p, e, e - p);
		printd("do %d %s\n", i, dev->name);
		/* snprintf bounds each line by the space remaining */
		p += snprintf(p, e - p, "#%s\n", dev->name);
	}
	/* readstr may fault copying to a user buffer; free alloc on error */
	if (waserror()) {
		kfree(alloc);
		nexterror();
	}
	n = readstr(off, buf, n, alloc);
	kfree(alloc);
	poperror();
	return n;
}
/* * parse a command written to a device */ struct cmdbuf *parsecmd(char *p, int n) { ERRSTACK(1); struct cmdbuf *volatile cb; int nf; char *sp; nf = ncmdfield(p, n); /* allocate Cmdbuf plus string pointers plus copy of string including \0 */ sp = kzmalloc(sizeof(*cb) + nf * sizeof(char *) + n + 1, 0); cb = (struct cmdbuf *)sp; cb->f = (char **)(&cb[1]); cb->buf = (char *)(&cb->f[nf]); if (current != NULL && waserror()) { kfree(cb); nexterror(); } memmove(cb->buf, p, n); if (current != NULL) poperror(); /* dump new line and null terminate */ if (n > 0 && cb->buf[n - 1] == '\n') n--; cb->buf[n] = '\0'; cb->nf = tokenize(cb->buf, cb->f, nf - 1); cb->f[cb->nf] = NULL; return cb; }
/* Handle wstat on a pipe: the pipe's owner may rename a data file or change
 * its permission bits.  Directory qids are rejected.  Returns the number of
 * bytes of the stat message consumed. */
static int pipewstat(struct chan *c, uint8_t *dp, int n)
{
	ERRSTACK(2);
	struct dir *d;
	Pipe *p;
	int d1;

	if (c->qid.type & QTDIR)
		error(EPERM, ERROR_FIXME);
	p = c->aux;
	/* only the pipe's owner may change it */
	if (strcmp(current->user, p->user) != 0)
		error(EPERM, ERROR_FIXME);
	d = kzmalloc(sizeof(*d) + n, 0);
	/* fix: flag 0 allows the allocation to fail; check before use, matching
	 * the ENOMEM handling used elsewhere in this driver */
	if (d == NULL)
		error(ENOMEM, ERROR_FIXME);
	if (waserror()) {
		kfree(d);
		nexterror();
	}
	n = convM2D(dp, n, d, (char *)&d[1]);
	if (n == 0)
		error(ENODATA, ERROR_FIXME);
	/* d1 selects which data file (Qdata1 vs Qdata0) is being changed */
	d1 = NETTYPE(c->qid.path) == Qdata1;
	if (!emptystr(d->name)) {
		validwstatname(d->name);
		if (strlen(d->name) >= KNAMELEN)
			error(ENAMETOOLONG, ERROR_FIXME);
		/* cannot take the other data file's name */
		if (strncmp(p->pipedir[1 + !d1].name, d->name, KNAMELEN) == 0)
			error(EEXIST, ERROR_FIXME);
		strncpy(p->pipedir[1 + d1].name, d->name, KNAMELEN);
	}
	/* ~0 means "don't change mode" in a wstat message */
	if (d->mode != ~0UL)
		p->pipedir[d1 + 1].perm = d->mode & 0777;
	poperror();
	kfree(d);
	return n;
}
/* Create the file at 'path' and install the resulting chan in the FD table.
 * 'mode' carries the 9ns O_* flags; 'perm' is the glibc permission bits.
 * Returns the new fd, or -1 (with errno set via the errstack) on failure. */
int syscreate(char *path, int mode, uint32_t perm)
{
	ERRSTACK(2);
	int fd;
	struct chan *c;

	if (waserror()) {
		poperror();
		return -1;
	}
	openmode(mode & ~O_EXCL);	/* error check only; OEXCL okay here */
	c = namec(path, Acreate, mode, perm);
	/* from here, c must be closed if anything below throws */
	if (waserror()) {
		cclose(c);
		nexterror();
	}
	fd = newfd(c, mode);	/* 9ns mode is the O_FLAGS and perm is glibc mode */
	if (fd < 0)
		error(-fd, ERROR_FIXME);
	poperror();
	poperror();
	return fd;
}
int sysfversion(int fd, unsigned int msize, char *vers, unsigned int arglen) { ERRSTACK(2); int m; struct chan *c; if (waserror()) { poperror(); return -1; } /* check there's a NUL in the version string */ if (arglen == 0 || memchr(vers, 0, arglen) == 0) error(EINVAL, ERROR_FIXME); c = fdtochan(¤t->open_files, fd, O_RDWR, 0, 1); if (waserror()) { cclose(c); nexterror(); } m = mntversion(c, vers, msize, arglen); poperror(); cclose(c); poperror(); return m; }
/* Could pass in the fdt instead of the proc, but we used to need the to_proc
 * for now so we can claim a VFS FD.  Careful, we don't close the old chan.
 *
 * Duplicates from_proc's from_fd into to_proc's table at exactly to_fd.
 * Returns 0 on success, -1 on failure (bad fd, auth chan, or slot taken). */
int sys_dup_to(struct proc *from_proc, unsigned int from_fd,
	       struct proc *to_proc, unsigned int to_fd)
{
	ERRSTACK(1);
	int ret;
	struct chan *c;

	if (waserror()) {
		poperror();
		return -1;
	}
	/* takes a ref on c */
	c = fdtochan(&from_proc->open_files, from_fd, -1, 0, 1);
	/* auth chans may not be passed between processes */
	if (c->qid.type & QTAUTH) {
		cclose(c);
		error(EPERM, ERROR_FIXME);
	}
	ret = insert_obj_fdt(&to_proc->open_files, c, to_fd, 0, TRUE, FALSE);
	/* drop the ref from fdtochan. if insert succeeded, there is one other ref
	 * stored in the FDT */
	cclose(c);
	if (ret < 0)
		error(EFAIL, "Can't insert FD %d into FDG", to_fd);
	poperror();
	return 0;
}
/*
 * called when a process writes to an interface's 'data'
 *
 * Pulls one block off the conversation's write queue and hands it to the
 * bound medium's input routine.  Best-effort: the packet is dropped if the
 * interface is write-locked or no medium/handler is bound.
 */
static void ipifckick(void *x)
{
	ERRSTACK(1);
	struct conv *c = x;
	struct block *bp;
	struct Ipifc *ifc;

	bp = qget(c->wq);
	if (bp == NULL)
		return;	/* nothing queued */
	ifc = (struct Ipifc *)c->ptcl;
	/* don't block behind a writer; drop the packet instead */
	if (!canrlock(&ifc->rwlock)) {
		freeb(bp);
		return;
	}
	if (waserror()) {
		runlock(&ifc->rwlock);
		nexterror();
	}
	/* no medium or no input handler bound: drop; otherwise pktin
	 * consumes bp */
	if (ifc->m == NULL || ifc->m->pktin == NULL)
		freeb(bp);
	else
		(*ifc->m->pktin) (c->p->f, ifc, bp);
	runlock(&ifc->rwlock);
	poperror();
}
/* * associate an address with the interface. This wipes out any previous * addresses. This is a macro that means, remove all the old interfaces * and add a new one. */ static char *ipifcconnect(struct conv *c, char **argv, int argc) { ERRSTACK(1); char *err; struct Ipifc *ifc; ifc = (struct Ipifc *)c->ptcl; if (ifc->m == NULL) return "ipifc not yet bound to device"; if (waserror()) { wunlock(&ifc->rwlock); nexterror(); } wlock(&ifc->rwlock); while (ifc->lifc) { err = ipifcremlifc(ifc, ifc->lifc); if (err) error(err); } wunlock(&ifc->rwlock); poperror(); err = ipifcadd(ifc, argv, argc, 0, NULL); if (err) return err; Fsconnected(c, NULL); return NULL; }
long kchanio(void *vc, void *buf, int n, int mode) { ERRSTACK(1); int r; struct chan *c; c = vc; if (waserror()) { poperror(); return -1; } if (mode == O_READ) r = devtab[c->type].read(c, buf, n, c->offset); else if (mode == O_WRITE) r = devtab[c->type].write(c, buf, n, c->offset); else error(ENOSYS, "kchanio: use only O_READ xor O_WRITE"); spin_lock(&c->lock); c->offset += r; spin_unlock(&c->lock); poperror(); return r; }
/* Legacy Plan 9-style syscreate: note the single-argument error(Enofd),
 * the OEXCL spelling, and newfd(c) without a mode argument — an older API
 * generation than the O_EXCL/newfd(c, mode) variant elsewhere in this tree.
 * NOTE(review): duplicate symbol with the other syscreate; presumably these
 * chunks come from different files/eras — confirm which one actually links. */
int syscreate(char *path, int mode, uint32_t perm)
{
	ERRSTACK(2);
	int fd;
	struct chan *c;

	if (waserror()) {
		poperror();
		return -1;
	}
	openmode(mode & ~OEXCL);	/* error check only; OEXCL okay here */
	c = namec(path, Acreate, mode, perm);
	/* close c if installing the fd fails */
	if (waserror()) {
		cclose(c);
		nexterror();
	}
	fd = newfd(c);
	if (fd < 0)
		error(Enofd);
	poperror();
	poperror();
	return fd;
}
int pprint(char *fmt, ...) { ERRSTACK(2); int n; struct chan *c; va_list arg; char buf[2 * PRINTSIZE]; if (up == NULL || current->fgrp == NULL) return 0; c = current->fgrp->fd[2]; if (c == 0 || (c->mode != O_WRITE && c->mode != O_RDWR)) return 0; n = snprintf(buf, sizeof buf, "%s %lud: ", current->text, current->pid); va_start(arg, fmt); n = vsnprintf(buf + n, sizeof(buf), fmt, arg); va_end(arg); if (waserror()) return 0; devtab[c->type]->write(c, buf, n, c->offset); poperror(); spin_lock(&c->lock); c->offset += n; spin_unlock(&c->lock); return n; }
/*
 * copy the contents of memory into a string of blocks.
 * return NULL on error.
 *
 * Splits the source into blocks of at most Maxatomic bytes each, linked
 * via b->next.  Caller owns the returned list (freeblist to release).
 */
struct block *mem2bl(uint8_t * p, int len)
{
	ERRSTACK(1);
	int n;
	struct block *b, *first, **l;

	first = NULL;
	l = &first;
	/* free any partially-built list if an allocation errors out */
	if (waserror()) {
		freeblist(first);
		nexterror();
	}
	/* NOTE(review): do-while runs at least once, so this assumes len > 0;
	 * a len <= 0 caller would request a bogus allocation — confirm callers */
	do {
		n = len;
		if (n > Maxatomic)
			n = Maxatomic;	/* cap each block's payload */
		*l = b = block_alloc(n, MEM_WAIT);
		/* TODO consider extra_data */
		memmove(b->wp, p, n);
		b->wp += n;
		p += n;
		len -= n;
		l = &b->next;	/* chain the next block here */
	} while (len > 0);
	poperror();
	return first;
}
/* Write handler for the regression-test device.  "ktest" written to the
 * ctl file runs the registered ktest suites; writes to the data file are
 * dispatched through the monitor's onecmd().  Returns n, or -1 if onecmd
 * failed. */
static long regresswrite(struct chan *c, void *a, long n, int64_t unused)
{
	ERRSTACK(1);
	struct cmdbuf *cb;
	/* fix: removed unused local 'uintptr_t pc' */

	cb = parsecmd(a, n);
	if (waserror()) {
		kfree(cb);
		nexterror();
	}
	switch ((int)(c->qid.path)) {
	case Monitorctlqid:
		if (strncmp(a, "ktest", 5) == 0) {
			run_registered_ktest_suites();
		} else {
			error(EFAIL, "regresswrite: only commands are %s",
			      ctlcommands);
		}
		break;
	case Monitordataqid:
		if (onecmd(cb->nf, cb->f, NULL) < 0)
			n = -1;
		break;
	default:
		error(EBADFD, ERROR_FIXME);
	}
	kfree(cb);
	poperror();
	return n;
}
int perfmon_open_event(const struct core_set *cset, struct perfmon_session *ps, const struct perfmon_event *pev) { ERRSTACK(1); int i; struct perfmon_alloc *pa = perfmon_create_alloc(pev); if (waserror()) { perfmon_destroy_alloc(pa); nexterror(); } smp_do_in_cores(cset, perfmon_do_cores_alloc, pa); for (i = 0; i < num_cores; i++) { if (core_set_getcpu(cset, i)) { counter_t ccno = pa->cores_counters[i]; if (unlikely(ccno < 0)) { perfmon_destroy_alloc(pa); return (int) ccno; } } } /* The perfmon_alloc data structure will not be visible to userspace, * until the perfmon_install_session_alloc() completes, and at that * time the smp_do_in_cores(perfmon_do_cores_alloc) will have run on * all cores. * The perfmon_alloc data structure will never be changed once published. */ i = perfmon_install_session_alloc(ps, pa); poperror(); return i; }
/* Parse and apply a netlog control command written by the user:
 * "set"/"clear" toggle the named log flags in f->alog->logmask, and
 * "only <ip>" restricts logging to a single address (IPnoaddr clears the
 * filter).  Throws on malformed input. */
void netlogctl(struct Fs *f, char *s, int n)
{
	ERRSTACK(1);
	int i, set = 0;
	Netlogflag *fp;
	struct cmdbuf *cb;
	struct cmdtab *ct;

	cb = parsecmd(s, n);
	if (waserror()) {
		kfree(cb);
		nexterror();
	}
	if (cb->nf < 2)
		error(EINVAL, ERROR_FIXME);
	/* NOTE(review): the command table is named 'routecmd' — looks copied
	 * from the route code; confirm it defines CMset/CMclear/CMonly */
	ct = lookupcmd(cb, routecmd, ARRAY_SIZE(routecmd));
	switch (ct->index) {
	case CMset:
		set = 1;
		break;
	case CMclear:
		set = 0;
		break;
	case CMonly:
		/* restrict logging to one address; IPnoaddr disables filter */
		parseip(f->alog->iponly, cb->f[1]);
		if (ipcmp(f->alog->iponly, IPnoaddr) == 0)
			f->alog->iponlyset = 0;
		else
			f->alog->iponlyset = 1;
		kfree(cb);
		poperror();
		return;
	default:
		cmderror(cb, "unknown ip control message");
	}
	/* remaining args name flags to set/clear; unknown names are skipped */
	for (i = 1; i < cb->nf; i++) {
		for (fp = flags; fp->name; fp++)
			if (strcmp(fp->name, cb->f[i]) == 0)
				break;
		if (fp->name == NULL)
			continue;
		if (set)
			f->alog->logmask |= fp->mask;
		else
			f->alog->logmask &= ~fp->mask;
	}
	kfree(cb);
	poperror();
}
/*
 * Increment the reference count of a network device.
 * If id < 0, return an unused ether device.
 *
 * Returns the slot index (file id) of the opened netfile.
 */
static int openfile(struct ether *nif, int id)
{
	ERRSTACK(1);
	struct netfile *f, **fp, **efp;

	/* explicit id: reopen that file and bump its use count.
	 * NOTE(review): no bounds check of id against nif->nfile, and the
	 * slot is read before taking f->qlock — presumably callers validate
	 * id; confirm. */
	if (id >= 0) {
		f = nif->f[id];
		if (f == 0)
			error(Enodev);
		qlock(&f->qlock);
		qreopen(f->in);
		f->inuse++;
		qunlock(&f->qlock);
		return id;
	}

	/* id < 0: scan for a free slot, allocating a netfile on demand */
	qlock(&nif->qlock);
	if (waserror()) {
		qunlock(&nif->qlock);
		nexterror();
	}
	efp = &nif->f[nif->nfile];
	for (fp = nif->f; fp < efp; fp++) {
		f = *fp;
		if (f == 0) {
			/* empty slot: build a fresh netfile in place */
			f = kzmalloc(sizeof(struct netfile), 0);
			if (f == 0)
				exhausted("memory");
			/* since we lock before netifinit (if we ever call that...) */
			qlock_init(&f->qlock);
			f->in = qopen(nif->limit, Qmsg, 0, 0);
			if (f->in == NULL) {
				kfree(f);
				exhausted("memory");
			}
			*fp = f;
			qlock(&f->qlock);
		} else {
			/* existing file: only take it if nobody is using it */
			qlock(&f->qlock);
			if (f->inuse) {
				qunlock(&f->qlock);
				continue;
			}
		}
		f->inuse = 1;
		qreopen(f->in);
		netown(f, current->user, 0);
		qunlock(&f->qlock);
		qunlock(&nif->qlock);
		poperror();
		return fp - nif->f;	/* slot index is the file id */
	}
	error(Enodev);
	return -1;	/* not reached */
}
/* Generic block-write for devices without their own bwrite: forward the
 * block's payload through the device's write routine.  The block is always
 * consumed (freed), whether the write succeeds or throws.  Returns the
 * number of bytes written. */
long devbwrite(struct chan *c, struct block *bp, uint32_t offset)
{
	ERRSTACK(1);
	long written;

	/* on error, free the block before re-raising */
	if (waserror()) {
		freeb(bp);
		nexterror();
	}
	written = devtab[c->type].write(c, bp->rp, BLEN(bp), offset);
	poperror();
	/* the device copied the data out; the block is ours to free */
	freeb(bp);
	return written;
}
/* Device remove() hook for tmpfs.  Drops the fs user reference on both the
 * success and error paths: when remove fails we never get a close() for
 * this chan (see notes in tree_chan_remove() and sysremove()), so the ref
 * must be released here regardless. */
static void tmpfs_remove(struct chan *c)
{
	ERRSTACK(1);
	struct tmpfs *fs = chan_to_tmpfs(c);

	if (waserror()) {
		kref_put(&fs->users);
		nexterror();
	}
	tree_chan_remove(c);
	kref_put(&fs->users);
	poperror();
}
/* Drop one open reference on the Fs's netlog; the log buffer is released
 * when the last opener goes away. */
void netlogclose(struct Fs *f)
{
	ERRSTACK(1);

	spin_lock(&f->alog->lock);
	if (waserror()) {
		spin_unlock(&f->alog->lock);
		nexterror();
	}
	if (--f->alog->opens == 0) {
		/* last close: free the ring buffer */
		kfree(f->alog->buf);
		f->alog->buf = NULL;
	}
	spin_unlock(&f->alog->lock);
	poperror();
}
/* Generic block-read for devices without their own bread: allocate a block
 * of up to n bytes and fill it via the device's read routine.  Returns the
 * block; the caller owns and frees it. */
struct block *devbread(struct chan *c, long n, uint32_t offset)
{
	ERRSTACK(1);
	struct block *bp;

	/* NOTE(review): in some kernels allocb panics rather than returning
	 * 0, which would make this check dead — confirm the local contract */
	bp = allocb(n);
	if (bp == 0)
		error(Enomem);
	if (waserror()) {
		freeb(bp);
		nexterror();
	}
	/* advance wp past however many bytes the device produced */
	bp->wp += devtab[c->type].read(c, bp->wp, n, offset);
	poperror();
	return bp;
}
/*
 * get next block from a queue (up to a limit)
 *
 * Blocks until data arrives or the queue closes (returns NULL on close).
 * If the head block is larger than len on a non-message queue, the first
 * len bytes are returned and the remainder is pushed back onto the queue.
 */
struct block *qbread(struct queue *q, int len)
{
	ERRSTACK(1);
	struct block *b, *nb;
	int n;

	/* rlock serializes readers; q->lock protects the block list itself */
	qlock(&q->rlock);
	if (waserror()) {
		qunlock(&q->rlock);
		nexterror();
	}

	spin_lock_irqsave(&q->lock);
	if (!qwait(q)) {
		/* queue closed */
		spin_unlock_irqsave(&q->lock);
		qunlock(&q->rlock);
		poperror();
		return NULL;
	}

	/* if we get here, there's at least one block in the queue */
	b = qremove(q);
	n = BLEN(b);

	/* split block if it's too big and this is not a message queue:
	 * copy the tail past len into a fresh block and push it back onto
	 * the front of the queue; the caller gets the first len bytes */
	nb = b;
	if (n > len) {
		PANIC_EXTRA(b);
		if ((q->state & Qmsg) == 0) {
			n -= len;
			b = allocb(n);
			memmove(b->wp, nb->rp + len, n);
			b->wp += n;
			qputback(q, b);
		}
		/* message queues simply truncate: the tail is dropped */
		nb->wp = nb->rp + len;
	}

	/* restart producer */
	qwakeup_iunlock(q);

	poperror();
	qunlock(&q->rlock);
	return nb;
}
/* kref release callback for a chan: run the device's close() — discarding
 * any error it throws — then free the chan unconditionally. */
static void chan_release(struct kref *kref)
{
	struct chan *c = container_of(kref, struct chan, ref);
	ERRSTACK(1);

	/* this style discards the error from close().  picture it as
	 * if (waserror()) { } else { close(); } chanfree_no_matter_what(); */
	if (!waserror()) {
		printd("releasing chan %p, type %d\n", c, c->type);
		/* -1 means there is no dev yet.  wants a noop for close() */
		if (c->type != -1)
			devtab[c->type].close(c);
	}
	/* need to poperror regardless of whether we error'd or not */
	poperror();
	/* and chan free no matter what */
	chanfree(c);
}
/* * create a pipe, no streams are created until an open */ static struct chan *pipeattach(char *spec) { ERRSTACK(2); Pipe *p; struct chan *c; c = devattach(devname(), spec); p = kzmalloc(sizeof(Pipe), 0); if (p == 0) error(ENOMEM, ERROR_FIXME); if (waserror()) { freepipe(p); nexterror(); } p->pipedir = kzmalloc(sizeof(pipedir), 0); if (p->pipedir == 0) error(ENOMEM, ERROR_FIXME); memmove(p->pipedir, pipedir, sizeof(pipedir)); kstrdup(&p->user, current->user); kref_init(&p->ref, pipe_release, 1); qlock_init(&p->qlock); p->q[0] = qopen(pipealloc.pipeqsize, Qcoalesce, 0, 0); if (p->q[0] == 0) error(ENOMEM, ERROR_FIXME); p->q[1] = qopen(pipealloc.pipeqsize, Qcoalesce, 0, 0); if (p->q[1] == 0) error(ENOMEM, ERROR_FIXME); poperror(); spin_lock(&(&pipealloc)->lock); p->path = ++pipealloc.path; spin_unlock(&(&pipealloc)->lock); c->qid.path = NETQID(2 * p->path, Qdir); c->qid.vers = 0; c->qid.type = QTDIR; c->aux = p; c->dev = 0; /* taps. */ SLIST_INIT(&p->data_taps[0]); /* already = 0; set to be futureproof */ SLIST_INIT(&p->data_taps[1]); spinlock_init(&p->tap_lock); return c; }
int syspipe(int fd[2]) { ERRSTACK(1); struct dev *d; struct chan *c[2]; static char *names[] = { "data", "data1" }; d = &devtab[devno("pipe", 0)]; c[0] = 0; c[1] = 0; fd[0] = -1; fd[1] = -1; if (waserror()) { /* need to remove from the fd table and make sure the chan is closed * exactly once. if fd[i] >= 0, then the fd is valid (or it was!) and * the fd table has the only ref (newfd() currently decrefs/consumes the * reference). cclose() doesn't care if you pass it 0 (like kfree()). */ if (fd[0] >= 0) close_fd(¤t->open_files, fd[0]); else cclose(c[0]); if (fd[1] >= 0) close_fd(¤t->open_files, fd[1]); else cclose(c[1]); poperror(); return -1; } c[0] = namec("#pipe", Atodir, 0, 0); c[1] = cclone(c[0]); if (walk(&c[0], &names[0], 1, FALSE, NULL) < 0) error(EINVAL, ERROR_FIXME); if (walk(&c[1], &names[1], 1, FALSE, NULL) < 0) error(EINVAL, ERROR_FIXME); c[0] = d->open(c[0], O_RDWR); c[1] = d->open(c[1], O_RDWR); fd[0] = newfd(c[0], 0); if (fd[0] < 0) error(-fd[0], ERROR_FIXME); fd[1] = newfd(c[1], 0); if (fd[1] < 0) error(-fd[1], ERROR_FIXME); poperror(); return 0; }
/* Close descriptor fd in fgrp f.  Returns 0 on success, -1 if fd is not a
 * valid open descriptor. */
int fgrpclose(struct fgrp *f, int fd)
{
	ERRSTACK(1);

	if (waserror()) {
		poperror();
		return -1;
	}
	/* fdtochan is used purely as a validity check here — no reference is
	 * taken (last arg 0).  fdclose handles processes racing through. */
	fdtochan(f, fd, -1, 0, 0);
	fdclose(f, fd);
	poperror();
	return 0;
}
void netlogopen(struct Fs *f) { ERRSTACK(1); spin_lock(&f->alog->lock); if (waserror()) { spin_unlock(&f->alog->lock); nexterror(); } if (f->alog->opens == 0) { if (f->alog->buf == NULL) f->alog->buf = kzmalloc(Nlog, 0); f->alog->rptr = f->alog->buf; f->alog->end = f->alog->buf + Nlog; } f->alog->opens++; spin_unlock(&f->alog->lock); poperror(); }
/* Change the calling process's working directory to 'path'.
 * Returns 0 on success, -1 on error. */
int syschdir(char *path)
{
	ERRSTACK(1);
	struct chan *dir;
	struct pgrp *grp;

	if (waserror()) {
		poperror();
		return -1;
	}
	dir = namec(path, Atodir, 0, 0);
	grp = current->pgrp;
	/* swap in the new dot, dropping the old one's reference */
	cclose(grp->dot);
	grp->dot = dir;
	poperror();
	return 0;
}
/* Block-write to one end of a pipe: data written to Qdata0 lands on q[1]
 * and vice versa.  A write to a closed pipe surfaces as EPIPE.  Returns
 * the number of bytes written. */
static long pipebwrite(struct chan *c, struct block *bp, uint32_t junk)
{
	ERRSTACK(2);
	long n;
	Pipe *p;
	struct queue *q;

	if (waserror()) {
		/* avoid exceptions when pipe is a mounted queue */
		set_errno(EPIPE);
		nexterror();
	}
	p = c->aux;
	/* pick the peer's queue based on which data file this is */
	switch (NETTYPE(c->qid.path)) {
	case Qdata0:
		q = p->q[1];
		break;
	case Qdata1:
		q = p->q[0];
		break;
	default:
		panic("pipebwrite");
	}
	if (c->flag & O_NONBLOCK)
		n = qbwrite_nonblock(q, bp);
	else
		n = qbwrite(q, bp);
	poperror();
	return n;
}
int sysclose(int fd) { ERRSTACK(1); struct fd_table *fdt = ¤t->open_files; if (waserror()) { poperror(); return -1; } /* * Take no reference on the chan because we don't really need the * data structure, and are calling fdtochan only for error checks. * fdclose takes care of processes racing through here. */ fdtochan(fdt, fd, -1, 0, 0); fdclose(fdt, fd); poperror(); return 0; }
/* Blocking read from the netlog ring buffer: sleeps until data is
 * available, then copies up to n bytes into 'a', handling wrap-around at
 * the end of the circular buffer.  Returns the number of bytes copied. */
long netlogread(struct Fs *f, void *a, uint32_t unused, long n)
{
	ERRSTACK(1);
	int i, d;
	char *p, *rptr;

	/* qlock serializes readers; the spinlock protects the ring state */
	qlock(&f->alog->qlock);
	if (waserror()) {
		qunlock(&f->alog->qlock);
		nexterror();
	}

	for (;;) {
		spin_lock(&f->alog->lock);
		if (f->alog->len) {
			if (n > f->alog->len)
				n = f->alog->len;
			/* d counts how many of the n bytes wrap past 'end' */
			d = 0;
			rptr = f->alog->rptr;
			f->alog->rptr += n;
			if (f->alog->rptr >= f->alog->end) {
				d = f->alog->rptr - f->alog->end;
				f->alog->rptr = f->alog->buf + d;
			}
			f->alog->len -= n;
			spin_unlock(&f->alog->lock);

			/* copy the straight run, then the wrapped remainder
			 * from the start of the buffer */
			i = n - d;
			p = a;
			memmove(p, rptr, i);
			memmove(p + i, f->alog->buf, d);
			break;
		} else
			spin_unlock(&f->alog->lock);

		/* nothing buffered: wait for the logger to produce data */
		rendez_sleep(&f->alog->r, netlogready, f);
	}

	qunlock(&f->alog->qlock);
	poperror();
	return n;
}