/* Per-connection read thread. */
static void *
np_conn_read_proc(void *a)
{
        Npconn *conn = (Npconn *)a;
        Npsrv *srv = conn->srv;
        Npreq *req;
        Npfcall *fc;

        pthread_detach(pthread_self());

        for (;;) {
                if (np_trans_recv(conn->trans, &fc, conn->msize) < 0) {
                        np_logerr (srv, "recv error - "
                                   "dropping connection to '%s'",
                                   conn->client_id);
                        break;
                }
                if (!fc) /* EOF */
                        break;
                _debug_trace (srv, fc);

                /* Encapsulate fc in a request and hand to srv worker threads.
                 * In np_req_alloc, req->fid is looked up/initialized.
                 */
                req = np_req_alloc(conn, fc);
                if (!req) {
                        np_logmsg (srv, "out of memory in receive path - "
                                   "dropping connection to '%s'",
                                   conn->client_id);
                        free (fc);
                        break;
                }

                /* Enqueue request for processing by next available worker
                 * thread, except P9_TFLUSH which is handled immediately.
                 */
                if (fc->type == P9_TFLUSH) {
                        if (np_flush (req, fc)) {
                                np_req_respond_flush (req);
                                np_req_unref (req);
                        }
                        xpthread_mutex_lock (&srv->lock);
                        srv->tpool->stats.nreqs[P9_TFLUSH]++;
                        xpthread_mutex_unlock (&srv->lock);
                } else {
                        xpthread_mutex_lock (&srv->lock);
                        np_srv_add_req (srv, req);
                        xpthread_mutex_unlock (&srv->lock);
                }
        }
        /* Just got EOF on read, or some other fatal error for the
         * connection like out of memory.
         */
        np_conn_flush (conn);

        xpthread_mutex_lock (&conn->lock);
        while (conn->refcount > 0)
                xpthread_cond_wait (&conn->refcond, &conn->lock);
        xpthread_mutex_unlock (&conn->lock);
        np_conn_destroy (conn);

        return NULL;
}
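/* Dispatch a decoded 9P request to the handler for its operation type and
 * convert any error reported via np_uerror()/np_rerror() into an Rlerror
 * response.  Per-operation request counts and read/write byte totals are
 * accumulated in the caller's Npstats under stats->lock.
 */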
static Npfcall*
np_process_request(Npreq *req, Npstats *stats)
{
        Npfcall *rc = NULL;
        Npfcall *tc = req->tcall;
        int ecode, valid_op = 1;
        u64 rbytes = 0, wbytes = 0;

        np_uerror(0);
        switch (tc->type) {
                case P9_TSTATFS:
                        rc = np_statfs(req, tc);
                        break;
                case P9_TLOPEN:
                        rc = np_lopen(req, tc);
                        break;
                case P9_TLCREATE:
                        rc = np_lcreate(req, tc);
                        break;
                case P9_TSYMLINK:
                        rc = np_symlink(req, tc);
                        break;
                case P9_TMKNOD:
                        rc = np_mknod(req, tc);
                        break;
                case P9_TRENAME:
                        rc = np_rename(req, tc);
                        break;
                case P9_TREADLINK:
                        rc = np_readlink(req, tc);
                        break;
                case P9_TGETATTR:
                        rc = np_getattr(req, tc);
                        break;
                case P9_TSETATTR:
                        rc = np_setattr(req, tc);
                        break;
                case P9_TXATTRWALK:
                        rc = np_xattrwalk(req, tc);
                        break;
                case P9_TXATTRCREATE:
                        rc = np_xattrcreate(req, tc);
                        break;
                case P9_TREADDIR:
                        rc = np_readdir(req, tc);
                        break;
                case P9_TFSYNC:
                        rc = np_fsync(req, tc);
                        break;
                case P9_TLOCK:
                        rc = np_lock(req, tc);
                        break;
                case P9_TGETLOCK:
                        rc = np_getlock(req, tc);
                        break;
                case P9_TLINK:
                        rc = np_link(req, tc);
                        break;
                case P9_TMKDIR:
                        rc = np_mkdir(req, tc);
                        break;
                case P9_TVERSION:
                        rc = np_version(req, tc);
                        break;
                case P9_TAUTH:
                        rc = np_auth(req, tc);
                        break;
                case P9_TATTACH:
                        rc = np_attach(req, tc);
                        break;
                case P9_TFLUSH:
                        rc = np_flush(req, tc);
                        break;
                case P9_TWALK:
                        rc = np_walk(req, tc);
                        break;
                case P9_TREAD:
                        rc = np_read(req, tc);
                        if (rc) /* handlers return NULL on error (see np_rerror below) */
                                rbytes = rc->u.rread.count;
                        break;
                case P9_TWRITE:
                        rc = np_write(req, tc);
                        if (rc)
                                wbytes = rc->u.rwrite.count;
                        break;
                case P9_TCLUNK:
                        rc = np_clunk(req, tc);
                        break;
                case P9_TREMOVE:
                        rc = np_remove(req, tc);
                        break;
                default: /* N.B. shouldn't get here - unhandled ops are
                          * caught in np_deserialize ().
                          */
                        np_uerror(ENOSYS);
                        valid_op = 0;
                        break;
        }
        if ((ecode = np_rerror())) {
                if (rc)
                        free(rc);
                rc = np_create_rlerror(ecode);
        }
        if (valid_op) {
                xpthread_mutex_lock (&stats->lock);
                stats->rbytes += rbytes;
                stats->wbytes += wbytes;
                stats->nreqs[tc->type]++;
                xpthread_mutex_unlock (&stats->lock);
        }

        return rc;
}