/*
 * Read one DIRENT_SIZE block of directory entries into a freshly
 * allocated kernel buffer and chain the per-entry processing atom
 * (do_dir_line).  On the first pass (!cachemiss) the request is
 * requeued so the potentially blocking vfs_readdir() runs from
 * cachemiss (async-thread) context.
 *
 * Ownership: dirp0 is either freed here on error/EOF, or handed to
 * the request (req->dirp0) for do_dir_line to consume and free.
 */
void list_directory (tux_req_t *req, int cachemiss)
{
	struct getdents_callback64 buf;
	struct linux_dirent64 *dirp0;
	mm_segment_t oldmm;
	int total;

	Dprintk("list_directory(%p, %d), dentry: %p.\n", req, cachemiss, req->dentry);
	if (!req->cwd_dentry)
		TUX_BUG();

	if (!cachemiss) {
		/* Re-run ourselves from the cachemiss threads: */
		add_tux_atom(req, list_directory);
		queue_cachemiss(req);
		return;
	}

	dirp0 = tux_kmalloc(DIRENT_SIZE);

	buf.current_dir = dirp0;
	buf.previous = NULL;
	buf.count = DIRENT_SIZE;
	buf.error = 0;

	oldmm = get_fs();
	/* The dirent buffer lives in kernel space: */
	set_fs(KERNEL_DS);
	total = vfs_readdir(req->in_file, filldir64, &buf);
	set_fs(oldmm);

	/* If at least one entry was stored, report the bytes filled in: */
	if (buf.previous)
		total = DIRENT_SIZE - buf.count;

	Dprintk("total: %d (buf.error: %d, buf.previous %p)\n", total, buf.error, buf.previous);

	if (total < 0) {
		kfree(dirp0);
		req_err(req);
		add_req_to_workqueue(req);
		return;
	}

	if (!total) {
		/* EOF - rewind so a later listing starts from scratch: */
		kfree(dirp0);
		req->in_file->f_pos = 0;
		add_req_to_workqueue(req);
		return;
	}

	if (!req->cwd_dentry)
		TUX_BUG();
	/* Queue ourselves again so the next block gets read after this one: */
	add_tux_atom(req, list_directory);

	req->dirp0 = dirp0;
	req->curroff = 0;
	req->total = total;
	add_tux_atom(req, do_dir_line);
	add_req_to_workqueue(req);
}
static int cachemiss_thread (void *data) { tux_req_t *req; struct k_sigaction *ka; DECLARE_WAITQUEUE(wait, current); iothread_t *iot = data; int nr = iot->ti->cpu, wake_up; Dprintk("iot %p/%p got started.\n", iot, current); drop_permissions(); spin_lock(&iot->async_lock); iot->threads++; sprintf(current->comm, "async IO %d/%d", nr, iot->threads); spin_lock_irq(¤t->sighand->siglock); ka = current->sighand->action + SIGCHLD-1; ka->sa.sa_handler = SIG_IGN; siginitsetinv(¤t->blocked, sigmask(SIGCHLD)); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); spin_unlock(&iot->async_lock); #ifdef CONFIG_SMP { cpumask_t mask; if (cpu_isset(nr, cpu_online_map)) { cpus_clear(mask); cpu_set(nr, mask); set_cpus_allowed(current, mask); } } #endif add_wait_queue_exclusive(&iot->async_sleep, &wait); for (;;) { while (!list_empty(&iot->async_queue) && (req = get_cachemiss(iot))) { if (!req->atom_idx) { add_tux_atom(req, flush_request); add_req_to_workqueue(req); continue; } tux_schedule_atom(req, 1); if (signal_pending(current)) flush_all_signals(); } if (signal_pending(current)) flush_all_signals(); if (!list_empty(&iot->async_queue)) continue; if (iot->shutdown) { Dprintk("iot %p/%p got shutdown!\n", iot, current); break; } __set_current_state(TASK_INTERRUPTIBLE); if (list_empty(&iot->async_queue)) { Dprintk("iot %p/%p going to sleep.\n", iot, current); schedule(); Dprintk("iot %p/%p got woken up.\n", iot, current); } __set_current_state(TASK_RUNNING); } remove_wait_queue(&iot->async_sleep, &wait); wake_up = 0; spin_lock(&iot->async_lock); if (!--iot->threads) wake_up = 1; spin_unlock(&iot->async_lock); Dprintk("iot %p/%p has finished shutdown!\n", iot, current); if (wake_up) { Dprintk("iot %p/%p waking up master.\n", iot, current); wake_up(&iot->wait_shutdown); } return 0; }
/*
 * Process one dirent from the block read by list_directory: look the
 * name up (atomically if !cachemiss, requeueing on -EWOULDBLOCKIO),
 * optionally filter unreadable/hidden entries, format a listing line
 * via the protocol's print_dir_line(), then either queue ourselves
 * again for the next entry or fall back to the list_directory atom.
 */
static void do_dir_line (tux_req_t *req, int cachemiss)
{
	struct linux_dirent64 *dirp, *dirp0;
	char string0[MAX_OBJECTNAME_LEN+200], *tmp;
	int len, curroff, total, str_len = 0;
	int err, flag = cachemiss ? 0 : LOOKUP_ATOMIC;
	struct nameidata base = { };
	struct dentry *dentry = NULL;
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;

	if (req->proto->check_req_err(req, cachemiss))
		return;

	tmp = NULL;
	dirp0 = req->dirp0;
	curroff = req->curroff;
	total = req->total;

	dirp = (struct linux_dirent64 *)((char *)dirp0 + curroff);
	if (!dirp->d_name || !dirp->d_name[0])
		goto next_dir;
	/*
	 * Hide .xxxxx files:
	 */
	if (dirp->d_name[0] == '.')
		goto next_dir;
	Dprintk("<%s T:%d (off:%Ld) (len:%d)>\n", dirp->d_name, dirp->d_type,
			dirp->d_off, dirp->d_reclen);
	if (tux_hide_unreadable) {
		switch (dirp->d_type) {
			default:
				goto next_dir;
			case DT_UNKNOWN:
			case DT_REG:
			case DT_DIR:
			case DT_LNK:
			/* valid entries - fall through. */
				;
		}
	}
	len = strlen(dirp->d_name);
	if (len >= MAX_OBJECTNAME_LEN) {
		/*
		 * Truncate over-long names.  Terminate at index
		 * MAX_OBJECTNAME_LEN-1 so the string length matches
		 * 'len' (the old code NUL-ed index MAX_OBJECTNAME_LEN,
		 * leaving the string one char longer than 'len').
		 */
		dirp->d_name[MAX_OBJECTNAME_LEN-1] = 0;
		len = MAX_OBJECTNAME_LEN-1;
	}

	if (!req->dentry)
		TUX_BUG();

	base.flags = flag;
	base.last_type = LAST_ROOT;
	base.dentry = dget(req->dentry);
	base.mnt = mntget(req->cwd_mnt);

	switch_docroot(req);
	err = path_walk(dirp->d_name, &base);

	Dprintk("path_walk() returned %d.\n", err);

	if (err) {
		if (err == -EWOULDBLOCKIO) {
			/* Atomic lookup blocked - retry from cachemiss context: */
			add_tux_atom(req, do_dir_line);
			queue_cachemiss(req);
			return;
		}
		goto next_dir;
	}

	dentry = base.dentry;
	mnt = base.mnt;
	if (!dentry)
		TUX_BUG();
	if (IS_ERR(dentry))
		TUX_BUG();
	inode = dentry->d_inode;
	if (!inode)
		TUX_BUG();
	if (!dirp->d_type)
		dirp->d_type = get_d_type(dentry);
	if (tux_hide_unreadable) {
		umode_t mode;

		/* Filter out entries the policy forbids or cannot read: */
		mode = inode->i_mode;
		if (mode & tux_mode_forbidden)
			goto out_dput;
		if (!(mode & tux_mode_allowed))
			goto out_dput;

		err = permission(inode, MAY_READ, NULL);
		if (err)
			goto out_dput;
		if (dirp->d_type == DT_DIR) {
			err = permission(inode, MAY_EXEC, NULL);
			if (err)
				goto out_dput;
		}
	}

	tmp = req->proto->print_dir_line(req, string0, dirp->d_name,
			len, dirp->d_type, dentry, inode);
	if (tmp)
		str_len = tmp-string0;
out_dput:
	dput(dentry);
	mntput(mnt);
next_dir:
	curroff += dirp->d_reclen;

	if (tmp && (tmp != string0))
		Dprintk("writing line (len: %d): <%s>\n",
			(int)strlen(string0), string0);

	if (curroff < total) {
		/* More entries in this block - queue ourselves again: */
		req->dirp0 = dirp0;
		req->curroff = curroff;
		add_tux_atom(req, do_dir_line);
	} else {
		/* Block consumed - free it and fall back to list_directory: */
		kfree(dirp0);
		req->dirp0 = NULL;
		req->curroff = 0;
		// falls back to the list_directory atom
	}
	if (tmp && (tmp != string0))
		__send_async_message(req, string0, 200, str_len, 0);
	else
		add_req_to_workqueue(req);
}