/* Tear down the lightweight-trace (LWT) subsystem: stop tracing, then
 * walk every online CPU's circular ring of trace pages, unlinking and
 * freeing each page and its descriptor.
 *
 * NOTE(review): the return value of lwt_control() is ignored here; it
 * can fail with -EPERM/-ENODATA — presumably harmless at fini time,
 * but confirm. */
void
lwt_fini ()
{
        int    i;

        /* disable tracing (enable=0, clear=0) */
        lwt_control(0, 0);

        for (i = 0; i < cfs_num_online_cpus(); i++)
                while (lwt_cpus[i].lwtc_current_page != NULL) {
                        lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;

                        if (cfs_list_empty (&lwtp->lwtp_list)) {
                                /* last page on this CPU: terminate the walk */
                                lwt_cpus[i].lwtc_current_page = NULL;
                        } else {
                                /* advance to the next page in the ring,
                                 * then unlink the current one */
                                lwt_cpus[i].lwtc_current_page =
                                        cfs_list_entry (lwtp->lwtp_list.next,
                                                        lwt_page_t,
                                                        lwtp_list);
                                cfs_list_del (&lwtp->lwtp_list);
                        }

                        /* free the backing page, then the descriptor */
                        __free_page (lwtp->lwtp_page);
                        LIBCFS_FREE (lwtp, sizeof (*lwtp));
                }
}
int cfs_wi_check_events (void) { int n = 0; cfs_workitem_t *wi; cfs_list_t *q; cfs_spin_lock(&cfs_wi_data.wi_glock); for (;;) { /** rerunq is always empty for userspace */ if (!cfs_list_empty(&cfs_wi_data.wi_scheds[1].ws_runq)) q = &cfs_wi_data.wi_scheds[1].ws_runq; else if (!cfs_list_empty(&cfs_wi_data.wi_scheds[0].ws_runq)) q = &cfs_wi_data.wi_scheds[0].ws_runq; else break; wi = cfs_list_entry(q->next, cfs_workitem_t, wi_list); cfs_list_del_init(&wi->wi_list); LASSERT (wi->wi_scheduled); wi->wi_scheduled = 0; cfs_spin_unlock(&cfs_wi_data.wi_glock); n++; (*wi->wi_action) (wi); cfs_spin_lock(&cfs_wi_data.wi_glock); } cfs_spin_unlock(&cfs_wi_data.wi_glock); return n; }
/* Return the first tx from *tx_list_p with a zc_ack from *zcack_list_p
 * piggybacked on it when possible.  If tx_list is empty, return a brand
 * new noop tx carrying the zc_ack.  Return NULL if an error happened.
 * The consumed zc_ack descriptor is freed here. */
usock_tx_t *
usocklnd_try_piggyback(cfs_list_t *tx_list_p,
                       cfs_list_t *zcack_list_p)
{
        usock_tx_t     *tx = NULL;
        usock_zc_ack_t *zc_ack;

        if (!cfs_list_empty(tx_list_p)) {
                tx = cfs_list_entry(tx_list_p->next, usock_tx_t, tx_list);
                cfs_list_del(&tx->tx_list);

                /* already carrying a zc-ack cookie, or partially sent:
                 * send it as-is, leaving the zcack list untouched */
                if (tx->tx_msg.ksm_zc_cookies[1] != 0 ||
                    tx->tx_resid != tx->tx_nob)
                        return tx;
        }

        /* nothing to piggyback? */
        if (cfs_list_empty(zcack_list_p))
                return tx;

        zc_ack = cfs_list_entry(zcack_list_p->next,
                                usock_zc_ack_t, zc_list);
        cfs_list_del(&zc_ack->zc_list);

        if (tx != NULL) {
                /* piggyback the zc-ack cookie */
                tx->tx_msg.ksm_zc_cookies[1] = zc_ack->zc_cookie;
        } else {
                /* cannot piggyback, need a noop carrier */
                tx = usocklnd_create_noop_tx(zc_ack->zc_cookie);
        }

        LIBCFS_FREE (zc_ack, sizeof(*zc_ack));
        return tx;
}
/* Enable or disable LWT tracing, optionally zeroing all existing trace
 * pages.  Requires CAP_SYS_ADMIN.
 *
 * \param enable  non-zero to (re)enable tracing, zero to disable
 * \param clear   non-zero to memset() every trace page on every CPU
 * \retval 0         success
 * \retval -EPERM    caller lacks CFS_CAP_SYS_ADMIN
 * \retval -ENODATA  some CPU has no trace pages allocated */
int
lwt_control (int enable, int clear)
{
        lwt_page_t  *p;
        int          i;
        int          j;

        if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                return (-EPERM);

        if (!enable) {
                /* log a final marker event, then turn tracing off;
                 * the barrier publishes lwt_enabled before we wait */
                LWT_EVENT(0,0,0,0);
                lwt_enabled = 0;
                cfs_mb();
                /* give people some time to stop adding traces */
                cfs_schedule_timeout(10);
        }

        for (i = 0; i < cfs_num_online_cpus(); i++) {
                p = lwt_cpus[i].lwtc_current_page;

                /* no pages allocated for this CPU */
                if (p == NULL)
                        return (-ENODATA);

                if (!clear)
                        continue;

                /* walk this CPU's circular page ring, zeroing each page */
                for (j = 0; j < lwt_pages_per_cpu; j++) {
                        memset(p->lwtp_events, 0, PAGE_CACHE_SIZE);

                        p = cfs_list_entry (p->lwtp_list.next,
                                            lwt_page_t, lwtp_list);
                }
        }

        if (enable) {
                /* publish lwt_enabled before logging the first event */
                lwt_enabled = 1;
                cfs_mb();
                LWT_EVENT(0,0,0,0);
        }

        return (0);
}
/* Snapshot the LWT trace buffers into userspace.  Always reports the
 * CPU count, total snapshot size and current cycle counter; when
 * user_ptr is NULL this acts as a pure size query.  Requires
 * CAP_SYS_ADMIN.
 *
 * \retval 0         success
 * \retval -EPERM    caller lacks CFS_CAP_SYS_ADMIN
 * \retval -ENODATA  some CPU has no trace pages allocated
 * \retval -EFAULT   copy_to_user() failed */
int
lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
              void *user_ptr, int user_size)
{
        const int    events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
        const int    bytes_per_page  = events_per_page * sizeof(lwt_event_t);
        lwt_page_t  *page;
        int          cpu;
        int          pg;

        if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                return (-EPERM);

        *ncpu = cfs_num_online_cpus();
        *total_size = cfs_num_online_cpus() * lwt_pages_per_cpu *
                      bytes_per_page;
        *now = get_cycles();

        /* size query only */
        if (user_ptr == NULL)
                return (0);

        for (cpu = 0; cpu < cfs_num_online_cpus(); cpu++) {
                page = lwt_cpus[cpu].lwtc_current_page;

                if (page == NULL)
                        return -ENODATA;

                /* copy this CPU's circular page ring out in order */
                for (pg = 0; pg < lwt_pages_per_cpu; pg++) {
                        if (copy_to_user(user_ptr, page->lwtp_events,
                                         bytes_per_page))
                                return -EFAULT;

                        user_ptr = ((char *)user_ptr) + bytes_per_page;
                        page = cfs_list_entry(page->lwtp_list.next,
                                              lwt_page_t, lwtp_list);
                }
        }

        return (0);
}
/* Workitem scheduler thread body.  arg encodes the scheduler id; id == -1
 * selects the dedicated serial scheduler (last slot of wi_scheds),
 * otherwise wi_scheds[0] is used (per-id schedulers are a future change —
 * see comment below).  Runs queued workitems until ws_shuttingdown is
 * set, then decrements the global thread count and exits. */
static int
cfs_wi_scheduler (void *arg)
{
        int             id     = (int)(long_ptr_t) arg;
        int             serial = (id == -1);
        char            name[24];
        cfs_wi_sched_t *sched;

        if (serial) {
                sched = &cfs_wi_data.wi_scheds[cfs_wi_data.wi_nsched - 1];
                cfs_daemonize("wi_serial_sd");
        } else {
                /* will be sched = &cfs_wi_data.wi_scheds[id] in the future */
                sched = &cfs_wi_data.wi_scheds[0];
                snprintf(name, sizeof(name), "cfs_wi_sd%03d", id);
                cfs_daemonize(name);
        }

        cfs_block_allsigs();

        cfs_wi_sched_lock(sched);

        while (!sched->ws_shuttingdown) {
                int             nloops = 0;
                int             rc;
                cfs_workitem_t *wi;

                /* run up to CFS_WI_RESCHED items before yielding, so one
                 * busy queue cannot monopolize this thread */
                while (!cfs_list_empty(&sched->ws_runq) &&
                       nloops < CFS_WI_RESCHED) {
                        wi = cfs_list_entry(sched->ws_runq.next,
                                            cfs_workitem_t, wi_list);
                        LASSERT (wi->wi_scheduled && !wi->wi_running);

                        cfs_list_del_init(&wi->wi_list);

                        wi->wi_running   = 1;
                        wi->wi_scheduled = 0;

                        /* run the action without the scheduler lock */
                        cfs_wi_sched_unlock(sched);
                        nloops++;

                        rc = (*wi->wi_action) (wi);

                        cfs_wi_sched_lock(sched);
                        if (rc != 0) /* WI should be dead, even be freed! */
                                continue;

                        wi->wi_running = 0;

                        /* not rescheduled while it was running */
                        if (cfs_list_empty(&wi->wi_list))
                                continue;

                        LASSERT (wi->wi_scheduled);
                        /* wi is rescheduled, should be on rerunq now, we
                         * move it to runq so it can run action now */
                        cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
                }

                if (!cfs_list_empty(&sched->ws_runq)) {
                        cfs_wi_sched_unlock(sched);
                        /* don't sleep because some workitems still
                         * expect me to come back soon */
                        cfs_cond_resched();
                        cfs_wi_sched_lock(sched);
                        continue;
                }

                /* queue empty: sleep until woken or shutdown */
                cfs_wi_sched_unlock(sched);
                cfs_wait_event_interruptible_exclusive(sched->ws_waitq,
                                !cfs_wi_sched_cansleep(sched), rc);
                cfs_wi_sched_lock(sched);
        }

        cfs_wi_sched_unlock(sched);

        /* account this thread's exit under the global lock */
        cfs_spin_lock(&cfs_wi_data.wi_glock);
        cfs_wi_data.wi_nthreads--;
        cfs_spin_unlock(&cfs_wi_data.wi_glock);

        return 0;
}