int
cfs_wi_check_events(void)
{
        int             n = 0;
        cfs_workitem_t *wi;
        cfs_list_t     *q;

        cfs_spin_lock(&cfs_wi_data.wi_glock);

        for (;;) {
                /** rerunq is always empty for userspace */
                if (!cfs_list_empty(&cfs_wi_data.wi_scheds[1].ws_runq))
                        q = &cfs_wi_data.wi_scheds[1].ws_runq;
                else if (!cfs_list_empty(&cfs_wi_data.wi_scheds[0].ws_runq))
                        q = &cfs_wi_data.wi_scheds[0].ws_runq;
                else
                        break;

                wi = cfs_list_entry(q->next, cfs_workitem_t, wi_list);
                cfs_list_del_init(&wi->wi_list);

                LASSERT (wi->wi_scheduled);
                wi->wi_scheduled = 0;
                cfs_spin_unlock(&cfs_wi_data.wi_glock);

                n++;
                (*wi->wi_action) (wi);

                cfs_spin_lock(&cfs_wi_data.wi_glock);
        }

        cfs_spin_unlock(&cfs_wi_data.wi_glock);
        return n;
}
/**
 * cancel a workitem:
 */
int
cfs_wi_cancel (cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);
        int             rc;

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);
        /*
         * return 0 if it's running already, otherwise return 1, which
         * means the workitem will not be scheduled and will not have
         * any race with wi_action.
         */
        rc = !(wi->wi_running);

        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT (!cfs_list_empty(&wi->wi_list));
                cfs_list_del_init(&wi->wi_list);
                wi->wi_scheduled = 0;
        }

        LASSERT (cfs_list_empty(&wi->wi_list));

        cfs_wi_sched_unlock(sched);
        return rc;
}
/*
 * Workitem scheduled with (serial == 1) is strictly serialised not only with
 * itself, but also with others scheduled this way.
 *
 * Now there's only one static serialised queue, but in the future more might
 * be added, and even dynamic creation of serialised queues might be supported.
 */
void
cfs_wi_schedule(cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);

        if (!wi->wi_scheduled) {
                LASSERT (cfs_list_empty(&wi->wi_list));

                wi->wi_scheduled = 1;
                if (!wi->wi_running) {
                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
                        cfs_waitq_signal(&sched->ws_waitq);
#endif
                } else {
                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
                }
        }

        LASSERT (!cfs_list_empty(&wi->wi_list));
        cfs_wi_sched_unlock(sched);
        return;
}
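/*
 * A minimal usage sketch for the workitem API above; it is not part of the
 * original file.  It assumes the libcfs helper cfs_wi_init(wi, data, action)
 * initialises wi_data/wi_action and an empty wi_list, and that a wi_action
 * callback returning 0 simply completes the current run.  The names
 * echo_wi_t, echo_action() and echo_start() are hypothetical.  Note that
 * calling cfs_wi_schedule() from inside the action is legal: wi_running is
 * still set, so the item lands on ws_rerunq instead of ws_runq.
 */
typedef struct {
        cfs_workitem_t ew_wi;    /* must stay alive until cfs_wi_exit() */
        int            ew_count; /* number of completed runs */
} echo_wi_t;

static int
echo_action(cfs_workitem_t *wi)
{
        echo_wi_t *ew = (echo_wi_t *)wi; /* ew_wi is the first member */

        if (++ew->ew_count < 3)
                cfs_wi_schedule(wi); /* re-arm: goes to ws_rerunq */
        return 0;
}

static void
echo_start(echo_wi_t *ew)
{
        ew->ew_count = 0;
        cfs_wi_init(&ew->ew_wi, ew, echo_action);
        cfs_wi_schedule(&ew->ew_wi);
}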
static int
usocklnd_send_tx_immediately(usock_conn_t *conn, usock_tx_t *tx)
{
        int           rc;
        int           rc2;
        int           partial_send = 0;
        usock_peer_t *peer         = conn->uc_peer;

        LASSERT (peer != NULL);

        /* usocklnd_enqueue_tx() turned it on for us */
        LASSERT(conn->uc_sending);

        //counter_imm_start++;
        rc = usocklnd_send_tx(conn, tx);
        if (rc == 0) { /* partial send or connection closed */
                pthread_mutex_lock(&conn->uc_lock);
                cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
                conn->uc_sending = 0;
                pthread_mutex_unlock(&conn->uc_lock);
                partial_send = 1;
        } else {
                usocklnd_destroy_tx(peer->up_ni, tx);
                /* NB: lnetmsg was finalized, so we *must* return 0 */

                if (rc < 0) { /* real error */
                        usocklnd_conn_kill(conn);
                        return 0;
                }

                /* rc == 1: tx was sent completely */
                rc = 0; /* let's say to caller 'Ok' */
                //counter_imm_complete++;
        }

        pthread_mutex_lock(&conn->uc_lock);
        conn->uc_sending = 0;

        /* schedule write handler */
        if (partial_send ||
            (conn->uc_state == UC_READY &&
             (!cfs_list_empty(&conn->uc_tx_list) ||
              !cfs_list_empty(&conn->uc_zcack_list)))) {
                conn->uc_tx_deadline =
                        cfs_time_shift(usock_tuns.ut_timeout);
                conn->uc_tx_flag = 1;
                rc2 = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST,
                                               POLLOUT);
                if (rc2 != 0)
                        usocklnd_conn_kill_locked(conn);
                else
                        usocklnd_wakeup_pollthread(conn->uc_pt_idx);
        }

        pthread_mutex_unlock(&conn->uc_lock);

        return rc;
}
/*
 * Add object to list of dirty objects in tx handle.
 */
static void
osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
{
        if (!cfs_list_empty(&obj->oo_sa_linkage))
                return;

        down(&oh->ot_sa_lock);
        write_lock(&obj->oo_attr_lock);
        if (likely(cfs_list_empty(&obj->oo_sa_linkage)))
                cfs_list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
        write_unlock(&obj->oo_attr_lock);
        up(&oh->ot_sa_lock);
}
static void
cfs_wi_sched_shutdown(cfs_wi_sched_t *sched)
{
        cfs_wi_sched_lock(sched);

        LASSERT(cfs_list_empty(&sched->ws_runq));
        LASSERT(cfs_list_empty(&sched->ws_rerunq));

        sched->ws_shuttingdown = 1;

#ifdef __KERNEL__
        cfs_waitq_broadcast(&sched->ws_waitq);
#endif
        cfs_wi_sched_unlock(sched);
}
void
lwt_fini (void)
{
        int i;

        lwt_control(0, 0);

        for (i = 0; i < cfs_num_online_cpus(); i++)
                while (lwt_cpus[i].lwtc_current_page != NULL) {
                        lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;

                        if (cfs_list_empty (&lwtp->lwtp_list)) {
                                lwt_cpus[i].lwtc_current_page = NULL;
                        } else {
                                lwt_cpus[i].lwtc_current_page =
                                        cfs_list_entry (lwtp->lwtp_list.next,
                                                        lwt_page_t, lwtp_list);
                                cfs_list_del (&lwtp->lwtp_list);
                        }

                        __free_page (lwtp->lwtp_page);
                        LIBCFS_FREE (lwtp, sizeof (*lwtp));
                }
}
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        int            nob;
        int            rc;
        int            flags;

        if (*ksocknal_tunables.ksnd_enable_csum        && /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
            tx->tx_nob == tx->tx_resid                 && /* first sending    */
            tx->tx_msg.ksm_csum == 0)                     /* not checksummed  */
                ksocknal_lib_csum_tx(tx);

        nob = ks_query_iovs_length(tx->tx_iov, tx->tx_niov);
        flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
                (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
        rc = ks_send_iovs(sock, tx->tx_iov, tx->tx_niov, flags, 0);
        KsPrint((4, "ksocknal_lib_send_iov: conn %p sock %p rc %d\n",
                    conn, sock, rc));
        return rc;
}
void lc_watchdog_delete(struct lc_watchdog *lcw)
{
        int dead;

        ENTRY;
        LASSERT(lcw != NULL);

        cfs_timer_disarm(&lcw->lcw_timer);

        lcw_update_time(lcw, "stopped");

        cfs_spin_lock_bh(&lcw->lcw_lock);
        cfs_spin_lock_bh(&lcw_pending_timers_lock);
        if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
                cfs_list_del_init(&lcw->lcw_list);
                lcw->lcw_refcount--; /* -1 ref for pending list */
        }

        lcw->lcw_refcount--; /* -1 ref for owner */
        dead = lcw->lcw_refcount == 0;
        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
        cfs_spin_unlock_bh(&lcw->lcw_lock);

        if (dead)
                LIBCFS_FREE(lcw, sizeof(*lcw));

        cfs_down(&lcw_refcount_sem);
        if (--lcw_refcount == 0)
                lcw_dispatch_stop();
        cfs_up(&lcw_refcount_sem);

        EXIT;
}
/* must be called with resource lock held */
static int
lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
{
        struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];

        /* NB we are passed an allocated, but inactive md.
         * if we return success, caller may lnet_md_unlink() it.
         * otherwise caller may only lnet_md_free() it.
         */
        /* This implementation doesn't know how to create START events or
         * disable END events.  Best to LASSERT our caller is compliant so
         * we find out quickly... */
        /* TODO - reevaluate what should be here in light of
         * the removal of the start and end events;
         * maybe we shouldn't even allow LNET_EQ_NONE!
         * LASSERT (eq == NULL);
         */
        if (!LNetHandleIsInvalid(eq_handle)) {
                md->md_eq = lnet_handle2eq(&eq_handle);

                if (md->md_eq == NULL)
                        return -ENOENT;

                (*md->md_eq->eq_refs[cpt])++;
        }

        lnet_res_lh_initialize(container, &md->md_lh);

        LASSERT(cfs_list_empty(&md->md_list));
        cfs_list_add(&md->md_list, &container->rec_active);

        return 0;
}
/*
 * Free llog handle and header data if it exists. Used in llog_close() only.
 */
void llog_free_handle(struct llog_handle *loghandle)
{
        LASSERT(loghandle != NULL);

        /* failed llog_init_handle */
        if (!loghandle->lgh_hdr)
                goto out;

        if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
                LASSERT(cfs_list_empty(&loghandle->u.phd.phd_entry));
        else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
                LASSERT(cfs_list_empty(&loghandle->u.chd.chd_head));
        LASSERT(sizeof(*(loghandle->lgh_hdr)) == LLOG_CHUNK_SIZE);
        OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE);
out:
        OBD_FREE_PTR(loghandle);
}
static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
{
        struct ll_inode_info *lli = NULL;

        spin_lock(&lcq->lcq_lock);

        if (!cfs_list_empty(&lcq->lcq_head)) {
                lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
                                     lli_close_list);
                cfs_list_del_init(&lli->lli_close_list);
        } else if (cfs_atomic_read(&lcq->lcq_stop))
                lli = ERR_PTR(-EALREADY);

        spin_unlock(&lcq->lcq_lock);
        return lli;
}
static int is_watchdog_fired(void)
{
        int rc;

        if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
                return 1;

        cfs_spin_lock_bh(&lcw_pending_timers_lock);
        rc = !cfs_list_empty(&lcw_pending_timers);
        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
        return rc;
}
static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
{
        cfs_spin_lock_bh(&lcw->lcw_lock);
        if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
                cfs_spin_lock_bh(&lcw_pending_timers_lock);
                cfs_list_del_init(&lcw->lcw_list);
                lcw->lcw_refcount--; /* -1 ref for pending list */
                cfs_spin_unlock_bh(&lcw_pending_timers_lock);
        }
        cfs_spin_unlock_bh(&lcw->lcw_lock);
}
/** Queues DONE_WRITING if
 * - done writing is allowed;
 * - inode has no dirty pages;
 */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
        ENTRY;

        spin_lock(&lli->lli_lock);
        lli->lli_flags |= flags;

        if ((lli->lli_flags & LLIF_DONE_WRITING) &&
            cfs_list_empty(&club->cob_pending_list)) {
                struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;

                if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
                        CWARN("ino %lu/%u(flags %u) SOM valid just after "
                              "recovery\n", inode->i_ino, inode->i_generation,
                              lli->lli_flags);
                /* DONE_WRITING is allowed and inode has no dirty page. */
                spin_lock(&lcq->lcq_lock);

                LASSERT(cfs_list_empty(&lli->lli_close_list));
                CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
                       inode->i_ino, inode->i_generation);
                cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);

                /* Avoid a concurrent insertion into the close thread queue:
                 * an inode is already in the close thread, open(), write(),
                 * close() happen, epoch is closed as the inode is marked as
                 * LLIF_EPOCH_PENDING. When pages are written inode should not
                 * be inserted into the queue again, clear this flag to avoid
                 * it. */
                lli->lli_flags &= ~LLIF_DONE_WRITING;

                cfs_waitq_signal(&lcq->lcq_waitq);
                spin_unlock(&lcq->lcq_lock);
        }
        spin_unlock(&lli->lli_lock);
        EXIT;
}
/* Return the first tx from tx_list with piggybacked zc_ack
 * from zcack_list when possible. If tx_list is empty, return
 * brand new noop tx for zc_ack from zcack_list. Return NULL
 * if an error happened. */
usock_tx_t *
usocklnd_try_piggyback(cfs_list_t *tx_list_p,
                       cfs_list_t *zcack_list_p)
{
        usock_tx_t     *tx;
        usock_zc_ack_t *zc_ack;

        /* assign tx and zc_ack */
        if (cfs_list_empty(tx_list_p))
                tx = NULL;
        else {
                tx = cfs_list_entry(tx_list_p->next, usock_tx_t, tx_list);
                cfs_list_del(&tx->tx_list);

                /* already piggybacked or partially sent */
                if (tx->tx_msg.ksm_zc_cookies[1] != 0 ||
                    tx->tx_resid != tx->tx_nob)
                        return tx;
        }

        if (cfs_list_empty(zcack_list_p)) {
                /* nothing to piggyback */
                return tx;
        } else {
                zc_ack = cfs_list_entry(zcack_list_p->next,
                                        usock_zc_ack_t, zc_list);
                cfs_list_del(&zc_ack->zc_list);
        }

        if (tx != NULL)
                /* piggyback the zc-ack cookie */
                tx->tx_msg.ksm_zc_cookies[1] = zc_ack->zc_cookie;
        else
                /* cannot piggyback, need noop */
                tx = usocklnd_create_noop_tx(zc_ack->zc_cookie);

        LIBCFS_FREE (zc_ack, sizeof(*zc_ack));
        return tx;
}
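/*
 * Hedged caller sketch, not from the original file: the connection's
 * write path would drain both queues under uc_lock roughly as below.
 * grab_next_tx() is a hypothetical name.  In the real caller the two
 * lists are known not to be simultaneously empty before the call, so
 * a NULL return there specifically means noop-tx allocation failed.
 */
static usock_tx_t *
grab_next_tx(usock_conn_t *conn)
{
        usock_tx_t *tx;

        pthread_mutex_lock(&conn->uc_lock);
        tx = usocklnd_try_piggyback(&conn->uc_tx_list,
                                    &conn->uc_zcack_list);
        pthread_mutex_unlock(&conn->uc_lock);

        return tx; /* NULL: nothing queued, or noop-tx allocation failed */
}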
int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
{
        int rc = 0;

        down_write(&ioctl_list_sem);
        if (cfs_list_empty(&hand->item))
                rc = -ENOENT;
        else
                cfs_list_del_init(&hand->item);
        up_write(&ioctl_list_sem);

        return rc;
}
/**
 * Frees addrrange structures of \a list.
 *
 * For each struct addrrange structure found on \a list it frees
 * cfs_expr_list list attached to it and frees the addrrange itself.
 *
 * \retval none
 */
static void
free_addrranges(cfs_list_t *list)
{
        while (!cfs_list_empty(list)) {
                struct addrrange *ar;

                ar = cfs_list_entry(list->next, struct addrrange, ar_link);

                cfs_expr_list_free_list(&ar->ar_numaddr_ranges);
                cfs_list_del(&ar->ar_link);
                LIBCFS_FREE(ar, sizeof(struct addrrange));
        }
}
/** records that a write is in flight */
void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
{
        struct ll_inode_info *lli = ll_i2info(club->cob_inode);

        ENTRY;
        spin_lock(&lli->lli_lock);
        lli->lli_flags |= LLIF_SOM_DIRTY;
        if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
                cfs_list_add(&page->cpg_pending_linkage,
                             &club->cob_pending_list);
        spin_unlock(&lli->lli_lock);
        EXIT;
}
int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
{
        int rc = 0;

        down_write(&ioctl_list_sem);
        if (!cfs_list_empty(&hand->item))
                rc = -EBUSY;
        else
                cfs_list_add_tail(&hand->item, &ioctl_list);
        up_write(&ioctl_list_sem);

        return rc;
}
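/*
 * Sketch of pairing libcfs_register_ioctl()/libcfs_deregister_ioctl();
 * not part of the original file.  It assumes the DECLARE_IOCTL_HANDLER()
 * macro from libcfs, which statically initialises hand->item to an empty
 * list -- the emptiness checks in both functions rely on that.  The
 * handler my_handle_ioctl() and the setup/teardown hooks are hypothetical.
 */
static int
my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
{
        return -EINVAL; /* this sketch recognises no commands */
}

static DECLARE_IOCTL_HANDLER(my_ioctl_handler, my_handle_ioctl);

static int
my_register(void)
{
        /* -EBUSY means hand->item is already linked into ioctl_list */
        return libcfs_register_ioctl(&my_ioctl_handler);
}

static void
my_unregister(void)
{
        /* -ENOENT means the handler was never registered */
        libcfs_deregister_ioctl(&my_ioctl_handler);
}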
void
ksocknal_tx_fini_callback(ksock_conn_t *conn, ksock_tx_t *tx)
{
        /* remove tx/conn from conn's outgoing queue */
        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
        cfs_list_del(&tx->tx_list);
        if (cfs_list_empty(&conn->ksnc_tx_queue))
                cfs_list_del(&conn->ksnc_tx_list);

        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);

        /* complete send; tx -ref */
        ksocknal_tx_decref(tx);
}
/*
 * Release spill block dbuf hold for all dirty SAs.
 */
void osd_object_sa_dirty_rele(struct osd_thandle *oh)
{
        struct osd_object *obj;

        down(&oh->ot_sa_lock);
        while (!cfs_list_empty(&oh->ot_sa_list)) {
                obj = cfs_list_entry(oh->ot_sa_list.next,
                                     struct osd_object, oo_sa_linkage);
                sa_spill_rele(obj->oo_sa_hdl);
                write_lock(&obj->oo_attr_lock);
                cfs_list_del_init(&obj->oo_sa_linkage);
                write_unlock(&obj->oo_attr_lock);
        }
        up(&oh->ot_sa_lock);
}
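/*
 * Hedged sketch (not in the original file) of how the two helpers pair
 * up: an SA-update path marks the object dirty in the tx handle, and the
 * handle drops every spill-block hold once when the transaction stops by
 * calling osd_object_sa_dirty_rele(oh).  The wrapper name
 * osd_demo_sa_update() is hypothetical; sa_update() is the ZFS SA call,
 * though its exact signature may differ between trees.
 */
static int
osd_demo_sa_update(struct osd_object *obj, sa_attr_type_t type,
                   void *buf, uint32_t buflen, struct osd_thandle *oh)
{
        int rc;

        LASSERT(obj->oo_sa_hdl != NULL);
        LASSERT(oh->ot_tx != NULL);

        rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
        if (rc == 0)
                osd_object_sa_dirty_add(obj, oh); /* keep spill dbuf held
                                                   * until dirty_rele() */
        return rc;
}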
/* XXX:
 * 0. it only works when called from wi->wi_action.
 * 1. when it returns no one shall try to schedule the workitem.
 */
void
cfs_wi_exit(cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);
#ifdef __KERNEL__
        LASSERT (wi->wi_running);
#endif
        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT (!cfs_list_empty(&wi->wi_list));
                cfs_list_del_init(&wi->wi_list);
        }

        LASSERT (cfs_list_empty(&wi->wi_list));
        wi->wi_scheduled = 1; /* LBUG future schedule attempts */

        cfs_wi_sched_unlock(sched);
        return;
}
static inline int have_expired_capa(void)
{
        struct obd_capa *ocapa = NULL;
        int expired = 0;

        /* if ll_capa_list has client capa to expire or ll_idle_capas has
         * expired capa, return 1.
         */
        cfs_spin_lock(&capa_lock);
        if (!cfs_list_empty(ll_capa_list)) {
                ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
                                       c_list);
                expired = capa_is_to_expire(ocapa);
                if (!expired)
                        update_capa_timer(ocapa, capa_renewal_time(ocapa));
        } else if (!cfs_list_empty(&ll_idle_capas)) {
                ocapa = cfs_list_entry(ll_idle_capas.next, struct obd_capa,
                                       c_list);
                expired = capa_is_expired(ocapa);
                if (!expired)
                        update_capa_timer(ocapa, ocapa->c_expiry);
        }
        cfs_spin_unlock(&capa_lock);

        return expired;
}
static inline int
cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
{
        cfs_wi_sched_lock(sched);
        if (sched->ws_shuttingdown) {
                cfs_wi_sched_unlock(sched);
                return 0;
        }

        if (!cfs_list_empty(&sched->ws_runq)) {
                cfs_wi_sched_unlock(sched);
                return 0;
        }
        cfs_wi_sched_unlock(sched);
        return 1;
}
/** records that a write has completed */
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
{
        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
        int rc = 0;

        ENTRY;
        spin_lock(&lli->lli_lock);
        if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
                cfs_list_del_init(&page->cpg_pending_linkage);
                rc = 1;
        }
        spin_unlock(&lli->lli_lock);
        if (rc)
                ll_queue_done_writing(club->cob_inode, 0);
        EXIT;
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!cfs_list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= CFS_PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        cfs_list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
                        if (tage == NULL) {
                                if (printk_ratelimit())
                                        printk(CFS_KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = cfs_smp_processor_id();
                tage->type = tcd->tcd_type;
                cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        cfs_waitq_signal(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}
static void ksocknal_write_space (struct sock *sk)
{
        ksock_conn_t *conn;
        int           wspace;
        int           min_wspace;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        wspace = SOCKNAL_WSPACE(sk);
        min_wspace = SOCKNAL_MIN_WSPACE(sk);

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, wspace, min_wspace, conn,
               (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
                                      " ready" : " blocked"),
               (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                      " scheduled" : " idle"),
               (conn == NULL) ? "" : (cfs_list_empty(&conn->ksnc_tx_queue) ?
                                      " empty" : " queued"));

        if (conn == NULL) { /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space (sk);

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return;
        }

        if (wspace >= min_wspace) { /* got enough space */
                ksocknal_write_callback(conn);

                /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
                 * ENOMEM check in ksocknal_transmit is race-free (think about
                 * it). */
                clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}
static int vvp_object_print(const struct lu_env *env, void *cookie,
                            lu_printer_t p, const struct lu_object *o)
{
        struct ccc_object    *obj   = lu2ccc(o);
        struct inode         *inode = obj->cob_inode;
        struct ll_inode_info *lli;

        (*p)(env, cookie, "(%s %d %d) inode: %p ",
             cfs_list_empty(&obj->cob_pending_list) ? "-" : "+",
             obj->cob_transient_pages, cfs_atomic_read(&obj->cob_mmap_cnt),
             inode);
        if (inode) {
                lli = ll_i2info(inode);
                (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
                     inode->i_ino, inode->i_generation, inode->i_mode,
                     inode->i_nlink, atomic_read(&inode->i_count),
                     lli->lli_clob, PFID(&lli->lli_fid));
        }
        return 0;
}
int
ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t   *kiov = tx->tx_kiov;
        int            rc;
        int            nob;
        int            nkiov;
        int            flags;

        nkiov = tx->tx_nkiov;
        nob = ks_query_kiovs_length(tx->tx_kiov, nkiov);
        flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
                (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
        rc = ks_send_kiovs(sock, tx->tx_kiov, nkiov, flags, 0);
        KsPrint((4, "ksocknal_lib_send_kiov: conn %p sock %p rc %d\n",
                    conn, sock, rc));
        return rc;
}