/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
	int ret = -1, i = 0;
	struct kvm_irq_routing_table *irq_rt;
	struct hlist_node *n;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both.  The guest will ignore
	 * writes to the unused one.
	 */
	rcu_read_lock();
	irq_rt = rcu_dereference(kvm->irq_routing);
	if (irq < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
			irq_set[i++] = *e;
	rcu_read_unlock();

	while (i--) {
		int r;

		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
		if (r < 0)
			continue;

		ret = r + ((ret < 0) ? 0 : ret);
	}

	return ret;
}
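/*
 * Hypothetical caller sketch (not from the source): one way the tristate
 * return contract documented above might be consumed. deliver_gsi() and
 * the chosen error codes are assumptions for illustration only.
 */
static int deliver_gsi(struct kvm *kvm, int irq_source_id, u32 gsi)
{
	int delivered = kvm_set_irq(kvm, irq_source_id, gsi, 1);

	if (delivered < 0)
		return -EINVAL;		/* ignored: masked or unroutable */
	if (delivered == 0)
		return -EBUSY;		/* coalesced with a pending irq */
	return 0;			/* reached at least one CPU */
}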
/* Notify every node polling the DVFS file identified by @dvfs_id. */
static void faf_poll_notify_nodes(unsigned long dvfs_id)
{
	struct dvfs_file_struct *dvfs_file;
	struct faf_polled_fd *polled_fd;
	struct faf_polled_fd_node *polled_fd_node;
	struct hlist_node *pos;

	dvfs_file = _kddm_get_object_no_ft(dvfs_file_struct_ctnr, dvfs_id);
	if (dvfs_file && dvfs_file->file) {
		/* TODO: still required? */
		if (atomic_read(&dvfs_file->file->f_count) == 0)
			dvfs_file->file = NULL;
	}
	if (!dvfs_file || !dvfs_file->file)
		goto out_put_dvfs_file;

	mutex_lock(&faf_polled_fd_mutex);
	polled_fd = __faf_polled_fd_find(dvfs_id);
	if (!polled_fd)
		goto out_unlock;
	hlist_for_each_entry(polled_fd_node, pos, &polled_fd->nodes, list)
		faf_poll_notify_node(polled_fd_node->node_id, dvfs_id);

out_unlock:
	mutex_unlock(&faf_polled_fd_mutex);
out_put_dvfs_file:
	_kddm_put_object(dvfs_file_struct_ctnr, dvfs_id);
}
/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct hlist_head *list)
{
	struct mount *mnt;

	hlist_for_each_entry(mnt, list, mnt_hash)
		__propagate_umount(mnt);
	return 0;
}
/* Mark @which as changed on every icq attached to @ioc. */
void ioc_set_changed(struct io_context *ioc, int which)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		set_bit(which, &icq->changed);
}
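/*
 * Hedged sketch of the consumer side (an assumption, not from the source):
 * a reader would observe the flag with test_and_clear_bit() so that each
 * change is acted on exactly once. icq_handle_change() is a made-up name.
 */
static void icq_check_changed(struct io_cq *icq, int which)
{
	if (test_and_clear_bit(which, &icq->changed))
		icq_handle_change(icq, which);
}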
/* Calculate mask of events for a list of marks */
u32 fsnotify_recalc_mask(struct hlist_head *head)
{
	u32 new_mask = 0;
	struct fsnotify_mark *mark;

	hlist_for_each_entry(mark, head, obj_list)
		new_mask |= mark->mask;
	return new_mask;
}
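/*
 * Hedged usage sketch: after adding or removing a mark, a caller might
 * recompute an inode's aggregate mask under i_lock. The i_fsnotify_mask
 * and i_fsnotify_marks field names follow the 3.x-era inode layout and
 * are an assumption here.
 */
static void recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode->i_fsnotify_mask = fsnotify_recalc_mask(&inode->i_fsnotify_marks);
	spin_unlock(&inode->i_lock);
}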
/* Debug-print every dentry aliased to @inode. */
void au_dpri_dalias(struct inode *inode)
{
	struct dentry *d;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(d, &inode->i_dentry, d_alias)
		au_dpri_dentry(d);
	spin_unlock(&inode->i_lock);
}
/* Invoke every registered link-failure callback for @ax25. */
void ax25_link_failed(ax25_cb *ax25, int reason)
{
	struct ax25_linkfail *lf;

	spin_lock_bh(&linkfail_lock);
	hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
		lf->func(ax25, reason);
	spin_unlock_bh(&linkfail_lock);
}
/* Flush every block in every bucket of block_htable. */
void sync_blocks(void)
{
	struct hlist_head *head = block_htable;
	int i = BLOCK_HASH_SIZE;
	struct hlist_node *node;
	struct block *block;

	while (i > 0) {
		hlist_for_each_entry(block, node, head, b_hnode)
			flush_block(block);
		head++;
		i--;
	}
}
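/*
 * Hypothetical insert-side helper showing how entries would land in the
 * buckets that sync_blocks() drains. block_hash() is a made-up hash
 * function; only block_htable, BLOCK_HASH_SIZE and b_hnode come from the
 * snippet above.
 */
static void add_block(struct block *block)
{
	unsigned int h = block_hash(block) % BLOCK_HASH_SIZE;

	hlist_add_head(&block->b_hnode, &block_htable[h]);
}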
/* Flush every FIB table and, if anything was removed, the route cache. */
static void fib_flush(struct net *net)
{
	int flushed = 0;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist)
			flushed += tb->tb_flush(tb);
	}

	if (flushed)
		rt_cache_flush(net, -1);
}
void au_dpri_whlist(struct au_nhash *whlist)
{
	unsigned long ul, n;
	struct hlist_head *head;
	struct au_vdir_wh *pos;

	n = whlist->nh_num;
	head = whlist->nh_head;
	for (ul = 0; ul < n; ul++) {
		hlist_for_each_entry(pos, head, wh_hash)
			dpri("b%d, %.*s, %d\n",
			     pos->wh_bindex, pos->wh_str.len,
			     pos->wh_str.name, pos->wh_str.len);
		head++;
	}
}
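/*
 * Self-contained userspace miniature of the iteration pattern used
 * throughout these snippets: an intrusive list node embedded in a payload
 * struct, recovered with a container_of() computation. It models the
 * 3-argument form of the macro; the snippets that pass an extra
 * struct hlist_node * cursor use the older kernel form of the same idea.
 * All names here (hnode, hhead, struct item, hlist_for_each_entry_sketch)
 * are made up for illustration; this is a sketch, not <linux/list.h>.
 */
#include <stddef.h>
#include <stdio.h>

struct hnode { struct hnode *next; };
struct hhead { struct hnode *first; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define hlist_for_each_entry_sketch(pos, head, member)			\
	for (struct hnode *_n = (head)->first;				\
	     _n && ((pos) = container_of(_n, __typeof__(*(pos)), member), 1); \
	     _n = _n->next)

struct item {
	int val;
	struct hnode link;
};

int main(void)
{
	struct item c = { .val = 3, .link = { NULL } };
	struct item b = { .val = 2, .link = { &c.link } };
	struct item a = { .val = 1, .link = { &b.link } };
	struct hhead head = { &a.link };
	struct item *pos;

	hlist_for_each_entry_sketch(pos, &head, link)
		printf("%d\n", pos->val);	/* prints 1, 2, 3 */
	return 0;
}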