/*
 * Allocate and register a new cluster barrier.
 *
 * @key: barrier identifier; pass 0 to have a fresh unique id generated
 *       from barrier_id_root.
 *
 * Returns the new barrier on success, or an ERR_PTR value on failure:
 *   -EEXIST if a barrier with this key is already registered,
 *   -ENOMEM if the allocation fails,
 *   or the error code returned by hashtable_add().
 */
struct cluster_barrier *alloc_cluster_barrier(unique_id_t key)
{
	struct cluster_barrier *barrier;
	int side;
	int err;

	if (!key)
		key = get_unique_id(&barrier_id_root);

	/* Refuse to register the same key twice. */
	if (hashtable_find(barrier_table, key))
		return ERR_PTR(-EEXIST);

	barrier = kmalloc(sizeof(*barrier), GFP_KERNEL);
	if (!barrier)
		return ERR_PTR(-ENOMEM);

	/* Reset both toggle sides of the barrier to the idle state. */
	for (side = 0; side < 2; side++) {
		krgnodes_clear(barrier->core[side].nodes_in_barrier);
		krgnodes_clear(barrier->core[side].nodes_to_wait);
		init_waitqueue_head(&barrier->core[side].waiting_tsk);
		barrier->core[side].in_barrier = 0;
	}

	spin_lock_init(&barrier->lock);
	barrier->id.key = key;
	barrier->id.toggle = 0;

	err = hashtable_add(barrier_table, key, barrier);
	if (err) {
		kfree(barrier);
		return ERR_PTR(err);
	}

	return barrier;
}
/*
 * KDDM remove callback: tear down the local copy of a distributed
 * mm_struct when its object is removed from the set.
 *
 * @object: the mm_struct being removed (passed as void * by the KDDM layer)
 * @set:    owning KDDM set (unused here)
 * @objid:  object id within the set (unused here)
 *
 * Returns 0.
 */
int mm_remove_object (void *object, struct kddm_set *set, objid_t objid) { struct mm_struct *mm = object; /* Ensure that no thread uses this mm_struct copy: taking and releasing remove_sem for writing drains any reader still inside. */ down_write(&mm->remove_sem); up_write(&mm->remove_sem); /* Take the mmap_sem to avoid race condition with clean_up_mm_struct. The mm_count reference taken here keeps the struct alive across mmput(); it is dropped by the final mmdrop() below. */ atomic_inc(&mm->mm_count); down_write(&mm->mmap_sem); mmput(mm); up_write(&mm->mmap_sem); /* Detach the struct from the distributed layer before the last put. */ mm->mm_id = 0; krgnodes_clear(mm->copyset); mmdrop(mm); return 0; }
void init_node_discovering(void) { int i; krgnodes_setall(krgnode_possible_map); krgnodes_clear(krgnode_present_map); krgnodes_clear(krgnode_online_map); #ifdef CONFIG_KRG_HOTPLUG for (i = 0; i < KERRIGHED_MAX_NODES; i++) { universe[i].state = 0; universe[i].subid = -1; } #endif if (ISSET_KRG_INIT_FLAGS(KRG_INITFLAGS_NODEID)) { #ifdef CONFIG_KRG_HOTPLUG universe[kerrighed_node_id].state = 1; #endif set_krgnode_present(kerrighed_node_id); } }
/*
 * Work queue handler: sends one asynchronous RPC_PINGPONG message to
 * node 0, carrying a monotonically increasing sequence number.
 *
 * Fixes over the previous version: the krgnodemask_t that was built with
 * krgnodes_clear()/krgnode_set() was never passed to any call (dead code),
 * and r was initialised to 0 only to be immediately overwritten.
 *
 * @data: work item (unused)
 */
static void rpc_worker(struct work_struct *data)
{
	/* Sequence number; persists across invocations of the worker. */
	static unsigned long l = 0;
	int r;

	l++;
	r = rpc_async(RPC_PINGPONG, 0, &l, sizeof(l));
	if (r < 0)
		return;	/* best effort: silently drop on send failure */
}