/*
 * Look up and pin the node currently hosting global pid @pid.
 *
 * Takes the task KDDM read lock for @pid and returns the node id recorded
 * in its task_kddm_object. On success the lock is left HELD; the caller
 * must release it with krg_unlock_pid_location(). Returns
 * KERRIGHED_NODE_ID_NONE for non-global pids (GLOBAL_PID_MASK not set) or
 * when no task object exists — in those cases the lock is not held on
 * return.
 */
kerrighed_node_t krg_lock_pid_location(pid_t pid)
{
	kerrighed_node_t node = KERRIGHED_NODE_ID_NONE;
	struct task_kddm_object *obj;
#ifdef CONFIG_KRG_EPM
	/* Delay between retries while the task is migrating. */
	struct timespec back_off_time = {
		.tv_sec = 0,
		.tv_nsec = 1000000 /* 1 ms */
	};
#endif

	/* Only globally-visible pids have a location; bail out otherwise. */
	if (!(pid & GLOBAL_PID_MASK))
		goto out;

	for (;;) {
		obj = krg_task_readlock(pid);
		if (likely(obj)) {
			node = obj->node;
		} else {
			/*
			 * No task object: drop the lock taken by
			 * krg_task_readlock() and report "no location".
			 * NOTE(review): unlock on the NULL path presumably
			 * matches krg_task_readlock()'s contract — confirm.
			 */
			krg_task_unlock(pid);
			break;
		}
#ifdef CONFIG_KRG_EPM
		/* A valid node id means the task is settled; keep the lock. */
		if (likely(node != KERRIGHED_NODE_ID_NONE))
			break;
		/*
		 * Task is migrating.
		 * Back off and hope that it will stop migrating.
		 */
		krg_task_unlock(pid);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timespec_to_jiffies(&back_off_time) + 1);
#else
		/* Without EPM tasks never migrate: one lookup is enough. */
		break;
#endif
	}
out:
	return node;
}

/*
 * Release the location lock taken by a successful
 * krg_lock_pid_location() call for @pid.
 */
void krg_unlock_pid_location(pid_t pid)
{
	krg_task_unlock(pid);
}
/*
 * Create (or attach to) the KDDM object tracking @pid cluster-wide.
 *
 * Grabs the pid KDDM object for the global id of @pid, and, under
 * pid_kddm_lock, links it with the struct pid and (when present) with the
 * task's task_kddm_object via mutually referencing pointers. @early
 * non-zero marks the attach as pending (attach_pending) and asserts that
 * no kddm_obj is linked yet. Idempotent for a pid already linked: the
 * final BUG_ON only checks the existing link is the same object.
 *
 * Returns 0 on success or the PTR_ERR of the failed KDDM grab.
 */
static int create_pid_kddm_object(struct pid *pid, int early)
{
	int nr = pid_knr(pid);
	struct pid_kddm_object *obj;
	struct task_kddm_object *task_obj;

	obj = _kddm_grab_object(pid_kddm_set, nr);
	if (IS_ERR(obj)) {
		/* Drop the reference taken by the failed grab. */
		_kddm_put_object(pid_kddm_set, nr);
		return PTR_ERR(obj);
	}
	BUG_ON(!obj);

	/*
	 * Lock ordering: task KDDM read lock first, then the pid spinlock —
	 * NOTE(review): presumably this matches the order used elsewhere in
	 * the pid/task KDDM code; confirm before reordering.
	 */
	task_obj = krg_task_readlock(nr);
	spin_lock(&pid_kddm_lock);
	/* An early attach must be the first link made for this pid. */
	BUG_ON(early && pid->kddm_obj);
	if (!pid->kddm_obj) {
		obj->pid = pid;
		obj->active = 1;
		if (early)
			obj->attach_pending = 1;
		BUG_ON(obj->task_obj);
		if (task_obj) {
			BUG_ON(task_obj->pid_obj);
			/*
			 * These rcu_assign_pointer are not really needed,
			 * but are cleaner :)
			 */
			rcu_assign_pointer(obj->task_obj, task_obj);
			rcu_assign_pointer(obj->task_obj->pid_obj, obj);
		}
		pid->kddm_obj = obj;
	}
	/* Whether linked now or earlier, it must be this very object. */
	BUG_ON(pid->kddm_obj != obj);
	spin_unlock(&pid_kddm_lock);
	krg_task_unlock(nr);

	_kddm_put_object(pid_kddm_set, nr);
	return 0;
}
/*
 * Link the task KDDM object of global pid @nr into its struct pid.
 *
 * Pins the struct pid with krg_get_pid(), performs the link under the
 * task KDDM read lock, then drops every reference taken here.
 * Returns 0 on success, -ENOMEM when the struct pid cannot be obtained.
 */
int __krg_pid_link_task(pid_t nr)
{
	struct task_kddm_object *task_obj;
	struct pid *pid = krg_get_pid(nr);

	if (!pid)
		return -ENOMEM;

	/* Link under the task KDDM read lock, then release it. */
	task_obj = krg_task_readlock(nr);
	__pid_link_task(pid, task_obj);
	krg_task_unlock(nr);

	/* Drop the references taken by krg_get_pid(). */
	krg_end_get_pid(pid);
	krg_put_pid(pid);

	return 0;
}
void __krg_task_unlock(struct task_struct *task) { krg_task_unlock(task_pid_knr(task)); }