/**
 * Grab the task KDDM object for @pid and acquire it for writing.
 * @author Pascal Gallard
 *
 * @param pid     global pid whose task object is wanted
 * @param nested  non-zero when the caller already holds another task
 *                object's sem, so lockdep needs the nested annotation
 *
 * @return the write-locked object, or NULL when @pid has no KDDM object
 *         (non-global pid, lookup failure, or a dying object).
 *         NOTE(review): on success the caller presumably releases via a
 *         matching task unlock helper that drops obj->sem and puts the
 *         KDDM object — confirm against the unlock path.
 */
static struct task_kddm_object *task_writelock(pid_t pid, int nested)
{
	struct task_kddm_object *obj;

	/*
	 * Filter well known cases of no task kddm object.
	 * Assumes only pids with GLOBAL_PID_MASK set can have a KDDM
	 * object — TODO confirm against pid allocation.
	 */
	if (!(pid & GLOBAL_PID_MASK))
		return NULL;

	obj = _kddm_grab_object_no_ft(task_kddm_set, pid);
	if (likely(obj)) {
		if (!nested)
			down_write(&obj->sem);
		else
			/* Avoid a false lockdep report when two task
			 * objects are write-locked in sequence. */
			down_write_nested(&obj->sem, SINGLE_DEPTH_NESTING);

		if (obj->write_locked == 2) {
			/* Dying object: back out completely and report
			 * "no object" to the caller. */
			up_write(&obj->sem);
			_kddm_put_object(task_kddm_set, pid);
			return NULL;
		}
		/* Marker for unlock. Dirty but temporary. */
		obj->write_locked = 1;
	}
	return obj;
}
/**
 * Look up and lock the SysV shm segment with ipc id @id.
 *
 * @param ids  ipc id namespace carrying the Kerrighed kddm operations
 * @param id   full ipc id of the segment (index encoded in low bits)
 *
 * @return the segment's kern_ipc_perm with its mutex held, or
 *         ERR_PTR(-EINVAL) when the segment does not exist or was
 *         concurrently deleted.
 *
 * NOTE(review): on success this intentionally returns with BOTH the RCU
 * read lock held and the data KDDM object still grabbed — mirroring the
 * vanilla ipc_lock() contract. The matching unlock callback is presumed
 * to release both; confirm against kcb_ipc_shm_unlock.
 */
static struct kern_ipc_perm *kcb_ipc_shm_lock(struct ipc_ids *ids, int id)
{
	shmid_object_t *shp_object;
	struct shmid_kernel *shp;
	int index;

	rcu_read_lock();

	index = ipcid_to_idx(id);
	shp_object = _kddm_grab_object_no_ft(ids->krgops->data_kddm_set,
					     index);
	if (!shp_object)
		goto error;

	/* The KDDM object must always carry a live local segment. */
	shp = shp_object->local_shp;
	BUG_ON(!shp);

	mutex_lock(&shp->shm_perm.mutex);
	if (shp->shm_perm.deleted) {
		/* Lost a race with deletion: undo everything. */
		mutex_unlock(&shp->shm_perm.mutex);
		goto error;
	}

	return &(shp->shm_perm);

error:
	_kddm_put_object(ids->krgops->data_kddm_set, index);
	rcu_read_unlock();

	return ERR_PTR(-EINVAL);
}
/**
 * Get a cluster-wide usable struct pid for pid number @nr.
 *
 * Fast path: the pid is already known locally and its KDDM object is
 * active — just record an attach under pid_kddm_lock. Slow path: grab
 * the KDDM object to (re)activate it before taking the reference.
 *
 * @param nr  pid number to look up
 * @return the struct pid, or NULL when the KDDM object cannot be
 *         grabbed; falls back to no_pid() when @nr is unknown locally.
 */
struct pid *krg_get_pid(int nr)
{
	struct pid_kddm_object *obj;
	struct pid *pid;

	rcu_read_lock();
	pid = find_kpid(nr);
	rcu_read_unlock();
	/*
	 * No need to get a reference on pid since we know that it is used on
	 * another node: nobody will free it for the moment.
	 */
	if (!pid)
		return no_pid(nr);

	spin_lock(&pid_kddm_lock);
	obj = pid->kddm_obj;
	BUG_ON(!obj);
	BUG_ON(obj->pid != pid);
	if (likely(obj->active)) {
		/* Fast path: object already active; just record the
		 * pending attach and return. */
		obj->attach_pending++;
		spin_unlock(&pid_kddm_lock);
		return pid;
	}
	/* Slow path: we must grab the kddm object. */
	spin_unlock(&pid_kddm_lock);

	obj = _kddm_grab_object_no_ft(pid_kddm_set, nr);
	if (IS_ERR(obj))
		return NULL;
	/* The grab must hand back the very object linked to this pid. */
	BUG_ON(obj != pid->kddm_obj);
	BUG_ON(obj->pid != pid);

	spin_lock(&pid_kddm_lock);
	/* NOTE(review): __get_pid presumably activates the object and/or
	 * bumps its attach count — confirm at its definition. */
	__get_pid(obj);
	spin_unlock(&pid_kddm_lock);

	_kddm_put_object(pid_kddm_set, nr);

	return pid;
}
/**
 * Slow path of krg_get_pid(): pid number @nr is not known locally.
 *
 * Grabs the pid KDDM object, re-checks for a concurrent local
 * instantiation under pid_kddm_lock, and allocates a local struct pid
 * linked to the object when none exists yet.
 *
 * @param nr  pid number to instantiate locally
 * @return the (possibly freshly allocated) struct pid, or NULL when the
 *         KDDM grab or the pid allocation fails.
 */
static struct pid *no_pid(int nr)
{
	struct pid_namespace *ns;
	struct pid_kddm_object *obj;
	struct pid *pid;

	obj = _kddm_grab_object_no_ft(pid_kddm_set, nr);
	if (IS_ERR(obj))
		return NULL;
	BUG_ON(!obj);

	spin_lock(&pid_kddm_lock);
	rcu_read_lock();
	pid = find_kpid(nr); /* Double check once locked */
	rcu_read_unlock();
	/*
	 * No need to get a reference on pid since we know that it is used on
	 * another node: nobody will free it for the moment.
	 */
	if (!pid) {
		/* Really unknown here: allocate a local instance with the
		 * cluster-wide number and wire it to the KDDM object. */
		ns = find_get_krg_pid_ns();
		pid = __alloc_pid(ns, &nr);
		put_pid_ns(ns);
		if (!pid)
			/* Allocation failed: pid is NULL, so we return
			 * NULL after releasing lock and object. */
			goto out_unlock;
		obj->pid = pid;
		pid->kddm_obj = obj;
	}
	BUG_ON(pid->kddm_obj != obj);
	__get_pid(obj);

out_unlock:
	spin_unlock(&pid_kddm_lock);
	_kddm_put_object(pid_kddm_set, nr);

	return pid;
}
void krg_ipc_shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { struct kddm_set *mm_set; int index; key_t key; index = ipcid_to_idx(shp->shm_perm.id); key = shp->shm_perm.key; mm_set = shp->shm_file->f_dentry->d_inode->i_mapping->kddm_set; if (key != IPC_PRIVATE) { _kddm_grab_object_no_ft(shm_ids(ns).krgops->key_kddm_set, key); _kddm_remove_frozen_object(shm_ids(ns).krgops->key_kddm_set, key); } local_shm_unlock(shp); _kddm_remove_frozen_object(shm_ids(ns).krgops->data_kddm_set, index); _destroy_kddm_set(mm_set); krg_ipc_rmid(&shm_ids(ns), index); }