void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); }
void krg_shm_exit_ns(struct ipc_namespace *ns) { if (shm_ids(ns).krgops) { _destroy_kddm_set(shm_ids(ns).krgops->data_kddm_set); _destroy_kddm_set(shm_ids(ns).krgops->key_kddm_set); _destroy_kddm_set(shm_ids(ns).krgops->map_kddm_set); kfree(shm_ids(ns).krgops); } }
void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; if (shm_ids(ns).in_use == 0) return; /* Destroy all already created segments, but not mapped yet */ down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); up_write(&shm_ids(ns).rwsem); }
/*
 * Look up and lock the segment @id in @ns, verifying the sequence
 * number (ipc_lock_check), and convert to the containing shmid_kernel.
 *
 * NOTE(review): unlike shm_obtain_object_check(), the return value of
 * ipc_lock_check() is not IS_ERR-checked before container_of().  This
 * is only safe if shm_perm is the FIRST member of struct shmid_kernel
 * (offset 0), so an ERR_PTR passes through unchanged for the caller to
 * test — confirm against the struct definition.
 */
static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id); return container_of(ipcp, struct shmid_kernel, shm_perm); }
/*
 * Validate that @id still names segment @s (sequence-number check).
 * Returns 0 when the id matches, -EIDRM when the segment was removed
 * and the slot reused.
 */
static inline int shm_checkid(struct ipc_namespace *ns,
			      struct shmid_kernel *s, int id)
{
	return ipc_checkid(&shm_ids(ns), &s->shm_perm, id) ? -EIDRM : 0;
}
/*
 * Initialize the shm state of a fresh IPC namespace: install the
 * default limits (SHMMAX/SHMALL/SHMMNI), zero the page accounting and
 * set up the id table.
 */
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_tot = 0;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmax = SHMMAX;
	ipc_init_ids(&shm_ids(ns));
}
/*
 * RCU-side lookup of segment @id with sequence-number validation; no
 * lock is taken.  Returns the shmid_kernel, or an ERR_PTR propagated
 * from ipc_obtain_object_check().
 */
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *perm = ipc_obtain_object_check(&shm_ids(ns), id);

	return IS_ERR(perm) ? ERR_CAST(perm)
			    : container_of(perm, struct shmid_kernel, shm_perm);
}
/** Notify the creation of a new shm segment to Kerrighed. * * @author Renaud Lottiaux */ int krg_ipc_shm_newseg (struct ipc_namespace *ns, struct shmid_kernel *shp) { shmid_object_t *shp_object; struct kddm_set *kddm; long *key_index; int index, err; BUG_ON(!shm_ids(ns).krgops); index = ipcid_to_idx(shp->shm_perm.id); shp_object = _kddm_grab_object_manual_ft( shm_ids(ns).krgops->data_kddm_set, index); BUG_ON(shp_object); shp_object = kmem_cache_alloc(shmid_object_cachep, GFP_KERNEL); if (!shp_object) { err = -ENOMEM; goto err_put; } /* Create a KDDM set to host segment pages */ kddm = _create_new_kddm_set (kddm_def_ns, 0, SHM_MEMORY_LINKER, kerrighed_node_id, PAGE_SIZE, &shp->shm_perm.id, sizeof(int), 0); if (IS_ERR(kddm)) { err = PTR_ERR(kddm); goto err_put; } shp->shm_file->f_dentry->d_inode->i_mapping->kddm_set = kddm; shp->shm_file->f_op = &krg_shm_file_operations; shp_object->set_id = kddm->id; shp_object->local_shp = shp; _kddm_set_object(shm_ids(ns).krgops->data_kddm_set, index, shp_object); if (shp->shm_perm.key != IPC_PRIVATE) { key_index = _kddm_grab_object(shm_ids(ns).krgops->key_kddm_set, shp->shm_perm.key); *key_index = index; _kddm_put_object (shm_ids(ns).krgops->key_kddm_set, shp->shm_perm.key); } shp->shm_perm.krgops = shm_ids(ns).krgops; err_put: _kddm_put_object(shm_ids(ns).krgops->data_kddm_set, index); return 0; }
/*
 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
 * is held to protect access to the idr tree.
 *
 * Returns the locked shmid_kernel for @id, or the ERR_PTR propagated
 * from ipc_lock_down().
 */
static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
						 int id)
{
	struct kern_ipc_perm *perm;

	perm = ipc_lock_down(&shm_ids(ns), id);
	if (IS_ERR(perm))
		return (struct shmid_kernel *)perm;

	return container_of(perm, struct shmid_kernel, shm_perm);
}
/*
 * Tear down the shm state of a dying IPC namespace (mutex-based id
 * table variant): remove every remaining segment, then free the id
 * table itself.
 */
void shm_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids(ns).mutex);
	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		/* NULL means the slot is unused in this API — skip it. */
		shp = shm_lock(ns, i);
		if (shp == NULL)
			continue;

		/* presumably removes the segment and drops shp's lock —
		 * verify against do_shm_rmid() */
		do_shm_rmid(ns, shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
	kfree(ns->ids[IPC_SHM_IDS]);
	/* Poison the slot so stale lookups fail loudly. */
	ns->ids[IPC_SHM_IDS] = NULL;
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	/* The id must still exist: we hold an attach reference on it. */
	BUG_ON(IS_ERR(shp));
	/* Record last detacher pid/time and drop the attach count. */
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	/* Destroy now if marked removed and no attaches remain;
	 * either branch releases the segment lock. */
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}
/*
 * Kerrighed-side destruction of shm segment @shp: withdraw its key and
 * id entries from the cluster-wide KDDM sets, destroy the per-segment
 * page KDDM set and release the id.  Statement order is significant:
 * cluster-visible state is removed before the local segment goes away.
 */
void krg_ipc_shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct kddm_set *mm_set;
	int index;
	key_t key;

	index = ipcid_to_idx(shp->shm_perm.id);
	key = shp->shm_perm.key;
	/* Per-segment page set, installed at segment creation time. */
	mm_set = shp->shm_file->f_dentry->d_inode->i_mapping->kddm_set;

	/* Drop the key -> index mapping (only published for real keys). */
	if (key != IPC_PRIVATE) {
		_kddm_grab_object_no_ft(shm_ids(ns).krgops->key_kddm_set, key);
		_kddm_remove_frozen_object(shm_ids(ns).krgops->key_kddm_set,
					   key);
	}

	local_shm_unlock(shp);

	/* Remove the segment descriptor from the data set, then the
	 * page set itself, then release the id locally. */
	_kddm_remove_frozen_object(shm_ids(ns).krgops->data_kddm_set, index);
	_destroy_kddm_set(mm_set);

	krg_ipc_rmid(&shm_ids(ns), index);
}
/*
 * Tear down the shm state of a dying IPC namespace (idr/rw_mutex
 * variant).  Walks ids until @in_use segments have been removed; idr
 * slots can be sparse, hence the NULL-skip and the separate @total
 * counter.
 */
void shm_exit_ns(struct ipc_namespace *ns)
{
	struct shmid_kernel *shp;
	int next_id;
	int total, in_use;

	down_write(&shm_ids(ns).rw_mutex);

	/* Snapshot under the writer lock; nobody can add entries now. */
	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;
		ipc_lock_by_ptr(&shp->shm_perm);
		/* presumably removes the segment and drops its lock —
		 * verify against do_shm_rmid() */
		do_shm_rmid(ns, shp);
		total++;
	}
	up_write(&shm_ids(ns).rw_mutex);

	kfree(ns->ids[IPC_SHM_IDS]);
	/* Poison the slot so stale lookups fail loudly. */
	ns->ids[IPC_SHM_IDS] = NULL;
}
/*
 * Flush the three shm KDDM sets (data, map, key) of @ns through
 * ipc_flusher.  Always returns 0.
 */
int krg_shm_flush_set(struct ipc_namespace *ns)
{
	struct krgipc_ops *shmops;

	/*
	 * Fix: the original took sem_ids(ns).rw_mutex — a copy-paste from
	 * the semaphore variant — which does not protect shm state at all.
	 * Flushing the shm sets must serialize against the shm id table.
	 */
	down_write(&shm_ids(ns).rw_mutex);

	shmops = shm_ids(ns).krgops;

	_kddm_flush_set(shmops->data_kddm_set, ipc_flusher, NULL);
	_kddm_flush_set(shmops->map_kddm_set, ipc_flusher, NULL);
	_kddm_flush_set(shmops->key_kddm_set, ipc_flusher, NULL);

	up_write(&shm_ids(ns).rw_mutex);

	return 0;
}
/*
 * Remove segment @s from the shm id table of @ns.
 */
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	struct ipc_ids *ids = &shm_ids(ns);

	ipc_rmid(ids, &s->shm_perm);
}
/*
 * Tear down the shm state of a dying IPC namespace: reap every
 * remaining segment via do_shm_rmid, then release the idr itself.
 */
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
/*
 * Insert @shp into the shm id table of @ns, bounded by the namespace's
 * SHMMNI limit.  Propagates ipc_addid()'s return value.
 */
static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct ipc_ids *ids = &shm_ids(ns);

	return ipc_addid(ids, &shp->shm_perm, ns->shm_ctlmni);
}
/*
 * Remove id @id from the shm id table of @ns and return the segment
 * that occupied it (cast from the generic ipc_rmid() return).
 */
static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *removed = ipc_rmid(&shm_ids(ns), id);

	return (struct shmid_kernel *)removed;
}
/*
 * Withdraw the key -> index mapping for @key from the cluster-wide
 * shm key KDDM set of @ns.
 */
void krg_ipc_shm_rmkey(struct ipc_namespace *ns, key_t key)
{
	struct kddm_set *keys = shm_ids(ns).krgops->key_kddm_set;

	_kddm_remove_object(keys, key);
}
int krg_shm_init_ns(struct ipc_namespace *ns) { int r; struct krgipc_ops *shm_ops = kmalloc(sizeof(struct krgipc_ops), GFP_KERNEL); if (!shm_ops) { r = -ENOMEM; goto err; } shm_ops->map_kddm_set = create_new_kddm_set(kddm_def_ns, SHMMAP_KDDM_ID, IPCMAP_LINKER, KDDM_RR_DEF_OWNER, sizeof(ipcmap_object_t), KDDM_LOCAL_EXCLUSIVE); if (IS_ERR(shm_ops->map_kddm_set)) { r = PTR_ERR(shm_ops->map_kddm_set); goto err_map; } shm_ops->key_kddm_set = create_new_kddm_set(kddm_def_ns, SHMKEY_KDDM_ID, SHMKEY_LINKER, KDDM_RR_DEF_OWNER, sizeof(long), KDDM_LOCAL_EXCLUSIVE); if (IS_ERR(shm_ops->key_kddm_set)) { r = PTR_ERR(shm_ops->key_kddm_set); goto err_key; } shm_ops->data_kddm_set = create_new_kddm_set(kddm_def_ns, SHMID_KDDM_ID, SHMID_LINKER, KDDM_RR_DEF_OWNER, sizeof(shmid_object_t), KDDM_LOCAL_EXCLUSIVE | KDDM_NEED_SAFE_WALK); if (IS_ERR(shm_ops->data_kddm_set)) { r = PTR_ERR(shm_ops->data_kddm_set); goto err_data; } shm_ops->ipc_lock = kcb_ipc_shm_lock; shm_ops->ipc_unlock = kcb_ipc_shm_unlock; shm_ops->ipc_findkey = kcb_ipc_shm_findkey; shm_ids(ns).krgops = shm_ops; return 0; err_data: _destroy_kddm_set(shm_ops->key_kddm_set); err_key: _destroy_kddm_set(shm_ops->map_kddm_set); err_map: kfree(shm_ops); err: return r; }
/*
 * Tear down the shm state of a dying IPC namespace by reaping every
 * remaining segment through do_shm_rmid.
 */
void shm_exit_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids = &shm_ids(ns);

	free_ipcs(ns, ids, do_shm_rmid);
}
/*
 * Apply @ipc_cb to every SysV shm id of the initial IPC namespace
 * (init_nsproxy.ipc_ns) via for_all_ipc_ids().
 *
 * NOTE(review): the parameter type `string` is not a standard C type.
 * Unless the project typedefs it, this will not compile; given that
 * for_all_ipc_ids() presumably invokes @ipc_cb per id, the intended
 * type looks like a callback function pointer — verify against the
 * for_all_ipc_ids() prototype and fix the declaration.
 */
void for_all_ipc_shm(string ipc_cb) { for_all_ipc_ids(init_nsproxy.ipc_ns, &shm_ids(init_nsproxy.ipc_ns), ipc_cb); }