/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
int proc_fd_permission(struct inode *inode, int mask)
{
	int ret;

	/* An ordinary grant always suffices. */
	ret = generic_permission(inode, mask);
	if (!ret)
		return 0;

	/* Otherwise, a task may still look at its own fd directory. */
	if (task_tgid(current) == proc_pid(inode))
		return 0;

	return ret;
}
/** * Attempt to set an fcntl lock. * For now, this just goes away to the server. Later it may be more awesome. */ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 0; u16 op = CEPH_MDS_OP_SETFILELOCK; fl->fl_nspid = get_pid(task_tgid(current)); dout("ceph_lock, fl_pid:%d", fl->fl_pid); /* set wait bit as appropriate, then make command as Ceph expects it*/ if (F_SETLKW == cmd) wait = 1; if (F_GETLK == cmd) op = CEPH_MDS_OP_GETFILELOCK; if (F_RDLCK == fl->fl_type) lock_cmd = CEPH_LOCK_SHARED; else if (F_WRLCK == fl->fl_type) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl); if (!err) { if ( op != CEPH_MDS_OP_GETFILELOCK ){ dout("mds locked, locking locally"); err = posix_lock_file(file, fl, NULL); if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { /* undo! This should only happen if * the kernel detects local * deadlock. */ ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on posix_lock_file, undid lock", err); } } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; }
/*
 * Re-assign ownership of the node at 'alignaddr' to the calling task.
 * Returns 0 on success, -EINVAL on bad arguments, -EFAULT if the node
 * was already freed, or the lookup error from _FindNode_alignaddr().
 */
int memory_engine_takeover(memory_engine_t *engine, int alignaddr)
{
	memory_node_t *node = NULL;
	struct task_struct *grptask = NULL;
	int res;

	shm_debug("mem_engine_takeover start, (0x%08X)\n", alignaddr);

	if (engine == NULL || alignaddr == 0)
		return -EINVAL;

	down(&(engine->m_mutex));

	/* locate the node that owns alignaddr */
	res = _FindNode_alignaddr(engine, alignaddr, &node);
	if (res != 0)
		goto err_exit;

	/* a node still linked on the free list must not be taken over */
	if (node->m_next_free != NULL) {
		shm_error("node(%#.8x) already freed\n", alignaddr);
		res = -EFAULT;
		goto err_exit;
	}

	/* re-tag the node with the calling task's tgid and its name */
	node->m_usrtaskid = task_tgid_vnr(current);
	grptask = pid_task(task_tgid(current), PIDTYPE_PID);
	if (grptask == NULL)
		memset(node->m_usrtaskname, 0, 16);
	else
		strncpy(node->m_usrtaskname, grptask->comm, 16);

	up(&(engine->m_mutex));
	shm_debug("memory_engine_takeover OK.\n");
	return 0;

err_exit:
	up(&(engine->m_mutex));
	shm_error("memory_engine_takeover failed!!! (0x%08X)\n", alignaddr);
	return res;
}
/* * /proc/pid/fd needs a special permission handler so that a process can still * access /proc/self/fd after it has executed a setuid(). */ int proc_fd_permission(struct inode *inode, int mask) { struct task_struct *task; int rv = generic_permission(inode, mask); if (task_tgid(current) == proc_pid(inode)) rv = 0; task = get_proc_task(inode); if (task == NULL) return rv; if (gr_acl_handle_procpidmem(task)) rv = -EACCES; put_task_struct(task); return rv; }
int ceph_flock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 1; fl->fl_nspid = get_pid(task_tgid(current)); dout("ceph_flock, fl_pid:%d", fl->fl_pid); /* set wait bit, then clear it out of cmd*/ if (cmd & LOCK_NB) wait = 0; cmd = cmd & (LOCK_SH | LOCK_EX | LOCK_UN); /* set command sequence that Ceph wants to see: shared lock, exclusive lock, or unlock */ if (LOCK_SH == cmd) lock_cmd = CEPH_LOCK_SHARED; else if (LOCK_EX == cmd) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, lock_cmd, wait, fl); if (!err) { err = flock_lock_file_wait(file, fl); if (err) { ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on flock_lock_file_wait, undid lock", err); } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; }
/*
 * timer_create(2): allocate and register a new POSIX timer for the calling
 * process against 'which_clock'.  On success the new timer id is copied to
 * 'created_timer_id' and 0 is returned; on failure a negative errno is
 * returned and any partially-created timer is released.
 *
 * FIX: '&current' had been corrupted into the mojibake '¤t' (an HTML
 * '&curren;' entity mangling) in the siglock/list_add calls, which does not
 * compile; the original identifier is restored below.
 */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* no sigevent supplied: default to SIGALRM at the caller */
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
/*
 * timer_create(2): allocate and register a new POSIX timer for the calling
 * process against 'which_clock'.  On success the new timer id is copied to
 * 'created_timer_id' and 0 is returned; on failure a negative errno is
 * returned and any partially-created timer is released.
 *
 * FIX: '&current' had been corrupted into the mojibake '¤t' (an HTML
 * '&curren;' entity mangling) in the siglock/list_add calls, which does not
 * compile; the original identifier is restored below.
 */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/* EAGAIN is the proper POSIX return value for a full IDR */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* no sigevent supplied: default to SIGALRM at the caller */
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
/*
 * Allocate 'size' bytes aligned to 'alignment' from 'engine', returning the
 * new node through '*node'.  Returns 0 on success, -EINVAL on bad arguments,
 * or -ENOMEM when no suitable free block exists.
 *
 * FIX: 'size' and 'alignment' are size_t, but were printed with %d/%u;
 * on 64-bit kernels size_t is unsigned long, so those specifiers are a
 * printf-format mismatch.  They now use %zu.
 */
int memory_engine_allocate(memory_engine_t *engine, size_t size,
			   size_t alignment, memory_node_t **node)
{
	int res = 0;
	memory_node_t *new_node = NULL;
	struct task_struct *grptask = NULL;

	shm_debug("memory_engine_allocate start. (%zu, %zu)\n",
		  size, alignment);

	if ((engine == NULL) || (node == NULL))
		return -EINVAL;

#ifdef SHM_GUARD_BYTES_ENABLE
	/* reserve room for guard bytes on cached heaps */
	if (engine->m_cache_or_noncache == SHM_CACHE) {
		size += SHM_GUARD_BYTES;
	}
#endif

	down(&(engine->m_mutex));

	if (size > engine->m_size_free) {
		shm_error("heap has not enough (%u) bytes for (%zu) bytes\n",
			  engine->m_size_free, size);
		res = -ENOMEM;
		goto err_exit;
	}

	/* Find a free node in heap */
	new_node = _FindNode_size(engine, size, alignment);
	if (new_node == NULL) {
		memory_node_t *pLastNode = NULL;
		pLastNode = engine->m_root.m_prev_free;
		if (pLastNode)
			shm_error("heap has not enough liner memory for (%zu) bytes, free blocks:%u(max free block:%u)\n",
				  size, engine->m_num_freeblock,
				  pLastNode->m_size);
		else
			shm_error("heap has not enough liner memory, no free blocks!!!\n");
		res = -ENOMEM;
		goto err_exit;
	}

	/* Do we have enough memory after the allocation to split it? */
	if (MEMNODE_ALIGN_SIZE(new_node) - size > engine->m_threshold)
		_Split(engine, new_node, size + new_node->m_offset);
	else
		engine->m_num_freeblock--;

	engine->m_num_usedblock++;

	/* Remove the node from the free list. */
	new_node->m_prev_free->m_next_free = new_node->m_next_free;
	new_node->m_next_free->m_prev_free = new_node->m_prev_free;
	new_node->m_next_free = new_node->m_prev_free = NULL;

	/* Fill in the information. */
	new_node->m_alignment = alignment;

	/* record pid/thread name in node info, for debug usage */
	new_node->m_threadid = task_pid_vnr(current);
	/* qzhang@marvell
	 * record creating task id, user task id;
	 * by default user task id is creating task id
	 * until memory_engine_lock invoked
	 */
	new_node->m_taskid = new_node->m_usrtaskid = task_tgid_vnr(current);
	strncpy(new_node->m_threadname, current->comm, 16);
	grptask = pid_task(task_tgid(current), PIDTYPE_PID);
	if (NULL != grptask) {
		strncpy(new_node->m_taskname, grptask->comm, 16);
		strncpy(new_node->m_usrtaskname, grptask->comm, 16);
	} else {
		memset(new_node->m_taskname, 0, 16);
		memset(new_node->m_usrtaskname, 0, 16);
	}

	new_node->m_phyaddress = MEMNODE_ALIGN_ADDR(new_node);
	memory_engine_insert_shm_node(&(engine->m_shm_root), new_node);

	/* Adjust the number of free bytes. */
	engine->m_size_free -= new_node->m_size;
	engine->m_size_used += new_node->m_size;
	engine->m_peak_usedmem = max(engine->m_peak_usedmem,
				     engine->m_size_used);

	/* Return the pointer to the node. */
	*node = new_node;

#ifdef SHM_GUARD_BYTES_ENABLE
	/* fill guard bytes with SHM_GUARD_DATA */
	if (engine->m_cache_or_noncache == SHM_CACHE) {
		memset((void *)(MEMNODE_ALIGN_ADDR(new_node) - engine->m_base +
				engine->m_virt_base +
				MEMNODE_ALIGN_SIZE(new_node) -
				SHM_GUARD_BYTES),
		       SHM_GUARD_DATA, SHM_GUARD_BYTES);
	}
#endif

	up(&(engine->m_mutex));

	shm_debug("Allocated %u (%u) bytes @ 0x%08X (0x%08X) for align (%u)\n",
		  MEMNODE_ALIGN_SIZE(new_node), new_node->m_size,
		  MEMNODE_ALIGN_ADDR(new_node), new_node->m_addr,
		  new_node->m_alignment);
	shm_debug("memory_engine_allocate OK.\n");

	return 0;

err_exit:
	up(&(engine->m_mutex));
	*node = NULL;
	shm_error("memory_engine_allocate failed !!! (%zu, %zu) (%d %s)\n",
		  size, alignment, current->pid, current->comm);
	return res;
}