/*
 * Release a spinlock held by the calling thread.
 * Returns EINVAL on a NULL lock, EPERM when the caller is not the
 * recorded owner, 0 on success.
 */
int pthread_spin_unlock (pthread_spinlock_t *lock)
{
	pthread_t self;

	if(lock == NULL)
	{
		dmsg_r(stderr, "%s: pid %d, tid %d EINVAL\n",
		       __FUNCTION__, (int)getpid(), (unsigned)pthread_self());
		return EINVAL;
	}

	self = pthread_self();

	/* Only the thread recorded in lock->val may release the lock. */
	if(lock->val != self)
	{
		dmsg_r(stderr, "libpthread: %s: pid %d, tid %d, val %d, EPERM\n",
		       __FUNCTION__, (int)getpid(), (int)self, (int)lock->val);
		return EPERM;
	}

	/* Mark the lock free, push the store out of the write buffer,
	 * then drop the now-stale cache line. */
	lock->val = __PTHREAD_OBJECT_FREE;
	cpu_wbflush();
	cpu_invalid_dcache_line(lock);

	dmsg_r(stderr, "%s: pid %d, tid %d, 0x%x unlocked\n",
	       __FUNCTION__, (int)getpid(), (unsigned)self, (unsigned)lock);

	return 0;
}
/*
 * sys_close - syscall: close the open file bound to descriptor @fd
 * of the current task.
 *
 * Returns 0 on success; on failure returns -1 with this->info.errno
 * set (EBADF for an invalid descriptor, or the vfs_close error code).
 */
int sys_close (uint_t fd)
{
	register struct task_s *task;
	register struct thread_s *this;
	struct vfs_file_s *file;
	error_t err;

	file = NULL;
	this = current_thread;
	task = current_task;

	/* Reject out-of-range or unattached descriptors.
	 * FIX: POSIX close() specifies EBADF for an invalid descriptor;
	 * EBADFD ("fd in bad state") is a different, non-portable code. */
	if((fd >= CONFIG_TASK_FILE_MAX_NR) || (task_fd_lookup(task, fd, &file)))
	{
		this->info.errno = EBADF;
		return -1;
	}

	/* Propagate VFS-level close failures; the descriptor slot is kept
	 * so the caller may observe/handle the error. */
	if((err = vfs_close(file, NULL)))
	{
		this->info.errno = err;
		return -1;
	}

	/* Release the descriptor slot and make the update visible. */
	task_fd_put(task, fd);
	cpu_wbflush();

	return 0;
}
/*
 * Destroy a spinlock object.
 * Returns EINVAL on a NULL lock, EBUSY while some thread still holds
 * it, 0 on success.
 */
int pthread_spin_destroy (pthread_spinlock_t *lock)
{
	/* Nothing to destroy. */
	if(lock == NULL)
		return EINVAL;

	/* Refuse to destroy a lock that is currently held. */
	if(lock->val != __PTHREAD_OBJECT_FREE)
		return EBUSY;

	/* Poison the lock so any later use is detectable, then make the
	 * update visible to other CPUs. */
	lock->val = __PTHREAD_OBJECT_DESTROYED;
	cpu_invalid_dcache_line(lock);
	cpu_wbflush();

	return 0;
}
/*
 * Event handler: release all threads waiting on a barrier.
 * With CONFIG_BARRIER_ACTIVE_WAIT the spinning waiters are released by
 * flipping the two-phase state; otherwise the sleeping waiters are
 * woken via barrier_do_broadcast().
 */
static EVENT_HANDLER(barrier_broadcast_event)
{
	struct barrier_s *barrier = event_get_argument(event);

#if CONFIG_BARRIER_ACTIVE_WAIT
	/* Toggle between phase 0 and phase 1. */
	uint_t next = ~(barrier->phase) & 0x1;

	barrier->state[next]           = 0;	/* arm the upcoming phase   */
	barrier->state[barrier->phase] = 1;	/* release current spinners */
	barrier->phase                 = next;
	cpu_wbflush();
#else
	barrier_do_broadcast(barrier);
#endif

	return 0;
}
/*
 * boot_signal_op - publish the boot signal so secondary CPUs can proceed.
 *
 * Writes the online CPU count and the boot signature into the boot
 * synchronization block (bsb), then flushes the write buffer.
 * Hangs forever on invalid arguments (no way to recover at boot time).
 *
 * FIX 1: the original dereferenced info (info->arch_info) BEFORE the
 *        info == NULL check, so the check could never trigger usefully.
 * FIX 2: the original boot_dmsg call had three conversion specifiers
 *        (%s, %x, %d) but only two variadic arguments — __FUNCTION__
 *        was missing, shifting every argument.
 */
static void boot_signal_op(struct boot_info_s *info, uint_t cpu_nr)
{
	bsb_t *bsb;
	struct arch_bib_header_s *header;
	uint_t *tty;

	/* Without info there is no way to locate the bootstrap TTY to
	 * report the error on: just hang. */
	if(info == NULL)
		while(1);

	header = (struct arch_bib_header_s*) info->arch_info;
	tty    = (uint_t*) header->bootstrap_tty;

	if(cpu_nr > info->onln_cpu_nr)
	{
		boot_dmsg(tty, "ERROR: %s: Invalid Arguments [0x%x, %d]\n",
			  __FUNCTION__, (unsigned) info, cpu_nr);
		while(1);
	}

	bsb = (bsb_t*) info->data;
	bsb->cpu_count   = cpu_nr;
	bsb->boot_signal = CONFIG_BOOT_SIGNAL_SIGNATURE;
	cpu_wbflush();
}
/*
 * barrier_destroy - tear down a barrier and free its backing pages.
 *
 * Returns EINVAL when the signature does not match or when the barrier
 * belongs to another task, EBUSY (non-hardware path) while threads are
 * still registered/waiting, 0 on success.
 */
error_t barrier_destroy(struct barrier_s *barrier)
{
	register uint_t cntr;
	kmem_req_t req;

	/* Reject anything that is not a live barrier object. */
	if(barrier->signature != BARRIER_ID)
		return EINVAL;

	/* Only the owning task may destroy a private barrier;
	 * owner == NULL denotes a shared (anonymous) barrier. */
	if((barrier->owner != NULL) && (barrier->owner != current_task))
		return EINVAL;

	req.type = KMEM_PAGE;

#if ARCH_HAS_BARRIERS
	/* Hardware barrier: release the arch-level resource. */
	(void) arch_barrier_destroy(barrier->cluster, barrier->hwid);
#else
	/* Software barrier: refuse to destroy while in use.
	 * NOTE(review): private barriers appear to track usage via
	 * ->index, shared ones via the ->waiting atomic — confirm. */
	if(barrier->owner == NULL)
		cntr = barrier->index;
	else
		cntr = atomic_get(&barrier->waiting);

	if(cntr != 0)
		return EBUSY;
#endif	/* ARCH_HAS_BARRIERS */

	/* Invalidate the signature first so concurrent users fail the
	 * signature check, then make that store visible. */
	barrier->signature = 0;
	cpu_wbflush();

	/* Free the wait-queue descriptor pages. */
	for(cntr = 0; cntr < BARRIER_WQDB_NR; cntr++)
	{
		req.ptr = barrier->pages_tbl[cntr];
		kmem_free(&req);
	}

	if(barrier->owner == NULL)
		spinlock_destroy(&barrier->lock);

	return 0;
}
//one thread //inline void remote_fifo_release(struct remote_fifo_s *remote_fifo) { remote_fifo->rdidx = (remote_fifo->rdidx + 1) % remote_fifo->slot_nbr; cpu_wbflush(); }
/*
 * FIXME: define spinlock_rdlock() so all locking on task->th_lock
 * becomes rdlock except on join/detach/destroy
 */

/*
 * sys_thread_wakeup - syscall: wake one thread (@tid) or a batch of
 * threads listed in the user array @tid_tbl of @count entries
 * (tid_tbl[0] must equal @tid as a consistency check).
 *
 * Returns 0 on success; on failure logs the negative internal error
 * code, sets this->info.errno = EINVAL and returns -1.
 */
int sys_thread_wakeup(pthread_t tid, pthread_t *tid_tbl, uint_t count)
{
	struct task_s *task;
	struct thread_s *this;
	struct thread_s *target;
	pthread_t tbl[100];		/* at most 100 tids per call */
	void *listner;
	uint_t event;
	sint_t i;
	error_t err;

	this = current_thread;
	task = this->task;
	i = -1;				/* for the failure log before the loop */

	if(tid_tbl != NULL)
	{
		/* The whole user buffer must lie below the kernel boundary
		 * and fit into tbl[]. */
		if((((uint_t)tid_tbl + (count*sizeof(pthread_t))) >= CONFIG_KERNEL_OFFSET) ||
		   (count == 0) || (count > 100))
		{
			err = -1;
			goto fail_tid_tbl;
		}

		/* FIX: copy count elements of pthread_t, not pthread_t* —
		 * the bounds check above already sizes the buffer with
		 * sizeof(pthread_t); the two sizes differ whenever
		 * pthread_t is not pointer-sized. */
		if((err = cpu_uspace_copy(&tbl[0], tid_tbl, sizeof(pthread_t) * count)))
			goto fail_usapce;

		/* Consistency check: first table entry must match @tid. */
		if(tbl[0] != tid)
		{
			err = -2;
			goto fail_first_tid;
		}
	}
	else
	{
		/* Single-thread wakeup. */
		count = 1;
		tbl[0] = tid;
	}

	for(i = 0; i < count; i++)
	{
		tid = tbl[i];

		if(tid > task->max_order)
		{
			err = -3;
			goto fail_tid;
		}

		target = task->th_tbl[tid];

		/* Slot may be empty or recycled: verify the signature. */
		if((target == NULL) || (target->signature != THREAD_ID))
		{
			err = -4;
			goto fail_target;
		}

		listner = sched_get_listner(target, SCHED_OP_UWAKEUP);
		event   = sched_event_make(target, SCHED_OP_UWAKEUP);

		if(this->info.isTraced == true)
		{
			printk(INFO,"%s: tid %d --> tid %d [%d][%d]\n",
			       __FUNCTION__, this->info.order, tid,
			       cpu_time_stamp(), i);
		}

		sched_event_send(listner, event);
		cpu_wbflush();
	}

	return 0;

fail_target:
fail_tid:
fail_first_tid:
fail_usapce:
fail_tid_tbl:
	printk(INFO, "%s: cpu %d, pid %d, tid %x, i %d, count %d, ttid %x, request has failed with err %d [%d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       task->pid,
	       this,
	       i,
	       count,
	       tid,
	       err,
	       cpu_time_stamp());

	this->info.errno = EINVAL;
	return -1;
}