/*
 * pthread_attr_getinheritsched() - query the inherit-scheduler attribute.
 * Validates both pointers, then reports ENOSYS: this implementation does
 * not support scheduler-inheritance attributes.
 */
int pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
    /* argument sanity */
    if (!attr || !inheritsched)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_attr_getscope() - query the contention-scope attribute.
 * Always fails with ENOSYS after argument validation; scopes are not
 * supported by this library.
 */
int pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
    /* argument sanity */
    if (!attr || !scope)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_attr_getschedparam() - query the scheduling-parameter attribute.
 * Validates arguments and then reports ENOSYS; scheduling parameters are
 * not supported by this library.
 */
int pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *schedparam)
{
    /* argument sanity */
    if (!attr || !schedparam)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_attr_setguardsize() - set the stack guard size attribute.
 * Rejects a NULL attribute object or a negative size, then reports
 * ENOSYS; guard pages are not supported by this library.
 */
int pthread_attr_setguardsize(pthread_attr_t *attr, int stacksize)
{
    /* argument sanity */
    if (!attr || stacksize < 0)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
// FIXME: implement this
/*
 * pthread_attr_setstackaddr() - set a user-supplied stack address.
 * Currently a stub: validates the attribute object and reports ENOSYS.
 */
int pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    (void) stackaddr;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * thread_rwlock_unlock() - release a read/write lock held by the caller.
 *
 * For a write-held lock (RWLOCK_RW) this simply releases the rw mutex.
 * For a read-held lock it takes the reader-bookkeeping mutex (rw_mutex_rd),
 * decrements the reader count, and only the last reader releases the
 * writer-exclusion mutex (rw_mutex_rw).
 *
 * Returns TRUE on success; FALSE with errno set on bad arguments (EINVAL),
 * an uninitialized lock (EDEADLK), or a failed underlying mutex operation.
 */
int thread_rwlock_unlock(rwlock_t *rwlock)
{
    /* consistency checks */
    if (rwlock == NULL)
        return_errno(FALSE, EINVAL);
    if (!(rwlock->rw_state & THREAD_RWLOCK_INITIALIZED))
        return_errno(FALSE, EDEADLK);

    /* unlock lock */
    if (rwlock->rw_mode == RWLOCK_RW) {
        /* read-write unlock is simple */
        if (!thread_mutex_unlock(&(rwlock->rw_mutex_rw)))
            return FALSE;
    }
    else {
        /* read-only unlock is more complicated to get right */
        if (!_thread_mutex_lock(&(rwlock->rw_mutex_rd), FALSE))
            return FALSE;
        rwlock->rw_readers--;
        /* last reader out releases the writer-exclusion mutex */
        if (rwlock->rw_readers == 0) {
            if (!thread_mutex_unlock(&(rwlock->rw_mutex_rw))) {
                /* roll back the reader count so the lock state stays consistent */
                rwlock->rw_readers++;
                thread_mutex_unlock(&(rwlock->rw_mutex_rd));
                return FALSE;
            }
        }
        /* NOTE(review): rw_mode is reset to RWLOCK_RD unconditionally here,
         * even when readers remain — appears intentional (mode only matters
         * relative to rw_readers), but confirm against the acquire paths */
        rwlock->rw_mode = RWLOCK_RD;
        thread_mutex_unlock(&(rwlock->rw_mutex_rd));
    }
    return TRUE;
}
/*
 * _thread_cond_signal() - wake one waiter (broadcast == 0) or all waiters
 * (broadcast != 0) blocked on the condition variable.
 *
 * Waiters are resumed in queue order; after waking, the caller yields so
 * the resumed thread(s) get a chance to run.  Returns TRUE on success,
 * FALSE with errno on a NULL (EINVAL) or uninitialized (EDEADLK) cond.
 */
static int _thread_cond_signal(cond_t *cond, int broadcast)
{
    /* consistency checks */
    if (cond == NULL)
        return_errno(FALSE, EINVAL);
    if (!(cond->cn_state & THREAD_COND_INITIALIZED))
        return_errno(FALSE, EDEADLK);

    // do something only if there is at least one waiter (POSIX semantics)
    if (cond->cn_waiters > 0) {
        // signal the condition
        do {
            thread_t *t = dequeue(&cond->wait_queue);
            assert (t != NULL);
            // t could also be a timed out thread, but it doesn't matter
            thread_resume(t);
            cond->cn_waiters--;
        } while (broadcast && !queue_isempty(&cond->wait_queue));

        // and give other threads a chance to grab the CPU
        CAP_SET_SYSCALL();
        thread_yield();
        CAP_CLEAR_SYSCALL();
    }

    /* return to caller */
    return TRUE;
}
/*
 * pthread_attr_getguardsize() - query the stack guard size attribute.
 * Validates both pointers and reports ENOSYS; guard pages are not
 * supported by this library.
 */
int pthread_attr_getguardsize(const pthread_attr_t *attr, int *stacksize)
{
    /* argument sanity */
    if (!attr || !stacksize)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * thread_join() - wait for thread t to terminate and reap it.
 *
 * Fails with EINVAL if t is NULL or not joinable, and with EACCES if
 * another thread has already registered itself as the joiner.  If t has
 * not yet become a ZOMBIE, the caller suspends itself until resumed
 * (presumably by the exit path of t — confirm against thread_exit).
 * On success the dead thread's return value is stored through ret (if
 * non-NULL) and its resources are freed.
 */
int thread_join(thread_t *t, void **ret)
{
    if (t == NULL)
        return_errno(FALSE, EINVAL);
    if ( !( t->joinable ) )
        return_errno(FALSE, EINVAL);
    assert(t->state != GHOST);

    // A thread can be joined only once
    if (t->join_thread)
        return_errno(FALSE, EACCES);
    t->join_thread = current_thread;

    // Wait for the thread to complete
    tdebug( "**** thread state: %d\n" ,t->state);
    if (t->state != ZOMBIE) {
        CAP_SET_SYSCALL();
        /* suspend with no timeout; woken when t exits */
        thread_suspend_self(0);
        CAP_CLEAR_SYSCALL();
    }

    // clean up the dead thread
    if (ret != NULL)
        *ret = t->ret;
    free_thread( t );

    return TRUE;
}
/*
 * pthread_mutexattr_getprotocol() - query the mutex protocol attribute.
 * Validates the attribute object and reports ENOSYS; mutex protocols
 * are not supported by this library.
 */
int pthread_mutexattr_getprotocol(pthread_mutexattr_t *attr, int *protocol)
{
    (void) protocol;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_mutexattr_getprioceiling() - query the priority-ceiling attribute.
 * Validates the attribute object and reports ENOSYS; priority ceilings
 * are not supported by this library.
 */
int pthread_mutexattr_getprioceiling(pthread_mutexattr_t *attr, int *prioceiling)
{
    (void) prioceiling;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_attr_setinheritsched() - set the inherit-scheduler attribute.
 * Validates the attribute object and reports ENOSYS; scheduler
 * inheritance is not supported by this library.
 */
int pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
    (void) inheritsched;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_attr_setschedparam() - set the scheduling-parameter attribute.
 * Validates the attribute object and reports ENOSYS; scheduling
 * parameters are not supported by this library.
 */
int pthread_attr_setschedparam(pthread_attr_t *attr, struct sched_param *schedparam)
{
    (void) schedparam;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_mutexattr_gettype() - query the mutex type attribute.
 * Validates the attribute object and reports ENOSYS; mutex type
 * attributes are not supported by this library.
 */
int pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *type)
{
    (void) type;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_attr_setschedpolicy() - set the scheduling-policy attribute.
 * Validates the attribute object and reports ENOSYS; scheduling
 * policies are not supported by this library.
 */
int pthread_attr_setschedpolicy(pthread_attr_t *attr, int schedpolicy)
{
    (void) schedpolicy;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_condattr_getpshared() - query the process-shared attribute.
 * Validates the attribute object and reports ENOSYS; process-shared
 * condition variables are not supported by this library.
 */
int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
{
    (void) pshared;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * thread_attr_init() - initialize a thread attribute object to defaults.
 * Fails with EINVAL on a NULL object and with EPERM when the object is
 * already bound to a thread.  Threads default to joinable.
 */
int thread_attr_init(thread_attr_t attr)
{
    /* must have an attribute object that is not yet bound to a thread */
    if (!attr)
        return_errno(FALSE, EINVAL);
    if (attr->thread)
        return_errno(FALSE, EPERM);

    /* default: joinable */
    attr->joinable = TRUE;
    return TRUE;
}
/*
 * thread_key_delete() - release a thread-specific-data key.
 * Fails with EINVAL when the key is out of range or not currently
 * allocated; otherwise marks the key table slot as free.
 */
int thread_key_delete(thread_key_t key)
{
    /* reject out-of-range or unallocated keys */
    if (key >= THREAD_KEY_MAX || !thread_keytab[key].used)
        return_errno(FALSE, EINVAL);

    thread_keytab[key].used = FALSE;
    return TRUE;
}
/*
 * thread_key_getdata() - fetch the calling thread's value for a TSD key.
 * Returns NULL (with errno = EINVAL) for an out-of-range or unallocated
 * key, and plain NULL when the thread never stored any key data.
 */
void *thread_key_getdata(thread_key_t key)
{
    /* reject out-of-range or unallocated keys */
    if (key >= THREAD_KEY_MAX || !thread_keytab[key].used)
        return_errno(NULL, EINVAL);

    /* no per-thread storage allocated yet: the value is implicitly NULL */
    if (current_thread->key_data_value == NULL)
        return NULL;

    return (void *)current_thread->key_data_value[key];
}
/*
 * pthread_mutex_getprioceiling() - query a mutex's priority ceiling.
 * Lazily initializes a statically-initialized mutex, then reports
 * ENOSYS; priority ceilings are not supported by this library.
 */
int pthread_mutex_getprioceiling(pthread_mutex_t *mutex, int *prioceiling)
{
    (void) prioceiling;

    /* argument sanity */
    if (!mutex)
        return_errno(EINVAL, EINVAL);

    /* handle a mutex still carrying the static initializer */
    if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
        if (pthread_mutex_init(mutex, NULL) != OK)
            return errno;
    }

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * pthread_setconcurrency() - record the desired concurrency level.
 * The value is only stored (a hint, per POSIX); negative levels are
 * rejected with EINVAL.
 */
int pthread_setconcurrency(int new_level)
{
    /* reject negative levels */
    if (new_level < 0)
        return_errno(EINVAL, EINVAL);

    pthread_concurrency = new_level;
    return OK;
}
/*
 * pthread_attr_setdetachstate() - set the detach-state attribute.
 * Translates the POSIX detach state into the thread library's internal
 * joinable flag and applies it via thread_attr_set().  Fails with
 * EINVAL on a NULL attribute or an unknown detach state; on a failed
 * thread_attr_set() the errno it left behind is returned.
 */
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
    int joinable;

    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* map POSIX detach states onto the internal flags */
    switch (detachstate) {
    case PTHREAD_CREATE_DETACHED:
        joinable = THREAD_CREATE_DETACHED;
        break;
    case PTHREAD_CREATE_JOINABLE:
        joinable = THREAD_CREATE_JOINABLE;
        break;
    default:
        return_errno(EINVAL, EINVAL);
    }

    if (!thread_attr_set((thread_attr_t)(*attr), THREAD_ATTR_JOINABLE, joinable))
        return errno;
    return OK;
}
/*
** AT-FORK SUPPORT
*/

/*
 * pthread_atfork() - register fork handlers.
 * Fork handlers are not supported by this library; always fails
 * with ENOSYS.
 */
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
    (void) prepare;
    (void) parent;
    (void) child;

    /* not supported */
    return_errno(ENOSYS, ENOSYS);
}
/*
 * sc_hdlr_open() - syscall handler: open a path for the calling process.
 *
 * Copies the user-supplied path into a kernel-side buffer, resolves the
 * lookup root (filesystem root for absolute paths, else the process cwd),
 * and delegates to the root node's open() callback, storing the resulting
 * descriptor id back into the syscall argument block.
 */
static int sc_hdlr_open(void *_p){
	char path[((sc_fs_t*)(_p))->data_len];	// VLA sized by user-supplied length
	sc_fs_t *p;
	fs_node_t *root;
	process_t *this_p;


	this_p = sched_running()->parent;

	/* initials */
	p = (sc_fs_t*)_p;
	// NOTE(review): copy_from_user() result is not checked — confirm it
	// cannot fail or reports failure through errno
	copy_from_user(path, p->data, p->data_len, this_p);

	/* identify file system and call its open callback */
	/* cwd is read under the process mutex; absolute paths start at fs_root */
	mutex_lock(&this_p->mtx);
	root = (path[0] == '/') ? (fs_root) : this_p->cwd;
	mutex_unlock(&this_p->mtx);

	if(root->ops->open == 0x0)
		return_errno(E_NOIMP);

	DEBUG("path \"%s\", mode %#x\n", path, p->mode);

	fs_lock();
	p->fd = root->ops->open(root, path, p->mode, this_p);
	fs_unlock();

	DEBUG("created fd with id %d, \"%s\"\n", p->fd, strerror(errno));

	return E_OK;
}
/*
 * pthread_condattr_destroy() - destroy a condition variable attribute
 * object.  The attribute holds no resources here, so only the pointer
 * is validated.
 */
int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    /* argument sanity */
    if (!attr)
        return_errno(EINVAL, EINVAL);

    /* nothing to do for us */
    return OK;
}
/*
 * fcntl() - handle file descriptor mode commands.
 *
 * F_MODE_GET copies the descriptor's mode out; F_MODE_SET applies the
 * requested mode, except for the bits protected by mode_mask, and
 * reports E_NOSUP when the result differs from the request.  Unknown
 * commands return -E_NOIMP without touching errno.
 */
static int fcntl(fs_filed_t *fd, int cmd, void *data, process_t *this_p){
	f_mode_t *req = (f_mode_t*)data;


	/* handle file descriptor mode commands */
	if(cmd == F_MODE_GET){
		*req = fd->mode;
	}
	else if(cmd == F_MODE_SET){
		/* take unmasked bits from the request, keep masked bits as-is */
		fd->mode = (*req & ~fd->mode_mask) | (fd->mode & fd->mode_mask);

		if(fd->mode != *req)
			return_errno(E_NOSUP);
	}
	else{
		// NOTE not setting errno is intentional to
		// allow sc_hdlr_fcntl to overwrite it
		return -E_NOIMP;
	}

	return E_OK;
}
/*
 * sc_hdlr_rmnode() - syscall handler: remove a filesystem node by path.
 *
 * Copies the user-supplied path into a kernel-side buffer, resolves the
 * lookup root (filesystem root for absolute paths, else the process cwd),
 * and delegates to the root node's node_rm() callback.  The callback's
 * failure, if any, is propagated via -errno (E_OK when errno is 0).
 */
static int sc_hdlr_rmnode(void *_p){
	char path[((sc_fs_t*)(_p))->data_len];	// VLA sized by user-supplied length
	sc_fs_t *p;
	fs_node_t *root;
	process_t *this_p;


	this_p = sched_running()->parent;

	/* initials */
	p = (sc_fs_t*)_p;
	// NOTE(review): copy_from_user() result is not checked — confirm it
	// cannot fail or reports failure through errno
	copy_from_user(path, p->data, p->data_len, this_p);

	DEBUG("%s\n", path);

	/* identify file system and call its rmnode callback */
	/* cwd is read under the process mutex; absolute paths start at fs_root */
	mutex_lock(&this_p->mtx);
	root = (path[0] == '/') ? (fs_root) : this_p->cwd;
	mutex_unlock(&this_p->mtx);

	if(root->ops->node_rm == 0x0)
		return_errno(E_NOIMP);

	fs_lock();
	(void)root->ops->node_rm(root, path);
	fs_unlock();

	/* node_rm signals failure through errno */
	return -errno;
}
/*
 * sc_hdlr_close() - syscall handler: close a file descriptor of the
 * calling process.
 *
 * Looks up the descriptor, fails with E_INVAL if it does not exist,
 * and delegates to the node's close() callback under the fs lock.
 */
static int sc_hdlr_close(void *_p){
	sc_fs_t *p;
	fs_filed_t *fd;
	process_t *this_p;


	this_p = sched_running()->parent;

	/* initials */
	p = (sc_fs_t*)_p;
	fd = fs_fd_acquire(p->fd, this_p);

	DEBUG("fd %d%s\n", p->fd, (fd == 0x0 ? " (invalid)" : ""));

	if(fd == 0x0)
		return_errno(E_INVAL);

	/* handle close */
	fs_lock();
	(void)fd->node->ops->close(fd, this_p);
	fs_unlock();

	// NOTE fs_fd_release must not be called, since
	// close has already deleted the descriptor

	return E_OK;
}
/*
 * pthread_cond_destroy() - destroy a condition variable and release the
 * storage it refers to, poisoning the handle afterwards.
 */
int pthread_cond_destroy(pthread_cond_t *cond)
{
    /* argument sanity */
    if (!cond)
        return_errno(EINVAL, EINVAL);

    /* free the underlying cond object and clear the handle */
    free(*cond);
    *cond = NULL;

    return OK;
}
/*
 * thread_cond_timedwait() - wait on a condition variable, optionally
 * bounded by an absolute deadline.
 *
 * The caller must hold `mutex`; it is released for the duration of the
 * wait and re-acquired before returning.  With abstime == NULL the wait
 * is unbounded.  Returns TRUE when signaled, or FALSE with
 * errno = ETIMEDOUT when the deadline passed (or, per the FIXME below,
 * when the suspend was interrupted).
 */
int thread_cond_timedwait(cond_t *cond, mutex_t *mutex, const struct timespec *abstime)
{
    unsigned long timeout = 0;
    int sus_rv = 0;

    /* consistency checks */
    if (cond == NULL || mutex == NULL)
        return_errno(FALSE, EINVAL);
    if (!(cond->cn_state & THREAD_COND_INITIALIZED))
        return_errno(FALSE, EDEADLK);

    /* add us to the number of waiters */
    cond->cn_waiters++;

    /* unlock mutex (caller had to lock it first) */
    thread_mutex_unlock(mutex);

    /* wait until the condition is signaled */
    assert (current_thread);
    enqueue(&cond->wait_queue, current_thread);

    if (abstime) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        /* convert absolute deadline to relative microseconds;
         * NOTE(review): a deadline already in the past makes this
         * difference negative — confirm thread_suspend_self() handles
         * that (timeout is unsigned long) */
        timeout = (abstime->tv_sec - tv.tv_sec) * 1000000 + (abstime->tv_nsec / 1000 - tv.tv_usec);
    }

    CAP_SET_SYSCALL();
    sus_rv = thread_suspend_self(timeout);
    CAP_CLEAR_SYSCALL();

    /* relock mutex */
    thread_mutex_lock(mutex);

    // FIXME: this is wrong, we should retry after INTERRUPTED
    if (sus_rv == TIMEDOUT || sus_rv == INTERRUPTED) {
        /* we timed out */
        /* remove us from the number of waiters */
        /* our thread could possibly be already removed by thread_cond_signal() */
        if (queue_remove(&cond->wait_queue, current_thread))
            cond->cn_waiters--;
        return_errno(FALSE, ETIMEDOUT);
    }
    else
        return TRUE;
}