struct wind_task *get_wind_task(TASK_ID tid)
{
	struct wind_task *task = find_wind_task(tid);

	/*
	 * Grab the task lock, assuming that the task might have been
	 * deleted, and/or we may have been handed some random opaque
	 * pointer which leads to valid memory but certainly not to a
	 * task object. Last chance is pthread_mutex_lock() in
	 * threadobj_lock() detecting a wrong mutex kind and bailing
	 * out.
	 *
	 * XXX: threadobj_lock() disables cancellability for the
	 * caller upon success, until the lock is dropped in
	 * threadobj_unlock(), so there is no way the task may vanish
	 * while we hold the lock. Therefore we need no cleanup
	 * handler here.
	 */
	if (task == NULL || threadobj_lock(&task->thobj) == -EINVAL)
		return NULL;

	/* Check the magic word again, while we hold the lock. */
	if (threadobj_get_magic(&task->thobj) != task_magic) {
		threadobj_unlock(&task->thobj);
		return NULL;
	}

	return task;
}
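/*
 * Usage sketch (not part of the original code): callers pair
 * get_wind_task() with threadobj_unlock() once they are done with
 * the task object. The routine below is hypothetical.
 */
static STATUS example_task_op(TASK_ID tid)
{
	struct wind_task *task = get_wind_task(tid);

	if (task == NULL)
		return ERROR;	/* Stale or bogus task identifier. */

	/* ... access task members while holding the lock ... */

	threadobj_unlock(&task->thobj);

	return OK;
}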
static void *task_trampoline(void *arg)
{
	struct wind_task *task = arg;
	struct wind_task_args *args = &task->args;
	struct service svc;
	int ret;

	ret = __bt(threadobj_prologue(&task->thobj, task->name));
	if (ret)
		goto done;

	COPPERPLATE_PROTECT(svc);
	ret = __bt(registry_add_file(&task->fsobj, O_RDONLY,
				     "/vxworks/tasks/%s", task->name));
	if (ret)
		warning("failed to export task %s to registry",
			task->name);
	COPPERPLATE_UNPROTECT(svc);

	/* Wait for someone to run taskActivate() upon us. */
	threadobj_wait_start(&task->thobj);

	args->entry(args->arg0, args->arg1, args->arg2, args->arg3,
		    args->arg4, args->arg5, args->arg6, args->arg7,
		    args->arg8, args->arg9);
done:
	threadobj_lock(&task->thobj);
	threadobj_set_magic(&task->thobj, ~task_magic);
	threadobj_unlock(&task->thobj);

	pthread_exit((void *)(long)ret);
}
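/*
 * Spawning sketch (an assumption, not the emulator's actual
 * taskSpawn() path): task_trampoline() is the start routine handed
 * to the underlying thread creation call; the new thread then parks
 * in threadobj_wait_start() until taskActivate() releases it.
 */
static int example_spawn(struct wind_task *task)
{
	pthread_attr_t attr;
	pthread_t ptid;
	int ret;

	pthread_attr_init(&attr);
	ret = pthread_create(&ptid, &attr, task_trampoline, task);
	pthread_attr_destroy(&attr);

	return ret;
}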
struct wind_task *get_wind_task_or_self(TASK_ID tid)
{
	struct wind_task *current;

	if (tid)
		return get_wind_task(tid);

	current = wind_task_current();
	if (current == NULL)
		return NULL;

	/* This one might block but can't fail: the task is ours. */
	threadobj_lock(&current->thobj);

	return current;
}
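/*
 * Usage sketch (hypothetical): VxWorks calls accept a zero task
 * identifier to mean "the calling task", which is what
 * get_wind_task_or_self() implements; unlocking works the same as
 * with get_wind_task().
 */
static STATUS example_self_op(void)
{
	struct wind_task *task = get_wind_task_or_self(0);

	if (task == NULL)
		return ERROR;

	/* ... operate on the calling task under the lock ... */

	threadobj_unlock(&task->thobj);

	return OK;
}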
int rt_alarm_wait(RT_ALARM *alarm)
{
	struct threadobj *current = threadobj_current();
	struct sched_param_ex param_ex;
	struct trank_alarm_wait *aw;
	struct alchemy_alarm *acb;
	int ret, prio, pulses;

	acb = find_alarm(alarm);
	if (acb == NULL)
		return -EINVAL;

	threadobj_lock(current);
	prio = threadobj_get_priority(current);
	if (prio != threadobj_irq_prio) {
		param_ex.sched_priority = threadobj_irq_prio;
		/* Working on self, so -EIDRM can't happen. */
		threadobj_set_schedparam(current, SCHED_FIFO, &param_ex);
	}
	threadobj_unlock(current);

	aw = acb->arg;

	/*
	 * Emulate the original behavior: wait for the next pulse (no
	 * event buffering, broadcast to all waiters), while
	 * preventing spurious wakeups.
	 */
	__RT(pthread_mutex_lock(&aw->lock));

	pulses = aw->alarm_pulses;

	for (;;) {
		ret = -__RT(pthread_cond_wait(&aw->event, &aw->lock));
		if (ret || aw->alarm_pulses != pulses)
			break;
	}

	__RT(pthread_mutex_unlock(&aw->lock));

	return __bt(ret);
}
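/*
 * Usage sketch (hypothetical task body): a waiter typically loops
 * over rt_alarm_wait(), handling one pulse per iteration; the alarm
 * object is assumed to have been set up elsewhere with
 * rt_alarm_create() and rt_alarm_start().
 */
static void alarm_server(void *arg)
{
	RT_ALARM *alarm = arg;

	for (;;) {
		if (rt_alarm_wait(alarm))
			break;	/* Alarm deleted or wait failed. */

		/* ... handle the pulse ... */
	}
}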
ssize_t read_threads(struct fsobj *fsobj, char *buf,
		     size_t size, off_t offset, void *priv)
{
	struct thread_data *thread_data, *p;
	char sbuf[64], pbuf[16], tbuf[64];
	struct threadobj_stat statbuf;
	struct sysgroup_memspec *obj;
	struct threadobj *thobj;
	const char *sched_class;
	ssize_t len = 0;
	int ret, count;

	ret = heapobj_bind_session(__node_info.session_label);
	if (ret)
		return ret;

	sysgroup_lock();
	count = sysgroup_count(thread);
	sysgroup_unlock();

	if (count == 0)
		goto out;

	/*
	 * We don't want to hold the sysgroup lock for too long,
	 * since it could be contended by a real-time task. So we
	 * pull all the per-thread data we need into a local array,
	 * then print out its contents after dropping the lock.
	 */
	thread_data = p = malloc(sizeof(*p) * count);
	if (thread_data == NULL) {
		len = -ENOMEM;
		goto out;
	}

	sysgroup_lock();

	for_each_sysgroup(obj, thread) {
		/* Cap the scan to the snapshot size sampled above. */
		if (p - thread_data >= count)
			break;
		thobj = container_of(obj, struct threadobj, memspec);
		ret = threadobj_lock(thobj);
		if (ret)
			continue;
		strncpy(p->name, thobj->name, sizeof(p->name) - 1);
		p->name[sizeof(p->name) - 1] = '\0';
		p->pid = thobj->pid;
		p->priority = thobj->priority;
		p->policy = thobj->policy;
		threadobj_stat(thobj, &statbuf);
		threadobj_unlock(thobj);
		p->status = statbuf.status;
		p->cpu = statbuf.cpu;
		p->timeout = statbuf.timeout;
		p->schedlock = statbuf.schedlock;
		p++;
	}

	sysgroup_unlock();

	count = p - thread_data;
	if (count == 0)
		goto out_free;

	len = sprintf(buf, "%-3s %-6s %-5s %-8s %-8s %-10s %s\n",
		      "CPU", "PID", "CLASS", "PRI", "TIMEOUT",
		      "STAT", "NAME");

	for (p = thread_data; count > 0; count--, p++) {
		/* Skip threads which exited since the snapshot. */
		if (kill(p->pid, 0))
			continue;
		snprintf(pbuf, sizeof(pbuf), "%3d", p->priority);
		format_time(p->timeout, tbuf, sizeof(tbuf));
		format_thread_status(p, sbuf, sizeof(sbuf));
		switch (p->policy) {
		case SCHED_RT:
			sched_class = "rt";
			break;
		case SCHED_RR:
			sched_class = "rr";
			break;
#ifdef SCHED_SPORADIC
		case SCHED_SPORADIC:
			sched_class = "pss";
			break;
#endif
#ifdef SCHED_TP
		case SCHED_TP:
			sched_class = "tp";
			break;
#endif
#ifdef SCHED_QUOTA
		case SCHED_QUOTA:
			sched_class = "quota";
			break;
#endif
#ifdef SCHED_WEAK
		case SCHED_WEAK:
			sched_class = "weak";
			break;
#endif
		default:
			sched_class = "other";
			break;
		}
		len += sprintf(buf + len,
			       "%3u %-6d %-5s %-8s %-8s %-10s %s\n",
			       p->cpu, p->pid, sched_class, pbuf,
			       tbuf, sbuf, p->name);
	}

out_free:
	free(thread_data);
out:
	heapobj_unbind_session();

	return len;
}
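/*
 * A minimal sketch of the snapshot record read_threads() fills in;
 * the field list follows the accesses above, but the exact types
 * and the name length are assumptions.
 */
struct thread_data {
	char name[32];		/* Copy of thobj->name. */
	pid_t pid;
	int priority;
	int policy;
	int status;		/* Status bits from threadobj_stat(). */
	int cpu;		/* CPU the thread last ran on. */
	ticks_t timeout;	/* Pending timeout, if any. */
	int schedlock;		/* Scheduler lock nesting depth. */
};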