/****************************************************************************
 * Name: timer_settime
 *
 * Description:
 *   Arm or disarm the POSIX per-process timer 'timerid'.  If
 *   value->it_value is non-zero, the timer is started to expire at that
 *   time (relative by default, absolute if TIMER_ABSTIME is in 'flags').
 *   A non-zero value->it_interval makes the timer repetitive.  A zero
 *   it_value simply leaves the timer disarmed.
 *
 * Input Parameters:
 *   timerid - The timer to be armed/disarmed (a struct posix_timer_s *).
 *   flags   - 0 for relative expiry; TIMER_ABSTIME for absolute expiry.
 *   value   - New expiration time and repetition interval (required).
 *   ovalue  - Where the previous setting would be returned.  NOTE(review):
 *             this implementation ignores ovalue; POSIX would have us
 *             return the remaining time here -- confirm callers don't
 *             depend on it.
 *
 * Returned Value:
 *   OK on success.  ERROR on failure with errno set: EINVAL for bad
 *   arguments, ENOSYS if absolute timing is requested but clock
 *   functionality is disabled.
 *
 ****************************************************************************/

int timer_settime(timer_t timerid, int flags,
                  FAR const struct itimerspec *value,
                  FAR struct itimerspec *ovalue)
{
  FAR struct posix_timer_s *timer = (FAR struct posix_timer_s *)timerid;
  irqstate_t state;
  int delay;
  int ret = OK;

  /* Some sanity checks */

  if (!timer || !value)
    {
      errno = EINVAL;
      return ERROR;
    }

  /* Disarm the timer (in case the timer was already armed when
   * timer_settime() is called).
   */

  (void)wd_cancel(timer->pt_wdog);

  /* If the it_value member of value is zero, the timer will not be
   * re-armed.
   */

  if (value->it_value.tv_sec <= 0 && value->it_value.tv_nsec <= 0)
    {
      return OK;
    }

  /* Set up any repetitive timer */

  if (value->it_interval.tv_sec > 0 || value->it_interval.tv_nsec > 0)
    {
      (void)clock_time2ticks(&value->it_interval, &timer->pt_delay);
    }
  else
    {
      timer->pt_delay = 0;
    }

  /* We need to disable timer interrupts through the following section so
   * that the system timer is stable.
   */

  state = irqsave();

  /* Check if abstime is selected */

  if ((flags & TIMER_ABSTIME) != 0)
    {
#ifdef CONFIG_DISABLE_CLOCK
      /* Absolute timing depends upon having access to clock functionality.
       * BUGFIX: restore the interrupt state before returning.  The
       * original code returned from inside the critical section, leaving
       * interrupts disabled forever.
       */

      irqrestore(state);
      errno = ENOSYS;
      return ERROR;
#else
      /* Calculate a delay corresponding to the absolute time in 'value'.
       * NOTE: We have internal knowledge that clock_abstime2ticks only
       * returns an error if clockid != CLOCK_REALTIME.
       */

      (void)clock_abstime2ticks(CLOCK_REALTIME, &value->it_value, &delay);
#endif
    }
  else
    {
      /* Calculate a delay assuming that 'value' holds the relative time
       * to wait.  We have internal knowledge that clock_time2ticks always
       * returns success.
       */

      (void)clock_time2ticks(&value->it_value, &delay);
    }

  /* If the time is in the past or now, then set up the next interval
   * instead (assuming a repetitive timer).
   */

  if (delay <= 0)
    {
      delay = timer->pt_delay;
    }

  /* Then start the watchdog */

  if (delay > 0)
    {
      timer->pt_last = delay;
      ret = wd_start(timer->pt_wdog, delay, (wdentry_t)timer_timeout,
                     1, (uint32_t)((uintptr_t)timer));
    }

  irqrestore(state);
  return ret;
}
/****************************************************************************
 * Name: sched_setparam
 *
 * Description:
 *   Set the scheduling parameters (currently only the priority, plus the
 *   sporadic-scheduling parameters when CONFIG_SCHED_SPORADIC is enabled)
 *   of the task identified by 'pid'.  A pid of zero means the calling
 *   task.
 *
 * Input Parameters:
 *   pid   - Target task ID (0 == the calling task).
 *   param - New scheduling parameters (required).
 *
 * Returned Value:
 *   On success, the (non-negative) result of sched_reprioritize().  On
 *   failure, ERROR with errno set: EINVAL for bad parameters, ESRCH if no
 *   task has the given pid.
 *
 ****************************************************************************/

int sched_setparam(pid_t pid, FAR const struct sched_param *param)
{
  FAR struct tcb_s *rtcb;
  FAR struct tcb_s *tcb;
  int errcode;
  int ret;

  /* Verify that the requested priority is in the valid range */

  if (!param)
    {
      errcode = EINVAL;
      goto errout_with_errcode;
    }

  /* Prohibit modifications to the head of the ready-to-run task
   * list while adjusting the priority
   */

  sched_lock();

  /* Check if the task to reprioritize is the calling task */

  rtcb = this_task();
  if (pid == 0 || pid == rtcb->pid)
    {
      tcb = rtcb;
    }

  /* The PID is not the calling task, we will have to search for it */

  else
    {
      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* No task with this PID was found */

          errcode = ESRCH;
          goto errout_with_lock;
        }
    }

#ifdef CONFIG_SCHED_SPORADIC
  /* Update parameters associated with SCHED_SPORADIC.
   * BUGFIX: test the policy of the *target* task (tcb), not the calling
   * task (rtcb) -- the two differ whenever pid refers to another task.
   */

  if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
    {
      FAR struct sporadic_s *sporadic;
      irqstate_t flags;
      int repl_ticks;
      int budget_ticks;

      /* The number of replenishments must lie in a configured range */

      if (param->sched_ss_max_repl < 1 ||
          param->sched_ss_max_repl > CONFIG_SCHED_SPORADIC_MAXREPL)
        {
          errcode = EINVAL;
          goto errout_with_lock;
        }

      /* Convert timespec values to system clock ticks */

      (void)clock_time2ticks(&param->sched_ss_repl_period, &repl_ticks);
      (void)clock_time2ticks(&param->sched_ss_init_budget, &budget_ticks);

      /* Avoid zero/negative times */

      if (repl_ticks < 1)
        {
          repl_ticks = 1;
        }

      if (budget_ticks < 1)
        {
          budget_ticks = 1;
        }

      /* The replenishment period must be greater than or equal to the
       * budget period.
       */

#if 1
      /* REVISIT: In the current implementation, the budget cannot exceed
       * half the duty.
       */

      if (repl_ticks < (2 * budget_ticks))
#else
      if (repl_ticks < budget_ticks)
#endif
        {
          errcode = EINVAL;
          goto errout_with_lock;
        }

      /* Stop/reset current sporadic scheduling */

      flags = enter_critical_section();
      ret = sched_sporadic_reset(tcb);
      if (ret >= 0)
        {
          /* Save the sporadic scheduling parameters and reset to the
           * beginning of the replenishment interval.
           * BUGFIX: use the sporadic state of the *target* task (tcb),
           * consistent with the tcb->timeslice assignment; the original
           * read rtcb->sporadic.
           */

          tcb->timeslice         = budget_ticks;

          sporadic = tcb->sporadic;
          DEBUGASSERT(sporadic != NULL);

          sporadic->hi_priority  = param->sched_priority;
          sporadic->low_priority = param->sched_ss_low_priority;
          sporadic->max_repl     = param->sched_ss_max_repl;
          sporadic->repl_period  = repl_ticks;
          sporadic->budget       = budget_ticks;

          /* And restart at the next replenishment interval */

          ret = sched_sporadic_start(tcb);
        }

      /* Restore interrupts and handle errors */

      leave_critical_section(flags);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif

  /* Then perform the reprioritization */

  ret = sched_reprioritize(tcb, param->sched_priority);
  sched_unlock();
  return ret;

errout_with_lock:
  set_errno(errcode);
  sched_unlock();
  return ERROR;

errout_with_errcode:
  set_errno(errcode);
  return ERROR;
}
/****************************************************************************
 * Name: nanosleep
 *
 * Description:
 *   Suspend the calling thread for the interval given by 'rqtp', or until
 *   an unmasked signal is delivered.  Implemented as a thin wrapper over
 *   sigtimedwait() with an empty signal set.
 *
 * Input Parameters:
 *   rqtp - Requested sleep interval; tv_nsec must lie in [0, 999999999].
 *   rmtp - If non-NULL and the sleep is interrupted by a signal, receives
 *          the portion of the interval that was not slept.
 *
 * Returned Value:
 *   OK if the full interval elapsed.  ERROR with errno set otherwise:
 *   EINVAL for a bad request, EINTR if a signal interrupted the sleep.
 *
 ****************************************************************************/

int nanosleep(FAR const struct timespec *rqtp, FAR struct timespec *rmtp)
{
  irqstate_t intstate;
  systime_t start;
  sigset_t sigset;
  struct siginfo info;
  int errcode;
#ifdef CONFIG_DEBUG_ASSERTIONS
  /* Warning avoidance */

  int status;
#endif

  /* nanosleep() is a cancellation point */

  (void)enter_cancellation_point();

  /* Reject a NULL request or a nanosecond count outside the valid range */

  if (!rqtp || rqtp->tv_nsec < 0 || rqtp->tv_nsec >= 1000000000)
    {
      errcode = EINVAL;
      goto errout;
    }

  /* Latch the starting tick count.  Interrupts are disabled so that the
   * system timer stays stable across the tick arithmetic performed before
   * and after the wait.
   */

  intstate = enter_critical_section();
  start = clock_systimer();

  /* An empty signal set means that we are not waiting for any particular
   * signal; any unmasked signal may still awaken sigtimedwait().
   */

  (void)sigemptyset(&sigset);

  /* nanosleep is a simple application of sigtimedwait. */

#ifdef CONFIG_DEBUG_ASSERTIONS
  /* Warning avoidance */

  status = sigtimedwait(&sigset, &info, rqtp);
#else
  (void)sigtimedwait(&sigset, &info, rqtp);
#endif

  /* sigtimedwait() cannot succeed here.  It always fails with either
   * (1) EAGAIN, meaning the timeout expired, or (2) EINTR, meaning that
   * some other unblocked signal was caught.
   */

  errcode = get_errno();
  DEBUGASSERT(status < 0 && (errcode == EAGAIN || errcode == EINTR));

  if (errcode == EAGAIN)
    {
      /* The timeout "error" is the normal, successful result */

      leave_critical_section(intstate);
      leave_cancellation_point();
      return OK;
    }

  /* Getting here means the wait was broken by a signal.  If the caller
   * supplied rmtp, report how much of the requested time went unslept.
   */

  if (rmtp)
    {
      systime_t elapsed;
      systime_t remaining;
      int ticks;

      /* REVISIT: The round trip through ticks could be avoided by using
       * clock_timespec_subtract() to form the time difference directly.
       */

      /* The number of clock ticks that we were requested to wait... */

      (void)clock_time2ticks(rqtp, &ticks);

      /* ...versus the number of ticks that we actually waited */

      elapsed = clock_systimer() - start;

      /* Whatever was requested but not served is the unwaited time */

      remaining = (elapsed >= (uint32_t)ticks) ?
                  0 : (uint32_t)ticks - elapsed;

      (void)clock_ticks2time((int)remaining, rmtp);
    }

  leave_critical_section(intstate);

errout:
  set_errno(errcode);
  leave_cancellation_point();
  return ERROR;
}
/****************************************************************************
 * Name: pthread_create
 *
 * Description:
 *   Create a new thread of execution running 'start_routine' with the
 *   single argument 'arg', using the scheduling attributes in 'attr'
 *   (or the defaults if attr is NULL).
 *
 * Input Parameters:
 *   thread        - If non-NULL, receives the ID of the new thread.
 *   attr          - Thread attributes, or NULL for g_default_pthread_attr.
 *   start_routine - Entry point of the new thread.
 *   arg           - Single parameter passed by value to start_routine.
 *
 * Returned Value:
 *   Zero (OK) on success.  A positive errno value on failure (pthread
 *   convention: the error code is returned, errno is not set).
 *
 ****************************************************************************/

int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR struct pthread_tcb_s *ptcb;
  FAR struct join_s *pjoin;
  struct sched_param param;
  int policy;
  int errcode;
  pid_t pid;
  int ret;
#ifdef HAVE_TASK_GROUP
  bool group_joined = false;
#endif

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR struct pthread_tcb_s *)
    kmm_zalloc(sizeof(struct pthread_tcb_s));
  if (!ptcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      return ENOMEM;
    }

#ifdef HAVE_TASK_GROUP
  /* Bind the parent's group to the new TCB (we have not yet joined the
   * group).
   */

  ret = group_bind(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_tcb;
    }
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Share the address environment of the parent task group. */

  ret = up_addrenv_attach(ptcb->cmn.group,
                          (FAR struct tcb_s *)g_readytorun.head);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR struct join_s *)kmm_zalloc(sizeof(struct join_s));
  if (!pjoin)
    {
      sdbg("ERROR: Failed to allocate join\n");
      errcode = ENOMEM;
      goto errout_with_tcb;
    }

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)ptcb, attr->stacksize,
                        TCB_FLAG_TTYPE_PTHREAD);
  if (ret != OK)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  /* Should we use the priority and scheduler specified in the pthread
   * attributes?  Or should we use the current thread's priority and
   * scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority (and any other scheduling parameters) for this
       * thread.  (BUGFIX: the argument here was mangled to '¶m' by a
       * bad encoding of '&param'.)
       */

      ret = sched_getparam(0, &param);
      if (ret == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }

      /* Get the scheduler policy for this thread */

      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }
    }
  else
    {
      /* Use the scheduler policy and parameters from the attributes */

      policy                             = attr->policy;
      param.sched_priority               = attr->priority;

#ifdef CONFIG_SCHED_SPORADIC
      param.sched_ss_low_priority        = attr->low_priority;
      param.sched_ss_max_repl            = attr->max_repl;
      param.sched_ss_repl_period.tv_sec  = attr->repl_period.tv_sec;
      param.sched_ss_repl_period.tv_nsec = attr->repl_period.tv_nsec;
      param.sched_ss_init_budget.tv_sec  = attr->budget.tv_sec;
      param.sched_ss_init_budget.tv_nsec = attr->budget.tv_nsec;
#endif
    }

#ifdef CONFIG_SCHED_SPORADIC
  if (policy == SCHED_SPORADIC)
    {
      FAR struct sporadic_s *sporadic;
      int repl_ticks;
      int budget_ticks;

      /* Convert timespec values to system clock ticks */

      (void)clock_time2ticks(&param.sched_ss_repl_period, &repl_ticks);
      (void)clock_time2ticks(&param.sched_ss_init_budget, &budget_ticks);

      /* The replenishment period must be greater than or equal to the
       * budget period.
       */

      if (repl_ticks < budget_ticks)
        {
          errcode = EINVAL;
          goto errout_with_join;
        }

      /* Initialize the sporadic policy */

      ret = sched_sporadic_initialize(&ptcb->cmn);
      if (ret >= 0)
        {
          sporadic = ptcb->cmn.sporadic;
          DEBUGASSERT(sporadic != NULL);

          /* Save the sporadic scheduling parameters */

          sporadic->hi_priority  = param.sched_priority;
          sporadic->low_priority = param.sched_ss_low_priority;
          sporadic->max_repl     = param.sched_ss_max_repl;
          sporadic->repl_period  = repl_ticks;
          sporadic->budget       = budget_ticks;

          /* And start the first replenishment interval */

          ret = sched_sporadic_start(&ptcb->cmn);
        }

      /* Handle any failures */

      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_join;
        }
    }
#endif

  /* Initialize the task control block */

  ret = pthread_schedsetup(ptcb, param.sched_priority, pthread_start,
                           start_routine);
  if (ret != OK)
    {
      errcode = EBUSY;
      goto errout_with_join;
    }

  /* Configure the TCB for a pthread receiving one parameter passed by
   * value.
   */

  pthread_argsetup(ptcb, arg);

#ifdef HAVE_TASK_GROUP
  /* Join the parent's task group */

  ret = group_join(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  group_joined = true;
#endif

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (FAR void *)pjoin;

  /* Set the appropriate scheduling policy in the TCB */

  ptcb->cmn.flags &= ~TCB_FLAG_POLICY_MASK;
  switch (policy)
    {
      default:
        DEBUGPANIC();
        /* fallthrough: treat an unknown policy as SCHED_FIFO */

      case SCHED_FIFO:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_FIFO;
        break;

#if CONFIG_RR_INTERVAL > 0
      case SCHED_RR:
        ptcb->cmn.flags    |= TCB_FLAG_SCHED_RR;
        ptcb->cmn.timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
        break;
#endif

#ifdef CONFIG_SCHED_SPORADIC
      case SCHED_SPORADIC:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_SPORADIC;
        break;
#endif

#if 0 /* Not supported */
      case SCHED_OTHER:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_OTHER;
        break;
#endif
    }

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join
   * structure as well.
   */

  pid = (int)ptcb->cmn.pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  ret = sem_init(&pjoin->data_sem, 0, 0);
  if (ret == OK)
    {
      ret = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (ret == OK)
    {
      ret = task_activate((FAR struct tcb_s *)ptcb);
    }

  if (ret == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join structure.
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
        {
          *thread = (pthread_t)pid;
        }

      if (!pjoin->started)
        {
          ret = EINVAL;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t *)ptcb, (FAR dq_queue_t *)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);

      errcode = EIO;
      goto errout_with_join;
    }

  return ret;

errout_with_join:
  sched_kfree(pjoin);
  ptcb->joininfo = NULL;

errout_with_tcb:
#ifdef HAVE_TASK_GROUP
  /* Clear group binding */

  if (ptcb && !group_joined)
    {
      ptcb->cmn.group = NULL;
    }
#endif

  sched_releasetcb((FAR struct tcb_s *)ptcb, TCB_FLAG_TTYPE_PTHREAD);
  return errcode;
}