portTASK_FUNCTION(Timer_Task, pvParameters)
{
    /* 'sec' is assumed to be a global countdown counter shared with a
     * display task; it is not declared locally in the original listing. */
    sec = 3;

    vTaskDelay(MSEC2TICK(3000));
    while (sec > 0) {
        vTaskDelay(MSEC2TICK(1000));
        sec--;
    }
}
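Every snippet in this collection funnels a millisecond value through MSEC2TICK before handing it to a tick-based kernel API. A minimal sketch of such a macro, assuming a NuttX-style MSEC_PER_TICK configuration; the real definition lives in the platform's clock header (e.g. nuttx/clock.h) and its rounding may differ:

/* Sketch only: assumes a 100 Hz system tick (10 ms per tick). Rounds up
 * so that a requested delay is never shortened. */
#define MSEC_PER_TICK   10
#define MSEC2TICK(msec) (((msec) + (MSEC_PER_TICK) - 1) / (MSEC_PER_TICK))

/* e.g. MSEC2TICK(25) == 3 ticks at 100 Hz */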
static void accel_data_report_worker(void *arg)
{
    struct sensor_accel_info *info;
    struct report_info *rinfo;
    struct report_info_data *rinfo_data;
    struct sensor_event_data *event_data;
    struct timespec ts;
    uint16_t payload_size;

    payload_size = (PRESSURE_READING_NUM * sizeof(struct sensor_event_data))
                   + (REPORTING_SENSORS * sizeof(struct report_info));

    info = arg;
    if (info->callback) {
        rinfo_data = malloc(sizeof(struct report_info_data) + payload_size);
        if (!rinfo_data)
            goto out;

        rinfo_data->num_sensors_reporting = REPORTING_SENSORS;
        rinfo = rinfo_data->reportinfo;
        rinfo->id = info->sensor_id;
        rinfo->flags = 0;
        event_data = (struct sensor_event_data *)&rinfo->data_payload[0];

        up_rtc_gettime(&ts);
        rinfo->reference_time = timespec_to_nsec(&ts);
        gb_debug("[%u.%03u]\n", ts.tv_sec, (ts.tv_nsec / 1000000));

#ifdef BATCH_PROCESS_ENABLED
        /*
         * Batch sensor data values and their time_deltas
         * until the max FIFO event count is reached.
         */
#else
        /* Single sensor event data; 'data' is assumed to be a file-scope
         * counter used to fabricate readings. */
        rinfo->readings = PRESSURE_READING_NUM;
        event_data->time_delta = 0;
        event_data->data_value[0] = data++;
        event_data->data_value[1] = data++;
        event_data->data_value[2] = data++;
#endif
        gb_debug("report sensor: %d\n", rinfo->id);
        info->callback(info->sensor_id, rinfo_data, payload_size);

        free(rinfo_data);
    }

out:
    /* Cancel any pending work and reset ourselves */
    if (!work_available(&info->data_report_work))
        work_cancel(LPWORK, &info->data_report_work);

    /* If not already scheduled, schedule the next report */
    if (work_available(&info->data_report_work))
        work_queue(LPWORK, &info->data_report_work,
                   accel_data_report_worker, info, MSEC2TICK(1200));
}
void up_timer_initialize(void)
{
  uint32_t tctl;

  /* Make sure the timer interrupts are disabled */

  up_disable_irq(IMX_IRQ_SYSTIMER);

  /* Make sure that timer1 is disabled */

  putreg32(0, IMX_TIMER1_TCTL);
  putreg32(0, IMX_TIMER1_TPRER);

  /* Select restart mode with source = PERCLK1. In restart mode, after
   * the compare value is reached, the counter resets to 0x00000000, the
   * compare event (COMP) bit of the timer status register is set, an
   * interrupt is issued if the interrupt request enable (IRQEN) bit of
   * the corresponding TCTL register is set, and the counter resumes
   * counting.
   */

  tctl = TCTL_CLKSOURCE_PERCLK1;
  putreg32(tctl, IMX_TIMER1_TCTL);

  /* The timer is driven by PERCLK1. Set prescaler for division by one
   * so that the clock is driven at PERCLK1.
   *
   * putreg(0, IMX_TIMER1_TPRER); -- already the case
   *
   * Set the compare register so that the COMP interrupt is generated
   * with a period of USEC_PER_TICK. The value IMX_PERCLK1_FREQ / 1000
   * (defined in board.h) is the number of counts per millisecond, so:
   */

  putreg32(MSEC2TICK(IMX_PERCLK1_FREQ / 1000), IMX_TIMER1_TCMP);

  /* Configure to provide timer COMP interrupts when TCN increments
   * to TCMP.
   */

  tctl |= TIMER_TCTL_IRQEN;
  putreg32(tctl, IMX_TIMER1_TCTL);

  /* Finally, enable the timer (this must be the last operation on TCTL) */

  tctl |= TIMER_TCTL_TEN;
  putreg32(tctl, IMX_TIMER1_TCTL);

  /* Attach and enable the timer interrupt */

  irq_attach(IMX_IRQ_SYSTIMER, (xcpt_t)up_timerisr);
  up_enable_irq(IMX_IRQ_SYSTIMER);
}
systime_t clock_systimer(void)
{
#ifdef CONFIG_SCHED_TICKLESS
#  ifdef CONFIG_SYSTEM_TIME64
  struct timespec ts;

  /* Get the time from the platform specific hardware */

  (void)up_timer_gettime(&ts);

  /* Convert to a 64-bit value in microseconds, then in clock tick units */

  return USEC2TICK(1000000 * (uint64_t)ts.tv_sec +
                   (uint64_t)ts.tv_nsec / 1000);

#  else /* CONFIG_SYSTEM_TIME64 */
  struct timespec ts;
  uint64_t tmp;

  /* Get the time from the platform specific hardware */

  (void)up_timer_gettime(&ts);

  /* Convert to a 64- then a 32-bit value */

  tmp = MSEC2TICK(1000 * (uint64_t)ts.tv_sec +
                  (uint64_t)ts.tv_nsec / 1000000);
  return (systime_t)(tmp & 0x00000000ffffffff);

#  endif /* CONFIG_SYSTEM_TIME64 */
#else /* CONFIG_SCHED_TICKLESS */
#  ifdef CONFIG_SYSTEM_TIME64
  irqstate_t flags;
  systime_t sample;

  /* 64-bit accesses are not atomic on most architectures. */

  flags = irqsave();
  sample = g_system_timer;
  irqrestore(flags);
  return sample;

#  else /* CONFIG_SYSTEM_TIME64 */

  /* Return the current system time */

  return g_system_timer;

#  endif /* CONFIG_SYSTEM_TIME64 */
#endif /* CONFIG_SCHED_TICKLESS */
}
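A common pattern built on clock_systimer() is elapsed-time measurement in ticks; ft80x_fade() further down uses exactly this. A small hypothetical sketch:

/* Hypothetical: time an operation in system ticks. Unsigned subtraction
 * stays correct across a 32-bit counter wraparound. */
systime_t start = clock_systimer();

do_work();                                  /* placeholder operation */

systime_t elapsed = clock_systimer() - start;
if (elapsed >= MSEC2TICK(100))
  {
    /* The operation took 100 ms or longer */
  }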
int poll(FAR struct pollfd *fds, nfds_t nfds, int timeout)
{
  WDOG_ID wdog;
  sem_t sem;
  int count = 0;
  int ret;

  sem_init(&sem, 0, 0);
  ret = poll_setup(fds, nfds, &sem);
  if (ret >= 0)
    {
      if (timeout >= 0)
        {
          /* Wait for the poll event with a timeout. Note that the
           * millisecond timeout has to be converted to system clock
           * ticks for wd_start.
           */

          wdog = wd_create();
          wd_start(wdog, MSEC2TICK(timeout), poll_timeout, 1,
                   (uint32_t)&sem);
          poll_semtake(&sem);
          wd_delete(wdog);
        }
      else
        {
          /* Wait for the poll event with no timeout */

          poll_semtake(&sem);
        }

      /* Teardown the poll operation and get the count of events */

      ret = poll_teardown(fds, nfds, &count);
    }

  sem_destroy(&sem);

  /* Check for errors */

  if (ret < 0)
    {
      set_errno(-ret);
      return ERROR;
    }

  return count;
}
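A minimal caller sketch for the implementation above (standard poll() usage; 'fd' is a placeholder descriptor): wait up to 500 ms for input to become readable.

struct pollfd pfd;
pfd.fd     = fd;
pfd.events = POLLIN;

int n = poll(&pfd, 1, 500);
if (n > 0 && (pfd.revents & POLLIN))
  {
    /* Data is ready to read */
  }
else if (n == 0)
  {
    /* The 500 ms timeout expired */
  }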
/* Schedule/cancel the longpress event as needed */
static void ara_key_longpress_update(struct ara_key_context *key, bool active)
{
    irqstate_t flags;

    flags = irqsave();
    if (active) {
        /* If not already scheduled, schedule the longpress event */
        if (work_available(&key->longpress_work))
            work_queue(HPWORK, &key->longpress_work,
                       ara_key_longpress_worker, key,
                       MSEC2TICK(ARA_KEY_LONGPRESS_TIME_MS));
    } else {
        /* If the key is released, cancel any pending longpress events */
        if (!work_available(&key->longpress_work))
            work_cancel(HPWORK, &key->longpress_work);
    }
    irqrestore(flags);
}
portTASK_FUNCTION(Item_Task, pvParameters)
{
    int i, j;
    u8 itemOnMap;
    portTickType xLastWakeTime = xTaskGetTickCount();
    portTickType rand;

    PA_LoadSpritePal(DOWN_SCREEN, ITEM_PAL, (void*)ITEM_Pal);

    while (1) {
        if (length < MAX_BODY_LENGTH) {
            itemOnMap = FALSE;  /* the original redeclared this, shadowing
                                 * the variable above; one flag suffices */
            for (i = 0; i < MAX_MAP_X_LENGTH; i++)
                for (j = 0; j < MAX_MAP_Y_LENGTH; j++)
                    if (map[i][j].state == MAP_STATE_ITEM)
                        itemOnMap = TRUE;

            if (itemOnMap == FALSE) {
                u8 x;
                u8 y;

                /* Pick a free cell away from the border, using the tick
                 * count as a crude random source. */
                do {
                    x = xTaskGetTickCount() % MAX_MAP_X_LENGTH;
                    y = xTaskGetTickCount() % MAX_MAP_Y_LENGTH;
                } while ((map[x][y].state != MAP_STATE_NULL) ||
                         (x == 1) || x == (MAX_MAP_X_LENGTH - 2) ||
                         (y == 1) || y == (MAX_MAP_Y_LENGTH - 2));

                map[x][y].state = MAP_STATE_ITEM;
                PA_CreateSprite(DOWN_SCREEN, ITEM, (void*)ITEM_Sprite,
                                OBJ_SIZE_32X16, TRUE, ITEM_PAL,
                                map[x][y].x - 8, map[x][y].y);
                rand = xTaskGetTickCount();
                rand = (rand % 2) * 2;  /* 0 or 2 */
                PA_StartSpriteAnim(DOWN_SCREEN, ITEM, rand, rand + 1, 1);
                map[x][y].itemSpriteNum = ITEM;
            }
        }
        vTaskDelayUntil(&xLastWakeTime, MSEC2TICK(5555));
    }
}
portTASK_FUNCTION(Screen_Task, pvParameters)
{
    PA_LoadSpritePal(UP_SCREEN, UPSCR_BODY_PAL, (void*)man_Pal);
    PA_LoadSpritePal(UP_SCREEN, NUM_PAL, (void*)number_Pal);
    PA_LoadSpritePal(UP_SCREEN, WORD_PAL, (void*)word_Pal);

    PA_CreateSprite(UP_SCREEN, WORD,   (void*)word_Sprite,   OBJ_SIZE_32X32, TRUE, WORD_PAL,  8, 22);
    PA_CreateSprite(UP_SCREEN, WORD+1, (void*)word_Sprite,   OBJ_SIZE_32X32, TRUE, WORD_PAL, 40, 22);
    PA_CreateSprite(UP_SCREEN, WORD+2, (void*)number_Sprite, OBJ_SIZE_32X32, TRUE, NUM_PAL,  72, 22);
    PA_CreateSprite(UP_SCREEN, WORD+3, (void*)number_Sprite, OBJ_SIZE_32X32, TRUE, NUM_PAL,  97, 22);

    PA_SetSpriteAnim(UP_SCREEN, WORD,   2);
    PA_SetSpriteAnim(UP_SCREEN, WORD+1, 3);
    PA_SetSpriteAnim(UP_SCREEN, WORD+2, MAX_BODY_LENGTH / 10);
    PA_SetSpriteAnim(UP_SCREEN, WORD+3, MAX_BODY_LENGTH % 10);

    PA_CreateSprite(UP_SCREEN, UPSCRBODY,  (void*)man_Sprite,    OBJ_SIZE_16X32, TRUE, UPSCR_BODY_PAL, 12, 52);
    PA_CreateSprite(UP_SCREEN, UPSCRNUM,   (void*)number_Sprite, OBJ_SIZE_32X32, TRUE, NUM_PAL, 30, 58);
    PA_SetSpriteAnim(UP_SCREEN, UPSCRNUM, 10);
    PA_CreateSprite(UP_SCREEN, UPSCRNUM+1, (void*)number_Sprite, OBJ_SIZE_32X32, TRUE, NUM_PAL, 55, 58);
    PA_CreateSprite(UP_SCREEN, UPSCRNUM+2, (void*)number_Sprite, OBJ_SIZE_32X32, TRUE, NUM_PAL, 80, 58);
    PA_StartSpriteAnim(UP_SCREEN, UPSCRBODY, 8, 10, 3);

    while (1) {
        PA_SetSpriteAnim(UP_SCREEN, UPSCRNUM+1, length / 10);
        PA_SetSpriteAnim(UP_SCREEN, UPSCRNUM+2, length % 10);
        vTaskDelay(MSEC2TICK(100));
    }
}
// Convert the 4x4 key-matrix input into a 6x6 layout.
// Same scheme as the existing key-scan routine.
portTASK_FUNCTION(Puzzle_Key_Task, pvParameters)
{
    // Variables
    u8 key, scan = 0;
    u8 key_pressed = FALSE;

    while (1) {
        if (!key_pressed) {
            write_puzzle(0x20 >> scan);
            key = scan * 6;
            switch (read_puzzle()) {
                case 32: key += 1; break;
                case 16: key += 2; break;
                case 8:  key += 3; break;
                case 4:  key += 4; break;
                case 2:  key += 5; break;
                case 1:  key += 6; break;
                default: key = 255; break;
            }

            scan++;
            if (scan == 6)
                scan = 0;

            if (key <= 36) {
                key_pressed = TRUE;
                xQueueSend(KeyQueue, &key, 0);
            }
        }

        if (key_pressed && (read_puzzle() == 0))
            key_pressed = FALSE;

        vTaskDelay(MSEC2TICK(25));
    }
}
portTASK_FUNCTION(Unit_Task, pvParameters)
{
    body bd[MAX_BODY_LENGTH];
    int i;
    u8 x = 5;
    u8 y = 5;
    u8 pfx;
    u8 pfy;
    u8 pfDirection;
    u8 tempx;
    u8 tempy;
    u8 tempDirection;
    portTickType xLastWakeTime = xTaskGetTickCount();

    unit.x = 5;
    unit.y = 5;
    unit.direction = D_DOWN;

    PA_LoadSpritePal(DOWN_SCREEN, C_PAL, (void*)C_Pal);
    PA_LoadSpritePal(DOWN_SCREEN, BODY_PAL, (void*)man_Pal);
    PA_CreateSprite(DOWN_SCREEN, C, (void*)C_Sprite, OBJ_SIZE_32X32, TRUE,
                    C_PAL, map[unit.x][unit.y].x - 8,
                    map[unit.x][unit.y].y - 16);

    while (1) {
        x = unit.x;
        y = unit.y;
        switch (unit.direction) {
            case D_UP:    y--; break;
            case D_DOWN:  y++; break;
            case D_LEFT:  x--; break;
            case D_RIGHT: x++; break;
            default: break;
        }

        if ((map[x][y].state == MAP_STATE_NULL) ||
            (map[x][y].state == MAP_STATE_ITEM)) {
            pfx = unit.x;
            pfy = unit.y;
            pfDirection = unit.direction;

            if (map[x][y].state == MAP_STATE_ITEM) {
                if (length < MAX_BODY_LENGTH) {  // grow toward max length
                    length++;
                    fireCreate(map);
                }
                if (length == MAX_BODY_LENGTH) {
                    dropshipCreate(map);
                }
                map[x][y].state = MAP_STATE_NULL;
                PA_StopSpriteAnim(DOWN_SCREEN, map[x][y].itemSpriteNum);
                PA_DeleteSprite(DOWN_SCREEN, map[x][y].itemSpriteNum);
                PA_CreateSprite(DOWN_SCREEN, BODY + (length - 1),
                                (void*)man_Sprite, OBJ_SIZE_16X32, TRUE,
                                BODY_PAL, 0, 0);
            }

            unit.x = x;
            unit.y = y;
            PA_SetSpriteXY(DOWN_SCREEN, C, map[unit.x][unit.y].x - 8,
                           map[unit.x][unit.y].y - 16);

            // Shift each body segment into the position of the one ahead
            for (i = 0; i < length; i++) {
                tempx = bd[i].x;
                tempy = bd[i].y;
                tempDirection = bd[i].direction;
                map[tempx][tempy].state = MAP_STATE_NULL;
                bd[i].x = pfx;
                bd[i].y = pfy;
                map[pfx][pfy].state = MAP_STATE_C_BODY;
                bd[i].direction = pfDirection;
                pfx = tempx;
                pfy = tempy;
                pfDirection = tempDirection;
                PA_SetSpriteXY(DOWN_SCREEN, BODY + i,
                               map[bd[i].x][bd[i].y].x,
                               (map[bd[i].x][bd[i].y].y) - 16);
                if (pfDirection != bd[i].direction)
                    PA_StartSpriteAnim(DOWN_SCREEN, BODY + i,
                                       bd[i].direction,
                                       bd[i].direction + 2, 7);
            }
        }

        if ((map[x][y].state == MAP_STATE_WALL) ||
            map[x][y].state == MAP_STATE_C_BODY) {  // game over
            AS_SoundQuickPlay(die);
            PA_ResetSpriteSys();
            PA_ResetBgSysScreen(DOWN_SCREEN);
            PA_LoadBackground(DOWN_SCREEN, 3, &FAILSCREEN);
            vTaskSuspend(NULL);
        }

        if (map[x][y].state == MAP_STATE_EXIT) {  // stage cleared
            PA_ResetSpriteSys();
            PA_ResetBgSysScreen(DOWN_SCREEN);
            PA_LoadBackground(DOWN_SCREEN, 3, &CLEARSCREEN);
            vTaskSuspend(NULL);
        }

        vTaskDelayUntil(&xLastWakeTime, MSEC2TICK(300 - (length * 10)));
    }
}
int sigtimedwait(FAR const sigset_t *set, FAR struct siginfo *info,
                 FAR const struct timespec *timeout)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;
  sigset_t intersection;
  FAR sigpendq_t *sigpend;
  irqstate_t saved_state;
  int32_t waitticks;
  int ret = ERROR;

  DEBUGASSERT(rtcb->waitdog == NULL);

  sched_lock();  /* Not necessary */

  /* Several operations must be performed below: We must determine if any
   * signal is pending and, if not, wait for the signal. Since signals can
   * be posted from the interrupt level, there is a race condition that
   * can only be eliminated by disabling interrupts!
   */

  saved_state = irqsave();

  /* Check if there is a pending signal corresponding to one of the
   * signals in the pending signal set argument.
   */

  intersection = *set & sig_pendingset(rtcb);
  if (intersection != NULL_SIGNAL_SET)
    {
      /* One or more of the signals in the intersection is sufficient to
       * cause us to not wait. Pick the lowest numbered signal and mark it
       * not pending.
       */

      sigpend = sig_removependingsignal(rtcb, sig_lowest(&intersection));
      ASSERT(sigpend);

      /* Return the signal info to the caller if so requested */

      if (info)
        {
          memcpy(info, &sigpend->info, sizeof(struct siginfo));
        }

      /* The return value is the number of the signal that awakened us.
       * Capture it before disposing of the pending signal structure.
       */

      ret = sigpend->info.si_signo;

      /* Then dispose of the pending signal structure properly */

      sig_releasependingsignal(sigpend);
      irqrestore(saved_state);
    }

  /* We will have to wait for a signal to be posted to this task. */

  else
    {
      /* Save the set of pending signals to wait for */

      rtcb->sigwaitmask = *set;

      /* Check if we should wait for the timeout */

      if (timeout)
        {
          /* Convert the timespec to system clock ticks, making sure that
           * the resulting delay is greater than or equal to the requested
           * time in nanoseconds.
           */

#ifdef CONFIG_HAVE_LONG_LONG
          uint64_t waitticks64 = ((uint64_t)timeout->tv_sec * NSEC_PER_SEC +
                                  (uint64_t)timeout->tv_nsec +
                                  NSEC_PER_TICK - 1) /
                                 NSEC_PER_TICK;
          DEBUGASSERT(waitticks64 <= UINT32_MAX);
          waitticks = (uint32_t)waitticks64;
#else
          uint32_t waitmsec;

          DEBUGASSERT(timeout->tv_sec < UINT32_MAX / MSEC_PER_SEC);
          waitmsec = timeout->tv_sec * MSEC_PER_SEC +
                     (timeout->tv_nsec + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC;
          waitticks = MSEC2TICK(waitmsec);
#endif

          /* Create a watchdog */

          rtcb->waitdog = wd_create();
          DEBUGASSERT(rtcb->waitdog);

          if (rtcb->waitdog)
            {
              /* This little bit of nonsense is necessary for some
               * processors where sizeof(pointer) < sizeof(uint32_t).
               * See wdog.h.
               */

              wdparm_t wdparm;
              wdparm.pvarg = (FAR void *)rtcb;

              /* Start the watchdog */

              wd_start(rtcb->waitdog, waitticks, (wdentry_t)sig_timeout, 1,
                       wdparm.dwarg);

              /* Now wait for either the signal or the watchdog */

              up_block_task(rtcb, TSTATE_WAIT_SIG);

              /* We no longer need the watchdog */

              wd_delete(rtcb->waitdog);
              rtcb->waitdog = NULL;
            }

          /* REVISIT: And do what if there are no watchdog timers? The
           * wait will fail and we will return something bogus.
           */
        }

      /* No timeout, just wait */

      else
        {
          /* And wait until one of the unblocked signals is posted */

          up_block_task(rtcb, TSTATE_WAIT_SIG);
        }

      /* We are running again, clear the sigwaitmask */

      rtcb->sigwaitmask = NULL_SIGNAL_SET;

      /* When we awaken, the cause will be in the TCB. Get the signal
       * number (or timeout) that awakened us.
       */

      if (GOOD_SIGNO(rtcb->sigunbinfo.si_signo))
        {
          /* We were awakened by a signal... but is it one of the signals
           * that we were waiting for?
           */

          if (sigismember(set, rtcb->sigunbinfo.si_signo))
            {
              /* Yes.. the return value is the number of the signal that
               * awakened us.
               */

              ret = rtcb->sigunbinfo.si_signo;
            }
          else
            {
              /* No... then set EINTR and report an error */

              set_errno(EINTR);
              ret = ERROR;
            }
        }
      else
        {
          /* Otherwise, we must have been awakened by the timeout. Set
           * EAGAIN and return an error.
           */

          DEBUGASSERT(rtcb->sigunbinfo.si_signo == SIG_WAIT_TIMEOUT);
          set_errno(EAGAIN);
          ret = ERROR;
        }

      /* Return the signal info to the caller if so requested */

      if (info)
        {
          memcpy(info, &rtcb->sigunbinfo, sizeof(struct siginfo));
        }

      irqrestore(saved_state);
    }

  sched_unlock();
  return ret;
}
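A hypothetical caller sketch (standard POSIX usage): wait up to 1.5 s for SIGUSR1. The signal must be blocked before waiting on it, or it may be delivered through a handler instead.

sigset_t set;
struct siginfo info;
struct timespec timeout = { .tv_sec = 1, .tv_nsec = 500 * 1000 * 1000 };

sigemptyset(&set);
sigaddset(&set, SIGUSR1);
(void)sigprocmask(SIG_BLOCK, &set, NULL);

int signo = sigtimedwait(&set, &info, &timeout);
if (signo < 0 && get_errno() == EAGAIN)
  {
    /* The timeout expired before SIGUSR1 was posted */
  }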
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice. This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything.
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list.
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s *)g_readytorun.head;

          /* Then switch contexts. Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Restore the exception context of the new task that is ready
           * to run (probably tcb). This is the new rtcb at the head of
           * the g_readytorun task list.
           */

          struct tcb_s *nexttcb = (struct tcb_s *)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list. It does not 'return' in the
           * normal sense. When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s *)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice. This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything.
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list.
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s *)g_readytorun.head;

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
        }

      /* We are not in an interrupt handler. Copy the user C context
       * into the TCB of the task that was previously active. If
       * up_saveusercontext returns a non-zero value, then this is really
       * the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready
           * to run (probably tcb). This is the new rtcb at the head of
           * the g_readytorun task list.
           */

          rtcb = (struct tcb_s *)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
portTASK_FUNCTION(Key_Task, pvParameters)
{
    vTaskDelay(MSEC2TICK(1000000));
}
void up_unblock_task(FAR struct tcb_s *tcb)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* dbg("Unblocking TCB=%p\n", tcb); */

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice. This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything.
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list.
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (IN_INTERRUPT)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current context into the OLD rtcb.
           */

          SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s *)g_readytorun.head;
          /* dbg("New Active Task TCB=%p\n", rtcb); */

          /* Then set up so that the context switch will be performed on
           * exit from the interrupt.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* We are not in an interrupt handler. Copy the user C context
       * into the TCB of the task that was previously active. If
       * SAVE_USERCONTEXT returns a non-zero value, then this is really
       * the previously running task restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the new task that is ready
           * to run (probably tcb). This is the new rtcb at the head of
           * the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s *)g_readytorun.head;
          /* dbg("New Active Task TCB=%p\n", rtcb); */

          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR struct pthread_tcb_s *ptcb;
  FAR struct join_s *pjoin;
  struct sched_param param;
  int policy;
  int errcode;
  pid_t pid;
  int ret;
#ifdef HAVE_TASK_GROUP
  bool group_joined = false;
#endif

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR struct pthread_tcb_s *)kmm_zalloc(sizeof(struct pthread_tcb_s));
  if (!ptcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      return ENOMEM;
    }

#ifdef HAVE_TASK_GROUP
  /* Bind the parent's group to the new TCB (we have not yet joined the
   * group).
   */

  ret = group_bind(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_tcb;
    }
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Share the address environment of the parent task group. */

  ret = up_addrenv_attach(ptcb->cmn.group,
                          (FAR struct tcb_s *)g_readytorun.head);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR struct join_s *)kmm_zalloc(sizeof(struct join_s));
  if (!pjoin)
    {
      sdbg("ERROR: Failed to allocate join\n");
      errcode = ENOMEM;
      goto errout_with_tcb;
    }

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)ptcb, attr->stacksize,
                        TCB_FLAG_TTYPE_PTHREAD);
  if (ret != OK)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  /* Should we use the priority and scheduler specified in the pthread
   * attributes? Or should we use the current thread's priority and
   * scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority (and any other scheduling parameters) for this
       * thread.
       */

      ret = sched_getparam(0, &param);
      if (ret == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }

      /* Get the scheduler policy for this thread */

      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }
    }
  else
    {
      /* Use the scheduler policy and parameters from the attributes */

      policy               = attr->policy;
      param.sched_priority = attr->priority;

#ifdef CONFIG_SCHED_SPORADIC
      param.sched_ss_low_priority        = attr->low_priority;
      param.sched_ss_max_repl            = attr->max_repl;
      param.sched_ss_repl_period.tv_sec  = attr->repl_period.tv_sec;
      param.sched_ss_repl_period.tv_nsec = attr->repl_period.tv_nsec;
      param.sched_ss_init_budget.tv_sec  = attr->budget.tv_sec;
      param.sched_ss_init_budget.tv_nsec = attr->budget.tv_nsec;
#endif
    }

#ifdef CONFIG_SCHED_SPORADIC
  if (policy == SCHED_SPORADIC)
    {
      FAR struct sporadic_s *sporadic;
      int repl_ticks;
      int budget_ticks;

      /* Convert timespec values to system clock ticks */

      (void)clock_time2ticks(&param.sched_ss_repl_period, &repl_ticks);
      (void)clock_time2ticks(&param.sched_ss_init_budget, &budget_ticks);

      /* The replenishment period must be greater than or equal to the
       * budget period.
       */

      if (repl_ticks < budget_ticks)
        {
          errcode = EINVAL;
          goto errout_with_join;
        }

      /* Initialize the sporadic policy */

      ret = sched_sporadic_initialize(&ptcb->cmn);
      if (ret >= 0)
        {
          sporadic = ptcb->cmn.sporadic;
          DEBUGASSERT(sporadic != NULL);

          /* Save the sporadic scheduling parameters */

          sporadic->hi_priority  = param.sched_priority;
          sporadic->low_priority = param.sched_ss_low_priority;
          sporadic->max_repl     = param.sched_ss_max_repl;
          sporadic->repl_period  = repl_ticks;
          sporadic->budget       = budget_ticks;

          /* And start the first replenishment interval */

          ret = sched_sporadic_start(&ptcb->cmn);
        }

      /* Handle any failures */

      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_join;
        }
    }
#endif

  /* Initialize the task control block */

  ret = pthread_schedsetup(ptcb, param.sched_priority, pthread_start,
                           start_routine);
  if (ret != OK)
    {
      errcode = EBUSY;
      goto errout_with_join;
    }

  /* Configure the TCB for a pthread receiving one parameter
   * passed by value.
   */

  pthread_argsetup(ptcb, arg);

#ifdef HAVE_TASK_GROUP
  /* Join the parent's task group */

  ret = group_join(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  group_joined = true;
#endif

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (FAR void *)pjoin;

  /* Set the appropriate scheduling policy in the TCB */

  ptcb->cmn.flags &= ~TCB_FLAG_POLICY_MASK;
  switch (policy)
    {
      default:
        DEBUGPANIC();
      case SCHED_FIFO:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_FIFO;
        break;

#if CONFIG_RR_INTERVAL > 0
      case SCHED_RR:
        ptcb->cmn.flags     |= TCB_FLAG_SCHED_RR;
        ptcb->cmn.timeslice  = MSEC2TICK(CONFIG_RR_INTERVAL);
        break;
#endif

#ifdef CONFIG_SCHED_SPORADIC
      case SCHED_SPORADIC:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_SPORADIC;
        break;
#endif

#if 0 /* Not supported */
      case SCHED_OTHER:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_OTHER;
        break;
#endif
    }

  /* Get the assigned pid before we start the task (who knows what could
   * happen to ptcb after this!). Copy this ID into the join structure
   * as well.
   */

  pid = (int)ptcb->cmn.pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  ret = sem_init(&pjoin->data_sem, 0, 0);
  if (ret == OK)
    {
      ret = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (ret == OK)
    {
      ret = task_activate((FAR struct tcb_s *)ptcb);
    }

  if (ret == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join structure.
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
        {
          *thread = (pthread_t)pid;
        }

      if (!pjoin->started)
        {
          ret = EINVAL;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t *)ptcb, (FAR dq_queue_t *)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);

      errcode = EIO;
      goto errout_with_join;
    }

  return ret;

errout_with_join:
  sched_kfree(pjoin);
  ptcb->joininfo = NULL;

errout_with_tcb:
#ifdef HAVE_TASK_GROUP
  /* Clear group binding */

  if (ptcb && !group_joined)
    {
      ptcb->cmn.group = NULL;
    }
#endif

  sched_releasetcb((FAR struct tcb_s *)ptcb, TCB_FLAG_TTYPE_PTHREAD);
  return errcode;
}
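A minimal caller sketch (standard pthreads usage) exercising the attribute path above, which selects a policy explicitly instead of inheriting it. Note that this pthread_create() returns 0 on success or an errno value directly, as the implementation shows.

static pthread_addr_t worker(pthread_addr_t arg)
{
  /* Placeholder thread body */
  return NULL;
}

void start_worker(void)
{
  pthread_t tid;
  pthread_attr_t attr;

  (void)pthread_attr_init(&attr);
  (void)pthread_attr_setschedpolicy(&attr, SCHED_RR);

  if (pthread_create(&tid, &attr, worker, NULL) == 0)
    {
      (void)pthread_join(tid, NULL);
    }
}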
static int ft80x_fade(FAR struct ft80x_dev_s *priv,
                      FAR const struct ft80x_fade_s *fade)
{
  systime_t start;
  systime_t elapsed;
  int32_t delay;
  int32_t duty;
  int16_t endduty;
  int16_t delta;

  /* 0% corresponds to the value 0, but 100% corresponds to the value 128 */

  endduty = (uint16_t)((uint16_t)fade->duty << 7) / 100;

  /* Get the change in duty from the current to the terminal duty. */

  duty  = (int32_t)(ft80x_read_byte(priv, FT80X_REG_PWM_DUTY) & 0x7f);
  delta = endduty - (int16_t)duty;

  /* The "smoothness" of the steps will depend on the resolution of the
   * system timer. The minimum delay is <= 2 * system_clock_period.
   *
   * We will try for a FADE_STEP_MSEC delay, but we will adapt to whatever
   * we get if we are working close to the system timer resolution. For
   * human factors reasons, any delay less than 100 ms or so should appear
   * more or less smooth.
   *
   * The delay calculation should never overflow:
   *
   *   Max delay:        16,700 msec (MAX_FADE_DELAY)
   *   Min clock period: 1 usec
   *   Max delay:        16,700,000 ticks
   *   INT32_MAX:        2,147,483,647
   */

  delay = MSEC2TICK((int32_t)fade->delay);
  if (delay <= 0)
    {
      delay = 1;
    }

  start = clock_systimer();

  do
    {
      /* Wait for FADE_STEP_MSEC msec (or whatever we get) */

      (void)nxsig_usleep(FADE_STEP_MSEC * 1000);

      /* Get the elapsed time */

      elapsed = clock_systimer() - start;
      if (elapsed > INT32_MAX || (int32_t)elapsed >= delay)
        {
          duty = endduty;
        }
      else
        {
          /* Interpolate to get the next PWM duty in the fade. This
           * calculation should never overflow:
           *
           *   Max delta:       128
           *   Max elapsed:     16,700,000 ticks
           *   Max numerator:   2,137,600,000
           *   Min denominator: 1
           *   Max duty:        2,137,600,000
           *   INT32_MAX:       2,147,483,647
           */

          duty += ((int32_t)delta * (int32_t)elapsed) / delay;
          if (duty > 128)
            {
              duty = 128;
            }
          else if (duty < 0)
            {
              duty = 0;
            }
        }

      /* Then set the new backlight PWM duty */

      ft80x_write_byte(priv, FT80X_REG_PWM_DUTY, (uint8_t)duty);
    }
  while (duty != endduty);

  return OK;
}
int sched_setscheduler(pid_t pid, int policy,
                       const struct sched_param *param)
{
  FAR struct tcb_s *tcb;
#if CONFIG_RR_INTERVAL > 0
  irqstate_t saved_state;
#endif
  int ret;

  /* Check for a supported scheduling policy */

#if CONFIG_RR_INTERVAL > 0
  if (policy != SCHED_FIFO && policy != SCHED_RR)
#else
  if (policy != SCHED_FIFO)
#endif
    {
      set_errno(EINVAL);
      return ERROR;
    }

  /* Check if the task to modify is the calling task */

  if (pid == 0)
    {
      pid = getpid();
    }

  /* Verify that the pid corresponds to a real task */

  tcb = sched_gettcb(pid);
  if (!tcb)
    {
      set_errno(ESRCH);
      return ERROR;
    }

  /* Prohibit any context switches while we muck with priority and
   * scheduler settings.
   */

  sched_lock();

#if CONFIG_RR_INTERVAL > 0
  /* Further, disable timer interrupts while we set up scheduling policy. */

  saved_state = irqsave();
  if (policy == SCHED_RR)
    {
      /* Set round robin scheduling */

      tcb->flags     |= TCB_FLAG_ROUND_ROBIN;
      tcb->timeslice  = MSEC2TICK(CONFIG_RR_INTERVAL);
    }
  else
    {
      /* Set FIFO scheduling */

      tcb->flags     &= ~TCB_FLAG_ROUND_ROBIN;
      tcb->timeslice  = 0;
    }

  irqrestore(saved_state);
#endif

  /* Set the new priority */

  ret = sched_reprioritize(tcb, param->sched_priority);
  sched_unlock();

  if (ret != OK)
    {
      return ERROR;
    }
  else
    {
      return OK;
    }
}
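A hypothetical usage sketch: switch the calling task (pid 0 means "self" in the implementation above) to round-robin scheduling at priority 100.

struct sched_param sparam;

sparam.sched_priority = 100;
if (sched_setscheduler(0, SCHED_RR, &sparam) != OK)
  {
    /* errno holds EINVAL, ESRCH, or the reprioritization error */
  }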
int sched_unlock(void)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;

  /* Check for some special cases: (1) rtcb may be NULL only during
   * early boot-up phases, and (2) sched_unlock() should have no
   * effect if called from the interrupt level.
   */

  if (rtcb && !up_interrupt_context())
    {
      /* Prevent context switches throughout the following */

      irqstate_t flags = irqsave();

      /* Decrement the preemption lock counter */

      if (rtcb->lockcount)
        {
          rtcb->lockcount--;
        }

      /* Check if the lock counter has decremented to zero. If so,
       * then pre-emption has been re-enabled.
       */

      if (rtcb->lockcount <= 0)
        {
          rtcb->lockcount = 0;

          /* Release any ready-to-run tasks that have collected in
           * g_pendingtasks.
           */

          if (g_pendingtasks.head)
            {
              up_release_pending();
            }

#if CONFIG_RR_INTERVAL > 0
          /* If (1) the task that was running supported round-robin
           * scheduling and (2) if its time slice has already expired, but
           * (3) it could not slice out because pre-emption was disabled,
           * then we need to swap the task out now and reassess the
           * interval timer for the next time slice.
           */

          if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
              rtcb->timeslice == 0)
            {
              /* Yes.. that is the situation. But one more thing. The call
               * to up_release_pending() above may have actually replaced
               * the task at the head of the ready-to-run list. In that
               * case, we need only to reset the timeslice value back to
               * the maximum.
               */

              if (rtcb != (FAR struct tcb_s *)g_readytorun.head)
                {
                  rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
                }
#ifdef CONFIG_SCHED_TICKLESS
              else
                {
                  sched_timer_reassess();
                }
#endif
            }
#endif

#ifdef CONFIG_SCHED_SPORADIC
#if CONFIG_RR_INTERVAL > 0
          else
#endif
          /* If (1) the task that was running supported sporadic scheduling
           * and (2) if its budget slice has already expired, but (3) it
           * could not slice out because pre-emption was disabled, then we
           * need to swap the task out now and reassess the interval timer
           * for the next time slice.
           */

          if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
              && rtcb->timeslice < 0)
            {
              /* Yes.. that is the situation. Force the low-priority state
               * now.
               */

              sched_sporadic_lowpriority(rtcb);

#ifdef CONFIG_SCHED_TICKLESS
              /* Make sure that the call to up_release_pending() did not
               * change the currently active task.
               */

              if (rtcb == (FAR struct tcb_s *)g_readytorun.head)
                {
                  sched_timer_reassess();
                }
#endif
            }
#endif
        }

      irqrestore(flags);
    }

  return OK;
}
uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
                                  bool noswitches)
{
  uint32_t ret;
  int decr;

  /* How much can we decrement the timeslice delay? If 'ticks' is greater
   * than the timeslice value, then we ignore any excess amount.
   *
   * 'ticks' should never be greater than the remaining timeslice. We try
   * to handle that gracefully, but it would be an error in the scheduling
   * if that were ever the case.
   */

  DEBUGASSERT(tcb != NULL && ticks <= tcb->timeslice);
  decr = MIN(tcb->timeslice, ticks);

  /* Decrement the timeslice counter */

  tcb->timeslice -= decr;

  /* Did decrementing the timeslice counter cause the timeslice to expire?
   *
   * If the task has pre-emption disabled, then we will let the timeslice
   * count go negative as an indication of this situation.
   */

  ret = tcb->timeslice;
  if (tcb->timeslice <= 0 && tcb->lockcount == 0)
    {
      /* We will also suppress context switches if we were called via one
       * of the unusual cases handled by sched_timer_reassess(). In that
       * case, we will return a value of one so that the timer will expire
       * as soon as possible and we can perform this action in the normal
       * timer expiration context.
       *
       * This is kind of a kludge, but I am not too concerned because I
       * hope that the situation is impossible or at least could only
       * occur on rare corner-cases.
       */

      if (noswitches)
        {
          ret = 1;
        }
      else
        {
          /* Reset the timeslice. */

          tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
          ret = tcb->timeslice;

          /* We know we are at the head of the ready-to-run prioritized
           * list. We must be the highest priority task eligible for
           * execution. Check the next task in the ready-to-run list. If
           * it is the same priority, then we need to relinquish the CPU
           * and give that task a shot.
           */

          if (tcb->flink &&
              tcb->flink->sched_priority >= tcb->sched_priority)
            {
              /* Just reset the task priority to its current value. This
               * will cause the task to be rescheduled behind any other
               * tasks at the same priority.
               */

              up_reprioritize_rtr(tcb, tcb->sched_priority);
            }
        }
    }

  return ret;
}