/* Initialize a newly-created task group: allocate the initial member list
 * (when group membership is tracked), link the group into the global group
 * list, and record the main task as the first member.
 *
 * tcb - TCB of the group's main task; tcb->cmn.group must already point at
 *       an allocated task_group_s.
 *
 * Returns OK on success; -ENOMEM if the member array cannot be allocated
 * (in which case the group structure itself is freed).
 */

int group_initialize(FAR struct task_tcb_s *tcb)
{
  FAR struct task_group_s *group;
#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  irqstate_t flags;
#endif

  DEBUGASSERT(tcb && tcb->cmn.group);
  group = tcb->cmn.group;

#ifdef HAVE_GROUP_MEMBERS
  /* Allocate space to hold GROUP_INITIAL_MEMBERS members of the group */

  group->tg_members = (FAR pid_t *)kmm_malloc(GROUP_INITIAL_MEMBERS*sizeof(pid_t));
  if (!group->tg_members)
    {
      /* On failure the group structure is released here; the caller must
       * not free it again.
       */

      kmm_free(group);
      return -ENOMEM;
    }

  /* Assign the PID of this new task as a member of the group. */

  group->tg_members[0] = tcb->cmn.pid;

  /* Initialize the non-zero elements of group structure and assign it to
   * the tcb.
   */

  group->tg_mxmembers = GROUP_INITIAL_MEMBERS; /* Number of members in allocation */
#endif

#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  /* Add the initialized entry to the head of the global list of groups.
   * The critical section protects against concurrent list updates from
   * interrupt handlers or other CPUs.
   */

  flags = enter_critical_section();
  group->flink = g_grouphead;
  g_grouphead = group;
  leave_critical_section(flags);
#endif

  /* Save the ID of the main task within the group of threads.  This is
   * needed for things like SIGCHLD.  The ID is also saved in the TCB of the
   * main task but is retained in the group, which may persist after the
   * main task has exited.
   */

#if !defined(CONFIG_DISABLE_PTHREAD) && defined(CONFIG_SCHED_HAVE_PARENT)
  group->tg_task = tcb->cmn.pid;
#endif

  /* Mark that there is one member in the group, the main task */

  group->tg_nmembers = 1;
  return OK;
}
/* Allocate a message-queue message structure.
 *
 * From interrupt context only the pre-allocated free lists may be used
 * (general first, then the interrupt-reserved list).  From task context a
 * message may additionally be allocated from the heap when the free list is
 * empty; such messages are tagged MQ_ALLOC_DYN so they can be freed rather
 * than returned to a free list.
 *
 * Returns the allocated message, or NULL if none is available.
 */

FAR struct mqueue_msg_s *mq_msgalloc(void)
{
  FAR struct mqueue_msg_s *mqmsg;
  irqstate_t flags;

  /* If we were called from an interrupt handler, then try to get the message
   * from generally available list of messages.  If this fails, then try the
   * list of messages reserved for interrupt handlers
   */

  if (up_interrupt_context())
    {
      /* Try the general free list */

      mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfree);
      if (mqmsg == NULL)
        {
          /* Try the free list reserved for interrupt handlers */

          mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfreeirq);
        }
    }

  /* We were not called from an interrupt handler. */

  else
    {
      /* Try to get the message from the generally available free list.
       * The critical section protects the free list from concurrent access
       * by interrupt handlers.
       */

      flags = enter_critical_section();
      mqmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&g_msgfree);
      leave_critical_section(flags);

      /* If we cannot get a message from the free list, then we will have to
       * allocate one from the heap.
       */

      if (mqmsg == NULL)
        {
          mqmsg = (FAR struct mqueue_msg_s *) kmm_malloc((sizeof (struct mqueue_msg_s)));

          /* Check if we allocated the message */

          if (mqmsg != NULL)
            {
              /* Yes... remember that this message was dynamically allocated */

              mqmsg->type = MQ_ALLOC_DYN;
            }
        }
    }

  return mqmsg;
}
/* Invalidate the entire PL310 L2 cache by way.
 *
 * Writes the full way mask to the invalidate-by-way register, spins until
 * the hardware clears all way bits (operation complete), then issues a
 * cache sync to drain the controller's buffers.  Runs with interrupts
 * masked so the maintenance sequence is not interleaved with other cache
 * operations.
 */

static void l2_inv_all(void)
{
	/* invalidate all ways */
	enter_critical_section();
	PL310_L2CC->InvalidateByWay = way_mask;

	/* Hardware clears each way's bit when its invalidation finishes */
	while (PL310_L2CC->InvalidateByWay & way_mask);

	/* Drain buffers so the invalidation is visible before we return */
	PL310_L2CC->CacheSync = 0;
	exit_critical_section();
}
/****************************************************************************
 * Name: timer_start
 *
 * Description:
 *   Starts a timer: the reload value is copied into the counter and the
 *   running bit is set.  Starting an already-running timer is harmless;
 *   it simply restarts the timeout from the full reload value.
 *
 * Input Parameters:
 *   id - Timer ID previously returned by timer_allocate.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

void timer_start(bl_timer_id id)
{
  irqstate_t flags;

  DEBUGASSERT(id >= 0 && id < arraySize(timers) && (timers[id].ctl & inuse));

  /* Reload and mark running atomically with respect to the timer ISR */

  flags = enter_critical_section();
  timers[id].count = timers[id].reload;
  timers[id].ctl |= running;
  leave_critical_section(flags);
}
/* Take the heap semaphore, implementing a recursive (counting) lock.
 *
 * If the caller already holds the semaphore the hold count is simply
 * incremented; otherwise the semaphore is taken, retrying when the wait is
 * interrupted by a signal (EINTR).  On SMP the whole operation runs inside
 * a critical section so that holder/count bookkeeping is atomic across CPUs.
 *
 * heap - The heap whose semaphore is to be taken.
 */

void mm_takesemaphore(FAR struct mm_heap_s *heap)
{
#ifdef CONFIG_SMP
  irqstate_t flags = enter_critical_section();
#endif
  pid_t my_pid = getpid();

  /* Do I already have the semaphore? */

  if (heap->mm_holder == my_pid)
    {
      /* Yes, just increment the number of references that I have */

      heap->mm_counts_held++;
    }
  else
    {
      int ret;

      /* Take the semaphore (perhaps waiting) */

      mseminfo("PID=%d taking\n", my_pid);
      do
        {
          ret = _SEM_WAIT(&heap->mm_semaphore);

          /* The only case that an error should occur here is if the wait
           * was awakened by a signal.
           */

          if (ret < 0)
            {
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
              /* Kernel-side API reports the error in the return value */

              DEBUGASSERT(ret == -EINTR || ret == -ECANCELED);
#else
              /* User-side API reports the error via errno */

              int errcode = get_errno();
              DEBUGASSERT(errcode == EINTR || errcode == ECANCELED);
              ret = -errcode;
#endif
            }
        }
      while (ret == -EINTR);  /* Retry only on signal interruption */

      /* We have it (or some awful, unexpected error occurred).  Claim
       * the semaphore and return.
       */

      heap->mm_holder = my_pid;
      heap->mm_counts_held = 1;
    }

#ifdef CONFIG_SMP
  leave_critical_section(flags);
#endif
  mseminfo("Holder=%d count=%d\n", heap->mm_holder, heap->mm_counts_held);
}
/* Allocate a pending signal action queue entry.
 *
 * From interrupt context only the pre-allocated free lists may be used
 * (general list first, then the interrupt-reserved list).  From task
 * context, an entry may additionally be allocated from the heap when the
 * free list is empty; such entries are tagged SIG_ALLOC_DYN so they are
 * freed rather than returned to a free list.
 *
 * Returns the allocated entry, or NULL if none is available.
 *
 * Fix: removed a redundant nested "if (!sigq)" test -- the enclosing
 * branch is only entered when sigq is already NULL.
 */

FAR sigq_t *sig_allocatependingsigaction(void)
{
  FAR sigq_t *sigq;
  irqstate_t flags;

  /* Check if we were called from an interrupt handler. */

  if (up_interrupt_context())
    {
      /* Try to get the pending signal action structure from the free list */

      sigq = (FAR sigq_t *)sq_remfirst(&g_sigpendingaction);

      /* If so, then try the special list of structures reserved for
       * interrupt handlers
       */

      if (!sigq)
        {
          sigq = (FAR sigq_t *)sq_remfirst(&g_sigpendingirqaction);
        }
    }

  /* If we were not called from an interrupt handler, then we are
   * free to allocate pending signal action structures if necessary.
   */

  else
    {
      /* Try to get the pending signal action structure from the free list.
       * The critical section protects the list from interrupt handlers.
       */

      flags = enter_critical_section();
      sigq = (FAR sigq_t *)sq_remfirst(&g_sigpendingaction);
      leave_critical_section(flags);

      /* Check if we got one. */

      if (!sigq)
        {
          /* No... allocate a new entry from the heap */

          sigq = (FAR sigq_t *)kmm_malloc((sizeof (sigq_t)));

          /* Check if we got an allocated message */

          if (sigq)
            {
              /* Remember that this one must be freed, not re-listed */

              sigq->type = SIG_ALLOC_DYN;
            }
        }
    }

  return sigq;
}
/* Callback for JVMTI_EVENT_VM_START.
 *
 * Records that the VM has started so later callbacks can distinguish the
 * start-up phase.  The flag update is done under the agent's global lock.
 */
static void JNICALL
cbVMStart(jvmtiEnv *jvmti, JNIEnv *env)
{
    enter_critical_section(jvmti);
    {
        /* Indicate VM has started */
        gdata->vm_is_started = JNI_TRUE;
    }
    exit_critical_section(jvmti);
}
/* Install an interrupt handler for the given vector.
 *
 * Out-of-range vectors are silently ignored.  The table update is done
 * with interrupts disabled so an IRQ never observes a half-written
 * func/arg pair.
 */
void register_int_handler(unsigned int vector, int_handler func, void *arg)
{
	if (vector < NR_IRQS) {
		enter_critical_section();
		handler[vector].func = func;
		handler[vector].arg = arg;
		exit_critical_section();
	}
}
/* Power down the HTC Leo target.
 *
 * Disables interrupts, shuts down the display (if the framebuffer console
 * is active), tears down the platform, then asks the modem processor to cut
 * power via proc_comm.  Never returns; spins if the power-down request does
 * not take effect.
 */
void target_shutdown(void)
{
	enter_critical_section();
	if(fbcon_display())
		htcleo_display_shutdown();
	platform_exit();
	msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
	for (;;) ;
}
/* Re-synchronize the software clock with the hardware RTC.
 *
 * The re-initialization runs inside a critical section so the time base is
 * not read or updated concurrently while it is being reloaded.
 */

void clock_synchronize(void)
{
  irqstate_t flags = enter_critical_section();
  clock_inittime();
  leave_critical_section(flags);
}
/* Print every NVRAM variable as "address: name = value", one per line.
 *
 * Fix: the original read gNvramList->next BEFORE entering the critical
 * section, so a concurrent list mutation (from an interrupt) could hand us
 * a stale or freed node.  The head is now sampled with interrupts disabled,
 * the same protection the traversal itself already had.
 */
void nvram_display_list()
{
	NvramVar* var;

	enter_critical_section();
	var = (NvramVar*) gNvramList->next;
	while(var != (void*) gNvramList) {
		printf("0x%08x: %s = %s\n", var, var->name, var->string);
		var = var->next;
	}
	printf("\n");
	exit_critical_section();
}
/* Remove a timer from the active timer list, if it is queued.
 *
 * Safe to call on a timer that is not currently listed; the removal is done
 * with interrupts disabled so it cannot race the timer tick handler.
 */
void timer_delete(timer_list_t *timer)
{
	enter_critical_section();
	if (list_in_list(&timer->node))
		list_delete(&timer->node);
	exit_critical_section();
}
/* Attach or detach an interrupt handler for one board button.
 *
 * id         - Button index; ignored (returns NULL) if out of range.
 * irqhandler - Handler to attach, or NULL to disable and detach the
 *              button's interrupt.
 *
 * Returns the previous handler so it can be restored.  NOTE: with the
 * REVISIT block disabled below, the previous handler is not actually
 * tracked and NULL is always returned.
 */

xcpt_t board_button_irq(int id, xcpt_t irqhandler)
{
  xcpt_t oldhandler = NULL;

  if (id >=0 && id < NUM_BUTTONS)
    {
      irqstate_t flags;

      /* Disable interrupts until we are done.  This guarantees that the
       * following operations are atomic.
       */

      flags = enter_critical_section();

      /* Get/set the old button handler
       *
       * REVISIT:  Keeping copies of the hander in RAM seems wasteful
       * since the OS already has this information internally.
       */

#if 0 /* REVISIT */
      oldhandler = g_button_handlers[id];
      g_button_handlers[id] = irqhandler;
#else
      oldhandler = NULL;
#endif

      /* Are we attaching or detaching? */

      if (irqhandler != NULL)
        {
          /* Configure the interrupt */

          efm32_gpioirq(g_button_configs[id]);

          /* Attach and enable the interrupt */

          (void)irq_attach(g_button_irqs[id], irqhandler);
          efm32_gpioirqenable(g_button_irqs[id]);
        }
      else
        {
          /* Disable and detach the interrupt */

          efm32_gpioirqdisable(g_button_irqs[id]);
          (void)irq_detach(g_button_irqs[id]);
        }

      leave_critical_section(flags);
    }

  /* Return the old button handler (so that it can be restored) */

  return oldhandler;
}
/* Arm the platform's periodic timer.
 *
 * callback - Invoked on each timer expiry.
 * arg      - Opaque argument passed through to the callback.
 * interval - Timer period.
 *
 * Always succeeds and returns 0.  The physical timer is programmed inside
 * a critical section so the setup is atomic with respect to the timer IRQ.
 */
status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, time_t interval)
{
	enter_critical_section();
	qtimer_set_physical_timer(interval, callback, arg);
	exit_critical_section();

	return 0;
}
static jlong next_thread_id() { // mark the thread - with lock // TODO replace total ordering lock with private lock - perf. issue jlong result = -1; enter_critical_section(jvmti_env, threadID_lock); { result = avail_thread_id++; } exit_critical_section(jvmti_env, threadID_lock); return result; }
/* Reboot the HTC Leo target, preserving the reboot reason.
 *
 * Disables interrupts, shuts down the display (if the framebuffer console
 * is active), tears down the platform, then stores the reason plus an
 * XOR-tag checksum at LK_BOOTREASON_ADDR so the next boot can read why we
 * rebooted, and finally triggers the hardware reboot.  Does not return.
 */
void target_reboot(unsigned reboot_reason)
{
	enter_critical_section();
	if(fbcon_display())
		htcleo_display_shutdown();
	platform_exit();

	/* Persist the reason and a tag-XOR checksum for the next boot stage */
	writel(reboot_reason, LK_BOOTREASON_ADDR);
	writel(reboot_reason^MARK_LK_TAG, LK_BOOTREASON_ADDR + 4);

	reboot(reboot_reason);
}
/* Jump directly into a Linux kernel image.
 *
 * kernel   - Entry point of the kernel image.
 * machtype - Machine type number passed in r1 per the ARM boot protocol.
 * tags     - ATAG list pointer passed in r2.
 *
 * Interrupts are disabled, the platform is uninitialized, and caches/MMU
 * are turned off before the jump -- the kernel expects to start with the
 * MMU and unified cache disabled.  Does not return.
 */
void bootlinux_direct(void *kernel, unsigned machtype, unsigned *tags)
{
	void (*entry)(unsigned,unsigned,unsigned*) = kernel;

	enter_critical_section();

	/* do any platform specific cleanup before kernel entry */
	platform_uninit();

	arch_disable_cache(UCACHE);
	arch_disable_mmu();

	/* ARM convention: r0 = 0, r1 = machine type, r2 = tag list */
	entry(0, machtype, tags);
}
/* Atomically clear then set bits in an 8-bit memory-mapped register.
 *
 * addr      - Register address.
 * clearbits - Bits cleared first.
 * setbits   - Bits set afterwards (set wins over clear for common bits).
 *
 * The read-modify-write runs inside a critical section so concurrent
 * interrupt-level register updates cannot be lost.
 */

void modifyreg8(unsigned int addr, uint8_t clearbits, uint8_t setbits)
{
  irqstate_t flags = enter_critical_section();
  uint8_t val = getreg8(addr);

  val = (val & ~clearbits) | setbits;
  putreg8(val, addr);
  leave_critical_section(flags);
}
/* Install an interrupt handler for the given vector.
 *
 * Unlike the silent-failure variant elsewhere, an out-of-range vector is a
 * programming error here and panics.  The table slot is updated with
 * interrupts disabled so an IRQ never sees a half-written handler/arg pair.
 */
void register_int_handler(unsigned int vector, int_handler handler, void *arg)
{
	if (vector >= INT_VECTORS)
		panic("register_int_handler: vector out of range %d\n", vector);

	enter_critical_section();

	/* update both fields under the lock */
	int_handler_table[vector].arg = arg;
	int_handler_table[vector].handler = handler;

	exit_critical_section();
}
/* Return the VIC (vectored interrupt controller) to its reset-like state.
 *
 * Disables the controller, clears/masks all pending sources on both banks,
 * routes everything back to IRQ (not FIQ), and restores the default
 * edge/level type configuration.  Run with interrupts disabled.
 */
void platform_deinit_interrupts(void)
{
	enter_critical_section();
	writel(0, VIC_INT_MASTEREN);           /* global disable first */
	writel(0xffffffff, VIC_INT_CLEAR0);    /* clear all pending, bank 0 */
	writel(0xffffffff, VIC_INT_CLEAR1);    /* clear all pending, bank 1 */
	writel(0, VIC_INT_SELECT0);            /* route bank 0 to IRQ */
	writel(0, VIC_INT_SELECT1);            /* route bank 1 to IRQ */
	writel(0xffffffff, VIC_INT_TYPE0);     /* default trigger type, bank 0 */
	writel(0xffffffff, VIC_INT_TYPE1);     /* default trigger type, bank 1 */
	writel(0, VIC_CONFIG);
	exit_critical_section();
}
/* Atomically clear then set bits in an 8-bit I/O-port register.
 *
 * addr      - I/O port address.
 * clearbits - Bits cleared first.
 * setbits   - Bits set afterwards (set wins over clear for common bits).
 *
 * The read-modify-write is performed inside a critical section so it
 * cannot interleave with interrupt-level updates of the same port.
 */

void modifyreg8(uint16_t addr, uint8_t clearbits, uint8_t setbits)
{
  irqstate_t flags = enter_critical_section();

  outp((inp(addr) & ~clearbits) | setbits, addr);
  leave_critical_section(flags);
}
/* Reset a semaphore to a specific (non-negative) count.
 *
 * Any threads currently blocked on the semaphore are released first, one
 * per available count.  If waiters remain after the counts are exhausted,
 * the (negative) count is already correct; otherwise the count is set to
 * whatever remains of the requested value.
 *
 * Returns OK.
 */

int sem_reset(FAR sem_t *sem, int16_t count)
{
  irqstate_t flags;

  DEBUGASSERT(sem != NULL && count >= 0);

  /* Don't allow any context switches that may result from the following
   * sem_post() operations.
   */

  sched_lock();

  /* Prevent any access to the semaphore by interrupt handlers while we are
   * performing this operation.
   */

  flags = enter_critical_section();

  /* A negative count indicates that the negated number of threads are
   * waiting to take a count from the semaphore.  Loop here, handing
   * out counts to any waiting threads.
   */

  while (sem->semcount < 0 && count > 0)
    {
      /* Give out one counting, waking up one of the waiting threads
       * and, perhaps, kicking off a lot of priority inheritance
       * logic (REVISIT).
       */

      DEBUGVERIFY(sem_post(sem));
      count--;
    }

  /* We exit the above loop with either (1) no threads waiting for the
   * semaphore (i.e., with sem->semcount >= 0).  In this case, 'count'
   * holds the new value of the semaphore count.  OR (2) with threads
   * still waiting but all of the semaphore counts exhausted:  The
   * current value of sem->semcount is already correct in this case.
   */

  if (sem->semcount >= 0)
    {
      sem->semcount = count;
    }

  /* Allow any pending context switches to occur now */

  leave_critical_section(flags);
  sched_unlock();
  return OK;
}
/* Remove the next character from the syslog interrupt buffer.
 *
 * Returns the extracted character, or EOF if the circular buffer is empty.
 * The buffer-index manipulation runs in a critical section because
 * interrupt handlers concurrently insert characters.
 */

int syslog_remove_intbuffer(void)
{
  irqstate_t flags;
  uint32_t inndx;
  uint32_t outndx;
  uint32_t endndx;
  int inuse = 0;
  int ch;

  /* Extraction of the character and adjustment of the circular buffer
   * indices must be performed in a critical section to protect from
   * concurrent modification from interrupt handlers.
   */

  flags = enter_critical_section();

  /* How many characters are currently buffered?  (inndx == outndx means
   * the buffer is empty.)
   */

  inndx = (uint32_t)g_syslog_intbuffer.si_inndx;
  outndx = (uint32_t)g_syslog_intbuffer.si_outndx;
  if (inndx != outndx)
    {
      /* Handle the case where the inndx has wrapped around */

      endndx = inndx;
      if (endndx < outndx)
        {
          endndx += SYSLOG_INTBUFSIZE;
        }

      inuse = (int)(endndx - outndx);

      /* Take the next character from the interrupt buffer */

      ch = g_syslog_intbuffer.si_buffer[outndx];

      /* Increment the OUT index, handling wrap-around */

      if (++outndx >= SYSLOG_INTBUFSIZE)
        {
          outndx -= SYSLOG_INTBUFSIZE;
        }

      g_syslog_intbuffer.si_outndx = (uint16_t)outndx;
    }

  leave_critical_section(flags);

  /* Now we can send the extracted character to the SYSLOG device.
   * 'ch' is only read when inuse > 0, i.e. when it was assigned above.
   */

  return (inuse > 0) ? ch : EOF;
}
/* Disable both RX and TX UART interrupts, returning the previous enable
 * state so it can later be restored.
 *
 * Fix: the original expression
 *   priv->rxenabled ? STATE_RXENABLED : STATE_DISABLED |
 *   priv->txenabled ? STATE_TXENABLED : STATE_DISABLED
 * parsed, per C precedence ('|' binds tighter than '?:', and '?:' is
 * right-associative), as
 *   rx ? RX : (((DIS | tx)) ? TX : DIS)
 * so the RX and TX states were never OR'ed together.  Parenthesizing each
 * conditional yields the intended combined bitmask.
 */

static uint8_t z16f_disableuartirq(struct uart_dev_s *dev)
{
  struct z16f_uart_s *priv = (struct z16f_uart_s*)dev->priv;
  irqstate_t flags = enter_critical_section();
  uint8_t state = (priv->rxenabled ? STATE_RXENABLED : STATE_DISABLED) |
                  (priv->txenabled ? STATE_TXENABLED : STATE_DISABLED);

  z16f_txint(dev, false);
  z16f_rxint(dev, false);
  leave_critical_section(flags);
  return state;
}
/* Callback for JVMTI_EVENT_THREAD_END.
 *
 * Logs the name of each ending thread.  Guarded by the agent's global lock
 * and skipped once the VM is dead, since JVMTI calls are unsafe after
 * VM_DEATH.
 */
static void JNICALL
cbThreadEnd(jvmtiEnv *jvmti, JNIEnv *env, jthread thread)
{
    enter_critical_section(jvmti);
    {
        /* It's possible we get here right after VmDeath event, be careful */
        if ( !gdata->vm_is_dead ) {
            char  tname[MAX_THREAD_NAME_LENGTH];

            get_thread_name(jvmti, thread, tname, sizeof(tname));
            stdout_message("ThreadEnd %s\n", tname);
        }
    }
    exit_critical_section(jvmti);
}
/* zynq specific halt.
 *
 * HALT/SHUTDOWN (and any unknown action) park the CPU forever with
 * interrupts disabled; REBOOT pokes the SLCR soft-reset register in a loop
 * (the loop only matters if the reset write does not take effect
 * immediately).  Never returns.
 */
void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason)
{
    switch (suggested_action) {
        default:
        case HALT_ACTION_SHUTDOWN:
        case HALT_ACTION_HALT:
            printf("HALT: spinning forever... (reason = %d)\n", reason);
            enter_critical_section();
            for(;;)
                arch_idle();
            break;
        case HALT_ACTION_REBOOT:
            printf("REBOOT\n");
            enter_critical_section();
            for (;;) {
                /* SLCR registers are write-protected; unlock before reset */
                zynq_slcr_unlock();
                SLCR->PSS_RST_CTRL = 1;
            }
            break;
    }
}
/* Atomically install a new USB trace ID mask.
 *
 * idset - The new set of enabled trace IDs.
 *
 * Returns the previously-enabled ID set so callers can restore it.  The
 * swap runs inside a critical section so the read and write of the global
 * mask form one atomic exchange.
 */

usbtrace_idset_t usbtrace_enable(usbtrace_idset_t idset)
{
  irqstate_t flags = enter_critical_section();
  usbtrace_idset_t prev = g_maskedidset;

  g_maskedidset = idset;
  leave_critical_section(flags);
  return prev;
}
/* Make a task runnable and ask the scheduler to reconsider.
 *
 * t - The task to wake.
 *
 * Returns 0.
 *
 * Fix: the original wrote t->state = READY BEFORE entering the critical
 * section, leaving a window in which an interrupt could observe a READY
 * task that is not yet on any run queue.  The state change now happens
 * atomically with the enqueue.
 */
static int try_to_wake_up(task_t *t)
{
	enter_critical_section();
	t->state = READY;
	scheduler->enqueue_task(t, 0);
	task_schedule();
	exit_critical_section();
	return 0;
}
void kmain(void) { task_t *task_shell; int ret; /*************** Init Arch ****************/ arch_early_init(); show_logo(); /*************** Init Platform ****************/ platform_init(); timer_init(); buses_init(); /*************** Init Task ****************/ task_init(); task_create_init(); /*************** Init Workqueu ****************/ init_workqueues(); /*************** Init File System ****************/ register_filesystem(&fat_fs); /*************** Creating Shell TASK ****************/ task_shell = task_alloc("shell", 0x2000, 5); if (NULL == task_shell) { return; } ret = task_create(task_shell, init_shell, 0); if (ret) { printk("Create init shell task failed\n"); } sema_init(&sem, 1); arch_enable_ints(); while(1) { enter_critical_section(); arch_idle(); task_schedule(); exit_critical_section(); } task_free(task_shell); }
/* Dump all registers of the PIO port selected by 'pinset', prefixed with
 * the caller-supplied message.  The register reads are performed inside a
 * critical section so the dump is a consistent snapshot.
 *
 * Returns OK.
 *
 * NOTE(review): the PCMR/PCIMR/PCISR/PCRHR line is printed twice (once
 * before the SCHMITT/DRIVER line and once at the end) -- presumably an
 * intentional before/after snapshot of the parallel-capture registers,
 * but worth confirming.
 */

int sam_dumpgpio(uint32_t pinset, const char *msg)
{
  irqstate_t flags;
  uintptr_t base;
  unsigned int port;

  /* Get the base address associated with the PIO port */

  port = (pinset & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT;
  base = SAM_PION_BASE(port);

  /* The following requires exclusive access to the GPIO registers */

  flags = enter_critical_section();
  lldbg("PIO%c pinset: %08x base: %08x -- %s\n",
        g_portchar[port], pinset, base, msg);
  lldbg("    PSR: %08x    OSR: %08x   IFSR: %08x   ODSR: %08x\n",
        getreg32(base + SAM_PIO_PSR_OFFSET), getreg32(base + SAM_PIO_OSR_OFFSET),
        getreg32(base + SAM_PIO_IFSR_OFFSET), getreg32(base + SAM_PIO_ODSR_OFFSET));
  lldbg("   PDSR: %08x    IMR: %08x    ISR: %08x   MDSR: %08x\n",
        getreg32(base + SAM_PIO_PDSR_OFFSET), getreg32(base + SAM_PIO_IMR_OFFSET),
        getreg32(base + SAM_PIO_ISR_OFFSET), getreg32(base + SAM_PIO_MDSR_OFFSET));
  lldbg(" ABCDSR: %08x %08x         IFSCSR: %08x  PPDSR: %08x\n",
        getreg32(base + SAM_PIO_ABCDSR1_OFFSET), getreg32(base + SAM_PIO_ABCDSR2_OFFSET),
        getreg32(base + SAM_PIO_IFSCSR_OFFSET), getreg32(base + SAM_PIO_PPDSR_OFFSET));
  lldbg("   PUSR: %08x   SCDR: %08x   OWSR: %08x  AIMMR: %08x\n",
        getreg32(base + SAM_PIO_PUSR_OFFSET), getreg32(base + SAM_PIO_SCDR_OFFSET),
        getreg32(base + SAM_PIO_OWSR_OFFSET), getreg32(base + SAM_PIO_AIMMR_OFFSET));
  lldbg("    ESR: %08x    LSR: %08x   ELSR: %08x FELLSR: %08x\n",
        getreg32(base + SAM_PIO_ESR_OFFSET), getreg32(base + SAM_PIO_LSR_OFFSET),
        getreg32(base + SAM_PIO_ELSR_OFFSET), getreg32(base + SAM_PIO_FELLSR_OFFSET));
  lldbg(" FRLHSR: %08x LOCKSR: %08x   WPMR: %08x   WPSR: %08x\n",
        getreg32(base + SAM_PIO_FRLHSR_OFFSET), getreg32(base + SAM_PIO_LOCKSR_OFFSET),
        getreg32(base + SAM_PIO_WPMR_OFFSET), getreg32(base + SAM_PIO_WPSR_OFFSET));
  lldbg("   PCMR: %08x  PCIMR: %08x  PCISR: %08x  PCRHR: %08x\n",
        getreg32(base + SAM_PIO_PCMR_OFFSET), getreg32(base + SAM_PIO_PCIMR_OFFSET),
        getreg32(base + SAM_PIO_PCISR_OFFSET), getreg32(base + SAM_PIO_PCRHR_OFFSET));
  lldbg("SCHMITT: %08x DRIVER:%08x\n",
        getreg32(base + SAM_PIO_SCHMITT_OFFSET),
        getreg32(base + SAM_PIO_DRIVER_OFFSET));
  lldbg("    KER: %08x   KRCR: %08x    KDR: %08x   KIMR: %08x\n",
        getreg32(base + SAM_PIO_KER_OFFSET), getreg32(base + SAM_PIO_KRCR_OFFSET),
        getreg32(base + SAM_PIO_KDR_OFFSET), getreg32(base + SAM_PIO_KIMR_OFFSET));
  lldbg("    KSR: %08x   KKPR: %08x   KKRR: %08x\n",
        getreg32(base + SAM_PIO_KSR_OFFSET), getreg32(base + SAM_PIO_KKPR_OFFSET),
        getreg32(base + SAM_PIO_KKRR_OFFSET));
  lldbg("   PCMR: %08x  PCIMR: %08x  PCISR: %08x  PCRHR: %08x\n",
        getreg32(base + SAM_PIO_PCMR_OFFSET), getreg32(base + SAM_PIO_PCIMR_OFFSET),
        getreg32(base + SAM_PIO_PCISR_OFFSET), getreg32(base + SAM_PIO_PCRHR_OFFSET));
  leave_critical_section(flags);
  return OK;
}