/* Set the color map (CLUT) for the DM320 OSD.
 *
 * Each RGB entry in 'cmap' is converted to YUV and programmed into the
 * OSD CLUT RAM; then the RAM-based CLUT is selected for every enabled OSD
 * window that is not in RGB16 mode.
 *
 * Returns 0 on success or -EINVAL on invalid arguments (debug builds).
 */

static int dm320_putcmap(FAR struct fb_vtable_s *vtable,
                         FAR struct fb_cmap_s *cmap)
{
  irqstate_t flags;
  uint16_t regval;
  uint8_t y;
  uint8_t u;
  uint8_t v;
  int len;   /* Fix: original read 'int len int i;' -- missing semicolon */
  int i;

#ifdef CONFIG_DEBUG
  /* Fix: original tested the non-existent field cmap->read instead of
   * cmap->red.
   */

  if (!vtable || !cmap || !cmap->red || !cmap->green || !cmap->blue)
    {
      return -EINVAL;
    }
#endif

  flags = enter_critical_section();

  /* Fix: original used '.' on the cmap pointer (cmap.first / cmap.len) and
   * a comma where the loop condition/increment separator ';' belongs.
   */

  for (i = cmap->first, len = 0; i < 256 && len < cmap->len; i++, len++)
    {
      /* Convert the RGB to YUV */

      nxgl_rgb2yuv(cmap->red[i], cmap->green[i], cmap->blue[i], &y, &u, &v);

      /* Program the CLUT: spin while the CLUT RAM is busy, then write the
       * Y/Cb entry and the Cr/index entry.  Fix: original had the invalid
       * cast 'uint16_t(u)' and unbalanced parentheses in the first
       * putreg16() call.
       */

      while (getreg16(DM320_OSD_MISCCTL) & 0x8);
      putreg16((((uint16_t)y) << 8) | (uint16_t)u, DM320_OSD_CLUTRAMYCB);
      putreg16((((uint16_t)v) << 8) | i, DM320_OSD_CLUTRAMCR);
    }

  /* Select RAM clut for each enabled, non-RGB16 OSD window */

#if !defined(CONFIG_DM320_OSD0_DISABLE) && !defined(CONFIG_DM320_OSD0_RGB16)
  regval = getreg16(DM320_OSD_OSDWIN0MD);
  regval |= 0x1000;
  putreg16(regval, DM320_OSD_OSDWIN0MD);
#endif

#if !defined(CONFIG_DM320_OSD1_DISABLE) && !defined(CONFIG_DM320_OSD1_RGB16)
  regval = getreg16(DM320_OSD_OSDWIN1MD);
  regval |= 0x1000;
  putreg16(regval, DM320_OSD_OSDWIN1MD);
#endif

  leave_critical_section(flags);
  return 0;
}
/* Dump the ARMv7-M NVIC state to the irqinfo log: interrupt control,
 * vector table base, IRQ enable bits, and all system-handler and IRQ
 * priority registers.  'msg' tags the dump and 'irq' identifies the IRQ
 * that prompted it.  Registers are sampled inside a critical section so
 * the snapshot is coherent.
 */

static void xmc4_dump_nvic(const char *msg, int irq)
{
  irqstate_t flags;

  flags = enter_critical_section();

  irqinfo("NVIC (%s, irq=%d):\n", msg, irq);
  irqinfo(" INTCTRL: %08x VECTAB: %08x\n",
          getreg32(NVIC_INTCTRL), getreg32(NVIC_VECTAB));
#if 0
  irqinfo(" SYSH ENABLE MEMFAULT: %08x BUSFAULT: %08x USGFAULT: %08x SYSTICK: %08x\n",
          getreg32(NVIC_SYSHCON_MEMFAULTENA), getreg32(NVIC_SYSHCON_BUSFAULTENA),
          getreg32(NVIC_SYSHCON_USGFAULTENA), getreg32(NVIC_SYSTICK_CTRL_ENABLE));
#endif
  irqinfo(" IRQ ENABLE: %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ0_31_ENABLE), getreg32(NVIC_IRQ32_63_ENABLE),
          getreg32(NVIC_IRQ64_95_ENABLE), getreg32(NVIC_IRQ96_127_ENABLE));
  irqinfo(" SYSH_PRIO: %08x %08x %08x\n",
          getreg32(NVIC_SYSH4_7_PRIORITY), getreg32(NVIC_SYSH8_11_PRIORITY),
          getreg32(NVIC_SYSH12_15_PRIORITY));
  irqinfo(" IRQ PRIO: %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ0_3_PRIORITY), getreg32(NVIC_IRQ4_7_PRIORITY),
          getreg32(NVIC_IRQ8_11_PRIORITY), getreg32(NVIC_IRQ12_15_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ16_19_PRIORITY), getreg32(NVIC_IRQ20_23_PRIORITY),
          getreg32(NVIC_IRQ24_27_PRIORITY), getreg32(NVIC_IRQ28_31_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ32_35_PRIORITY), getreg32(NVIC_IRQ36_39_PRIORITY),
          getreg32(NVIC_IRQ40_43_PRIORITY), getreg32(NVIC_IRQ44_47_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ48_51_PRIORITY), getreg32(NVIC_IRQ52_55_PRIORITY),
          getreg32(NVIC_IRQ56_59_PRIORITY), getreg32(NVIC_IRQ60_63_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ64_67_PRIORITY), getreg32(NVIC_IRQ68_71_PRIORITY),
          getreg32(NVIC_IRQ72_75_PRIORITY), getreg32(NVIC_IRQ76_79_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ80_83_PRIORITY), getreg32(NVIC_IRQ84_87_PRIORITY),
          getreg32(NVIC_IRQ88_91_PRIORITY), getreg32(NVIC_IRQ92_95_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ96_99_PRIORITY), getreg32(NVIC_IRQ100_103_PRIORITY),
          getreg32(NVIC_IRQ104_107_PRIORITY), getreg32(NVIC_IRQ108_111_PRIORITY));
#if NR_VECTORS > 111
  irqinfo(" %08x %08x\n",
          getreg32(NVIC_IRQ112_115_PRIORITY), getreg32(NVIC_IRQ116_119_PRIORITY));
#endif

  leave_critical_section(flags);
}
/* Queue the action associated with a signal for later execution by the
 * task 'stcb'.  There is no default action: if the group has not installed
 * a handler for info->si_signo, the function succeeds without queueing
 * anything.
 *
 * Returns OK on success or -ENOMEM if a queue entry could not be
 * allocated.
 */

static int sig_queueaction(FAR struct tcb_s *stcb, siginfo_t *info)
{
  FAR sigactq_t *act;
  FAR sigq_t *entry;
  irqstate_t flags;
  int ret = OK;

  sched_lock();
  DEBUGASSERT(stcb != NULL && stcb->group != NULL);

  /* Look up the sigaction this task group registered for the signal */

  act = sig_findaction(stcb->group, info->si_signo);

  /* Queue only if a real handler was installed */

  if (act != NULL && act->act.sa_u._sa_sigaction != NULL)
    {
      /* Allocate a pending-action entry.  NOTE:
       * sig_allocatependingsigaction will force a system crash if it is
       * unable to allocate memory for the signal data.
       */

      entry = sig_allocatependingsigaction();
      if (entry == NULL)
        {
          ret = -ENOMEM;
        }
      else
        {
          /* Fill in the entry from the sigaction and the signal info */

          entry->action.sighandler = act->act.sa_u._sa_sigaction;
          entry->mask = act->act.sa_mask;
          memcpy(&entry->info, info, sizeof(siginfo_t));

          /* Append to the task's pending-action queue; the list may also
           * be touched from interrupt level, hence the critical section.
           */

          flags = enter_critical_section();
          sq_addlast((FAR sq_entry_t *)entry, &(stcb->sigpendactionq));
          leave_critical_section(flags);
        }
    }

  sched_unlock();
  return ret;
}
/* Tear down a thread pool: tell every worker to die, drop any queued work,
 * wake all workers, wait until the last one has exited, then free the pool
 * object.
 *
 * NOTE(review): thread_die is set to ~0 (all bits) rather than 1 --
 * presumably each worker tests its own bit; confirm against the worker
 * loop before changing.
 */

void tp_release(threadpool_t pool)
{
	struct threadpool *tp = (struct threadpool *) pool;

	enter_critical_section(tp);

	/* Signal termination to all workers and discard pending work */

	tp->thread_die = ~0;
	tp->list = NULL;

	/* Expect one completion notification per live worker */

	tp->wait_count = tp->thread_count;
	signal_exec(tp);
	if (tp->wait_count)
		wait_for_completion(tp);

	leave_critical_section(tp);

	sync_done(tp);
	free(tp);
}
/* Worker loop body.  Called with the pool lock held: releases it, walks
 * the shared work list running this worker's share of the items, then
 * re-acquires the lock before returning.  Each worker skips 'thread'
 * items first so workers start at distinct offsets, then strides through
 * the list between runs.
 *
 * NOTE(review): the stride loop uses 'i <= tp->thread_count'
 * (thread_count + 1 advances) while the offset loop uses 'i < thread' --
 * verify the apparent off-by-one is intentional.
 */

static __inline void exec_loop(struct threadpool *tp, int thread)
{
	int i;
	LISTITEM *item;

	leave_critical_section(tp);

	/* Skip to this worker's first item */

	item = tp->list;
	for (i = 0; i < thread && item; i++)
		item = item->next;

	while (item) {
		tp->run(thread, item->data);

		/* Advance to this worker's next item */

		for (i = 0; i <= tp->thread_count && item; i++)
			item = item->next;
	}

	enter_critical_section(tp);
}
/* Copy the most recent sample set into the caller's buffer.
 *
 * The request is clamped to one full set of adc_msg_s samples.  The copy
 * is performed with interrupts disabled so a concurrent sample update
 * cannot race it.  Returns the number of bytes copied.
 */

ssize_t ADC::read(file *filp, char *buffer, size_t len)
{
	/* Never hand back more than one complete sample set */

	const size_t limit = sizeof(adc_msg_s) * _channel_count;
	const size_t count = (len > limit) ? limit : len;

	/* Snapshot the samples atomically with respect to the ISR */

	irqstate_t state = enter_critical_section();
	memcpy(buffer, _samples, count);
	leave_critical_section(state);

	return count;
}
/************************************************************************************
 * Name: lpc31_setup_overcurrent
 *
 * Description:
 *   Setup to receive an interrupt-level callback if an overcurrent condition is
 *   detected.
 *
 * Input Parameters:
 *   handler - New overcurrent interrupt handler
 *   arg - The argument that will accompany the interrupt
 *
 * Returned Value:
 *   Zero (OK) returned on success; a negated errno value is returned on failure.
 *
 ************************************************************************************/

#if 0 /* Not ready yet */
int lpc31_setup_overcurrent(xcpt_t handler, void *arg)
{
  irqstate_t flags;

  /* Disable interrupts until we are done.  This guarantees that the
   * following operations are atomic.
   */

  flags = enter_critical_section();

  /* Configure the interrupt */

#warning Missing logic

  /* NOTE(review): this stub does not yet store 'handler'/'arg' anywhere;
   * it only brackets the (missing) configuration in a critical section.
   */

  leave_critical_section(flags);
  return OK;
}
/* Attempt to take the semaphore without blocking.
 *
 * Returns OK if the semaphore count was positive and has been decremented;
 * otherwise ERROR with errno set to EINVAL (bad argument) or EAGAIN
 * (semaphore not available).
 */

int sem_trywait(FAR sem_t *sem)
{
  FAR struct tcb_s *rtcb = this_task();
  irqstate_t flags;
  int ret = ERROR;

  /* This API should not be called from interrupt handlers */

  DEBUGASSERT(up_interrupt_context() == false);

  /* Assume any errors reported are due to invalid arguments. */

  set_errno(EINVAL);

  if (sem)
    {
      /* The following operations must be performed with interrupts disabled
       * because sem_post() may be called from an interrupt handler.
       */

      flags = enter_critical_section();

      /* Any further errors could only occur because the semaphore is not
       * available.
       */

      set_errno(EAGAIN);

      /* If the semaphore is available, give it to the requesting task */

      if (sem->semcount > 0)
        {
          /* It is, let the task take the semaphore */

          sem->semcount--;
          rtcb->waitsem = NULL;
          ret = OK;
        }

      /* Interrupts may now be enabled. */

      leave_critical_section(flags);
    }

  return ret;
}
/* Install a new syslog priority mask and return the previous one.
 *
 * The swap is done inside a critical section so it is atomic with respect
 * to other threads and to interrupt-level logging.
 */

int setlogmask(int mask)
{
  irqstate_t state;
  uint8_t previous;

  state = enter_critical_section();

  /* Exchange the global mask atomically */

  previous = g_syslog_mask;
  g_syslog_mask = (uint8_t)mask;

  leave_critical_section(state);
  return previous;
}
/* Drive a GPIO output pin to 'value'.
 *
 * The pin must have been configured as an output (checked by assertion).
 * The write is dispatched to either the PIM or the MEBI controller
 * depending on which one owns the port, inside a critical section.
 */

void hcs12_gpiowrite(uint16_t pinset, bool value)
{
  uint8_t ndx = HCS12_PORTNDX(pinset);
  uint8_t bit = HCS12_PIN(pinset);
  irqstate_t state;

  state = enter_critical_section();
  DEBUGASSERT((pinset & GPIO_DIRECTION) == GPIO_OUTPUT);

  /* Route the write to the controller that owns this port */

  if (HCS12_PIMPORT(pinset))
    {
      pim_gpiowrite(ndx, bit, value);
    }
  else
    {
      mebi_gpiowrite(ndx, bit, value);
    }

  leave_critical_section(state);
}
/* Accept one client connection on opt->fd and hand it off to a new client
 * context.  Transient resource exhaustion causes a short delay before the
 * error is logged; EINTR causes a silent retry; other failures are logged
 * and abandoned.
 */

static void accept_connection(LOCAL_OPTIONS *opt) {
    SOCKADDR_UNION addr;
    char from_address[IPLEN];
    int s;
    socklen_t addrlen;

    addrlen=sizeof(SOCKADDR_UNION);
    while((s=accept(opt->fd, &addr.sa, &addrlen))<0) {
        switch(get_last_socket_error()) {
        case EINTR:
            break; /* retry */
        case EMFILE:
        case ENFILE:
#ifdef ENOBUFS
        case ENOBUFS:
#endif
        case ENOMEM:
            sleep(1); /* temporarily out of resources - short delay */
            /* fallthrough -- after the delay, log and give up below */
        default:
            sockerror("accept");
            return; /* error */
        }
    }

    /* Log the peer address of the accepted connection */

    s_ntop(from_address, &addr);
    s_log(LOG_DEBUG, "%s accepted FD=%d from %s",
        opt->servname, s, from_address);

    /* Enforce the global client limit before spending more resources */

    if(max_clients && num_clients>=max_clients) {
        s_log(LOG_WARNING, "Connection rejected: too many clients (>=%d)",
            max_clients);
        closesocket(s);
        return;
    }
#ifdef FD_CLOEXEC
    fcntl(s, F_SETFD, FD_CLOEXEC); /* close socket in child execvp */
#endif
    if(create_client(opt->fd, s, alloc_client_session(opt, s, s), client)) {
        s_log(LOG_ERR, "Connection rejected: create_client failed");
        closesocket(s);
        return;
    }

    /* Account for the new client */

    enter_critical_section(CRIT_CLIENTS); /* for multi-cpu machines */
    num_clients++;
    leave_critical_section(CRIT_CLIENTS);
}
int stm32_dumpgpio(uint32_t pinset, const char *msg) { irqstate_t flags; uint32_t base; unsigned int port; /* Get the base address associated with the GPIO port */ port = (pinset & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT; base = g_gpiobase[port]; /* The following requires exclusive access to the GPIO registers */ flags = enter_critical_section(); DEBUGASSERT(port < STM32F7_NGPIO); gpioinfo("GPIO%c pinset: %08x base: %08x -- %s\n", g_portchar[port], pinset, base, msg); if ((getreg32(STM32_RCC_AHB1ENR) & RCC_AHB1ENR_GPIOEN(port)) != 0) { gpioinfo(" MODE: %08x OTYPE: %04x OSPEED: %08x PUPDR: %08x\n", getreg32(base + STM32_GPIO_MODER_OFFSET), getreg32(base + STM32_GPIO_OTYPER_OFFSET), getreg32(base + STM32_GPIO_OSPEED_OFFSET), getreg32(base + STM32_GPIO_PUPDR_OFFSET)); gpioinfo(" IDR: %04x ODR: %04x LCKR: %05x\n", getreg32(base + STM32_GPIO_IDR_OFFSET), getreg32(base + STM32_GPIO_ODR_OFFSET), getreg32(base + STM32_GPIO_LCKR_OFFSET)); gpioinfo(" AFRH: %08x AFRL: %08x\n", getreg32(base + STM32_GPIO_AFRH_OFFSET), getreg32(base + STM32_GPIO_AFRL_OFFSET)); } else { gpioinfo(" GPIO%c not enabled: AHB1ENR: %08x\n", g_portchar[port], getreg32(STM32_RCC_AHB1ENR)); } leave_critical_section(flags); return OK; }
/* Thread/process entry point for one proxied connection.
 *
 * Registers the local descriptors, optionally spawns a local program
 * (connect + exec), runs the full client protocol via do_client(), then
 * cleans up, updates the global client count, and releases the context.
 * Always returns NULL.
 */

void *client(void *arg) {
    CLI *c=arg;

#ifdef DEBUG_STACK_SIZE
    stack_info(1); /* initialize */
#endif
    s_log(LOG_DEBUG, "%s started", c->opt->servname);

    /* Track the local descriptors; abandon the connection on failure */

    if(alloc_fd(c->local_rfd.fd))
        return NULL;
    if(c->local_wfd.fd!=c->local_rfd.fd)
        if(alloc_fd(c->local_wfd.fd))
            return NULL;
#ifndef USE_WIN32
    if(c->opt->option.remote && c->opt->option.program)
        c->local_rfd.fd=c->local_wfd.fd=connect_local(c);
            /* connect and exec options specified together:
             * spawn local program instead of stdio */
#endif
    c->remote_fd.fd=-1;
    c->ssl=NULL;

    /* Run the protocol; cleanup() releases per-connection resources */

    cleanup(c, do_client(c));
#ifdef USE_FORK
    if(!c->opt->option.remote) /* 'exec' specified */
        child_status(); /* null SIGCHLD handler was used */
#else
    enter_critical_section(CRIT_CLIENTS); /* for multi-cpu machines */
    s_log(LOG_DEBUG, "%s finished (%d left)", c->opt->servname, --num_clients);
    leave_critical_section(CRIT_CLIENTS);
#endif
    free(c);
#ifdef DEBUG_STACK_SIZE
    stack_info(0); /* display computed value */
#endif
#ifdef USE_WIN32
    _endthread();
#endif
#ifdef USE_UCONTEXT
    /* NOTE(review): control is not expected to return from s_poll_wait()
     * here; the final log line indicates an internal error.
     */
    s_log(LOG_DEBUG, "Context %ld closed", ready_head->id);
    s_poll_wait(NULL, 0); /* wait on poll() */
    s_log(LOG_ERR, "INTERNAL ERROR: failed to drop context");
#endif
    return NULL;
}
/* Invoked after the USB device has been disconnected from the host.
 *
 * Resets the composite configuration, forwards the disconnect event to
 * both constituent class drivers, then requests a soft re-connect so the
 * device can be re-enumerated.
 */

static void composite_disconnect(FAR struct usbdevclass_driver_s *driver,
                                 FAR struct usbdev_s *dev)
{
  FAR struct composite_dev_s *priv;
  irqstate_t flags;

  usbtrace(TRACE_CLASSDISCONNECT, 0);

#ifdef CONFIG_DEBUG_FEATURES
  if (!driver || !dev)
    {
      usbtrace(TRACE_CLSERROR(USBCOMPOSITE_TRACEERR_INVALIDARG), 0);
      return;
    }
#endif

  /* Extract reference to private data */

  priv = ((FAR struct composite_driver_s *)driver)->dev;

#ifdef CONFIG_DEBUG_FEATURES
  if (!priv)
    {
      usbtrace(TRACE_CLSERROR(USBCOMPOSITE_TRACEERR_EP0NOTBOUND), 0);
      return;
    }
#endif

  /* Reset the configuration and inform the constituent class drivers of
   * the disconnection.
   */

  flags = enter_critical_section();
  priv->config = COMPOSITE_CONFIGIDNONE;
  CLASS_DISCONNECT(priv->dev1, dev);
  CLASS_DISCONNECT(priv->dev2, dev);
  leave_critical_section(flags);

  /* Perform the soft connect function so that we can be re-enumerated */

  DEV_CONNECT(dev);
}
/* Write 'value' to a GPIO output pin.
 *
 * On low-density NUC100/120 parts (NUC_LOW) the pin is written directly
 * through its bit-band PDIO address.  On other parts, the port's DMASK
 * register is used to confine the DOUT write to the selected pin, inside
 * a critical section.
 */

void nuc_gpiowrite(gpio_cfgset_t pinset, bool value)
{
#ifndef NUC_LOW
  irqstate_t flags;
  uintptr_t base;
#endif
  int port;
  int pin;

  /* Decode the port and pin.  Use the port number to get the GPIO base
   * address.
   */

  port = (pinset & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT;
  pin = (pinset & GPIO_PIN_MASK) >> GPIO_PIN_SHIFT;
  DEBUGASSERT((unsigned)port <= NUC_GPIO_PORTE);

  /* Only the low density NUC100/120 chips support bit-band access to GPIO
   * pins.
   */

#ifdef NUC_LOW
  putreg32((uint32_t)value, NUC_PORT_PDIO(port, pin));
#else
  /* Get the base address of the GPIO port registers */

  base = NUC_GPIO_CTRL_BASE(port);

  /* Disable interrupts -- the following operations must be atomic */

  flags = enter_critical_section();

  /* Allow writing only to the selected pin in the DOUT register */

  putreg32(~(1 << pin), base + NUC_GPIO_DMASK_OFFSET);

  /* Set the pin to the selected value and re-enable interrupts */

  putreg32(((uint32_t)value << pin), base + NUC_GPIO_DOUT_OFFSET);
  leave_critical_section(flags);
#endif
}
/* Add a pending signal to the task group of 'stcb'.
 *
 * Only one copy of each signal number is retained: if the signal is
 * already pending for the group, its siginfo is simply refreshed.
 * Returns the (new or existing) pending-signal entry, or NULL if one
 * could not be allocated.
 */

static FAR sigpendq_t *sig_addpendingsignal(FAR struct tcb_s *stcb,
                                            FAR siginfo_t *info)
{
  FAR struct task_group_s *group;
  FAR sigpendq_t *entry;
  irqstate_t flags;

  DEBUGASSERT(stcb != NULL && stcb->group != NULL);
  group = stcb->group;

  /* Is this signal number already pending for the group? */

  entry = sig_findpendingsignal(group, info->si_signo);
  if (entry != NULL)
    {
      /* Yes: retain a single copy, updating the signal information */

      memcpy(&entry->info, info, sizeof(siginfo_t));
      return entry;
    }

  /* No: allocate a fresh pending-signal entry */

  entry = sig_allocatependingsignal();
  if (entry != NULL)
    {
      /* Record the signal information */

      memcpy(&entry->info, info, sizeof(siginfo_t));

      /* Append to the group's pending list; the list is also touched
       * from interrupt level, hence the critical section.
       */

      flags = enter_critical_section();
      sq_addlast((FAR sq_entry_t *)entry, &group->tg_sigpendingq);
      leave_critical_section(flags);
    }

  return entry;
}
/* Enable or disable the UART receive interrupt.
 *
 * When serial interrupts are suppressed (CONFIG_SUPPRESS_SERIAL_INTS)
 * the enable path is a no-op, but the requested state is still recorded
 * in priv->rxenabled.
 */

static void z16f_rxint(struct uart_dev_s *dev, bool enable)
{
  struct z16f_uart_s *priv = (struct z16f_uart_s*)dev->priv;
  irqstate_t flags = enter_critical_section();

  if (enable)
    {
#ifndef CONFIG_SUPPRESS_SERIAL_INTS
      up_enable_irq(priv->rxirq);
#endif
    }
  else
    {
      up_disable_irq(priv->rxirq);
    }

  /* Remember the requested RX interrupt state */

  priv->rxenabled = enable;
  leave_critical_section(flags);
}
/* Attach or detach an interrupt handler for button 'id'.
 *
 * A non-NULL 'irqhandler' is attached and enabled; NULL disables and
 * detaches the current handler.  Returns the previously installed handler
 * (NULL if none, or if 'id' is out of range).
 */

xcpt_t board_button_irq(int id, xcpt_t irqhandler)
{
  xcpt_t oldhandler = NULL;
  irqstate_t flags;
  int irq;

  /* Verify that the button ID is within range */

  if ((unsigned)id < BOARD_NUM_BUTTONS)
    {
      /* Disable interrupts until we are done.  Fix: the g_buttonisr[]
       * exchange must also be inside the critical section -- the original
       * code updated the handler table before disabling interrupts, so the
       * button ISR could observe the new handler while the IRQ was still
       * configured for the old one.
       */

      flags = enter_critical_section();

      /* Return the current button handler and set the new interrupt
       * handler.
       */

      oldhandler      = g_buttonisr[id];
      g_buttonisr[id] = irqhandler;

      /* Configure the interrupt.  Either attach and enable the new
       * interrupt or disable and detach the old interrupt handler.
       */

      irq = g_buttonirq[id];
      if (irqhandler)
        {
          /* Attach then enable the new interrupt handler */

          (void)irq_attach(irq, irqhandler);
          up_enable_irq(irq);
        }
      else
        {
          /* Disable then detach the old interrupt handler */

          up_disable_irq(irq);
          (void)irq_detach(irq);
        }

      leave_critical_section(flags);
    }

  return oldhandler;
}
/* Dump the register state of the port selected by 'pinset' to the
 * gpioinfo log, tagging the output with 'msg'.  Dispatches to the PIM or
 * MEBI dump helper depending on which controller owns the port.  Always
 * returns OK.
 */

int hcs12_dumpgpio(uint16_t pinset, const char *msg)
{
  irqstate_t state;
  uint8_t ndx = HCS12_PORTNDX(pinset);

  state = enter_critical_section();
  gpioinfo("pinset: %08x -- %s\n", pinset, msg);

  /* Dump via the controller that owns this port */

  if (HCS12_PIMPORT(pinset))
    {
      hcs12_pimdump(ndx);
    }
  else
    {
      hcs12_mebidump(ndx);
    }

  leave_critical_section(state);
  return OK;
}
/* Wait for an ICMPv6 Neighbor Advertisement or a timeout.
 *
 * 'timeout' is a relative interval added to the current CLOCK_REALTIME to
 * form the absolute deadline.  Returns the result recorded in the
 * notification structure by the matching notifier.
 */

int icmpv6_rwait(FAR struct icmpv6_rnotify_s *notify,
                 FAR struct timespec *timeout)
{
  struct timespec abstime;
  irqstate_t flags;
  int ret;

  ninfo("Waiting...\n");

  /* And wait for the Neighbor Advertisement (or a timeout).  Interrupts
   * will be re-enabled while we wait.
   */

  flags = enter_critical_section();
  DEBUGVERIFY(clock_gettime(CLOCK_REALTIME, &abstime));

  /* Form the absolute deadline, normalizing the nanosecond field */

  abstime.tv_sec += timeout->tv_sec;
  abstime.tv_nsec += timeout->tv_nsec;
  if (abstime.tv_nsec >= 1000000000)
    {
      abstime.tv_sec++;
      abstime.tv_nsec -= 1000000000;
    }

  /* REVISIT: If net_timedwait() is awakened with signal, we will return
   * the wrong error code.
   *
   * Fix: the original text contained the mis-encoded token '¬ify'
   * (an HTML '&not;' corruption of '&notify') which does not compile.
   */

  (void)net_timedwait(&notify->rn_sem, &abstime);
  ret = notify->rn_result;

  /* Remove our wait structure from the list (we may no longer be at the
   * head of the list).
   */

  (void)icmpv6_rwait_cancel(notify);

  /* Re-enable interrupts and return the result of the wait */

  leave_critical_section(flags);
  return ret;
}
/* Disable all analog-joystick interrupts and clear the registered
 * callback.  The PIO interrupts are masked atomically inside a critical
 * section; the handler/argument globals are then cleared.
 */

static void ajoy_disable(void)
{
  irqstate_t state;
  int ndx;

  /* Mask every joystick PIO interrupt */

  state = enter_critical_section();
  for (ndx = 0; ndx < AJOY_NGPIOS; ndx++)
    {
      sam_pioirqdisable(g_joyirq[ndx]);
    }

  leave_critical_section(state);

  /* Forget the registered callback */

  g_ajoyhandler = NULL;
  g_ajoyarg = NULL;
}
/* Disable all analog-joystick interrupts and clear the registered
 * callback.  Each joystick GPIO event source is torn down atomically
 * inside a critical section; the handler/argument globals are then
 * cleared.
 */

static void ajoy_disable(void)
{
  irqstate_t state;
  int ndx;

  /* Remove the event callback from every joystick GPIO */

  state = enter_critical_section();
  for (ndx = 0; ndx < AJOY_NGPIOS; ndx++)
    {
      (void)stm32_gpiosetevent(g_joygpio[ndx], false, false, false,
                               NULL, NULL);
    }

  leave_critical_section(state);

  /* Forget the registered callback */

  g_ajoyhandler = NULL;
  g_ajoyarg = NULL;
}
/* Try to take the heap semaphore without blocking.
 *
 * Supports recursive acquisition: if the calling task already holds the
 * semaphore, only the hold count is incremented.  Returns OK on success
 * or a negated errno value if the semaphore is unavailable.
 */

int mm_trysemaphore(FAR struct mm_heap_s *heap)
{
#ifdef CONFIG_SMP
  /* On SMP the holder/count accesses below must be atomic across CPUs */

  irqstate_t flags = enter_critical_section();
#endif
  pid_t my_pid = getpid();
  int ret;

  /* Do I already have the semaphore? */

  if (heap->mm_holder == my_pid)
    {
      /* Yes, just increment the number of references that I have */

      heap->mm_counts_held++;
      ret = OK;
    }
  else
    {
      /* Try to take the semaphore (non-blocking) */

      ret = _SEM_TRYWAIT(&heap->mm_semaphore);
      if (ret < 0)
        {
          _SEM_GETERROR(ret);
          goto errout;
        }

      /* We have it.  Claim the heap and return */

      heap->mm_holder = my_pid;
      heap->mm_counts_held = 1;
      ret = OK;
    }

errout:
#ifdef CONFIG_SMP
  leave_critical_section(flags);
#endif
  return ret;
}
/* Dump the NVIC state (interrupt control, enable, and priority registers)
 * to the irqinfo log.  'msg' tags the dump and 'irq' identifies the IRQ
 * that prompted it.  The number of IRQ priority banks dumped depends on
 * NR_VECTORS.  Registers are sampled inside a critical section so the
 * snapshot is coherent.
 */

static void efm32_dumpnvic(const char *msg, int irq)
{
  irqstate_t flags;

  flags = enter_critical_section();

  irqinfo("NVIC (%s, irq=%d):\n", msg, irq);
  irqinfo(" INTCTRL: %08x VECTAB: %08x\n",
          getreg32(NVIC_INTCTRL), getreg32(NVIC_VECTAB));
  irqinfo(" SYSH ENABLE MEMFAULT: %08x BUSFAULT: %08x USGFAULT: %08x SYSTICK: %08x\n",
          getreg32(NVIC_SYSHCON_MEMFAULTENA), getreg32(NVIC_SYSHCON_BUSFAULTENA),
          getreg32(NVIC_SYSHCON_USGFAULTENA), getreg32(NVIC_SYSTICK_CTRL_ENABLE));
  irqinfo(" IRQ ENABLE: %08x %08x %08x\n",
          getreg32(NVIC_IRQ0_31_ENABLE), getreg32(NVIC_IRQ32_63_ENABLE),
          getreg32(NVIC_IRQ64_95_ENABLE));
  irqinfo(" SYSH_PRIO: %08x %08x %08x\n",
          getreg32(NVIC_SYSH4_7_PRIORITY), getreg32(NVIC_SYSH8_11_PRIORITY),
          getreg32(NVIC_SYSH12_15_PRIORITY));
  irqinfo(" IRQ PRIO: %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ0_3_PRIORITY), getreg32(NVIC_IRQ4_7_PRIORITY),
          getreg32(NVIC_IRQ8_11_PRIORITY), getreg32(NVIC_IRQ12_15_PRIORITY));
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ16_19_PRIORITY), getreg32(NVIC_IRQ20_23_PRIORITY),
          getreg32(NVIC_IRQ24_27_PRIORITY), getreg32(NVIC_IRQ28_31_PRIORITY));
#if NR_VECTORS >= (EFM32_IRQ_INTERRUPTS + 32)
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ32_35_PRIORITY), getreg32(NVIC_IRQ36_39_PRIORITY),
          getreg32(NVIC_IRQ40_43_PRIORITY), getreg32(NVIC_IRQ44_47_PRIORITY));
#if NR_VECTORS >= (EFM32_IRQ_INTERRUPTS + 48)
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ48_51_PRIORITY), getreg32(NVIC_IRQ52_55_PRIORITY),
          getreg32(NVIC_IRQ56_59_PRIORITY), getreg32(NVIC_IRQ60_63_PRIORITY));
#if NR_VECTORS >= (EFM32_IRQ_INTERRUPTS + 64)
  irqinfo(" %08x\n", getreg32(NVIC_IRQ64_67_PRIORITY));
#endif
#endif
#endif

  leave_critical_section(flags);
}
/* Search the task group's pending-signal queue for an entry matching
 * 'signo'.
 *
 * Pending signals may be added from interrupt level, so the search runs
 * inside a critical section.  Returns the matching entry or NULL if the
 * signal is not pending.
 */

static FAR sigpendq_t *sig_findpendingsignal(FAR struct task_group_s *group,
                                             int signo)
{
  FAR sigpendq_t *curr;
  irqstate_t flags;

  DEBUGASSERT(group != NULL);

  /* Lock out interrupt-level additions while traversing the list */

  flags = enter_critical_section();

  curr = (FAR sigpendq_t *)group->tg_sigpendingq.head;
  while (curr != NULL && curr->info.si_signo != signo)
    {
      curr = curr->flink;
    }

  leave_critical_section(flags);
  return curr;
}
/* Reclaim watchdog resources from a task that is being deleted.
 *
 * If the task is waiting on a timed event, tcb->waitdog is non-NULL; the
 * watchdog is cancelled and deleted here so that no timer event can fire
 * after the TCB is gone.  Race conditions remain in this area and it will
 * likely need to be revisited.
 */

void wd_recover(FAR struct tcb_s *tcb)
{
  irqstate_t state;

  state = enter_critical_section();

  if (tcb->waitdog != NULL)
    {
      /* Stop the timer, release the watchdog object, and clear the
       * reference so nothing else touches it.
       */

      (void)wd_cancel(tcb->waitdog);
      (void)wd_delete(tcb->waitdog);
      tcb->waitdog = NULL;
    }

  leave_critical_section(state);
}
/****************************************************************************
 * Name: timer_allocate
 *
 * Description:
 *   Is used to allocate a timer. Allocation does not involve memory
 *   allocation as the data for the timer are compile time generated.
 *   See OPT_BL_NUMBER_TIMERS
 *
 *   There is an inherent priority to the timers in that the first timer
 *   allocated is the first timer run per tick.
 *
 *   There are 3 modes of operation for the timers. All modes support an
 *   optional call back on expiration.
 *
 *   modeOneShot   - Specifies a one-shot timer. After notification timer
 *                   is resource is freed.
 *   modeRepeating - Specifies a repeating timer that will reload and
 *                   call an optional.
 *   modeTimeout   - Specifies a persistent start / stop timer.
 *
 *   modeStarted   - Or'ed in to start the timer when allocated
 *
 * Input Parameters:
 *   mode      - One of bl_timer_modes_t with the Optional modeStarted
 *   msfromnow - The reload and initial value for the timer in Ms.
 *   fc        - A pointer or NULL (0). If it is non null it can be any
 *               of the following:
 *
 *               a) A bl_timer_cb_t populated on the users stack or
 *                  in the data segment. The values are copied into the
 *                  internal data structure of the timer and therefore do
 *                  not have to persist after the call to timer_allocate
 *
 *               b) The address of null_cb. This is identical to passing
 *                  null for the value of fc.
 *
 * Returned Value:
 *   On success a value from 0 - OPT_BL_NUMBER_TIMERS-1 that is
 *   the bl_timer_id for subsequent timer operations
 *   -1 on failure. This indicates there are no free timers.
 *
 ****************************************************************************/

bl_timer_id timer_allocate(bl_timer_modes_t mode, time_ms_t msfromnow,
                           bl_timer_cb_t *fc)
{
  bl_timer_id t;
  irqstate_t s = enter_critical_section();

  /* Scan from the highest slot downward so the lowest-numbered free slot
   * is the one finally claimed (lower-numbered timers run first per tick).
   */

  for (t = arraySize(timers) - 1; (int8_t)t >= 0; t--)
    {
      if ((timers[t].ctl & inuse) == 0)
        {
          /* Claim the slot: set reload/initial count, copy the caller's
           * callback (or the null callback), and mark the slot in use
           * with the requested mode bits.
           */

          timers[t].reload = msfromnow;
          timers[t].count = msfromnow;
          timers[t].usr = fc ? *fc : null_cb;
          timers[t].ctl = (mode & (modeMsk | running)) | (inuse);
          break;
        }
    }

  leave_critical_section(s);

  /* If no slot was free, the loop decremented t past 0 so it wraps to
   * (bl_timer_id)-1, matching the documented failure value.
   */

  return t;
}
/* Repetitive Interrupt Timer (RIT) interrupt handler.
 *
 * Acknowledges the interrupt, advances the free-running nanosecond time
 * base, and -- if an armed alarm has expired -- converts the current time
 * to a timespec and reports the expiration to the scheduler.  Always
 * returns OK.
 */

static int lpc43_RIT_isr(int irq, FAR void *context)
{
  irqstate_t state;

  state = enter_critical_section();

  /* Acknowledge the RIT interrupt */

  putreg32(RIT_CTRL_INT, LPC43_RIT_CTRL);

  /* Advance the free-running time base by one tick */

  internal_timer += (uint64_t)RIT_TIMER_RESOLUTION;

  /* Has an armed alarm expired? */

  if (alarm > 0 && internal_timer >= alarm)
    {
      /* Split the nanosecond time base into seconds/nanoseconds and
       * notify the scheduler.
       */

      g_ts.tv_sec = (uint32_t)(internal_timer / 1000000000);
      g_ts.tv_nsec = (uint32_t)(internal_timer % 1000000000);
      sched_alarm_expiration(&g_ts);
    }

  leave_critical_section(state);
  return OK;
}
/* Check the client connection against the TCP wrappers (libwrap) access
 * control files.  Returns 0 if the connection is permitted, -1 if it is
 * refused.  Compiles to an unconditional success when USE_LIBWRAP is not
 * defined.
 */

static int auth_libwrap(CLI *c) {
#ifdef USE_LIBWRAP
    struct request_info request;
    int result;

    enter_critical_section(CRIT_NTOA); /* libwrap is not mt-safe */
    request_init(&request, RQ_DAEMON, c->opt->servname, RQ_FILE,
        c->local_rfd.fd, 0);
    fromhost(&request);
    result=hosts_access(&request);
    leave_critical_section(CRIT_NTOA);

    if (!result) {
        log(LOG_WARNING, "Connection from %s:%d REFUSED by libwrap",
            c->accepting_address, ntohs(c->addr.sin_port));
        log(LOG_DEBUG, "See hosts_access(5) for details");
        return -1; /* FAILED */
    }
#endif
    return 0; /* OK */
}
/* Attach or detach an interrupt handler for button 'id'.
 *
 * A non-NULL 'irqhandler' is configured, attached, and enabled with 'arg'
 * as its callback argument; NULL disables and detaches the current
 * handler.  Out-of-range IDs are silently ignored.
 *
 * Returns OK unconditionally (note: the previous handler is NOT returned,
 * despite what the original trailing comment claimed).
 */

int board_button_irq(int id, xcpt_t irqhandler, FAR void *arg)
{
  if (id >=0 && id < NUM_BUTTONS)
    {
      irqstate_t flags;

      /* Disable interrupts until we are done.  This guarantees that the
       * following operations are atomic.
       */

      flags = enter_critical_section();

      /* Are we attaching or detaching? */

      if (irqhandler != NULL)
        {
          /* Configure the interrupt */

          efm32_gpioirq(g_button_configs[id]);

          /* Attach and enable the interrupt */

          (void)irq_attach(g_button_irqs[id], irqhandler, arg);
          efm32_gpioirqenable(g_button_irqs[id]);
        }
      else
        {
          /* Disable and detach the interrupt */

          efm32_gpioirqdisable(g_button_irqs[id]);
          (void)irq_detach(g_button_irqs[id]);
        }

      leave_critical_section(flags);
    }

  return OK;
}