/*! \brief sigaction() for the specified thread.
    A \a threadID < 0 specifies the current thread.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
    struct sigaction *oldAction)
{
    struct thread *thread;
    cpu_status state;
    status_t error = B_OK;

    if (signal < 1 || signal > MAX_SIGNO
        || (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
        return B_BAD_VALUE;

    state = disable_interrupts();
    GRAB_THREAD_LOCK();

    thread = (threadID < 0
        ? thread_get_current_thread()
        : thread_get_thread_struct_locked(threadID));

    if (thread) {
        if (oldAction) {
            // save previous sigaction structure
            memcpy(oldAction, &thread->sig_action[signal - 1],
                sizeof(struct sigaction));
        }

        if (act) {
            T(SigAction(thread, signal, act));

            // set new sigaction structure
            memcpy(&thread->sig_action[signal - 1], act,
                sizeof(struct sigaction));
            thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
        }

        if (act && act->sa_handler == SIG_IGN) {
            // remove pending signal if it should now be ignored
            atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
        } else if (act && act->sa_handler == SIG_DFL
            && (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
            // remove pending signal for those signals whose default
            // action is to ignore them
            atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
        }
    } else
        error = B_BAD_THREAD_ID;

    RELEASE_THREAD_LOCK();
    restore_interrupts(state);

    return error;
}
static status_t
keyboard_close(void *cookie)
{
    TRACE("ps2: keyboard_close enter\n");

    delete_packet_buffer(sKeyBuffer);
    delete_sem(sKeyboardSem);

    atomic_and(&ps2_device[PS2_DEVICE_KEYB].flags, ~PS2_FLAG_ENABLED);
    atomic_and(&sKeyboardOpenMask, 0);

    TRACE("ps2: keyboard_close done\n");
    return B_OK;
}
/** Set the value of a GPIO.
    \param gpio_id identifier of the GPIO, must be created by GPIO_MAKE_ID().
    \param value \ref true for VCC, \ref false for GND.
*/
void gpio_write(gpio gpio_id, bool value)
{
    int pin = gpio_id & 0xF;
    unsigned int mask;
    volatile unsigned int * ptr = (volatile unsigned int *) (gpio_id >> 4);

    /* I cannot do this test since not every PIC has PORTA and PORTG
    if(((ptr < GPIO_PORTA) || (ptr > GPIO_PORTG)) && ptr)
        ERROR(GPIO_INVALID_GPIO, &gpio_id);
    */
    if(ptr == GPIO_NONE)
        return;

    ptr += 2;

    if(value == false) {
        mask = ~(1 << pin);
        atomic_and(ptr, mask);
    } else if(value == true) {
        mask = 1 << pin;
        atomic_or(ptr, mask);
    } else
        ERROR(GPIO_INVALID_VALUE, &value);
}
static void
user_mutex_unlock_locked(int32* mutex, addr_t physicalAddress, uint32 flags)
{
    if (UserMutexEntry* entry = sUserMutexTable.Lookup(physicalAddress)) {
        // Someone is waiting -- set the locked flag. It might still be set,
        // but when using userland atomic operations, the caller will usually
        // have cleared it already.
        int32 oldValue = atomic_or(mutex, B_USER_MUTEX_LOCKED);

        // unblock the first thread
        entry->locked = true;
        entry->condition.NotifyOne();

        if ((flags & B_USER_MUTEX_UNBLOCK_ALL) != 0
            || (oldValue & B_USER_MUTEX_DISABLED) != 0) {
            // unblock all the other waiting threads as well
            for (UserMutexEntryList::Iterator it
                    = entry->otherEntries.GetIterator();
                UserMutexEntry* otherEntry = it.Next();) {
                otherEntry->locked = true;
                otherEntry->condition.NotifyOne();
            }
        }
    } else {
        // no one is waiting -- clear locked flag
        atomic_and(mutex, ~(int32)B_USER_MUTEX_LOCKED);
    }
}
static int32
InterruptHandler(void* data)
{
    int32 handled = B_UNHANDLED_INTERRUPT;
    DeviceInfo& di = *((DeviceInfo*)data);
    int32* flags = &(di.flags);

    // Is someone already handling an interrupt for this device?
    if (atomic_or(flags, SKD_HANDLER_INSTALLED) & SKD_HANDLER_INSTALLED)
        return B_UNHANDLED_INTERRUPT;

    if (InterruptIsVBI()) {     // was interrupt a VBI?
        ClearVBI();             // clear interrupt

        handled = B_HANDLED_INTERRUPT;

        // Release vertical blanking semaphore.
        sem_id& sem = di.sharedInfo->vertBlankSem;

        if (sem >= 0) {
            int32 blocked;
            if ((get_sem_count(sem, &blocked) == B_OK) && (blocked < 0)) {
                release_sem_etc(sem, -blocked, B_DO_NOT_RESCHEDULE);
                handled = B_INVOKE_SCHEDULER;
            }
        }
    }

    atomic_and(flags, ~SKD_HANDLER_INSTALLED);  // note we're not in handler anymore

    return handled;
}
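/*
 * The handler above guards against re-entry with a single atomic
 * read-modify-write: atomic_or() returns the previous flag word, so only the
 * caller that actually flipped SKD_HANDLER_INSTALLED from 0 to 1 proceeds.
 * A minimal, self-contained sketch of that pattern follows; it uses C11
 * <stdatomic.h> as a stand-in for the kernel's atomic_or()/atomic_and(), and
 * HANDLER_INSTALLED is a hypothetical flag name chosen for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define HANDLER_INSTALLED 0x01

static atomic_int sFlags;

static bool
enter_handler(void)
{
    /* fetch_or returns the old value: if the bit was already set, back off */
    return (atomic_fetch_or(&sFlags, HANDLER_INSTALLED) & HANDLER_INSTALLED) == 0;
}

static void
leave_handler(void)
{
    atomic_fetch_and(&sFlags, ~HANDLER_INSTALLED);
}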
void sndboard_rethink(void)
{
    struct toccata_data *data = &toccata;

    atomic_and(&uae_int_requested, ~0x200);
    if (data->toccata_irq)
        atomic_or(&uae_int_requested, 0x200);
}
int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
    struct thread *thread = thread_get_current_thread();
    sigset_t oldMask = atomic_get(&thread->sig_block_mask);

    if (set != NULL) {
        T(SigProcMask(how, *set));

        switch (how) {
            case SIG_BLOCK:
                atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
                break;
            case SIG_UNBLOCK:
                atomic_and(&thread->sig_block_mask, ~*set);
                break;
            case SIG_SETMASK:
                atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
                break;
            default:
                return B_BAD_VALUE;
        }

        update_current_thread_signals_flag();
    }

    if (oldSet != NULL)
        *oldSet = oldMask;

    return B_OK;
}
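/*
 * Calling side: the kernel implementation above is reached through the usual
 * POSIX interface. A small userland sketch that temporarily blocks SIGINT
 * around a critical region -- standard sigprocmask() usage, not specific to
 * the code above:
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t block, old;

    sigemptyset(&block);
    sigaddset(&block, SIGINT);

    if (sigprocmask(SIG_BLOCK, &block, &old) != 0) {
        perror("sigprocmask");
        return 1;
    }

    /* ... critical region: SIGINT stays pending instead of interrupting ... */

    sigprocmask(SIG_SETMASK, &old, NULL);   /* restore the previous mask */
    return 0;
}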
static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                unsigned size)
{
    EduState *edu = opaque;

    if (addr < 0x80 && size != 4) {
        return;
    }

    if (addr >= 0x80 && size != 4 && size != 8) {
        return;
    }

    switch (addr) {
    case 0x04:
        edu->addr4 = ~val;
        break;
    case 0x08:
        if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
            break;
        }
        /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only
         * set in this function and it is under the iothread mutex.
         */
        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = val;
        atomic_or(&edu->status, EDU_STATUS_COMPUTING);
        qemu_cond_signal(&edu->thr_cond);
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        if (val & EDU_STATUS_IRQFACT) {
            atomic_or(&edu->status, EDU_STATUS_IRQFACT);
        } else {
            atomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
        }
        break;
    case 0x60:
        edu_raise_irq(edu, val);
        break;
    case 0x64:
        edu_lower_irq(edu, val);
        break;
    case 0x80:
        dma_rw(edu, true, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, true, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, true, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        if (!(val & EDU_DMA_RUN)) {
            break;
        }
        dma_rw(edu, true, &val, &edu->dma.cmd, true);
        break;
    }
}
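/*
 * Guest-side view of the same registers: writing the operand at offset 0x08
 * kicks off the worker thread (EDU_STATUS_COMPUTING stays set until the result
 * is ready), and bit EDU_STATUS_IRQFACT at offset 0x20 selects interrupt-on-
 * completion. A minimal polling sketch, assuming BAR0 is already mapped at
 * `regs` and that offset 0x08 reads back the result and 0x20 reads back the
 * status word -- the read handler is not shown above, so those read offsets
 * are taken on trust from the device's documented layout.
 */
#include <stdint.h>

#define EDU_STATUS_COMPUTING 0x01u

static uint32_t
edu_factorial(volatile uint32_t *regs, uint32_t n)
{
    regs[0x08 / 4] = n;                             /* start the computation */
    while (regs[0x20 / 4] & EDU_STATUS_COMPUTING)   /* busy-wait on status */
        ;
    return regs[0x08 / 4];                          /* read back n! */
}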
/** Set the direction of a GPIO.
    \param gpio_id identifier of the GPIO, must be created by GPIO_MAKE_ID().
    \param dir direction of the GPIO, either \ref GPIO_OUTPUT or \ref GPIO_INPUT .
*/
void gpio_set_dir(gpio gpio_id, int dir)
{
    int pin = gpio_id & 0xF;
    unsigned int mask;
    volatile unsigned int * ptr = (volatile unsigned int *) (gpio_id >> 4);

    /* I cannot do this test since not every PIC has PORTA and PORTG
    if(((ptr < GPIO_PORTA) || (ptr > GPIO_PORTG)) && ptr)
        ERROR(GPIO_INVALID_GPIO, &gpio_id);
    */
    if(ptr == GPIO_NONE)
        return;

    if(dir == GPIO_OUTPUT) {
        mask = ~(1 << pin);
        atomic_and(ptr, mask);
    } else if (dir == GPIO_INPUT) {
        mask = 1 << pin;
        atomic_or(ptr, mask);
    } else
        ERROR(GPIO_INVALID_DIR, &dir);
}
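/*
 * Typical call sequence for the two helpers above: build an identifier once,
 * configure the pin as an output, then drive it. The GPIO_MAKE_ID() arguments
 * shown here are an assumption based on the id layout used in this code (port
 * register address in the upper bits, pin number in the low nibble); check the
 * macro's definition for the exact parameter order.
 */
void blink_init(void)
{
    gpio led = GPIO_MAKE_ID(GPIO_PORTB, 5);   /* hypothetical port/pin choice */

    gpio_set_dir(led, GPIO_OUTPUT);   /* clear the TRIS bit -> output */
    gpio_write(led, true);            /* drive the pin to VCC */
}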
static int32
eng_interrupt(void *data)
{
    int32 handled = B_UNHANDLED_INTERRUPT;
    device_info *di = (device_info *)data;
    shared_info *si = di->si;
    int32 *flags = &(si->flags);
    vuint32 *regs;

    /* is someone already handling an interrupt for this device? */
    if (atomic_or(flags, SKD_HANDLER_INSTALLED) & SKD_HANDLER_INSTALLED) {
        goto exit0;
    }

    /* get regs */
    regs = di->regs;

    /* was it a VBI? */
    if (caused_vbi(regs)) {
        /* clear the interrupt */
        clear_vbi(regs);

        /* release the semaphore */
        handled = thread_interrupt_work(flags, regs, si);
    }

    /* note that we're not in the handler any more */
    atomic_and(flags, ~SKD_HANDLER_INSTALLED);

exit0:
    return handled;
}
static status_t
user_mutex_lock_locked(int32* mutex, addr_t physicalAddress, const char* name,
    uint32 flags, bigtime_t timeout, MutexLocker& locker)
{
    // mark the mutex locked + waiting
    int32 oldValue = atomic_or(mutex,
        B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING);

    // The mutex might have been unlocked (or disabled) in the meantime.
    if ((oldValue & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) == 0
        || (oldValue & B_USER_MUTEX_DISABLED) != 0) {
        // clear the waiting flag and be done
        atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
        return B_OK;
    }

    // we have to wait

    // add the entry to the table
    UserMutexEntry entry;
    entry.address = physicalAddress;
    entry.locked = false;
    add_user_mutex_entry(&entry);

    // wait
    ConditionVariableEntry waitEntry;
    entry.condition.Init((void*)physicalAddress, "user mutex");
    entry.condition.Add(&waitEntry);

    locker.Unlock();
    status_t error = waitEntry.Wait(flags, timeout);
    locker.Lock();

    // dequeue
    if (!remove_user_mutex_entry(&entry)) {
        // no one is waiting anymore -- clear the waiting flag
        atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
    }

    if (error != B_OK
        && (entry.locked || (*mutex & B_USER_MUTEX_DISABLED) != 0)) {
        // timeout or interrupt, but the mutex was unlocked or disabled in time
        error = B_OK;
    }

    return error;
}
static status_t
driver_free(void* cookie)
{
    TRACE("cx23882: driver free\n");

    atomic_and(&sOpenMask, ~(1 << ((interface_cookie *)cookie)->dev_id));
    free(cookie);
    return B_OK;
}
/*! Updates the thread::flags field according to what signals are pending.
    Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
    if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
        atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
    else
        atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
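/*
 * The same "recompute a summary flag" idiom, reduced to portable C11 atomics.
 * This is only a stand-in for the kernel's atomic_get()/atomic_or()/
 * atomic_and(); the names pending, blocked and flags are illustrative, not
 * taken from the code above.
 */
#include <stdatomic.h>

#define SIGNALS_PENDING 0x01u

static atomic_uint pending, blocked, flags;

static void
update_signals_flag(void)
{
    if (atomic_load(&pending) & ~atomic_load(&blocked))
        atomic_fetch_or(&flags, SIGNALS_PENDING);
    else
        atomic_fetch_and(&flags, ~SIGNALS_PENDING);
}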
void gpio_set_opendrain(gpio gpio_id, int opendrain)
{
    /* Since this is a configuration function, it's not time-critical,
       so I can do it the "dumb" way. */
    int port, pin, mask;
    volatile unsigned int * ptr;

    pin = gpio_id & 0xF;
    port = gpio_id >> 4;

    if(!(gpio_id & 0xFFF0))   /* GPIO_NONE */
        return;

#if ODC_EXIST(A)
    if(port == (unsigned int) GPIO_PORTA) {
        ptr = (volatile unsigned int *) &ODCA;
    } else
#endif
#if ODC_EXIST(B)
    if(port == (unsigned int) GPIO_PORTB) {
        ptr = (volatile unsigned int *) &ODCB;
    } else
#endif
#if ODC_EXIST(C)
    if(port == (unsigned int) GPIO_PORTC) {
        ptr = (volatile unsigned int *) &ODCC;
    } else
#endif
#if ODC_EXIST(D)
    if(port == (unsigned int) GPIO_PORTD) {
        ptr = (volatile unsigned int *) &ODCD;
    } else
#endif
#if ODC_EXIST(E)
    if(port == (unsigned int) GPIO_PORTE) {
        ptr = (volatile unsigned int *) &ODCE;
    } else
#endif
#if ODC_EXIST(F)
    if(port == (unsigned int) GPIO_PORTF) {
        ptr = (volatile unsigned int *) &ODCF;
    } else
#endif
#if ODC_EXIST(G)
    if(port == (unsigned int) GPIO_PORTG) {
        ptr = (volatile unsigned int *) &ODCG;
    } else
#endif
    {
        ERROR(GPIO_INVALID_GPIO, &gpio_id);
    }

    if(opendrain) {
        mask = 1 << pin;
        atomic_or(ptr, mask);
    } else {
        mask = ~(1 << pin);
        atomic_and(ptr, mask);
    }
}
void
BTimeSource::DirectStop(bigtime_t at, bool immediate)
{
    CALLED();
    if (fBuf)
        atomic_and(&fBuf->isrunning, 0);
    else
        fStarted = false;
}
void
CryptTask::Put(ThreadContext* threadContext)
{
    for (int32 i = 0; i < sThreadCount; i++) {
        if (fContext.fThreadContexts[i] == threadContext) {
            atomic_and(&fUsedThreadContexts, ~(1L << i));
            break;
        }
    }
}
void
mutex_unlock(mutex *lock)
{
    // clear the locked flag
    int32 oldValue = atomic_and(&lock->lock, ~(int32)B_USER_MUTEX_LOCKED);

    if ((oldValue & B_USER_MUTEX_WAITING) != 0
        && (oldValue & B_USER_MUTEX_DISABLED) == 0) {
        _kern_mutex_unlock(&lock->lock, 0);
    }
}
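/*
 * For symmetry, a sketch of what the matching acquire fast path typically
 * looks like: try to set the locked bit with one atomic_or() and only fall
 * back to the kernel when the previous value shows the mutex was already
 * locked or had waiters. This is only an illustration of the protocol implied
 * by mutex_unlock() above, not the library's actual mutex_lock();
 * _kern_mutex_lock() and its argument list are assumptions.
 */
static status_t
mutex_lock_sketch(mutex *lock, const char *name)
{
    int32 oldValue = atomic_or(&lock->lock, B_USER_MUTEX_LOCKED);
    if ((oldValue & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) == 0)
        return B_OK;                        // uncontended: we own it now

    // contended: let the kernel block us until the holder unlocks
    return _kern_mutex_lock(&lock->lock, name, 0, 0);
}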
void
release_spinlock(spinlock *lock)
{
#if DEBUG_SPINLOCK_LATENCIES
    test_latency(lock);
#endif

    if (sNumCPUs > 1) {
        if (are_interrupts_enabled())
            panic("release_spinlock: attempt to release lock %p with "
                "interrupts enabled\n", lock);

#if B_DEBUG_SPINLOCK_CONTENTION
        {
            int32 count = atomic_and(&lock->lock, 0) - 1;
            if (count < 0) {
                panic("release_spinlock: lock %p was already released\n",
                    lock);
            } else {
                // add to the total count -- deal with carry manually
                if ((uint32)atomic_add(&lock->count_low, count) + count
                        < (uint32)count) {
                    atomic_add(&lock->count_high, 1);
                }
            }
        }
#else
        if (atomic_and((int32*)lock, 0) != 1)
            panic("release_spinlock: lock %p was already released\n", lock);
#endif
    } else {
#if DEBUG_SPINLOCKS
        if (are_interrupts_enabled()) {
            panic("release_spinlock: attempt to release lock %p with "
                "interrupts enabled\n", lock);
        }
        if (atomic_and((int32*)lock, 0) != 1)
            panic("release_spinlock: lock %p was already released\n", lock);
#endif

#if DEBUG_SPINLOCK_LATENCIES
        test_latency(lock);
#endif
    }
}
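/*
 * Why `atomic_and(lock, 0) != 1` signals a double release: the lock word is 1
 * while held and 0 while free, so atomically swapping in 0 on release must
 * return 1. A compact test-and-set pair showing the same invariant, using C11
 * atomics as a stand-in (no contention counting, no interrupt checks):
 */
#include <stdatomic.h>
#include <assert.h>

typedef atomic_int simple_spinlock;

static void
simple_acquire(simple_spinlock *lock)
{
    while (atomic_exchange(lock, 1) != 0)
        ;   /* spin until we are the ones who flipped 0 -> 1 */
}

static void
simple_release(simple_spinlock *lock)
{
    /* the previous value must be 1; anything else is a release of a free lock */
    int old = atomic_exchange(lock, 0);
    assert(old == 1);
}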
static void hwtrap_check_int(void)
{
    if (currprefs.uaeboard < 2)
        return;

    if (hwtrap_waiting == 0) {
        atomic_and(&uae_int_requested, ~0x2000);
    } else {
        atomic_or(&uae_int_requested, 0x2000);
        set_special_exter(SPCFLAG_UAEINT);
    }
}
static status_t
compat_free(void *cookie)
{
    struct ifnet *ifp = cookie;

    if_printf(ifp, "compat_free()\n");

    // TODO: empty out the send queue

    atomic_and(&ifp->open_count, 0);
    put_module(NET_STACK_MODULE_NAME);
    return B_OK;
}
int32
BBlockFIFO::EndGet()
{
    if (!(atomic_and(&_mFlags, ~flagPendingGet) & flagPendingGet))
        return B_ERROR;

    int32 o = _mPendingGet - _mGetOff;
    _mGetOff = _mPendingGet;
    if (o < 0)
        o += _mAreaSize;

    // part of buffer is now free to put into again
    status_t err = release_sem_etc(_mPutSem, o, B_DO_NOT_RESCHEDULE);
    LEAVE_GET
    return err;
}
bool rethink_traps(void)
{
    if (currprefs.uaeboard < 2)
        return false;

    if (istrapwait()) {
        atomic_or(&uae_int_requested, 0x4000);
        set_special_exter(SPCFLAG_UAEINT);
        return true;
    } else {
        atomic_and(&uae_int_requested, ~0x4000);
        return false;
    }
}
static int32 et6000Interrupt(void *data)
{
    int32 handled = B_UNHANDLED_INTERRUPT;
    ET6000DeviceInfo *di = (ET6000DeviceInfo *)data;
    ET6000SharedInfo *si = di->si;
    int32 *flags = &(si->flags);

#if DEBUG > 0
    pd->total_interrupts++;
#endif

    /* is someone already handling an interrupt for this device? */
    if (atomic_or(flags, ET6000_HANDLER_INSTALLED) & ET6000_HANDLER_INSTALLED) {
#if DEBUG > 0
        kprintf("ET6000: Already in handler!\n");
#endif
        goto exit0;
    }

    switch (et6000aclInterruptCause(si->mmRegs)) {
    case ET6000_ACL_INT_CAUSE_NONE:
        handled = B_UNHANDLED_INTERRUPT;
        break;
    case ET6000_ACL_INT_CAUSE_READ:
        et6000aclReadInterruptClear(si->mmRegs);
        handled = B_HANDLED_INTERRUPT;
        break;
    case ET6000_ACL_INT_CAUSE_WRITE:
        et6000aclWriteInterruptClear(si->mmRegs);
        handled = B_HANDLED_INTERRUPT;
        break;
    case ET6000_ACL_INT_CAUSE_BOTH: /* Can this happen at all? */
        et6000aclReadInterruptClear(si->mmRegs);
        et6000aclWriteInterruptClear(si->mmRegs);
        handled = B_HANDLED_INTERRUPT;
        break;
    }

#if DEBUG > 0
    /* increment the counter for this device */
    if (handled == B_HANDLED_INTERRUPT)
        di->interrupt_count++;
#endif

    /* note that we're not in the handler any more */
    atomic_and(flags, ~ET6000_HANDLER_INSTALLED);

exit0:
    return handled;
}
void
x86_restart_syscall(iframe* frame)
{
    Thread* thread = thread_get_current_thread();

    atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
    atomic_or(&thread->flags, THREAD_FLAGS_SYSCALL_RESTARTED);

    // Get back the original system call number and modify the frame to
    // re-execute the syscall instruction.
    frame->ax = frame->orig_rax;
    frame->ip -= 2;

    TSYSCALL(RestartSyscall());
}
static status_t
b57_close(void *cookie)
{
    struct be_b57_dev *pUmDevice = (struct be_b57_dev *)(cookie);

    if (cookie == NULL)
        return B_OK;

    LM_DisableInterrupt(&pUmDevice->lm_dev);
    LM_Halt(&pUmDevice->lm_dev);
    pUmDevice->lm_dev.InitDone = 0;
    atomic_and(&pUmDevice->opened, 0);

    return B_OK;
}
static status_t
keyboard_open(const char *name, uint32 flags, void **_cookie)
{
    status_t status;

    TRACE("ps2: keyboard_open %s\n", name);

    if (atomic_or(&sKeyboardOpenMask, 1) != 0)
        return B_BUSY;

    status = probe_keyboard();
    if (status != B_OK) {
        INFO("ps2: keyboard probing failed\n");
        ps2_service_notify_device_removed(&ps2_device[PS2_DEVICE_KEYB]);
        goto err1;
    }

    INFO("ps2: keyboard found\n");

    sKeyboardSem = create_sem(0, "keyboard_sem");
    if (sKeyboardSem < 0) {
        status = sKeyboardSem;
        goto err1;
    }

    sKeyBuffer = create_packet_buffer(KEY_BUFFER_SIZE * sizeof(at_kbd_io));
    if (sKeyBuffer == NULL) {
        status = B_NO_MEMORY;
        goto err2;
    }

    *_cookie = NULL;

    ps2_device[PS2_DEVICE_KEYB].disconnect = &ps2_keyboard_disconnect;
    ps2_device[PS2_DEVICE_KEYB].handle_int = &keyboard_handle_int;

    atomic_or(&ps2_device[PS2_DEVICE_KEYB].flags, PS2_FLAG_ENABLED);

    TRACE("ps2: keyboard_open %s success\n", name);
    return B_OK;

err2:
    delete_sem(sKeyboardSem);
err1:
    atomic_and(&sKeyboardOpenMask, 0);
    TRACE("ps2: keyboard_open %s failed\n", name);
    return status;
}
static status_t
b57_open(const char *name, uint32 flags, void **cookie)
{
    struct be_b57_dev *pDevice = NULL;
    int i;

    *cookie = NULL;
    for (i = 0; i < cards_found; i++) {
        if (strcmp(dev_list[i], name) == 0) {
            *cookie = pDevice = &be_b57_dev_cards[i];
            break;
        }
    }

    if (*cookie == NULL)
        return B_FILE_NOT_FOUND;

    if (atomic_or(&pDevice->opened, 1)) {
        *cookie = pDevice = NULL;
        return B_BUSY;
    }

    install_io_interrupt_handler(pDevice->pci_data.u.h0.interrupt_line,
        b57_interrupt, *cookie, 0);

    if (LM_InitializeAdapter(&pDevice->lm_dev) != LM_STATUS_SUCCESS) {
        atomic_and(&pDevice->opened, 0);
        remove_io_interrupt_handler(pDevice->pci_data.u.h0.interrupt_line,
            b57_interrupt, *cookie);
        *cookie = NULL;
        return B_ERROR;
    }

    /*QQ_InitQueue(&pDevice->rx_out_of_buf_q.Container,
        MAX_RX_PACKET_DESC_COUNT);*/
    //pDevice->lm_dev.PhyCrcCount = 0;

    LM_EnableInterrupt(&pDevice->lm_dev);

    dprintf("Broadcom 57xx adapter successfully inited at %s:\n", name);
    dprintf("MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
        pDevice->lm_dev.NodeAddress[0], pDevice->lm_dev.NodeAddress[1],
        pDevice->lm_dev.NodeAddress[2], pDevice->lm_dev.NodeAddress[3],
        pDevice->lm_dev.NodeAddress[4], pDevice->lm_dev.NodeAddress[5]);
    dprintf("PCI Data: 0x%08x\n", pDevice->pci_data.u.h0.base_registers[0]);
    dprintf("IRQ: %d\n", pDevice->pci_data.u.h0.interrupt_line);

    return B_OK;
}
int32
BBlockFIFO::EndPut(bool atEndOfData)
{
    if (!(atomic_and(&_mFlags, ~flagPendingPut) & flagPendingPut))
        return B_ERROR;

    int32 o = _mPendingPut - _mPutOff;
    _mPutOff = _mPendingPut;
    if (o < 0)
        o += _mAreaSize;

    // part of buffer is now full to get from again
    status_t err = release_sem_etc(_mGetSem, o, B_DO_NOT_RESCHEDULE);
    if (atEndOfData) {
        atomic_or(&_mFlags, flagEndOfData);
        delete_sem(_mGetSem);
        _mGetSem = -1;
    }
    LEAVE_PUT
    return err;
}
/*
 * We purposely use a thread, so that users are forced to wait for the status
 * register.
 */
static void *edu_fact_thread(void *opaque)
{
    EduState *edu = opaque;

    while (1) {
        uint32_t val, ret = 1;

        qemu_mutex_lock(&edu->thr_mutex);
        while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 &&
                        !edu->stopping) {
            qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
        }

        if (edu->stopping) {
            qemu_mutex_unlock(&edu->thr_mutex);
            break;
        }

        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);

        while (val > 0) {
            ret *= val--;
        }

        /*
         * We should sleep for a random period here, so that students are
         * forced to check the status properly.
         */

        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = ret;
        qemu_mutex_unlock(&edu->thr_mutex);
        atomic_and(&edu->status, ~EDU_STATUS_COMPUTING);

        if (atomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
            qemu_mutex_lock_iothread();
            edu_raise_irq(edu, FACT_IRQ);
            qemu_mutex_unlock_iothread();
        }
    }

    return NULL;
}
static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
    bool deadly)
{
    uint64 signalMask = SIGNAL_TO_MASK(signal);

    // first check the ignore signal masks the debugger specified for the thread
    if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
        atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
        return true;
    }

    if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
        return true;

    // deliver the event
    return user_debug_handle_signal(signal, handler, deadly);
}