/* Up or "V" operation on a semaphore. Increments SEMA's value and wakes up one thread of those waiting for SEMA, if any. This function may be called from an interrupt handler. */ void sema_up (struct semaphore *sema) { enum intr_level old_level; ASSERT (sema != NULL); old_level = intr_disable (); if (!list_empty (&(sema->waiters))) { thread_unblock (list_entry (list_pop_highest_priority (&(sema->waiters)), struct thread, elem)); }
/*! Initializes the serial port device for queued interrupt-driven I/O. With interrupt-driven I/O we don't waste CPU time waiting for the serial device to become ready. */ void serial_init_queue(void) { enum intr_level old_level; if (mode == UNINIT) init_poll(); ASSERT(mode == POLL); intr_register_ext(0x20 + 4, serial_interrupt, "serial"); mode = QUEUE; old_level = intr_disable(); write_ier(); intr_set_level(old_level); }
/*! Sleeps for approximately TICKS timer ticks.  Interrupts must be
    turned on.

    Fix: the wake-up time is now recorded BEFORE thread_sleep() is
    called.  If thread_sleep() enrolls the thread in the sleeper
    bookkeeping (e.g. an ordered list keyed by wake_time), it must not
    observe a stale or uninitialized wake_time. */
void timer_sleep(int64_t ticks) {
    int64_t start = timer_ticks();
    enum intr_level old_level;

    ASSERT(intr_get_level() == INTR_ON);

    old_level = intr_disable();
    /* Publish the absolute deadline first so both the sleep machinery
       and the timer interrupt handler see the correct value. */
    thread_current()->wake_time = start + ticks;
    thread_sleep();
    thread_block();
    intr_set_level(old_level);
}
/*
 * Release one level of the big lock.  When the outermost level is
 * dropped, pass ownership to the next ticket holder.
 *
 * Fix: the release barrier must execute BEFORE the ticket increment.
 * The increment is what makes the lock available to the next CPU, so
 * every store (and load) performed inside the critical section has to
 * be globally visible first; issuing the membar after the increment
 * leaves a window where the new owner can see stale data.
 */
void
__mp_unlock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	u_int64_t s;

	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		/* Release barrier, then hand the lock over. */
		sparc_membar(StoreStore | LoadStore);
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}
/* Blocks the calling core until its NoC data-response interrupt
   signals completion.  Registers the response handler, enables
   interrupts, spin-waits on the per-core `done` flag, and disables
   interrupts again before returning.

   NOTE(review): `done[]` must be declared volatile (or updated through
   a compiler barrier) for the spin loop to be reliable — confirm its
   declaration.  Cleanup: removed dead commented-out code and gave the
   function a proper (void) prototype. */
void noc_receive(void) {
    int id = get_cpuid();

    done[id] = 0;
    exc_register(18, &__data_resp_handler);

    intr_unmask_all();
    intr_enable();

    /* Busy-wait until the handler marks reception complete. */
    while (done[id] != 1)
        ;

    intr_disable();
}
/* Sleeps for approximately TICKS timer ticks. Interrupts must be turned on. */ void timer_sleep (int64_t ticks) { ASSERT (intr_get_level () == INTR_ON); enum intr_level level = intr_disable(); if(ticks > 0) { thread_current()->sleep_time = ticks; thread_block(); } intr_set_level(level); }
/* Up or "V" operation on a semaphore. Increments SEMA's value and wakes up one thread of those waiting for SEMA, if any. This function may be called from an interrupt handler. */ void sema_up (struct semaphore *sema) { enum intr_level old_level; ASSERT (sema != NULL); old_level = intr_disable (); if (!list_empty (&sema->waiters)) { list_sort (&sema->waiters, (list_less_func *) &compare_priority, NULL); struct thread *waiter_thread = list_entry (list_pop_front (&sema->waiters), struct thread, elem); thread_unblock (waiter_thread); }
void spinlock_enter(void) { struct thread *td; td = curthread; if (td->td_md.md_spinlock_count == 0) { td->td_md.md_spinlock_count = 1; td->td_md.md_saved_sstatus_ie = intr_disable(); } else td->td_md.md_spinlock_count++; critical_enter(); }
/* Program the ICH SpeedStep control register to the requested
   frequency setting.  Returns 0 on success, EINVAL if SET does not
   match a known frequency, or ENXIO if the hardware refused the
   transition.  Runs the register sequence with interrupts disabled. */
static int
ichss_set(device_t dev, const struct cf_setting *set)
{
	struct ichss_softc *sc;
	uint8_t bmval, new_val, old_val, req_val;
	uint64_t rate;
	register_t regs;

	/* Look up appropriate bit value based on frequency. */
	sc = device_get_softc(dev);
	if (CPUFREQ_CMP(set->freq, sc->sets[0].freq))
		req_val = 0;
	else if (CPUFREQ_CMP(set->freq, sc->sets[1].freq))
		req_val = ICHSS_CTRL_BIT;
	else
		return (EINVAL);
	DPRINT("ichss: requested setting %d\n", req_val);

	/* Disable interrupts and get the other register contents. */
	regs = intr_disable();
	old_val = ICH_GET_REG(sc->ctrl_reg) & ~ICHSS_CTRL_BIT;

	/*
	 * Disable bus master arbitration, write the new value to the control
	 * register, and then re-enable bus master arbitration.
	 */
	bmval = ICH_GET_REG(sc->bm_reg) | ICHSS_BM_DISABLE;
	ICH_SET_REG(sc->bm_reg, bmval);
	ICH_SET_REG(sc->ctrl_reg, old_val | req_val);
	ICH_SET_REG(sc->bm_reg, bmval & ~ICHSS_BM_DISABLE);

	/* Get the new value and re-enable interrupts. */
	new_val = ICH_GET_REG(sc->ctrl_reg);
	intr_restore(regs);

	/* Check if the desired state was indeed selected. */
	if (req_val != (new_val & ICHSS_CTRL_BIT)) {
		device_printf(sc->dev, "transition to %d failed\n", req_val);
		return (ENXIO);
	}

	/* Re-initialize our cycle counter if we don't know this new state. */
	if (sc->sets[req_val].freq == CPUFREQ_VAL_UNKNOWN) {
		cpu_est_clockrate(0, &rate);
		sc->sets[req_val].freq = rate / 1000000;
		DPRINT("ichss: set calibrated new rate of %d\n",
		    sc->sets[req_val].freq);
	}
	return (0);
}
/* Exercises the exception/interrupt machinery: installs fault, trap
   and interrupt handlers, runs an interrupt-heavy printing loop with
   self-checks, then deliberately triggers a trap and two faults. */
int main(void) {
    // register exception handlers
    for (unsigned i = 0; i < 32; i++) {
        exc_register(i, &fault_handler);
    }
    exc_register(8, &trap_handler);
    exc_register(18, &intr_handler);
    exc_register(19, &intr_handler);
    exc_register(20, &intr_handler);
    exc_register(21, &intr_handler);

    // unmask interrupts
    intr_unmask_all();
    // clear pending flags
    intr_clear_all_pending();
    // enable interrupts
    intr_enable();

    // a loop that prints "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_" N times and does some self-checking
    volatile unsigned starts = 0;
    volatile unsigned ends = 0;
    volatile unsigned sent = 0;
    for (unsigned k = 0; k < N; k++) {
        starts++;
        for (unsigned i = 0; i < 32; i++) {
            putchar('@'+i);
            sent+=i;
        }
        putchar('\n');
        ends++;
        // 496 = 0+1+...+31, so `sent` must advance by exactly that per pass
        if (sent != 496*(k+1) || starts != ends) {
            LEDS = 0x55;
            abort();
        }
    }

    // disable interrupts again
    intr_disable();

    // call exception vector number 8
    trap(8);

    // trigger illegal operation fault
    asm volatile(".word 0xffffffff"); // illegal operation

    // trigger illegal memory access fault, never reached
    (*((volatile _IODEV unsigned *)0xffffffff)) = 0;

    return 0;
}
/*
 * Collapse this CPU's recursion depth on the big lock down to exactly
 * one level and report how many extra levels were released, so the
 * caller can restore them later.
 */
int
__mp_release_all_but_one(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *self = &mpl->mpl_cpus[cpu_number()];
	u_int64_t sir;
	int depth;

	sir = intr_disable();
	depth = self->mplc_depth;
	self->mplc_depth = 1;
	intr_restore(sir);

	return (depth - 1);
}
/* Acquire the big kernel lock on this CPU.  Recursive acquisitions
   only bump the per-CPU depth; the outermost acquisition takes a
   ticket and spins until it is served. */
void
__mp_lock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	u_int64_t s;

	/* Depth/ticket bookkeeping must not be torn by an interrupt. */
	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	__mp_lock_spin(mpl, cpu->mplc_ticket);
	/* Acquire barrier: keep critical-section loads/stores from being
	   hoisted above the point the lock is observed taken. */
	sparc_membar(LoadLoad | LoadStore);
}
/* Sleeps for approximately TICKS timer ticks. Interrupts must be turned on. */ void timer_sleep (int64_t ticks) { int64_t start = timer_ticks (); ASSERT (intr_get_level () == INTR_ON); enum intr_level old_level = intr_disable (); // sleep the thread for `ticks` seconds, // until the tick becomes [start + ticks] thread_sleep_until (start + ticks); intr_set_level (old_level); }
/* Enter a spin-lock section: on the outermost entry interrupts are
   masked and the previous PIL is stashed in the thread's MD area;
   nested entries just deepen the count.  Always enters a critical
   section so the thread cannot be preempted. */
void
spinlock_enter(void)
{
	struct thread *self = curthread;

	if (self->td_md.md_spinlock_count == 0)
		self->td_md.md_saved_pil = intr_disable();
	self->td_md.md_spinlock_count++;
	critical_enter();
}
/* dismiss the alarm and unblock the thread */ static void dismiss_alarm (struct alarm *alrm) { enum intr_level old_level; ASSERT (is_alarm (alrm)); /* remove from alarm_list, critical section */ old_level = intr_disable (); list_remove (&alrm->elem); thread_unblock (alrm->thrd); /* unblock the thread */ intr_set_level (old_level); }
/*! Down or "P" operation on a semaphore. Waits for SEMA's value to become positive and then atomically decrements it. This function may sleep, so it must not be called within an interrupt handler. This function may be called with interrupts disabled, but if it sleeps then the next scheduled thread will probably turn interrupts back on. */ void sema_down(struct semaphore *sema) { enum intr_level old_level; ASSERT(sema != NULL); ASSERT(!intr_context()); old_level = intr_disable(); while (sema->value == 0) { list_push_back(&sema->waiters, &thread_current()->elem); thread_block(); } sema->value--; intr_set_level(old_level); }
/* Sleeps for approximately TICKS timer ticks.  Interrupts must be
   turned on.

   Fix: sleep_ticks is now written with interrupts already disabled.
   Previously the timer interrupt could fire between the store and
   thread_block() and observe (or decrement) sleep_ticks for a thread
   that was still running, potentially waking it before it ever
   blocked. */
void
timer_sleep (int64_t ticks)
{
  enum intr_level old_level;

  ASSERT (intr_get_level () == INTR_ON);

  if (ticks > 0)
    {
      old_level = intr_disable ();
      thread_current ()->sleep_ticks = ticks;
      thread_block ();
      intr_set_level (old_level);
    }
}
/* Sleeps for approximately TICKS timer ticks.  Interrupts must be
   turned on.

   Fix: the sleeping_ticks store is moved inside the interrupts-off
   region.  Writing it while the timer interrupt can still fire risks
   the handler acting on a thread that has not blocked yet. */
void
timer_sleep (int64_t ticks)
{
  ASSERT (intr_get_level () == INTR_ON);

  if (ticks > 0)
    {
      struct thread *cur = thread_current ();
      enum intr_level old_intr_level = intr_disable ();
      cur->sleeping_ticks = ticks;
      thread_block ();
      intr_set_level (old_intr_level);
    }
}
/* Up or "V" operation on a semaphore. Increments SEMA's value and wakes up one thread of those waiting for SEMA, if any. This function may be called from an interrupt handler. */ void sema_up (struct semaphore *sema) { enum intr_level old_level; struct thread * wakeup_thread = NULL; ASSERT (sema != NULL); old_level = intr_disable (); if (!list_empty (&sema->waiters)) { wakeup_thread = list_entry (list_pop_front (&sema->waiters), struct thread, elem); thread_unblock (wakeup_thread); }
/* Up or "V" operation on a semaphore. Increments SEMA's value and wakes up one thread of those waiting for SEMA, if any. This function may be called from an interrupt handler. */ void sema_up (struct semaphore *sema) { enum intr_level old_level; ASSERT (sema != NULL); old_level = intr_disable (); if (!list_empty (&sema->waiters)) { //unblock the waiting thread with the highest priority //struct thread *max_waiting_thread = list_entry(list_max(&sema->waiters, left_less_than_right, NULL), struct thread, elem); //list_remove(&max_waiting_thread->elem); //thread_unblock(max_waiting_thread); thread_unblock(list_entry(list_pop_front(&sema->waiters), struct thread, elem)); }
/*===========================================================================*
 *				init_profile_clock			     *
 *===========================================================================*/
/* Start the statistical-profiling clock at FREQ and hook its interrupt
   handler.  If the architecture cannot provide a profiling clock
   (arch_init_profile_clock returns a negative value), no handler is
   registered.  Fix: removed the unused local variable `r`. */
PUBLIC void init_profile_clock(u32_t freq)
{
  int irq;

  intr_disable();

  if((irq = arch_init_profile_clock(freq)) >= 0) {
	/* Register interrupt handler for statistical system profiling. */
	profile_clock_hook.proc_nr_e = CLOCK;
	put_irq_handler(&profile_clock_hook, irq, profile_clock_handler);
	enable_irq(&profile_clock_hook);
  }

  intr_enable();
}
/* Up or "V" operation on a semaphore. Increments SEMA's value and wakes up one thread of those waiting for SEMA, if any. This function may be called from an interrupt handler. */ void sema_up (struct semaphore *sema) { enum intr_level old_level; ASSERT (sema != NULL); old_level = intr_disable (); sema->value++; if (!list_empty (&sema->waiters)) { struct list_elem *e = list_max (&sema->waiters, priority_less, NULL); list_remove (e); thread_unblock(list_entry (e, struct thread, elem)); } intr_set_level (old_level); }
/* Called from the timer path: if the earliest alarm has expired,
   remove it from the list and wake its thread. */
void alarm_check(void) {
    struct alarm* al;
    enum intr_level old_level;

    old_level = intr_disable();
    if (!list_empty(&alarms)) {
        /* Only the front entry is examined — presumably `alarms` is
           kept sorted by expiration; confirm at the insertion site. */
        al = list_entry(list_begin(&alarms), struct alarm, elem);
        if (al->expiration < timer_ticks()) {
            list_remove(&al->elem);
            thread_unblock(al->th);
        }
    }
    /* NOTE(review): the interrupt-level restore and the function's
       closing brace lie outside this chunk. */
/* Sleeps for approximately TICKS timer ticks. Interrupts must be turned on. */ void timer_sleep (int64_t ticks) { struct thread *cur = thread_current (); ASSERT (intr_get_level () == INTR_ON); /* 2015.09.15. Add for avoid busy-waiting(s) */ cur->sleep_ticks = timer_ticks() + ticks; list_insert_ordered(&waiting_list, &cur->waitingelem, &is_small_sleep_ticks, NULL); enum intr_level old_level = intr_disable (); thread_block (); intr_set_level(old_level); /* 2015.09.15. Add for avoid busy-waiting(e) */ }
/* 关闭inode或减少inode的打开数 */ void inode_close(struct inode* inode) { /* 若没有进程再打开此文件,将此inode去掉并释放空间 */ enum intr_status old_status = intr_disable(); if (--inode->i_open_cnts == 0) { list_remove(&inode->inode_tag); // 将I结点从part->open_inodes中去掉 /* inode_open时为实现inode被所有进程共享,已经在sys_malloc为inode分配了内核空间 */ struct task_struct* cur = running_thread(); uint32_t* cur_pagedir_bak = cur->pgdir; cur->pgdir = NULL; sys_free(inode); // 释放inode的内核空间 cur->pgdir = cur_pagedir_bak; } intr_set_status(old_status); }
/* * There are i486 based upgrade products for i386 machines. * In this case, BIOS doesn't enable CPU cache. */ static void init_i486_on_386(void) { register_t saveintr; #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) need_post_dma_flush = 1; #endif saveintr = intr_disable(); load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0, NW = 0 */ intr_restore(saveintr); }
/* Sleeps for approximately TICKS timer ticks. Interrupts must be turned on. */ void timer_sleep (int64_t ticks) { struct thread *t = thread_current (); enum intr_level old_level = intr_get_level (); t->awake_time = timer_ticks() + ticks; ASSERT (old_level == INTR_ON); intr_disable (); list_insert_ordered (&sleep_list, &t->sleepelem, time_compare, NULL); thread_block (); intr_set_level (old_level); }
/* Enter a spin-lock section on arm64: the outermost entry masks
   interrupts and remembers the previous DAIF state in the thread's MD
   area; nested entries simply bump the count.  A critical section is
   always entered. */
void
spinlock_enter(void)
{
	struct thread *td = curthread;

	if (td->td_md.md_spinlock_count != 0) {
		td->td_md.md_spinlock_count++;
	} else {
		register_t saved_daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = saved_daif;
	}
	critical_enter();
}
/* DMA-map sync helper: forces store ordering/visibility by issuing a
   committing block store from the floating-point registers to a
   scratch buffer. */
void
ofw_pci_dmamap_sync_stst_order_common(void)
{
	static u_char buf[VIS_BLOCKSIZE] __aligned(VIS_BLOCKSIZE);
	register_t reg, s;

	/* No interrupts while the FPU state is borrowed. */
	s = intr_disable();
	/* Enable the FPU (FPRS_FEF) just long enough for the store. */
	reg = rd(fprs);
	wr(fprs, reg | FPRS_FEF, 0);
	/* Block store of %f0.. through ASI_BLK_COMMIT_S into an aligned
	   scratch buffer, followed by a full memory barrier. */
	__asm __volatile("stda %%f0, [%0] %1" : : "r" (buf),
	    "n" (ASI_BLK_COMMIT_S));
	membar(Sync);
	/* Restore the previous FPU-enable state and interrupt level. */
	wr(fprs, reg, 0);
	intr_restore(s);
}
/* Acquires LOCK, sleeping until it becomes available if necessary. The lock must not already be held by the current thread.And also it will take care of the donation of priority. This function may sleep, so it must not be called within an interrupt handler. This function may be called with interrupts disabled, but interrupts will be turned back on if we need to sleep. */ void lock_acquire (struct lock *lock) { ASSERT (lock != NULL); ASSERT (!intr_context ()); ASSERT (!lock_held_by_current_thread (lock)); enum intr_level old_level; old_level = intr_disable (); /* call a function that will check if the thread can donate its priority */ check_for_donation(lock); intr_set_level(old_level); sema_down (&lock->semaphore); lock->holder = thread_current (); }//end of lock_accquire function