/*
 * Read the type byte of the PFN database entry for 'pfn'.
 * The entry is read under the global pfndb spinlock.
 */
uint8_t
pfndb_type(unsigned pfn)
{
	ipfn_t *entry = &pfndb[pfn];
	uint8_t type;

	assert(pfn <= pfndb_max);

	spinlock(&pfndblock);
	type = entry->type;
	spinunlock(&pfndblock);

	return type;
}
/*
 * Record page type 't' for 'pfn' during init-time overlap checking.
 * If the entry already holds a numerically higher type, the existing
 * type wins and neither the entry nor the stats are touched.
 */
void
pfndb_add(unsigned pfn, uint8_t t)
{
	ipfn_t *entry = &pfndb[pfn];
	uint8_t old;

	assert(pfn <= pfndb_max);

	spinlock(&pfndblock);
	old = entry->type;
	if (old > t) {
		/* Higher-valued existing type takes precedence. */
		spinunlock(&pfndblock);
		return;
	}
	entry->type = t;
	spinunlock(&pfndblock);

	pfndb_stats_dectype(old);
	pfndb_stats_inctype(t);
}
/*
 * Replace every PFN database entry of type 't1' with type 't2',
 * updating the per-type statistics for each substitution.
 *
 * Fix: the scan previously ran 'i < pfndb_max' and skipped the last
 * entry.  Every other pfndb accessor (pfndb_type, pfndb_add,
 * pfndb_settype, pfndb_getptr) asserts 'pfn <= pfndb_max', i.e.
 * pfndb_max is an inclusive maximum index, so the loop bound must be
 * inclusive as well.
 */
void
pfndb_subst(uint8_t t1, uint8_t t2)
{
	unsigned i;

	spinlock(&pfndblock);
	for (i = 0; i <= pfndb_max; i++) {
		if (pfndb[i].type == t1) {
			pfndb[i].type = t2;
			pfndb_stats_dectype(t1);
			pfndb_stats_inctype(t2);
		}
	}
	spinunlock(&pfndblock);
}
/*
 * Return the opaque pointer stored in the PFN database entry for
 * 'pfn'.  The field is read under the global pfndb spinlock.
 */
void *
pfndb_getptr(unsigned pfn)
{
	ipfn_t *entry = &pfndb[pfn];
	void *result;

	assert(pfn <= pfndb_max);

	spinlock(&pfndblock);
	result = (void *) entry->ptr;
	spinunlock(&pfndblock);

	return result;
}
/*
 * Flush any TLB invalidations pending on 'pmap' and clear its flush
 * flags.  A NULL 'pmap' means the current pmap.  A global flush
 * targets all CPUs (-1); a local flush targets only the CPUs in the
 * pmap's cpumap.
 */
void
pmap_commit(struct pmap *pmap)
{
	struct pmap *map = (pmap != NULL) ? pmap : pmap_current();

	spinlock(&map->lock);
	if (map->tlbflush & TLBF_GLOBAL) {
		__flush_tlbs(-1, TLBF_GLOBAL);
	} else if (map->tlbflush & TLBF_LOCAL) {
		__flush_tlbs(map->cpumap, TLBF_LOCAL);
	}
	map->tlbflush = 0;
	spinunlock(&map->lock);
}
/*
 * Unconditionally set the type of the PFN database entry for 'pfn'
 * to 't', then move one count from the old type's statistics bucket
 * to the new type's.
 */
void
pfndb_settype(unsigned pfn, uint8_t t)
{
	ipfn_t *entry = &pfndb[pfn];
	uint8_t old;

	assert(pfn <= pfndb_max);

	spinlock(&pfndblock);
	old = entry->type;
	entry->type = t;
	spinunlock(&pfndblock);

	pfndb_stats_dectype(old);
	pfndb_stats_inctype(t);
}
/*
 * Install 'nl1e' as the L1 page-table entry mapping 'va' in 'pmap'
 * and return the previous entry.  A NULL 'pmap' means the current
 * pmap; setting entries in a non-current pmap is deliberately
 * unsupported and panics.  The pending TLB-flush requirement is
 * recorded in pmap->tlbflush for a later pmap_commit().
 */
l1e_t pmap_setl1e(struct pmap *pmap, vaddr_t va, l1e_t nl1e)
{
	l1e_t ol1e, *l1p;

	if (pmap == NULL)
		pmap = pmap_current();

	/* Only the current pmap's L1 table is reachable via __val1tbl. */
	if (pmap == pmap_current())
		l1p = __val1tbl(va) + L1OFF(va);
	else
		panic("set to different pmap voluntarily not supported.");

	spinlock(&pmap->lock);
	ol1e = *l1p;
	/*
	 * NOTE(review): this assignment overwrites any flush flags
	 * accumulated by earlier calls since the last pmap_commit(),
	 * rather than OR-ing the new requirement in.  Confirm whether
	 * __tlbflushp() is meant to accumulate (tlbflush |= ...) — if
	 * so, a pending GLOBAL flush could be downgraded here.
	 */
	pmap->tlbflush = __tlbflushp(ol1e, nl1e);
	__setl1e(l1p, nl1e);
	spinunlock(&pmap->lock);

	return ol1e;
}
// (i)  This thread is responsible for waking up timed-out sleeping
//      threads on timers.
// (ii) Certain threads that are sleeping on certain events must be
//      woken up (nic thread, fdc thread, ...).
void timerthread(void)
{
	struct timerobject *t1, *t2;
	unsigned long oflags;
	unsigned long tmsec;
	struct kthread *thr, *thr1;
	int i, j, inc, cid;

	// Remember this thread's address in a global variable.
	timed_event_thread = (struct kthread *)current_thread;

	while (1) {
		// Boost priorities of long-waiting ready threads (> 3 seconds).
		// priocompute is set elsewhere (presumably by the clock tick)
		// to request a recomputation pass — TODO confirm.
		if (priocompute == 1) {
			CLI;
			spinlock(ready_qlock);
			priocompute = 0;
			// Millisecond timestamp; secs is masked to 20 bits so
			// the multiply does not overflow an unsigned long.
			tmsec = (secs & 0x000FFFFF) * 1000 + millesecs;
			// NOTE(review): scans queues 0..30 only; the queue-index
			// formula (159 - pri) / 5 below can produce 31, so the
			// last queue is never rescanned here — confirm whether
			// that is intentional (e.g. queue 31 = idle class).
			for (i=0; i<31; i++) {
				// Take the whole of queue i out.
				thr = (struct kthread *)ready_qhead[i];
				ready_qhead[i] = NULL;
				ready_qtail[i] = NULL;
				while (thr != NULL) {
					thr1 = (struct kthread *)thr->kt_qnext;
					// Bigger boost for threads ready > 3000 ms.
					if (tmsec - thr->kt_schedinf.sc_thrreadystart > 3000)
						inc = 4;
					else
						inc = 2; // Priority must be increased
					// Boost, then clamp to the class maximum and
					// reinsert into the queue matching the new
					// effective priority.
					thr->kt_schedinf.sc_cpupri += inc;
					// Irrespective of the class/type
					cid = thr->kt_schedinf.sc_cid - 1;
					if (thr->kt_schedinf.sc_cpupri > maxpri[cid])
						thr->kt_schedinf.sc_cpupri = maxpri[cid];
					// Queue index from the higher of cpu/inherited
					// priority: (159 - pri) / 5.
					j = (thr->kt_schedinf.sc_cpupri > thr->kt_schedinf.sc_inhpri) ?
						((159 - thr->kt_schedinf.sc_cpupri) / 5) :
						((159 - thr->kt_schedinf.sc_inhpri) / 5);
					// Append at the tail of queue j.
					thr->kt_qnext = NULL;
					thr->kt_qprev = (struct kthread *)ready_qtail[j];
					if (ready_qtail[j] != NULL) {
						ready_qtail[j]->kt_qnext = thr;
						ready_qtail[j] = thr;
					}
					else ready_qhead[j] = ready_qtail[j] = thr;
					thr = thr1;
				}
			}
			spinunlock(ready_qlock);
			STI;
		}

		// Check if the nic thread has been waiting for a transmit
		// interrupt for a long time (> 5 seconds); if so, wake it.
		// tx_wait_pkt is re-checked under nic.busy to close the race
		// with the interrupt path.
		if (tx_wait_pkt != NULL) {
			_lock(&nic.busy);
			if ((tx_wait_pkt != NULL) && (secs - nicwait_start) > 5) {
				event_wakeup((struct event_t *)&tx_wait_pkt->threadwait);
			}
			unlock(&nic.busy);
		}

		// Check if fdc thread is waiting for disk interrupt
		// for long time

		// Process expired timer objects.  The timer list appears to be
		// delta-encoded: each node's timer_count is relative to its
		// predecessor — TODO confirm (expired remainder is folded into
		// the successor below).
		if (timeoutflag == 1) {
			_lock(&timerq_lock);
			timeoutflag = 0;
			if (timers != NULL) {
				t1 = (struct timerobject *)timers;
				while (t1 != NULL && t1->timer_count <= 0) {
					t2 = (struct timerobject *)t1->timer_next;
					if (t2 != NULL) {
						// Fold the (non-positive) remainder into
						// the next delta and detach.
						t2->timer_count += (t1->timer_count);
						t2->timer_prev = NULL;
					}
					// Add this timed object to the corresponding process.
					t1->timer_prev = t1->timer_next = NULL;
					t1->timer_count = -1; // Not in use
					if (t1->timer_handler == (int (*)(void *))wakeup) {
						// Wakeup operation: only wake threads still asleep.
						if (t1->timer_ownerthread->kt_schedinf.sc_state == THREAD_STATE_SLEEP)
							wakeup(t1->timer_ownerthread);
					}
					else if (t1->timer_handler != NULL) // Otherwise discard
					{
						t1->timer_handler(t1->timer_handlerparam);
					}
					t1 = t2;
				}
				// First non-expired node becomes the new list head.
				timers = t1;
			}
			unlock(&timerq_lock);
		}

		// One round is completed.  Sleep until the next tick/event.
		event_sleep(&timerthr_sleep, NULL);
	}
}
/*
 * Pick the next thread to run and context-switch to it.
 *
 * Selection: scan the ready queues in index order; within the first
 * non-empty queue, pick the thread with the smallest value of
 * sc_usrpri + max(sc_cpupri, sc_inhpri) (smaller number wins here).
 * NOTE(review): the requeue index below uses (159 - pri) / 5 on
 * max(cpupri, inhpri) only, without sc_usrpri — confirm the two
 * formulas are meant to differ.
 *
 * Runs with interrupts disabled (CLI) and ready_qlock held.
 */
void scheduler(void)
{
	struct kthread *new_thread = NULL;
	struct kthread *t;
	int priority, p;
	unsigned long oflags;
	int i, cid;

	runrun = kprunrun = 0;
	CLI; // Interrupts are always disabled when scheduler is running
	spinlock(ready_qlock);

	// Find the best candidate in the first non-empty queue.
	for (i=0; i<MAXREADYQ; i++) {
		if (ready_qhead[i] == NULL)
			continue; // Go for the next Q
		t = new_thread = (struct kthread *)ready_qhead[i];
		priority = t->kt_schedinf.sc_usrpri +
			((t->kt_schedinf.sc_cpupri > t->kt_schedinf.sc_inhpri) ?
			 t->kt_schedinf.sc_cpupri : t->kt_schedinf.sc_inhpri);
		t = (struct kthread *)t->kt_qnext;
		while (t != NULL) {
			p = t->kt_schedinf.sc_usrpri +
				((t->kt_schedinf.sc_cpupri > t->kt_schedinf.sc_inhpri) ?
				 t->kt_schedinf.sc_cpupri : t->kt_schedinf.sc_inhpri);
			// Lower combined value is preferred.
			if (priority > p) {
				priority = p;
				new_thread = t;
			}
			t = (struct kthread *)t->kt_qnext;
		}
		break;
	}

	// No thread is chosen (maybe the idle thread is running and no
	// other thread is ready), or the selected thread is the idle
	// thread while the current thread is still runnable: do not
	// context switch, just refresh the current thread's quantum.
	if ((new_thread == NULL) ||
	    ((new_thread->kt_schedinf.sc_cid == SCHED_CLASS_IDLE) &&
	     (current_thread->kt_schedinf.sc_state == THREAD_STATE_RUNNING))) {
		current_thread->kt_schedinf.sc_thrreadystart =
			(secs & 0x000fffff) * 1000 + millesecs;
		current_thread->kt_schedinf.sc_tqleft = TIME_QUANTUM;
		spinunlock(ready_qlock);
		STI;
		return;
	}

	// If the current thread is still runnable, put it back on the
	// tail of the ready queue for its effective priority.
	if (current_thread->kt_schedinf.sc_state == THREAD_STATE_RUNNING) {
		current_thread->kt_schedinf.sc_state = THREAD_STATE_READY;
		// Queue index from the higher of cpu/inherited priority.
		cid = (current_thread->kt_schedinf.sc_cpupri > current_thread->kt_schedinf.sc_inhpri) ?
			((159 - current_thread->kt_schedinf.sc_cpupri) / 5) :
			((159 - current_thread->kt_schedinf.sc_inhpri) / 5);
		current_thread->kt_qnext = NULL;
		current_thread->kt_qprev = (struct kthread *)ready_qtail[cid];
		if (ready_qtail[cid] != NULL) {
			ready_qtail[cid]->kt_qnext = (struct kthread *)current_thread;
			ready_qtail[cid] = current_thread;
		}
		else ready_qhead[cid] = ready_qtail[cid] = current_thread;
	}

	// Remove the new thread from its ready queue (doubly-linked
	// unlink; cid recomputed from the same priority formula).
	cid = (new_thread->kt_schedinf.sc_cpupri > new_thread->kt_schedinf.sc_inhpri) ?
		((159 - new_thread->kt_schedinf.sc_cpupri) / 5) :
		((159 - new_thread->kt_schedinf.sc_inhpri) / 5);
	if (new_thread->kt_qnext != NULL)
		(new_thread->kt_qnext)->kt_qprev = new_thread->kt_qprev;
	else ready_qtail[cid]= new_thread->kt_qprev;
	if (new_thread->kt_qprev != NULL)
		(new_thread->kt_qprev)->kt_qnext = new_thread->kt_qnext;
	else ready_qhead[cid] = new_thread->kt_qnext;
	new_thread->kt_qnext = NULL;
	new_thread->kt_qprev = NULL;

	// Fresh quantum and ready-timestamp for the incoming thread.
	new_thread->kt_schedinf.sc_tqleft = TIME_QUANTUM;
	new_thread->kt_schedinf.sc_thrreadystart =
		(secs & 0x000fffff) * 1000 + millesecs;
	spinunlock(ready_qlock);
	context_switch(new_thread);
	STI;
	return;
}
/* Release the per-descriptor PIO spinlock for 'fd'. */
static inline ALWAYS_INLINE void PIO_UNLOCK(int fd)
{
	spinunlock(&pio_locks[fd]);
}
/*
 * ACPICA OS services layer: release a spinlock acquired via
 * AcpiOsAcquireLock.  'Flags' (the saved CPU flags returned by the
 * acquire call) is currently ignored here.
 */
void AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	/* XXX: IRQ in flags? Should the saved interrupt state in
	 * 'Flags' be restored here? — confirm against AcpiOsAcquireLock. */
	spinunlock(Handle);
}