/* Wait for the queue to be non-empty or closed.  Returns TRUE for a successful
 * wait, FALSE on Qclose (without error).
 *
 * Called with q ilocked.  May error out, back through the caller, with the
 * irqsave lock unlocked. */
static bool qwait(struct queue *q)
{
	/* wait for data */
	for (;;) {
		if (q->bfirst != NULL)
			break;
		if (q->state & Qclosed) {
			if (++q->eof > 3) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, "multiple reads on a closed queue");
			}
			if (*q->err && strcmp(q->err, errno_to_string(ECONNABORTED)) != 0) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, q->err);
			}
			return FALSE;
		}
		/* We set Qstarve regardless of whether we are non-blocking or
		 * not.  Qstarve tracks the edge detection of the queue being
		 * empty. */
		q->state |= Qstarve;
		if (q->state & Qnonblock) {
			spin_unlock_irqsave(&q->lock);
			error(EAGAIN, "queue empty");
		}
		spin_unlock_irqsave(&q->lock);
		/* may throw an error() */
		rendez_sleep(&q->rr, notempty, q);
		spin_lock_irqsave(&q->lock);
	}
	return TRUE;
}
/* Block, waiting for the queue to be non-empty or closed.  Returns with the
 * spinlock held.  Returns TRUE when the queue is not empty, FALSE if it was
 * naturally closed.  Throws an error o/w. */
static bool qwait_and_ilock(struct queue *q, int qio_flags)
{
	while (1) {
		spin_lock_irqsave(&q->lock);
		if (q->bfirst != NULL)
			return TRUE;
		if (q->state & Qclosed) {
			if (++q->eof > 3) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, "multiple reads on a closed queue");
			}
			if (q->err[0]) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, q->err);
			}
			return FALSE;
		}
		/* We set Qstarve regardless of whether we are non-blocking or
		 * not.  Qstarve tracks the edge detection of the queue being
		 * empty. */
		q->state |= Qstarve;
		if (qio_flags & QIO_NON_BLOCK) {
			spin_unlock_irqsave(&q->lock);
			error(EAGAIN, "queue empty");
		}
		spin_unlock_irqsave(&q->lock);
		/* may throw an error() */
		rendez_sleep(&q->rr, notempty, q);
	}
}
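/* Editor's sketch (assumption, not part of the original excerpt): both wait
 * loops above sleep via rendez_sleep(&q->rr, notempty, q).  A minimal rendez
 * condition for that call could look like the following; it returns nonzero
 * when the sleeper should wake, i.e. when data arrived or the queue was
 * closed. */
static int notempty(void *a)
{
	struct queue *q = a;

	return (q->state & Qclosed) || q->bfirst != NULL;
}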
int qpass(struct queue *q, struct block *b)
{
	int dlen, len, dowakeup;

	/* sync with qread */
	dowakeup = 0;
	spin_lock_irqsave(&q->lock);
	if (q->len >= q->limit) {
		freeblist(b);
		spin_unlock_irqsave(&q->lock);
		return -1;
	}
	if (q->state & Qclosed) {
		len = blocklen(b);
		freeblist(b);
		spin_unlock_irqsave(&q->lock);
		return len;
	}

	/* add buffer to queue */
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	len = BALLOC(b);
	dlen = BLEN(b);
	QDEBUG checkb(b, "qpass");
	while (b->next) {
		b = b->next;
		QDEBUG checkb(b, "qpass");
		len += BALLOC(b);
		dlen += BLEN(b);
	}
	q->blast = b;
	q->len += len;
	q->dlen += dlen;

	if (q->len >= q->limit / 2)
		q->state |= Qflow;

	if (q->state & Qstarve) {
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	return len;
}
/*
 * Put character, possibly a rune, into read queue at interrupt time.
 * Called at interrupt time to process a character.
 */
int kbdputc(struct queue *unused_queue, int ch)
{
	int i, n;
	char buf[3];
	// Akaros does not use Rune et al.
	//Rune r;
	int r;
	char *next;

	if (kbd.ir == NULL)
		return 0;	/* in case we're not inited yet */

	spin_lock_irqsave(&kbd.lockputc);	/* just a mutex */
	r = ch;
	//n = runetochar(buf, &r);
	// Fake Rune support.
	n = 1;
	buf[0] = r;
	for (i = 0; i < n; i++) {
		next = kbd.iw + 1;
		if (next >= kbd.ie)
			next = kbd.istage;
		if (next == kbd.ir)
			break;
		*kbd.iw = buf[i];
		kbd.iw = next;
	}
	spin_unlock_irqsave(&kbd.lockputc);
	return 0;
}
static void kmesgputs(char *str, int n)
{
	unsigned int nn, d;

	spin_lock_irqsave(&kmesg.lk);
	/* take the tail of huge writes */
	if (n > sizeof kmesg.buf) {
		d = n - sizeof kmesg.buf;
		str += d;
		n -= d;
	}

	/* slide the buffer down to make room */
	nn = kmesg.n;
	if (nn + n >= sizeof kmesg.buf) {
		d = nn + n - sizeof kmesg.buf;
		if (d)
			memmove(kmesg.buf, kmesg.buf + d, sizeof kmesg.buf - d);
		nn -= d;
	}

	/* copy the data in */
	memmove(kmesg.buf + nn, str, n);
	nn += n;
	kmesg.n = nn;
	spin_unlock_irqsave(&kmesg.lk);
}
/*
 *  flow control, get producer going again
 *  called with q ilocked
 */
static void qwakeup_iunlock(struct queue *q)
{
	int dowakeup = 0;

	/*
	 *  if writer flow controlled, restart
	 *
	 *  This used to be
	 *      q->len < q->limit/2
	 *  but it slows down tcp too much for certain write sizes.
	 *  I really don't understand it completely.  It may be
	 *  due to the queue draining so fast that the transmission
	 *  stalls waiting for the app to produce more data.  - presotto
	 */
	if ((q->state & Qflow) && q->len < q->limit) {
		q->state &= ~Qflow;
		dowakeup = 1;
	}

	spin_unlock_irqsave(&q->lock);

	/* wakeup flow controlled writers */
	if (dowakeup) {
		if (q->kick)
			q->kick(q->arg);
		rendez_wakeup(&q->wr);
	}
	qwake_cb(q, FDTAP_FILT_WRITABLE);
}
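/* Editor's sketch (assumption): the writer-side condition that
 * qwakeup_iunlock() pairs with.  A blocking writer would sleep on q->wr
 * until the queue drains back under its limit (clearing Qflow) or the queue
 * is closed. */
static int qnotfull(void *a)
{
	struct queue *q = a;

	return q->len < q->limit || (q->state & Qclosed);
}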
long qibwrite(struct queue *q, struct block *b)
{
	int n, dowakeup;

	dowakeup = 0;

	n = BLEN(b);

	spin_lock_irqsave(&q->lock);

	QDEBUG checkb(b, "qibwrite");
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	q->len += BALLOC(b);
	q->dlen += n;

	if (q->state & Qstarve) {
		q->state &= ~Qstarve;
		dowakeup = 1;
	}

	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		if (q->kick)
			q->kick(q->arg);
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	return n;
}
void flush_lcd(unsigned long priv)
{
	struct cdata_t *cdata = (struct cdata_t *)priv;
	unsigned char *fb;
	unsigned char *pixel;
	int index;
	int offset;
	int i;
	int j;

	spin_lock_irqsave(&cdata->lock);
	fb = (unsigned char *)cdata->fb;
	pixel = cdata->buf;
	index = cdata->index;
	offset = cdata->offset;
	spin_unlock_irqsave(&cdata->lock);

	for (i = 0; i < index; i++) {
		writeb(pixel[i], fb + offset);
		offset++;
		if (offset >= LCD_SIZE)
			offset = 0;

		// Lab
		for (j = 0; j < 100000; j++);
	}

	cdata->index = 0;
	cdata->offset = offset;
}
/*
 *  Mark a queue as closed.  No further IO is permitted.
 *  All blocks are released.
 */
void qclose(struct queue *q)
{
	struct block *bfirst;

	if (q == NULL)
		return;

	/* mark it */
	spin_lock_irqsave(&q->lock);
	q->state |= Qclosed;
	q->state &= ~(Qflow | Qstarve | Qdropoverflow);
	q->err[0] = 0;
	bfirst = q->bfirst;
	q->bfirst = 0;
	q->len = 0;
	q->dlen = 0;
	spin_unlock_irqsave(&q->lock);

	/* free queued blocks */
	freeblist(bfirst);

	/* wake up readers/writers */
	rendez_wakeup(&q->rr);
	rendez_wakeup(&q->wr);
	qwake_cb(q, FDTAP_FILT_HANGUP);
}
/*
 *  copy from offset in the queue
 */
struct block *qcopy_old(struct queue *q, int len, uint32_t offset)
{
	int sofar;
	int n;
	struct block *b, *nb;
	uint8_t *p;

	nb = block_alloc(len, MEM_WAIT);

	spin_lock_irqsave(&q->lock);

	/* go to offset */
	b = q->bfirst;
	for (sofar = 0;; sofar += n) {
		if (b == NULL) {
			spin_unlock_irqsave(&q->lock);
			return nb;
		}
		n = BLEN(b);
		if (sofar + n > offset) {
			p = b->rp + offset - sofar;
			n -= offset - sofar;
			break;
		}
		QDEBUG checkb(b, "qcopy");
		b = b->next;
	}

	/* copy bytes from there */
	for (sofar = 0; sofar < len;) {
		if (n > len - sofar)
			n = len - sofar;
		PANIC_EXTRA(b);
		memmove(nb->wp, p, n);
		qcopycnt += n;
		sofar += n;
		nb->wp += n;
		b = b->next;
		if (b == NULL)
			break;
		n = BLEN(b);
		p = b->rp;
	}
	spin_unlock_irqsave(&q->lock);

	return nb;
}
// primitive barrier function.  all cores call this.
void waiton_barrier(barrier_t *barrier)
{
	uint8_t local_ready = barrier->ready;

	spin_lock_irqsave(&barrier->lock);
	barrier->current_count--;
	if (barrier->current_count) {
		spin_unlock_irqsave(&barrier->lock);
		while (barrier->ready == local_ready)
			cpu_relax();
	} else {
		spin_unlock_irqsave(&barrier->lock);
		reset_barrier(barrier);
		wmb();
		barrier->ready++;
	}
}
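/* Editor's sketch (assumption): the helpers waiton_barrier() relies on.
 * Field and function names are inferred from the usage above; init_barrier()
 * arms the barrier for 'count' cores and reset_barrier() rearms it for the
 * next round before 'ready' is bumped to release the spinners. */
static void init_barrier(barrier_t *barrier, uint32_t count)
{
	spinlock_init_irqsave(&barrier->lock);
	barrier->init_count = count;
	barrier->current_count = count;
	barrier->ready = 0;
}

static void reset_barrier(barrier_t *barrier)
{
	barrier->current_count = barrier->init_count;
}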
/* Aka, the IMR.  Reading the data ports returns the current masks (OCW1). */
uint16_t pic_get_mask(void)
{
	uint16_t ret;

	spin_lock_irqsave(&piclock);
	ret = (inb(PIC2_DATA) << 8) | inb(PIC1_DATA);
	spin_unlock_irqsave(&piclock);
	return ret;
}
int commit_checklist_nowait(checklist_t *list, checklist_mask_t *mask)
{
	int e = 0;

	if ((e = commit_checklist_wait(list, mask)))
		return e;
	// give up the lock, since we won't wait for completion
	spin_unlock_irqsave(&list->lock);
	return e;
}
void pic_mask_irq(struct irq_handler *unused, int trap_nr)
{
	int irq = trap_nr - PIC1_OFFSET;

	spin_lock_irqsave(&piclock);
	if (irq > 7)
		outb(PIC2_DATA, inb(PIC2_DATA) | (1 << (irq - 8)));
	else
		outb(PIC1_DATA, inb(PIC1_DATA) | (1 << irq));
	spin_unlock_irqsave(&piclock);
}
void pic_send_eoi(int trap_nr)
{
	int irq = trap_nr - PIC1_OFFSET;

	spin_lock_irqsave(&piclock);
	// all irqs beyond the first seven need to be chained to the slave
	if (irq > 7)
		outb(PIC2_CMD, PIC_EOI);
	outb(PIC1_CMD, PIC_EOI);
	spin_unlock_irqsave(&piclock);
}
/* Returns an unused u16, or -1 on failure (pool full or corruption).
 *
 * The invariant is that the stackpointer (TOS) will always point to the next
 * slot that can be popped, if there are any.  All free slots will be below
 * the TOS, ranging from indexes [0, TOS), where if TOS == 0, then there are
 * no free slots to push.  The last valid slot is when TOS == size - 1. */
int get_u16(struct u16_pool *id)
{
	uint16_t v;

	spin_lock_irqsave(&id->lock);
	if (id->tos == id->size) {
		spin_unlock_irqsave(&id->lock);
		return -1;
	}
	v = id->ids[id->tos++];
	spin_unlock_irqsave(&id->lock);
	/* v is ours, we can freely read and write its check field */
	if (id->check[v] != 0xfe) {
		printk("BAD! %d is already allocated (0x%x)\n", v,
		       id->check[v]);
		return -1;
	}
	id->check[v] = 0x5a;
	return v;
}
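/* Editor's sketch (assumption): setting up a pool so that the invariant
 * described above holds.  Every id starts unallocated (check byte 0xfe), the
 * free ids sit in ids[], and TOS starts at 0: get_u16() pops by incrementing
 * TOS and put_u16() pushes by decrementing it.  The ids[] and check[] arrays
 * are assumed to be allocated by the caller. */
static void u16_pool_reset(struct u16_pool *id, uint16_t size)
{
	spinlock_init_irqsave(&id->lock);
	id->size = size;
	id->tos = 0;
	for (int i = 0; i < size; i++) {
		id->ids[i] = i;
		id->check[i] = 0xfe;	/* unallocated */
	}
}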
// Must be called after commit_checklist
// Assumed we held the lock if we ever call this
int waiton_checklist(checklist_t *list)
{
	extern atomic_t outstanding_calls;

	// could consider breaking out early, like above, and erroring out
	while (!checklist_is_clear(list))
		cpu_relax();
	spin_unlock_irqsave(&list->lock);
	// global counter of wrappers either waited on or being contended for.
	atomic_dec(&outstanding_calls);
	return 0;
}
/*
 *  mark a queue as no longer hung up.  resets the wake_cb.
 */
void qreopen(struct queue *q)
{
	spin_lock_irqsave(&q->lock);
	q->state &= ~Qclosed;
	q->state |= Qstarve;
	q->eof = 0;
	q->limit = q->inilim;
	q->wake_cb = 0;
	q->wake_data = 0;
	spin_unlock_irqsave(&q->lock);
}
void pic_unmask_irq(struct irq_handler *unused, int trap_nr)
{
	int irq = trap_nr - PIC1_OFFSET;

	printd("PIC unmask for TRAP %d, IRQ %d\n", trap_nr, irq);
	spin_lock_irqsave(&piclock);
	if (irq > 7) {
		outb(PIC2_DATA, inb(PIC2_DATA) & ~(1 << (irq - 8)));
		outb(PIC1_DATA, inb(PIC1_DATA) & 0xfb);	// make sure irq2 is unmasked
	} else
		outb(PIC1_DATA, inb(PIC1_DATA) & ~(1 << irq));
	spin_unlock_irqsave(&piclock);
}
static uint16_t __pic_get_irq_reg(int ocw3)
{
	uint16_t ret;

	spin_lock_irqsave(&piclock);
	/* OCW3 to PIC CMD to get the register values.  PIC2 is chained, and
	 * represents IRQs 8-15.  PIC1 is IRQs 0-7, with 2 being the chain */
	outb(PIC1_CMD, ocw3);
	outb(PIC2_CMD, ocw3);
	ret = (inb(PIC2_CMD) << 8) | inb(PIC1_CMD);
	spin_unlock_irqsave(&piclock);
	return ret;
}
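/* Editor's sketch (assumption): the usual wrappers around
 * __pic_get_irq_reg().  0x0a and 0x0b are the standard 8259 OCW3 commands to
 * read the Interrupt Request Register and the In-Service Register. */
uint16_t pic_get_irr(void)
{
	return __pic_get_irq_reg(0x0a);
}

uint16_t pic_get_isr(void)
{
	return __pic_get_irq_reg(0x0b);
}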
static void perfmon_do_cores_alloc(void *opaque)
{
	struct perfmon_alloc *pa = (struct perfmon_alloc *) opaque;
	struct perfmon_cpu_context *cctx = PERCPU_VARPTR(counters_env);
	int i;

	spin_lock_irqsave(&cctx->lock);
	if (perfmon_is_fixed_event(&pa->ev)) {
		uint64_t fxctrl_value = read_msr(MSR_CORE_PERF_FIXED_CTR_CTRL), tmp;

		i = PMEV_GET_EVENT(pa->ev.event);
		if (i >= (int) cpu_caps.fix_counters_x_proc) {
			i = -EINVAL;
		} else if (fxctrl_value & (FIXCNTR_MASK << i)) {
			i = -EBUSY;
		} else {
			cctx->fixed_counters[i] = pa->ev;
			PMEV_SET_EN(cctx->fixed_counters[i].event, 1);
			tmp = perfmon_get_fixevent_mask(&pa->ev, i, fxctrl_value);
			perfmon_enable_fix_event(i, TRUE);
			write_msr(MSR_CORE_PERF_FIXED_CTR0 + i,
				  -(int64_t) pa->ev.trigger_count);
			write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, tmp);
		}
	} else {
		for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) {
			if (cctx->counters[i].event == 0) {
				if (!perfmon_event_available(i))
					warn_once("Counter %d is free but not available", i);
				else
					break;
			}
		}
		if (i < (int) cpu_caps.counters_x_proc) {
			cctx->counters[i] = pa->ev;
			PMEV_SET_EN(cctx->counters[i].event, 1);
			perfmon_enable_event(i, TRUE);
			write_msr(MSR_IA32_PERFCTR0 + i,
				  -(int64_t) pa->ev.trigger_count);
			write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i,
				  cctx->counters[i].event);
		} else {
			i = -ENOSPC;
		}
	}
	spin_unlock_irqsave(&cctx->lock);

	pa->cores_counters[core_id()] = (counter_t) i;
}
void percpu_counter_set(struct percpu_counter *fbc, int64_t amount)
{
	int cpu;
	unsigned long flags;

	spin_lock_irqsave(&fbc->lock);
	for_each_possible_cpu(cpu) {
		int32_t *pcount = _PERCPU_VARPTR(*fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock_irqsave(&fbc->lock);
}
void put_u16(struct u16_pool *id, int v)
{
	/* we could check whether v is in range before dereferencing. */
	if (id->check[v] != 0x5a) {
		printk("BAD! freeing non-allocated: %d(0x%x)\n", v,
		       id->check[v]);
		return;
	}
	id->check[v] = 0xfe;
	spin_lock_irqsave(&id->lock);
	id->ids[--id->tos] = v;
	spin_unlock_irqsave(&id->lock);
}
void __print_func_entry(const char *func, const char *file)
{
	char tentabs[] = "\t\t\t\t\t\t\t\t\t\t";	// ten tabs and a \0
	char *ourtabs = &tentabs[10 - MIN(tab_depth, 10)];

	if (!printx_on)
		return;
	if (is_blacklisted(func))
		return;
	spin_lock_irqsave(&lock);
	__print_hdr();
	printk("%s%s() in %s\n", ourtabs, func, file);
	spin_unlock_irqsave(&lock);
	tab_depth++;
}
/*
 *  get next block from a queue, return null if nothing there
 */
struct block *qget(struct queue *q)
{
	int dowakeup;
	struct block *b;

	/* sync with qwrite */
	spin_lock_irqsave(&q->lock);

	b = q->bfirst;
	if (b == NULL) {
		q->state |= Qstarve;
		spin_unlock_irqsave(&q->lock);
		return NULL;
	}
	q->bfirst = b->next;
	b->next = 0;
	q->len -= BALLOC(b);
	q->dlen -= BLEN(b);
	QDEBUG checkb(b, "qget");

	/* if writer flow controlled, restart */
	if ((q->state & Qflow) && q->len < q->limit / 2) {
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->wr);
		/* We only send the writable event on wakeup, which is edge
		 * triggered */
		qwake_cb(q, FDTAP_FILT_WRITABLE);
	}

	return b;
}
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
int64_t __percpu_counter_sum(struct percpu_counter *fbc)
{
	int64_t ret;
	int cpu;
	unsigned long flags;

	spin_lock_irqsave(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		int32_t *pcount = _PERCPU_VARPTR(*fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock_irqsave(&fbc->lock);
	return ret;
}
/* Mark a queue as closed.  Wakeup any readers.  Don't remove queued blocks.
 *
 * msg will be the errstr received by any waiters (qread, qbread, etc).  If
 * there is no message, which is what also happens during a natural qclose(),
 * those waiters will simply return 0.  qwriters will always error() on a
 * closed/hungup queue. */
void qhangup(struct queue *q, char *msg)
{
	/* mark it */
	spin_lock_irqsave(&q->lock);
	q->state |= Qclosed;
	if (msg == 0 || *msg == 0)
		q->err[0] = 0;
	else
		strlcpy(q->err, msg, ERRMAX);
	spin_unlock_irqsave(&q->lock);

	/* wake up readers/writers */
	rendez_wakeup(&q->rr);
	rendez_wakeup(&q->wr);
	qwake_cb(q, FDTAP_FILT_HANGUP);
}
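/* Editor's sketch (assumption): how a caller might choose between an errstr
 * hangup and a plain EOF.  The helper name and error string are illustrative
 * only. */
static void conn_teardown(struct queue *q, bool aborted)
{
	if (aborted)
		qhangup(q, "connection aborted");	/* readers error() out */
	else
		qhangup(q, NULL);			/* readers see EOF (return 0) */
}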
static void perfmon_do_cores_status(void *opaque)
{
	struct perfmon_status_env *env = (struct perfmon_status_env *) opaque;
	struct perfmon_cpu_context *cctx = PERCPU_VARPTR(counters_env);
	int coreno = core_id();
	counter_t ccno = env->pa->cores_counters[coreno];

	spin_lock_irqsave(&cctx->lock);
	if (perfmon_is_fixed_event(&env->pa->ev))
		env->pef->cores_values[coreno] =
			read_msr(MSR_CORE_PERF_FIXED_CTR0 + ccno);
	else
		env->pef->cores_values[coreno] =
			read_msr(MSR_IA32_PERFCTR0 + ccno);
	spin_unlock_irqsave(&cctx->lock);
}
/* Given a queue, makes a single block with header_len reserved space in the
 * block main body, and the contents of [offset, len + offset) pointed to in
 * the new block's ext_data. */
struct block *qclone(struct queue *q, int header_len, int len, uint32_t offset)
{
	int ret;
	struct block *newb = block_alloc(header_len, MEM_WAIT);

	/* the while loop should rarely be used: it would require someone
	 * concurrently adding to the queue. */
	do {
		/* TODO: RCU: protecting the q list (b->next) (need read lock) */
		spin_lock_irqsave(&q->lock);
		ret = __blist_clone_to(q->bfirst, newb, len, offset);
		spin_unlock_irqsave(&q->lock);
		if (ret)
			block_add_extd(newb, ret, MEM_WAIT);
	} while (ret);

	return newb;
}
/*
 * Called by a uart interrupt for console input.
 *
 * turn '\r' into '\n' before putting it into the queue.
 */
int kbdcr2nl(struct queue *ignored_queue, int ch)
{
	char *next;

	spin_lock_irqsave(&kbd.lockputc);	/* just a mutex */
	if (ch == '\r' && !kbd.raw)
		ch = '\n';
	next = kbd.iw + 1;
	if (next >= kbd.ie)
		next = kbd.istage;
	if (next != kbd.ir) {
		*kbd.iw = ch;
		kbd.iw = next;
	}
	spin_unlock_irqsave(&kbd.lockputc);
	return 0;
}