/*
 * _sync_buf()
 *	Sync back buffer if dirty
 *
 * Write back the 1st sector, or the whole buffer, as appropriate.
 */
static void
_sync_buf(struct buf *b, int from_qio)
{
	ASSERT_DEBUG(b->b_flags & (B_SEC0 | B_SECS), "_sync_buf: not ref'ed");

	/*
	 * Skip it if not dirty
	 */
	if (!(b->b_flags & B_DIRTY)) {
		return;
	}

	/*
	 * Do the I/O--whole buffer, or just 1st sector if that was
	 * the only sector referenced.
	 */
	if (!from_qio) {
		get(b);
	}
	if (b->b_flags & B_SECS) {
		write_secs(b->b_start, b->b_data, b->b_nsec);
	} else {
		write_secs(b->b_start, b->b_data, 1);
	}
	p_lock(&b->b_lock);
	b->b_flags &= ~B_DIRTY;
	v_lock(&b->b_lock);

	/*
	 * If there are possible handles, clear them too
	 */
	if (b->b_handles) {
		bzero(b->b_handles, b->b_nhandle * sizeof(void *));
	}
}
/*
 * v_sema()
 *	Leave semaphore
 */
void
v_sema(struct sema *s)
{
	int val;

	p_lock(&s->s_locked);
	val = (s->s_val += 1);
	v_lock(&s->s_locked);
	if (val <= 0) {
		(void)send(s->s_portmaster, FS_SEEK);
	}
}
/*
 * check_called_func_count()
 *	Compare the recorded call counters against an expected snapshot
 *
 * Returns true when the counters match, false otherwise.
 */
static bool
check_called_func_count(uint64_t *counter)
{
	bool ret;

	p_lock();
	ret = (memcmp(called_counter, counter, sizeof(called_counter)) == 0);
	p_unlock();
	return ret;
}
/*
 * p_sema()
 *	Enter semaphore
 */
int
p_sema(struct sema *s)
{
	int val;

	p_lock(&s->s_locked);
	val = (s->s_val -= 1);
	v_lock(&s->s_locked);
	if (val >= 0) {
		return(0);
	}
	if (send(s->s_port, FS_READ)) {
		return(-1);
	}
	return(0);
}
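/*
 * Illustrative usage sketch (not part of the original source): callers
 * are expected to bracket access to a shared resource with the
 * p_sema()/v_sema() pair above.  The semaphore name "buf_sema" and the
 * wrapper with_buf_sema() are hypothetical placeholders.
 */
static struct sema buf_sema;

static int
with_buf_sema(void)
{
	if (p_sema(&buf_sema)) {
		return(-1);	/* Could not enter semaphore */
	}

	/* ...access the state protected by buf_sema here... */

	v_sema(&buf_sema);
	return(0);
}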
/*
 * get()
 *	Access buffer, interlocking with BG
 */
static void
get(struct buf *b)
{
	if (!(b->b_flags & B_BUSY)) {
		return;
	}
	p_lock(&b->b_lock);
	if (!(b->b_flags & B_BUSY)) {
		v_lock(&b->b_lock);
		return;
	}
	b->b_flags |= B_WANT;
	v_lock(&b->b_lock);
	mutex_thread(0);
	ASSERT_DEBUG(!(b->b_flags & (B_WANT | B_BUSY)),
		"get: still busy/wanted");
}
/*
 * bg_thread()
 *	Endless loop to take QIO operations and execute them
 */
static void
bg_thread(int dummy)
{
	uint next = 0, want;
	struct qio *q;
	struct buf *b;

	/*
	 * Become ephemeral
	 */
	(void)sched_op(SCHEDOP_EPHEM, 0);

	/*
	 * Endless loop, serving background requests
	 */
	for (;;) {
		/*
		 * Get next operation
		 */
		mutex_thread(0);
		q = &qios[next++];
		if (next >= NQIO) {
			next = 0;
		}

		/*
		 * Execute it
		 */
		exec_qio(b = q->q_buf, q->q_op);

		/*
		 * Flag completion
		 */
		q->q_op = 0;
		p_lock(&b->b_lock);
		want = b->b_flags & B_WANT;
		b->b_flags &= ~(B_BUSY | B_WANT);
		v_lock(&b->b_lock);
		if (want) {
			mutex_thread(fg_pid);
		}
	}
}
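/*
 * Illustrative sketch of the submitting side (not shown in this
 * section): a foreground routine would claim the next qio slot, mark
 * the buffer busy, and wake the BG thread.  The names "bg_pid",
 * "q_next", and "queue_io" are assumptions for illustration only; the
 * real submission path may differ.
 */
static uint q_next;

static void
queue_io(struct buf *b, int op)
{
	struct qio *q;

	/*
	 * Claim the next slot, wrapping the same way bg_thread() does
	 */
	q = &qios[q_next++];
	if (q_next >= NQIO) {
		q_next = 0;
	}

	/*
	 * Mark the buffer busy so get() will interlock with the BG thread
	 */
	p_lock(&b->b_lock);
	b->b_flags |= B_BUSY;
	v_lock(&b->b_lock);

	/*
	 * Hand the operation to the BG thread and wake it
	 */
	q->q_buf = b;
	q->q_op = op;
	mutex_thread(bg_pid);
}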
static void
called_func_count(enum func_type type)
{
	p_lock();
	switch (type) {
	case PIPELINE_FUNC_FETCH:
	case PIPELINE_FUNC_MAIN:
	case PIPELINE_FUNC_THROW:
		/*
		 * These pipeline entry points are only counted once;
		 * skip the increment if they have already been seen.
		 */
		if (called_counter[type] != 0) {
			goto done;
		}
		break;
	default:
		break;
	}
	called_counter[type]++;
done:
	p_unlock();
}
static void
called_counter_reset(void)
{
	p_lock();
	memset(&called_counter, 0, sizeof(called_counter));
	p_unlock();
}
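/*
 * Hypothetical test sketch tying the three helpers above together:
 * reset the counters, exercise the instrumented code, then compare the
 * result against an expected snapshot.  NUM_FUNC_TYPES and the layout
 * of expected[] are assumptions; the real enum size is not shown in
 * this section.
 */
static void
example_counter_check(void)
{
	uint64_t expected[NUM_FUNC_TYPES] = { 0 };

	called_counter_reset();
	called_func_count(PIPELINE_FUNC_FETCH);
	called_func_count(PIPELINE_FUNC_FETCH);	/* counted only once */
	expected[PIPELINE_FUNC_FETCH] = 1;

	if (!check_called_func_count(expected)) {
		/* Counters differ from the expected snapshot */
	}
}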