/*===========================================================================* * do_fs_reply * *===========================================================================*/ static void *do_fs_reply(struct job *job) { struct vmnt *vmp; struct fproc *rfp; if ((vmp = find_vmnt(who_e)) == NULL) panic("Couldn't find vmnt for endpoint %d", who_e); rfp = job->j_fp; if (rfp == NULL || rfp->fp_endpoint == NONE) { printf("VFS: spurious reply from %d\n", who_e); return(NULL); } if (rfp->fp_task != who_e) printf("VFS: expected %d to reply, not %d\n", rfp->fp_task, who_e); *rfp->fp_sendrec = m_in; rfp->fp_task = NONE; vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */ if (rfp->fp_wtid != invalid_thread_id) worker_signal(worker_get(rfp->fp_wtid)); /* Continue this thread */ else printf("VFS: consistency error: reply for finished job\n"); return(NULL); }
/*===========================================================================* * do_reply * *===========================================================================*/ static void do_reply(struct worker_thread *wp) { struct vmnt *vmp = NULL; if(who_e != VM_PROC_NR && (vmp = find_vmnt(who_e)) == NULL) panic("Couldn't find vmnt for endpoint %d", who_e); if (wp->w_task != who_e) { printf("VFS: tid %d: expected %d to reply, not %d\n", wp->w_tid, wp->w_task, who_e); return; } /* It should be impossible to trigger the following case, but it is here for * consistency reasons: worker_stop() resets w_sendrec but not w_task. */ if (wp->w_sendrec == NULL) { printf("VFS: tid %d: late reply from %d ignored\n", wp->w_tid, who_e); return; } *wp->w_sendrec = m_in; wp->w_sendrec = NULL; wp->w_task = NONE; if(vmp) vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */ worker_signal(wp); /* Continue this thread */ }
/*===========================================================================* * do_fs_reply * *===========================================================================*/ static void *do_fs_reply(struct job *job) { struct vmnt *vmp; struct worker_thread *wp; if ((vmp = find_vmnt(who_e)) == NULL) panic("Couldn't find vmnt for endpoint %d", who_e); wp = worker_get(job->j_fp->fp_wtid); if (wp == NULL) { printf("VFS: spurious reply from %d\n", who_e); return(NULL); } if (wp->w_task != who_e) { printf("VFS: expected %d to reply, not %d\n", wp->w_task, who_e); return(NULL); } *wp->w_fs_sendrec = m_in; wp->w_task = NONE; vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */ worker_signal(wp); /* Continue this thread */ return(NULL); }
void FinalizerHandler::finish_collection(STATE) {
  // A GC cycle has completed: move any newly dead objects onto the
  // finalization queue.
  queue_objects();

  // Discard the iterator used during this cycle; operator delete on a
  // null pointer is a no-op, so no guard is needed.
  delete iterator_;
  iterator_ = NULL;

  // Wake the worker thread so it can process the queued objects.
  worker_signal();
}
// Drain all pending finalizers at VM shutdown. Repeatedly stops the world,
// queues every remaining live object for finalization, and hands work to the
// worker thread until nothing is left, then stops the worker thread.
// NOTE(review): assumes the worker thread is still alive unless all lists are
// already empty — confirmed by the bug() guard below.
void FinalizerHandler::finish(STATE, GCToken gct) {
  if(!self_) {
    // Worker thread already gone: only acceptable if there is no work left.
    if(process_list_ || !lists_->empty() || !live_list_->empty()) {
      rubinius::bug("FinalizerHandler worker thread dead during halt");
    } else {
      return;
    }
  }

  finishing_ = true;

  while(true) {
    {
      // All mutators must be stopped while we inspect and refill the queues.
      StopTheWorld stw(state, gct, 0);

      if(!process_list_) {
        // No batch currently being processed; check whether we are done.
        if(live_list_->empty() && lists_->empty()) break;

        // Everything is garbage when halting so keep adding live objects to
        // finalize queue until done.
        if(!live_list_->empty()) {
          for(FinalizeObjects::iterator i = live_list_->begin();
              i != live_list_->end();
              ++i)
          {
            i->queued();
          }

          queue_objects();
        }

        // Start processing the next queued batch, if any.
        first_process_item();
        if(!process_list_) break;
      }
    }

    // Let the worker thread run the finalizers for the current batch.
    worker_signal();

    {
      utilities::thread::Mutex::LockGuard lg(supervisor_lock_);

      // Become GC-independent while blocked, so the worker can proceed.
      state->vm()->set_call_frame(0);
      GCIndependent indy(state);

      if(process_list_) supervisor_wait();
    }
  }

  // Sanity check: the loop must not exit with work still pending.
  if(!lists_->empty() || !live_list_->empty() || process_list_ != NULL)
    rubinius::bug("FinalizerHandler exiting with pending finalizers");

  stop_thread(state);
}
// Ask the finalizer worker thread to exit and wait for it to terminate.
void FinalizerHandler::stop_thread(STATE) {
  SYNC(state);

  // Nothing to do if the worker thread is not running.
  if(!self_) return;

  pthread_t thread_handle = self_->os_thread();

  {
    utilities::thread::Mutex::LockGuard lg(worker_lock_);
    // Thread might have already been stopped
    exit_ = true;
    worker_signal();
  }

  // Block until the worker has actually exited before clearing self_.
  void* exit_status;
  pthread_join(thread_handle, &exit_status);

  self_ = NULL;
}
void tll_downgrade(tll_t *tllp)
{
/* Downgrade three-level-lock tll from write-only to read-serialized, or from
 * read-serialized to read-only. Caveat: as we can't know whether the next
 * lock type on the write queue is actually read-only or write-only, we can't
 * grant access to that type. It will be granted access once we unlock. Also,
 * because we apply write-bias, we can't grant access to read-serialized
 * either, unless nothing is queued on the write-only stack.
 */

  /* Only the current owner may downgrade, and only from a lock state that
   * has an owner (TLL_WRITE or TLL_READSER). */
  assert(self != NULL);
  assert(tllp != NULL);
  assert(tllp->t_owner == self);

  switch(tllp->t_current) {
    case TLL_WRITE:
	/* write-only -> read-serialized; we remain the owner. */
	tllp->t_current = TLL_READSER;
	break;
    case TLL_READSER:
	/* If nothing is queued on write-only, but there is a pending lock
	 * requesting read-serialized, grant it and keep the lock type. */
	if (tllp->t_write == NULL && tllp->t_serial != NULL) {
		tllp->t_owner = tllp->t_serial;
		tllp->t_serial = tllp->t_serial->w_next;	/* Remove head */
		tllp->t_owner->w_next = NULL;
		/* Mark a pending grant; the new owner clears TLL_PEND when it
		 * runs (see tll_append). */
		assert(!(tllp->t_status & TLL_PEND));
		tllp->t_status |= TLL_PEND;
		worker_signal(tllp->t_owner);
	} else {
		/* read-serialized -> read-only; read-only locks have no
		 * owner. */
		tllp->t_current = TLL_READ;
		tllp->t_owner = NULL;
	}
	tllp->t_readonly++;	/* Either way, there's one more read-only lock */
	break;
    default:
	panic("VFS: Incorrect lock state");
  }

  /* Invariant: only write-only and read-serialized locks have an owner. */
  if (tllp->t_current != TLL_WRITE && tllp->t_current != TLL_READSER)
	assert(tllp->t_owner == NULL);
}
int tll_unlock(tll_t *tllp)
{
/* Unlock a previously locked three-level-lock tll. Releases our hold on the
 * lock, hands ownership to the next queued requester if possible (write-only
 * requests take priority over read-serialized), and signals the new owner.
 * Always returns OK.
 */
  int signal_owner = 0;	/* Set when a queued/upgrading thread must be woken */

  assert(self != NULL);
  assert(tllp != NULL);

  if (tllp->t_owner == NULL || tllp->t_owner != self) {
	/* This unlock must have been done by a read-only lock */
	tllp->t_readonly--;
	assert(tllp->t_readonly >= 0);
	assert(tllp->t_current == TLL_READ || tllp->t_current == TLL_READSER);

	/* If a read-serialized lock is trying to upgrade and there are no more
	 * read-only locks, the lock can now be upgraded to write-only */
	if ((tllp->t_status & TLL_UPGR) && tllp->t_readonly == 0)
		signal_owner = 1;
  }

  /* A write-only holder excludes all readers. */
  if (tllp->t_owner == self && tllp->t_current == TLL_WRITE)
	assert(tllp->t_readonly == 0);

  if(tllp->t_owner == self || (tllp->t_owner == NULL && tllp->t_readonly == 0)){
	/* Let another read-serialized or write-only request obtain access.
	 * Write-only has priority, but only after the last read-only access
	 * has left. Read-serialized access will only be granted if there is
	 * no pending write-only access request.
	 */
	struct worker_thread *new_owner;
	new_owner = NULL;
	tllp->t_owner = NULL;	/* Remove owner of lock */

	if (tllp->t_write != NULL) {
		/* Write-biased: the write queue head goes first, but only
		 * once all read-only holders are gone. */
		if (tllp->t_readonly == 0) {
			new_owner = tllp->t_write;
			tllp->t_write = tllp->t_write->w_next;
		}
	} else if (tllp->t_serial != NULL) {
		/* No write-only waiters; grant the next read-serialized
		 * request. */
		new_owner = tllp->t_serial;
		tllp->t_serial = tllp->t_serial->w_next;
	}

	/* New owner is head of queue or NULL if no proc is available */
	if (new_owner != NULL) {
		tllp->t_owner = new_owner;
		tllp->t_owner->w_next = NULL;
		assert(tllp->t_owner != self);
		signal_owner = 1;
	}
  }

  /* If no one is using this lock, mark it as not in use */
  if (tllp->t_owner == NULL) {
	if (tllp->t_readonly == 0)
		tllp->t_current = TLL_NONE;
	else
		tllp->t_current = TLL_READ;
  }

  /* Ownerless lock states must not carry a stale owner pointer, unless a
   * just-granted owner is about to run (signal_owner set). */
  if (tllp->t_current == TLL_NONE || tllp->t_current == TLL_READ) {
	if (!signal_owner) {
		tllp->t_owner = NULL;
	}
  }

  /* If we have a new owner or the current owner managed to upgrade its lock,
   * tell it to start/continue running */
  if (signal_owner) {
	/* TLL_PEND marks the grant-in-flight; the woken thread clears it
	 * in tll_append. */
	assert(!(tllp->t_status & TLL_PEND));
	tllp->t_status |= TLL_PEND;
	worker_signal(tllp->t_owner);
  }

  return(OK);
}
static int tll_append(tll_t *tllp, tll_access_t locktype)
{
/* Append the calling worker thread (self) to the proper wait queue of tll,
 * block until we are granted the lock, then record ourselves as holder of a
 * locktype lock. Returns OK.
 */
  struct worker_thread *queue;

  assert(self != NULL);
  assert(tllp != NULL);
  assert(locktype != TLL_NONE);

  /* Read-only and write-only requests go to the write queue. Read-serialized
   * requests go to the serial queue. Then we wait for an event to signal it's
   * our turn to go. */
  queue = NULL;
  if (locktype == TLL_READ || locktype == TLL_WRITE) {
	if (tllp->t_write == NULL)
		tllp->t_write = self;	/* We are the new queue head */
	else
		queue = tllp->t_write;	/* Append behind existing waiters */
  } else {
	if (tllp->t_serial == NULL)
		tllp->t_serial = self;	/* We are the new queue head */
	else
		queue = tllp->t_serial;	/* Append behind existing waiters */
  }

  if (queue != NULL) {	/* Traverse to end of queue */
	while (queue->w_next != NULL)
		queue = queue->w_next;
	queue->w_next = self;
  }
  self->w_next = NULL; /* End of queue */

  /* Now wait for the event it's our turn */
  worker_wait();

  /* We were granted the lock (by tll_unlock or tll_downgrade); take it. */
  tllp->t_current = locktype;
  tllp->t_status &= ~TLL_PEND;	/* Grant is no longer in flight */
  tllp->t_owner = self;

  if (tllp->t_current == TLL_READ) {
	/* Read-only locks are counted, not owned. */
	tllp->t_readonly++;
	tllp->t_owner = NULL;
  } else if (tllp->t_current == TLL_WRITE)
	assert(tllp->t_readonly == 0);

  /* Due to the way upgrading and downgrading works, read-only requests are
   * scheduled to run after a downgraded lock is released (because they are
   * queued on the write-only queue which has priority). This results from the
   * fact that the downgrade operation cannot know whether the next locktype on
   * the write-only queue is really write-only or actually read-only. However,
   * that means that read-serialized requests stay queued, while they could run
   * simultaneously with read-only requests. See if there are any and grant
   * the head request access */
  if (tllp->t_current == TLL_READ && tllp->t_serial != NULL) {
	tllp->t_owner = tllp->t_serial;
	tllp->t_serial = tllp->t_serial->w_next;
	tllp->t_owner->w_next = NULL;
	assert(!(tllp->t_status & TLL_PEND));
	tllp->t_status |= TLL_PEND;
	worker_signal(tllp->t_owner);
  }

  return(OK);
}