/*
 * Acquire a lock of the given type: 'R' (read), 'W' (write), or any other
 * value (e.g. 'S' for shared) which takes both the excl flag and a reader
 * slot.  Blocks the calling LWP until the lock is available.  The LWP
 * scheduler lock is dropped (lwp_LEAVE) while waiting and re-entered via
 * lwp_YIELD on the way out.
 */
static void ObtainLock(struct Lock *lock, char type)
{
    PROCESS pid;
    assert(LWP_CurrentProcess(&pid) == 0);

    /* lazily initialize the lock on first use
     * NOTE(review): this check happens before _access is held -- presumably
     * first use is single-threaded; confirm two LWPs can't race into
     * Lock_Init here. */
    if (!lock->initialized)
        Lock_Init(lock);

    lwp_LEAVE(pid);
    lwp_mutex_lock(&lock->_access);
    {
        /* now start waiting, writers wait until all readers have left, all
         * lockers wait for the excl flag to be cleared */
        /* this is a safe cancellation point because we (should) only hold
         * the access mutex, and we take ourselves off the pending list in the
         * cleanup handler */
        while (lock->excl || (type == 'W' && lock->readers))
            pthread_cond_wait(&lock->wakeup, &lock->_access);

        /* Obtain the correct lock flags, read locks increment readers, write
         * locks set the excl flag and shared locks do both */
        if (type != 'R')
            lock->excl = pid;       /* record the exclusive owner */
        if (type != 'W')
            lock->readers++;

        /* signal other threads, there might be more readers */
        if (type == 'R')
            pthread_cond_broadcast(&lock->wakeup);

        lwp_dbg(LWP_DBG_LOCKS, "%c+ pid %p lock %p\n", type, pid, lock);
    }
    lwp_mutex_unlock(&lock->_access);
    lwp_YIELD(pid);
}
/*
 * LWP-aware wrapper around select(2).  Releases the LWP scheduler lock for
 * the duration of the select so other LWPs can run, then yields on return.
 * Returns select's result, or LWP_EBADPID when not called from an LWP.
 */
int IOMGR_Select(int fds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout)
{
    PROCESS pid;
    struct timeval tv_copy = { 0, 0 };
    struct timeval *tvp = NULL;
    int ret;

    if (LWP_CurrentProcess(&pid))
        return LWP_EBADPID;

    /* select(2) is allowed to modify *timeout, so hand it a private copy;
     * existing programs using LWP rely on their timeval being untouched. */
    if (timeout) {
        tv_copy = *timeout;
        tvp = &tv_copy;
    }

    lwp_LEAVE(pid);
    ret = select(fds, readfds, writefds, exceptfds, tvp);
    lwp_YIELD(pid);

    return ret;
}
/*
 * Release a lock previously obtained with ObtainLock.  type mirrors the
 * obtain call: 'R' drops a reader slot, 'W' clears the excl flag (the
 * caller must be the exclusive owner), anything else (shared) drops both.
 * Wakes one pending locker once the lock is fully clear.
 */
static void ReleaseLock(struct Lock *lock, char type)
{
    PROCESS pid;
    assert(LWP_CurrentProcess(&pid) == 0);

    /* acquire the lock-access mutex */
    lwp_mutex_lock(&lock->_access);

    if (type != 'R') {
        /* only the exclusive owner may release a write/shared lock */
        assert(lock->excl == pid);
        lock->excl = NULL;
    }
    if (type != 'W')
        lock->readers--;

    /* BUG FIX: this statement was missing its terminating semicolon, which
     * made the following if-statement part of the macro/call expression. */
    lwp_dbg(LWP_DBG_LOCKS, "%c- pid %p lock %p\n", type, pid, lock);

    /* if we cleared the lock, signal the next pending locker */
    if (!lock->excl && !lock->readers)
        pthread_cond_signal(&lock->wakeup);

    /* and release the lock-access mutex */
    lwp_mutex_unlock(&lock->_access);
}
/*
 * Main loop of the Rx listener LWP.  Alternates between dispatching timed
 * events (rxevent_RaiseEvents) and waiting in select()/IOMGR_Select() for
 * incoming packets on the sockets in rx_selectMask, then reads and hands
 * each packet to rxi_ReceivePacket.  Loops forever; returns only when a
 * received packet produces a new call (*newcallp set), so this thread can
 * become a server thread.  Side effects: publishes its pid in the global
 * rx_listenerPid and honors the global quitListening flag.
 */
static void rxi_ListenerProc(fd_set * rfds, int *tnop, struct rx_call **newcallp)
{
    afs_uint32 host;
    u_short port;
    struct rx_packet *p = (struct rx_packet *)0;
    osi_socket socket;
    struct clock cv;
    afs_int32 nextPollTime;         /* time to next poll FD before sleeping */
    int lastPollWorked, doingPoll;  /* true iff last poll was useful */
    struct timeval tv, *tvp;
    int code;
#ifdef AFS_NT40_ENV
    int i;
#endif
    PROCESS pid;
    char name[MAXTHREADNAMELENGTH] = "srv_0";

    clock_NewTime();
    lastPollWorked = 0;
    nextPollTime = 0;
    code = LWP_CurrentProcess(&pid);
    if (code) {
        fprintf(stderr, "rxi_Listener: Can't get my pid.\n");
        exit(1);
    }
    /* advertise ourselves so other code can identify the listener thread */
    rx_listenerPid = pid;
    if (swapNameProgram)
        (*swapNameProgram) (pid, "listener", &name[0]);

    for (;;) {
        /* Grab a new packet only if necessary (otherwise re-use the old one) */
        if (p) {
            rxi_RestoreDataBufs(p);
        } else {
            if (!(p = rxi_AllocPacket(RX_PACKET_CLASS_RECEIVE)))
                osi_Panic("rxi_ListenerProc: no packets!");  /* Shouldn't happen */
        }
        /* Wait for the next event time or a packet to arrive. */
        /* event_RaiseEvents schedules any events whose time has come and
         * then atomically computes the time to the next event, guaranteeing
         * that this is positive.  If there is no next event, it returns 0 */
        clock_NewTime();
        if (!rxevent_RaiseEvents(&cv))
            tvp = NULL;     /* no pending event: wait indefinitely */
        else {
            /* It's important to copy cv to tv, because the 4.3 documentation
             * for select threatens that *tv may be updated after a select, in
             * future editions of the system, to indicate how much of the time
             * period has elapsed.  So we shouldn't rely on tv not being altered.
             */
            tv.tv_sec = cv.sec;     /* Time to next event */
            tv.tv_usec = cv.usec;
            tvp = &tv;
        }
        rx_AtomicIncrement(rx_stats.selects, rx_stats_mutex);

        /* rebuild the fd set from the global socket mask on every pass */
        *rfds = rx_selectMask;

        if (lastPollWorked || nextPollTime < clock_Sec()) {
            /* we're catching up, or haven't tried to for a few seconds */
            doingPoll = 1;
            nextPollTime = clock_Sec() + 4;  /* try again in 4 seconds no matter what */
            tv.tv_sec = tv.tv_usec = 0;      /* make sure we poll */
            tvp = &tv;
            /* non-blocking poll: bypass IOMGR and ask the kernel directly */
            code = select((int)(rx_maxSocketNumber + 1), rfds, 0, 0, tvp);
        } else {
            doingPoll = 0;
            /* blocking wait that lets other LWPs run while we sleep */
            code = IOMGR_Select((int)(rx_maxSocketNumber + 1), rfds, 0, 0, tvp);
        }
        lastPollWorked = 0;     /* default is that it didn't find anything */

        /* honor a shutdown request posted by another thread */
        if (quitListening) {
            quitListening = 0;
            LWP_DestroyProcess(pid);
        }

        switch (code) {
        case 0:
            /* Timer interrupt:
             * If it was a timer interrupt then we can assume that
             * the time has advanced by roughly the value of the
             * previous timeout, and that there is now at least
             * one pending event. */
            clock_NewTime();
            break;
        case -1:
            /* select or IOMGR_Select returned failure */
            debugSelectFailure++;   /* update debugging counter */
            clock_NewTime();
            break;
        case -2:
            /* IOMGR_Cancel:
             * IOMGR_Cancel is invoked whenever a new event is
             * posted that is earlier than any existing events.
             * So we re-evaluate the time, and then go back to
             * reschedule events */
            clock_NewTime();
            break;

        default:
            /* Packets have arrived, presumably:
             * If it wasn't a timer interrupt, then no event should have
             * timed out yet (well some event may have, but only just...), so
             * we don't bother looking to see if any have timed out, but just
             * go directly to reading the data packets */
            clock_NewTime();
            if (doingPoll)
                lastPollWorked = 1;
#ifdef AFS_NT40_ENV
            /* On NT the fd_set is an explicit array of ready sockets. */
            for (i = 0; p && i < rfds->fd_count; i++) {
                socket = rfds->fd_array[i];
                if (rxi_ReadPacket(socket, p, &host, &port)) {
                    /* NOTE(review): *newcallp is written here before the
                     * newcallp NULL check below -- presumably NT callers
                     * always pass a non-NULL newcallp; confirm. */
                    *newcallp = NULL;
                    /* rxi_ReceivePacket consumes p and may return a
                     * replacement packet (or NULL) for re-use */
                    p = rxi_ReceivePacket(p, socket, host, port, tnop, newcallp);
                    if (newcallp && *newcallp) {
                        /* a new call was created: this thread leaves the
                         * listener role and becomes a server thread */
                        if (p) {
                            rxi_FreePacket(p);
                        }
                        if (swapNameProgram) {
                            (*swapNameProgram) (rx_listenerPid, name, 0);
                            rx_listenerPid = 0;
                        }
                        return;
                    }
                }
            }
#else
            /* Scan every socket in our range and service the ready ones. */
            for (socket = rx_minSocketNumber; p && socket <= rx_maxSocketNumber; socket++) {
                if (!FD_ISSET(socket, rfds))
                    continue;
                if (rxi_ReadPacket(socket, p, &host, &port)) {
                    p = rxi_ReceivePacket(p, socket, host, port, tnop, newcallp);
                    if (newcallp && *newcallp) {
                        /* a new call was created: this thread leaves the
                         * listener role and becomes a server thread */
                        if (p) {
                            rxi_FreePacket(p);
                        }
                        if (swapNameProgram) {
                            (*swapNameProgram) (rx_listenerPid, name, 0);
                            rx_listenerPid = 0;
                        }
                        return;
                    }
                }
            }
#endif
            break;
        }
    }
    /* NOTREACHED */
}