void cm_InitDaemon(int nDaemons)
{
    static osi_once_t once;
    long pid;
    thread_t phandle;
    int i;

    cm_nDaemons = (nDaemons > CM_MAX_DAEMONS) ? CM_MAX_DAEMONS : nDaemons;

    if (osi_Once(&once)) {
        lock_InitializeRWLock(&cm_daemonLock, "cm_daemonLock",
                              LOCK_HIERARCHY_DAEMON_GLOBAL);
        osi_EndOnce(&once);

        /* creating IP Address Change monitor daemon */
        phandle = thrd_Create((SecurityAttrib) 0, 0,
                              (ThreadFunc) cm_IpAddrDaemon, 0, 0, &pid, "cm_IpAddrDaemon");
        osi_assertx(phandle != NULL, "cm_IpAddrDaemon thread creation failure");
        thrd_CloseHandle(phandle);

        /* creating pinging daemon */
        phandle = thrd_Create((SecurityAttrib) 0, 0,
                              (ThreadFunc) cm_Daemon, 0, 0, &pid, "cm_Daemon");
        osi_assertx(phandle != NULL, "cm_Daemon thread creation failure");
        thrd_CloseHandle(phandle);

        for (i = 0; i < cm_nDaemons; i++) {
            phandle = thrd_Create((SecurityAttrib) 0, 0,
                                  (ThreadFunc) cm_BkgDaemon, (LPVOID)i, 0,
                                  &pid, "cm_BkgDaemon");
            osi_assertx(phandle != NULL, "cm_BkgDaemon thread creation failure");
            thrd_CloseHandle(phandle);
        }
    }
}
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->readers++;

    osi_assertx(lockp->readers == 1, "read lock not one");

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
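/*
 * Usage sketch (illustrative, not part of the original source): the
 * write-then-downgrade pattern.  Unlike an upgrade, the downgrade is
 * atomic; no other writer can slip in between giving up exclusivity
 * and becoming a reader.  The structure and field names below are
 * hypothetical.
 */
struct example_table {
    osi_rwlock_t rw;
    int version;
};

static int
example_publish_then_read(struct example_table *tp)
{
    int v;

    lock_ObtainWrite(&tp->rw);
    tp->version++;                      /* mutate under the write lock */
    lock_ConvertWToR(&tp->rw);          /* atomically become the sole reader */
    v = tp->version;                    /* guaranteed to observe our own update */
    lock_ReleaseRead(&tp->rw);
    return v;
}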
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    }
    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
        }
    }
}
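/*
 * Usage sketch (illustrative, not part of the original source): with
 * lockOrderValidation enabled, a thread must acquire locks in
 * non-decreasing level order or the verifier panics.  The level
 * constants below are hypothetical stand-ins for the LOCK_HIERARCHY_*
 * values used elsewhere; one-time initialization is shown inline for
 * brevity.
 */
#define EXAMPLE_LEVEL_LOW   10          /* hypothetical */
#define EXAMPLE_LEVEL_HIGH  20          /* hypothetical */

static osi_rwlock_t example_lowLock, example_highLock;

static void
example_ordered_acquire(void)
{
    lock_InitializeRWLock(&example_lowLock, "example_lowLock", EXAMPLE_LEVEL_LOW);
    lock_InitializeRWLock(&example_highLock, "example_highLock", EXAMPLE_LEVEL_HIGH);

    lock_ObtainRead(&example_lowLock);          /* level 10 first ... */
    lock_ObtainWrite(&example_highLock);        /* ... then level 20: allowed */
    /* obtaining example_lowLock again here would trip the "already held"
     * panic; obtaining any lower-level lock would trip the hierarchy panic */
    lock_ReleaseWrite(&example_highLock);
    lock_ReleaseRead(&example_lowLock);
}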
osi_queueData_t *osi_QDAlloc(void)
{
    osi_queueData_t *tp;
    int i;

    thrd_EnterCrit(&osi_qdcrit);
    if ((tp = osi_QDFreeListp) != NULL) {
        osi_QDFreeListp = (osi_queueData_t *) tp->q.nextp;
    }
    else {
        /* need to allocate a block more */
        tp = (osi_queueData_t *) malloc(OSI_NQDALLOC * sizeof(osi_queueData_t));

        /* leave last guy off of the free list; this is the one we'll
         * return.
         */
        for (i = 0; i < OSI_NQDALLOC - 1; i++, tp++) {
            tp->q.nextp = (osi_queue_t *) osi_QDFreeListp;
            tp->datap = NULL;
            osi_QDFreeListp = tp;
        }

        /* when we get here, tp is pointing to the last dude allocated.
         * This guy wasn't put on the free list, so we can return him now.
         */
        tp->datap = NULL;
    }
    thrd_LeaveCrit(&osi_qdcrit);
    osi_assertx(tp->datap == NULL, "queue freelist screwup");

    return tp;
}
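/*
 * Usage sketch (illustrative, not part of the original source): nodes
 * from osi_QDAlloc() carry an opaque datap pointer and are recycled
 * through the free list.  osi_QDFree() is assumed to be the matching
 * release routine from the same queue package.
 */
static void
example_qd_cycle(void *payload)
{
    osi_queueData_t *qdp;

    qdp = osi_QDAlloc();        /* datap is guaranteed NULL on return */
    qdp->datap = payload;

    /* ... link qdp->q into a queue, use it, then unlink it ... */

    qdp->datap = NULL;          /* clear before freeing (hypothetical convention) */
    osi_QDFree(qdp);            /* assumed: returns the node to the free list */
}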
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    if (--(lockp->readers) == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    } else {
        osi_assertx(lockp->readers > 0, "read lock underflow");

        lockp->waiters++;
        osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}
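/*
 * Usage sketch (illustrative, not part of the original source): the
 * read-check-upgrade pattern.  Because lock_ConvertRToW() may sleep
 * while other readers drain (the lock is not held continuously across
 * the upgrade), state examined under the read lock should be
 * re-validated afterwards.  lock_ObtainRead() and the field names are
 * assumptions of this example.
 */
struct example_entry {
    osi_rwlock_t rw;
    int dirty;
};

static void
example_flush_if_dirty(struct example_entry *ep)
{
    lock_ObtainRead(&ep->rw);
    if (!ep->dirty) {
        lock_ReleaseRead(&ep->rw);
        return;
    }
    lock_ConvertRToW(&ep->rw);          /* may block until we are exclusive */
    if (ep->dirty) {                    /* re-check after the upgrade */
        /* ... write the entry back ... */
        ep->dirty = 0;
    }
    lock_ReleaseWrite(&ep->rw);
}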
void cm_InitDaemon(int nDaemons)
{
    static osi_once_t once;
    pthread_t phandle;
    pthread_attr_t tattr;
    int pstatus;
    int i;

    pthread_attr_init(&tattr);
    pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);

    cm_nDaemons = (nDaemons > CM_MAX_DAEMONS) ? CM_MAX_DAEMONS : nDaemons;

    if (osi_Once(&once)) {
        /* creating IP Address Change monitor daemon */
        pstatus = pthread_create(&phandle, &tattr, cm_IpAddrDaemon, 0);
        osi_assertx(pstatus == 0, "cm_IpAddrDaemon thread creation failure");

        /* creating pinging daemon */
        pstatus = pthread_create(&phandle, &tattr, cm_Daemon, 0);
        osi_assertx(pstatus == 0, "cm_Daemon thread creation failure");

        pstatus = pthread_create(&phandle, &tattr, cm_LockDaemon, 0);
        osi_assertx(pstatus == 0, "cm_LockDaemon thread creation failure");

        cm_bkgListpp = malloc(nDaemons * sizeof(void *));
        cm_bkgListEndpp = malloc(nDaemons * sizeof(void *));
        cm_bkgQueueCountp = malloc(nDaemons * sizeof(afs_uint64));
        cm_daemonLockp = malloc(nDaemons * sizeof(osi_rwlock_t));

        for (i = 0; i < cm_nDaemons; i++) {
            lock_InitializeRWLock(&cm_daemonLockp[i], "cm_daemonLock",
                                  LOCK_HIERARCHY_DAEMON_GLOBAL);
            cm_bkgListpp[i] = cm_bkgListEndpp[i] = NULL;
            cm_bkgQueueCountp[i] = 0;
            pstatus = pthread_create(&phandle, &tattr, cm_BkgDaemon, (LPVOID)(LONG_PTR)i);
            osi_assertx(pstatus == 0, "cm_BkgDaemon thread creation failure");
        }
        osi_EndOnce(&once);
    }
    pthread_attr_destroy(&tattr);
}
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
/*! \brief End an RPC operation

  \see smb_RPC_BeginOp()
 */
afs_int32
smb_RPC_EndOp(smb_rpc_t * rpcp)
{
    lock_ObtainMutex(&rpcp->fidp->mx);
    osi_assertx(rpcp->fidp->flags & SMB_FID_RPC_INCALL,
                "RPC_EndOp() call without RPC_BeginOp()");
    rpcp->fidp->flags &= ~SMB_FID_RPC_INCALL;
    lock_ReleaseMutex(&rpcp->fidp->mx);

    osi_Wakeup((LONG_PTR) rpcp);

    return 0;
}
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);
        return 0;       /* unknown lock type: treat as "lock not obtained",
                         * matching the no-op behavior of the other routines */
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
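/*
 * Usage sketch (illustrative, not part of the original source): the
 * non-blocking acquire-or-defer pattern.  lock_TryWrite() returns
 * nonzero only when the write lock was obtained, in which case the
 * caller owns it and must release it.
 */
static int
example_try_update(osi_rwlock_t *rwp)
{
    if (!lock_TryWrite(rwp))
        return 0;               /* contended; caller retries or defers the work */

    /* ... perform the update under the write lock ... */

    lock_ReleaseWrite(rwp);
    return 1;
}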
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
/*! \brief Begin an RPC operation

  While generally we receive RPC requests one at a time, we have to
  protect against receiving multiple requests in parallel since
  there's nothing really preventing that from happening.

  This should be called before calling any of the smb_RPC_*()
  functions.  If the return value is non-zero, it should be
  considered unsafe to call any smb_RPC_*() function.

  Each successful call to smb_RPC_BeginOp() should be coupled with a
  call to smb_RPC_EndOp().

  \note Should be called with rpcp->fidp->mx locked.
 */
afs_int32
smb_RPC_BeginOp(smb_rpc_t * rpcp)
{
    if (rpcp == NULL)
        return CM_ERROR_INVAL;

    osi_assertx(rpcp->fidp, "No fidp assigned to smb_rpc_t");
    lock_AssertMutex(&rpcp->fidp->mx);

    while (rpcp->fidp->flags & SMB_FID_RPC_INCALL) {
        osi_SleepM((LONG_PTR) rpcp, &rpcp->fidp->mx);
        lock_ObtainMutex(&rpcp->fidp->mx);
    }

    rpcp->fidp->flags |= SMB_FID_RPC_INCALL;
    return 0;
}
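/*
 * Usage sketch (illustrative only, not part of the original source):
 * every successful smb_RPC_BeginOp() must be paired with an
 * smb_RPC_EndOp(), and BeginOp must be called with fidp->mx held.
 * The helper name and the write-only round trip below are assumptions
 * for the example; the real transact path is smb_RPCNmpipeTransact().
 */
static afs_int32
example_rpc_write(smb_fid_t *fidp, char *reqp, afs_uint32 reqlen, cm_user_t *userp)
{
    smb_rpc_t *rpcp;
    afs_int32 code;

    lock_ObtainMutex(&fidp->mx);
    rpcp = fidp->rpcp;
    code = smb_RPC_BeginOp(rpcp);       /* serializes concurrent RPCs on this fid */
    lock_ReleaseMutex(&fidp->mx);
    if (code)
        return code;                    /* unsafe to call any smb_RPC_*() now */

    code = smb_RPC_PrepareWrite(rpcp);
    if (code == 0)
        code = smb_RPC_WritePacket(rpcp, reqp, reqlen, userp);

    smb_RPC_EndOp(rpcp);                /* wakes threads blocked in BeginOp */
    return code;
}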
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
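/*
 * Usage sketch (illustrative, not part of the original source): the
 * osi_Sleep*() routines atomically release the lock and sleep on an
 * address, but they do NOT re-acquire the lock on wakeup; the caller
 * must re-obtain it and re-test its condition, as cm_BkgDaemon()
 * below does.  The queue variables here are hypothetical.
 */
static void *example_queuep;                    /* hypothetical queue head */
static osi_rwlock_t example_queueLock;

static void *
example_wait_for_item(void)
{
    void *itemp;

    lock_ObtainWrite(&example_queueLock);
    while (example_queuep == NULL) {
        /* atomically drops example_queueLock and sleeps on the address */
        osi_SleepW((LONG_PTR) &example_queuep, &example_queueLock);
        lock_ObtainWrite(&example_queueLock);   /* caller re-obtains the lock */
    }
    itemp = example_queuep;
    example_queuep = NULL;                      /* simplified one-slot dequeue */
    lock_ReleaseWrite(&example_queueLock);
    return itemp;
}

/* A producer would store the item and then call
 * osi_Wakeup((LONG_PTR) &example_queuep) to rouse the sleeper. */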
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    lockp->readers--;

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        osi_assertx(lockp->readers >= 0, "read lock underflow");

        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
afs_int32
smb_RPCNmpipeTransact(smb_fid_t *fidp, smb_vc_t *vcp, smb_tran2Packet_t *p, smb_packet_t *op)
{
    smb_tran2Packet_t *outp = NULL;
    struct smb_rpc *rpcp;
    afs_int32 code = 0;
    cm_user_t * userp = NULL;
    smb_user_t * uidp = NULL;
    int len;

    osi_Log0(smb_logp, "smb_RPCNmpipeTransact() begin");

    uidp = smb_FindUID(vcp, p->uid, 0);
    if (!uidp)
        return CM_ERROR_BADSMB;
    userp = smb_GetUserFromUID(uidp);
    osi_assertx(userp != NULL, "null cm_user_t");

    if (uidp && uidp->unp) {
        osi_Log3(afsd_logp, "RPC Transact uid %d user %x name %S",
                 uidp->userID, userp,
                 osi_LogSaveClientString(afsd_logp, uidp->unp->name));
    } else {
        if (uidp)
            osi_Log2(afsd_logp, "RPC Transact uid %d user %x no name",
                     uidp->userID, userp);
        else
            osi_Log1(afsd_logp, "RPC Transact no uid user %x no name", userp);
    }

    lock_ObtainMutex(&fidp->mx);
    rpcp = fidp->rpcp;
    code = smb_RPC_BeginOp(rpcp);
    if (code) {
        osi_Log0(smb_logp, "Can't begin RPC op. Aborting");
        lock_ReleaseMutex(&fidp->mx);

        smb_ReleaseUID(uidp);
        cm_ReleaseUser(userp);
        return code;
    }

    osi_assertx((fidp->flags & SMB_FID_RPC), "FID wasn't setup for RPC");
    osi_assertx(fidp->rpcp, "smb_rpc_t not associated with RPC FID");
    lock_ReleaseMutex(&fidp->mx);

    code = smb_RPC_PrepareWrite(rpcp);
    if (code)
        goto done;

    code = smb_RPC_WritePacket(rpcp, p->datap, p->totalData, userp);
    if (code)
        goto done;

    code = smb_RPC_PrepareRead(rpcp);
    if (code)
        goto done;

    len = smb_RPC_ReadPacketLength(rpcp, p->maxReturnData);

    outp = smb_GetTran2ResponsePacket(vcp, p, op, 0, len);
    if (len > 0) {
        code = smb_RPC_ReadPacket(rpcp, outp->datap, len);
        if (code == CM_ERROR_RPC_MOREDATA) {
            outp->error_code = CM_ERROR_RPC_MOREDATA;
        }
    }

    if (code == 0 || code == CM_ERROR_RPC_MOREDATA)
        smb_SendTran2Packet(vcp, outp, op);

    smb_FreeTran2Packet(outp);

 done:
    smb_RPC_EndOp(rpcp);

    osi_Log1(smb_logp, "smb_RPCNmpipeTransact() end code=%d", code);

    if (uidp)
        smb_ReleaseUID(uidp);
    if (userp)
        cm_ReleaseUser(userp);

    return code;
}
/**
   Handle SMB_COM_READ_ANDX for an RPC fid

   Called from smb_ReceiveV3ReadX to handle RPC descriptor reads
 */
afs_int32
smb_RPCV3Read(smb_fid_t *fidp, smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *outp)
{
    smb_rpc_t *rpcp;
    long count;
    afs_int32 code;
    char *op;
    cm_user_t *userp;
    smb_user_t *uidp;

    count = smb_GetSMBParm(inp, 5);

    uidp = smb_FindUID(vcp, ((smb_t *)inp)->uid, 0);
    if (!uidp)
        return CM_ERROR_BADSMB;
    userp = smb_GetUserFromUID(uidp);
    osi_assertx(userp != NULL, "null cm_user_t");

    osi_Log3(smb_logp, "smb_RPCV3Read for user[0x%p] fid[0x%p (%d)]",
             userp, fidp, fidp->fid);

    if (uidp && uidp->unp) {
        osi_Log3(afsd_logp, "RPC uid %d user %x name %S",
                 uidp->userID, userp,
                 osi_LogSaveClientString(afsd_logp, uidp->unp->name));
    } else {
        if (uidp)
            osi_Log2(afsd_logp, "RPC uid %d user %x no name", uidp->userID, userp);
        else
            osi_Log1(afsd_logp, "RPC no uid user %x no name", userp);
    }

    lock_ObtainMutex(&fidp->mx);
    rpcp = fidp->rpcp;
    code = smb_RPC_BeginOp(rpcp);
    lock_ReleaseMutex(&fidp->mx);

    if (code) {
        if (uidp)
            smb_ReleaseUID(uidp);
        cm_ReleaseUser(userp);
        return code;
    }

    code = smb_RPC_PrepareRead(rpcp);
    if (uidp) {
        smb_ReleaseUID(uidp);
    }
    if (code) {
        cm_ReleaseUser(userp);
        smb_RPC_EndOp(rpcp);
        return code;
    }

    count = smb_RPC_ReadPacketLength(rpcp, count);

    /* 0 and 1 are reserved for request chaining, were setup by our caller,
     * and will be further filled in after we return.
     */
    smb_SetSMBParm(outp, 2, 0); /* remaining bytes, for pipes */
    smb_SetSMBParm(outp, 3, 0); /* resvd */
    smb_SetSMBParm(outp, 4, 0); /* resvd */
    smb_SetSMBParm(outp, 5, count);     /* # of bytes we're going to read */
    /* fill in #6 when we have all the parameters' space reserved */
    smb_SetSMBParm(outp, 7, 0); /* resv'd */
    smb_SetSMBParm(outp, 8, 0); /* resv'd */
    smb_SetSMBParm(outp, 9, 0); /* resv'd */
    smb_SetSMBParm(outp, 10, 0);        /* resv'd */
    smb_SetSMBParm(outp, 11, 0);        /* reserved */

    /* get op ptr after putting in the last parm, since otherwise we don't
     * know where the data really is.
     */
    op = smb_GetSMBData(outp, NULL);

    /* now fill in offset from start of SMB header to first data byte (to op) */
    smb_SetSMBParm(outp, 6, ((int) (op - outp->data)));

    /* set the packet data length the count of the # of bytes */
    smb_SetSMBDataLength(outp, count);

    smb_RPC_ReadPacket(rpcp, op, count);

    smb_RPC_EndOp(rpcp);

    /* and cleanup things */
    cm_ReleaseUser(userp);

    return 0;
}
void cm_BkgDaemon(void * parm)
{
    cm_bkgRequest_t *rp;
    afs_int32 code;
    char name[32] = "";
    long daemonID = (long)parm;

    snprintf(name, sizeof(name), "cm_BkgDaemon_ShutdownEvent%ld", daemonID);
    cm_BkgDaemon_ShutdownEvent[daemonID] = thrd_CreateEvent(NULL, FALSE, FALSE, name);
    if ( GetLastError() == ERROR_ALREADY_EXISTS )
        afsi_log("Event Object Already Exists: %s", name);

    rx_StartClientThread();

    lock_ObtainWrite(&cm_daemonLock);
    while (daemon_ShutdownFlag == 0) {
        if (powerStateSuspended) {
            Sleep(1000);
            continue;
        }
        if (!cm_bkgListEndp) {
            osi_SleepW((LONG_PTR)&cm_bkgListp, &cm_daemonLock);
            lock_ObtainWrite(&cm_daemonLock);
            continue;
        }

        /* we found a request */
        for (rp = cm_bkgListEndp; rp; rp = (cm_bkgRequest_t *) osi_QPrev(&rp->q))
        {
            if (cm_ServerAvailable(&rp->scp->fid, rp->userp) &&
                !(rp->scp->flags & CM_SCACHEFLAG_DATASTORING))
                break;
        }
        if (rp == NULL) {
            /* we couldn't find a request that we could process at the current time */
            lock_ReleaseWrite(&cm_daemonLock);
            Sleep(1000);
            lock_ObtainWrite(&cm_daemonLock);
            continue;
        }

        osi_QRemoveHT((osi_queue_t **) &cm_bkgListp, (osi_queue_t **) &cm_bkgListEndp, &rp->q);
        osi_assertx(cm_bkgQueueCount-- > 0, "cm_bkgQueueCount 0");
        lock_ReleaseWrite(&cm_daemonLock);

        osi_Log1(afsd_logp,"cm_BkgDaemon processing request 0x%p", rp);
#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp,"cm_BkgDaemon (before) scp 0x%x ref %d",rp->scp, rp->scp->refCount);
#endif
        code = (*rp->procp)(rp->scp, rp->p1, rp->p2, rp->p3, rp->p4, rp->userp);
#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp,"cm_BkgDaemon (after) scp 0x%x ref %d",rp->scp, rp->scp->refCount);
#endif

        /*
         * Keep the following list synchronized with the
         * error code list in cm_BkgStore.
         * cm_SyncOpDone(CM_SCACHESYNC_ASYNCSTORE) will be called there unless
         * one of these errors has occurred.
         */
        switch ( code ) {
        case CM_ERROR_TIMEDOUT: /* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            if (rp->procp == cm_BkgStore) {
                osi_Log2(afsd_logp,
                         "cm_BkgDaemon re-queueing failed request 0x%p code 0x%x",
                         rp, code);
                lock_ObtainWrite(&cm_daemonLock);
                cm_bkgQueueCount++;
                osi_QAddT((osi_queue_t **) &cm_bkgListp, (osi_queue_t **)&cm_bkgListEndp, &rp->q);
                break;
            } /* otherwise fall through */
        case 0:  /* success */
        default: /* other error */
            if (code == 0)
                osi_Log1(afsd_logp,"cm_BkgDaemon SUCCESS: request 0x%p", rp);
            else
                osi_Log2(afsd_logp,"cm_BkgDaemon FAILED: request dropped 0x%p code 0x%x",
                         rp, code);
            cm_ReleaseUser(rp->userp);
            cm_ReleaseSCache(rp->scp);
            free(rp);
            lock_ObtainWrite(&cm_daemonLock);
        }
    }
    lock_ReleaseWrite(&cm_daemonLock);
    thrd_SetEvent(cm_BkgDaemon_ShutdownEvent[daemonID]);
}
void * cm_BkgDaemon(void * vparm)
{
    cm_bkgRequest_t *rp;
    afs_int32 code;
    char name[32] = "";
    long daemonID = (long)(LONG_PTR)vparm;

    snprintf(name, sizeof(name), "cm_BkgDaemon_ShutdownEvent%ld", daemonID);
    cm_BkgDaemon_ShutdownEvent[daemonID] = thrd_CreateEvent(NULL, FALSE, FALSE, name);
    if ( GetLastError() == ERROR_ALREADY_EXISTS )
        afsi_log("Event Object Already Exists: %s", name);

    rx_StartClientThread();

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    while (daemon_ShutdownFlag == 0) {
        int willBlock = 0;

        if (powerStateSuspended) {
            Sleep(1000);
            continue;
        }
        if (!cm_bkgListEndpp[daemonID]) {
            osi_SleepW((LONG_PTR)&cm_bkgListpp[daemonID], &cm_daemonLockp[daemonID]);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
            continue;
        }

        /* we found a request */
        for (rp = cm_bkgListEndpp[daemonID]; rp; rp = (cm_bkgRequest_t *) osi_QPrev(&rp->q))
        {
            if (rp->scp->flags & CM_SCACHEFLAG_DELETED)
                break;

            /*
             * If the request has active I/O such that this worker would
             * be forced to block, leave the request in the queue and move
             * on to one that might be available for servicing.
             */
            if (cm_RequestWillBlock(rp)) {
                willBlock++;
                continue;
            }

            if (cm_ServerAvailable(&rp->scp->fid, rp->userp))
                break;
        }

        if (rp == NULL) {
            /*
             * Couldn't find a request that we could process at the
             * current time.  If there were requests that would cause
             * the worker to block, sleep for 100ms so it can promptly
             * respond when one becomes available.  Otherwise, sleep
             * for 1s.
             *
             * This polling cycle needs to be replaced with a proper
             * producer/consumer dynamic worker pool.
             */
            osi_Log2(afsd_logp,"cm_BkgDaemon[%u] sleeping %dms all tasks would block",
                     daemonID, willBlock ? 100 : 1000);

            lock_ReleaseWrite(&cm_daemonLockp[daemonID]);
            Sleep(willBlock ? 100 : 1000);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
            continue;
        }

        osi_QRemoveHT((osi_queue_t **) &cm_bkgListpp[daemonID],
                      (osi_queue_t **) &cm_bkgListEndpp[daemonID], &rp->q);
        osi_assertx(cm_bkgQueueCountp[daemonID]-- > 0, "cm_bkgQueueCount 0");
        lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

        osi_Log2(afsd_logp,"cm_BkgDaemon[%u] processing request 0x%p", daemonID, rp);

        if (rp->scp->flags & CM_SCACHEFLAG_DELETED) {
            osi_Log2(afsd_logp,"cm_BkgDaemon[%u] DELETED scp 0x%x", daemonID, rp->scp);
            code = CM_ERROR_BADFD;
        } else {
#ifdef DEBUG_REFCOUNT
            osi_Log3(afsd_logp,"cm_BkgDaemon[%u] (before) scp 0x%x ref %d",
                     daemonID, rp->scp, rp->scp->refCount);
#endif
            code = (*rp->procp)(rp->scp, rp->p1, rp->p2, rp->p3, rp->p4, rp->userp, &rp->req);
#ifdef DEBUG_REFCOUNT
            osi_Log3(afsd_logp,"cm_BkgDaemon[%u] (after) scp 0x%x ref %d",
                     daemonID, rp->scp, rp->scp->refCount);
#endif
        }

        /*
         * Keep the following list synchronized with the
         * error code list in cm_BkgStore.
         * cm_SyncOpDone(CM_SCACHESYNC_ASYNCSTORE) will be called there unless
         * one of these errors has occurred.
         */
        switch ( code ) {
        case CM_ERROR_TIMEDOUT: /* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            if (rp->procp == cm_BkgStore || rp->procp == RDR_BkgFetch) {
                osi_Log3(afsd_logp,
                         "cm_BkgDaemon[%u] re-queueing failed request 0x%p code 0x%x",
                         daemonID, rp, code);
                lock_ObtainWrite(&cm_daemonLockp[daemonID]);
                cm_bkgQueueCountp[daemonID]++;
                osi_QAddT((osi_queue_t **) &cm_bkgListpp[daemonID],
                          (osi_queue_t **) &cm_bkgListEndpp[daemonID], &rp->q);
                break;
            } /* otherwise fall through */
        case 0:  /* success */
        default: /* other error */
            if (code == 0) {
                osi_Log2(afsd_logp,"cm_BkgDaemon[%u] SUCCESS: request 0x%p", daemonID, rp);
            } else {
                osi_Log3(afsd_logp,"cm_BkgDaemon[%u] FAILED: request dropped 0x%p code 0x%x",
                         daemonID, rp, code);
            }
            cm_ReleaseUser(rp->userp);
            cm_ReleaseSCache(rp->scp);
            free(rp);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
        }
    }
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);
    thrd_SetEvent(cm_BkgDaemon_ShutdownEvent[daemonID]);
    pthread_exit(NULL);
    return NULL;
}
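/*
 * Producer-side sketch (illustrative; the enqueue routine in the real
 * tree is cm_QueueBKGRequest(), whose exact shape may differ).  It
 * shows the handshake the daemon loop above depends on: queue at the
 * head under the per-daemon lock, bump the queue count, then wake the
 * daemon sleeping on &cm_bkgListpp[daemonID].  Hashing requests onto
 * daemons by vnode is an assumption of this example.
 */
static void
example_queue_bkg_request(cm_bkgRequest_t *rp)
{
    long daemonID = (long)(rp->scp->fid.vnode % cm_nDaemons);   /* hypothetical placement */

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    osi_QAddH((osi_queue_t **) &cm_bkgListpp[daemonID],
              (osi_queue_t **) &cm_bkgListEndpp[daemonID], &rp->q);
    cm_bkgQueueCountp[daemonID]++;
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

    /* wake the worker blocked in osi_SleepW() on this queue head */
    osi_Wakeup((LONG_PTR) &cm_bkgListpp[daemonID]);
}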