void cm_RankUpServers()
{
    cm_server_t * tsp;

    lock_ObtainRead(&cm_serverLock);
    for (tsp = cm_serversAllFirstp;
         tsp;
         tsp = (cm_server_t *)osi_QNext(&tsp->allq)) {
        cm_GetServerNoLock(tsp);
        lock_ReleaseRead(&cm_serverLock);

        lock_ObtainMutex(&tsp->mx);

        /* if the server is not down, rank the server */
        if (!(tsp->flags & CM_SERVERFLAG_DOWN))
            cm_RankServer(tsp);

        lock_ReleaseMutex(&tsp->mx);

        lock_ObtainRead(&cm_serverLock);
        cm_PutServerNoLock(tsp);
    }
    lock_ReleaseRead(&cm_serverLock);
}
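/*
 * Editor's sketch (not in the original source): cm_RankUpServers and the
 * other all-server walks below share one pattern -- pin the server with a
 * reference while cm_serverLock is held, drop the lock to do per-server
 * work, then retake the lock to release the reference.  The helper name
 * cm_WalkAllServers is hypothetical.
 */
static void cm_WalkAllServers(void (*workp)(cm_server_t *))
{
    cm_server_t *tsp;

    lock_ObtainRead(&cm_serverLock);
    for (tsp = cm_serversAllFirstp; tsp;
         tsp = (cm_server_t *)osi_QNext(&tsp->allq)) {
        cm_GetServerNoLock(tsp);        /* pin tsp so it survives the unlock */
        lock_ReleaseRead(&cm_serverLock);

        (*workp)(tsp);                  /* per-server work, no list lock held */

        lock_ObtainRead(&cm_serverLock);
        cm_PutServerNoLock(tsp);        /* unpin under the lock */
    }
    lock_ReleaseRead(&cm_serverLock);
}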
void cm_QueueBKGRequest(cm_scache_t *scp, cm_bkgProc_t *procp, afs_uint32 p1, afs_uint32 p2,
                        afs_uint32 p3, afs_uint32 p4, cm_user_t *userp, cm_req_t *reqp)
{
    cm_bkgRequest_t *rp, *rpq;
    afs_uint32 daemonID;
    int duplicate = 0;

    rp = malloc(sizeof(*rp));
    memset(rp, 0, sizeof(*rp));

    cm_HoldSCache(scp);
    rp->scp = scp;
    cm_HoldUser(userp);
    rp->userp = userp;
    rp->procp = procp;
    rp->p1 = p1;
    rp->p2 = p2;
    rp->p3 = p3;
    rp->p4 = p4;
    rp->req = *reqp;

    /* Use separate queues for fetch and store operations */
    daemonID = (scp->fid.hash % (cm_nDaemons/2)) * 2;
    if (procp == cm_BkgStore)
        daemonID++;

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    /* Check to see if this is a duplicate request */
    for (rpq = cm_bkgListpp[daemonID]; rpq; rpq = (cm_bkgRequest_t *) osi_QNext(&rpq->q))
    {
        if ( rpq->p1 == p1 &&
             rpq->p3 == p3 &&
             rpq->procp == procp &&
             rpq->p2 == p2 &&
             rpq->p4 == p4 &&
             rpq->scp == scp &&
             rpq->userp == userp)
        {
            /* found a duplicate; update request with latest info */
            duplicate = 1;
            break;
        }
    }

    if (!duplicate) {
        cm_bkgQueueCountp[daemonID]++;
        osi_QAddH((osi_queue_t **) &cm_bkgListpp[daemonID],
                  (osi_queue_t **) &cm_bkgListEndpp[daemonID],
                  &rp->q);
    }
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

    if (duplicate) {
        cm_ReleaseSCache(scp);
        cm_ReleaseUser(userp);
        free(rp);
    } else {
        osi_Wakeup((LONG_PTR) &cm_bkgListpp[daemonID]);
    }
}
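/*
 * Editor's sketch (not in the original source): the queue-selection
 * arithmetic above restated as a standalone function.  With, say,
 * cm_nDaemons == 8 there are four fetch queues (IDs 0, 2, 4, 6) and four
 * store queues (IDs 1, 3, 5, 7); fetches and stores for the same file
 * always hash to the same adjacent pair.  The function name is
 * hypothetical.
 */
static afs_uint32 cm_BkgDaemonIDSketch(afs_uint32 fidHash, int isStore)
{
    afs_uint32 id = (fidHash % (cm_nDaemons / 2)) * 2;  /* even ID: fetch queue */
    if (isStore)
        id++;                                           /* odd ID: store queue */
    return id;
}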
static void cm_CheckServersSingular(afs_uint32 flags, cm_cell_t *cellp)
{
    /* ping all file servers, up or down, with unauthenticated connection,
     * to find out whether we have all our callbacks from the server still.
     * Also, ping down VLDBs.
     */
    cm_server_t *tsp;
    int doPing;
    int isDown;
    int isFS;
    int isVLDB;

    lock_ObtainRead(&cm_serverLock);
    for (tsp = cm_serversAllFirstp;
         tsp;
         tsp = (cm_server_t *)osi_QNext(&tsp->allq)) {
        cm_GetServerNoLock(tsp);
        lock_ReleaseRead(&cm_serverLock);

        /* now process the server */
        lock_ObtainMutex(&tsp->mx);

        doPing = 0;
        isDown = tsp->flags & CM_SERVERFLAG_DOWN;
        isFS   = tsp->type == CM_SERVER_FILE;
        isVLDB = tsp->type == CM_SERVER_VLDB;

        /* only do the ping if the cell matches the requested cell, or we're
         * matching all cells (cellp == NULL), and if we've requested to ping
         * this type of {up, down} servers.
         */
        if ((cellp == NULL || cellp == tsp->cellp) &&
            ((isDown && (flags & CM_FLAG_CHECKDOWNSERVERS)) ||
             (!isDown && (flags & CM_FLAG_CHECKUPSERVERS))) &&
            ((!(flags & CM_FLAG_CHECKVLDBSERVERS) ||
              (isVLDB && (flags & CM_FLAG_CHECKVLDBSERVERS))) &&
             (!(flags & CM_FLAG_CHECKFILESERVERS) ||
              (isFS && (flags & CM_FLAG_CHECKFILESERVERS))))) {
            doPing = 1;
        }   /* we're supposed to check this up/down server */
        lock_ReleaseMutex(&tsp->mx);

        /* at this point, we've adjusted the server state, so do the ping and
         * adjust things.
         */
        if (doPing)
            cm_PingServer(tsp);

        /* also, run the GC function for connections on all of the
         * server's connections.
         */
        cm_GCConnections(tsp);

        lock_ObtainRead(&cm_serverLock);
        cm_PutServerNoLock(tsp);
    }
    lock_ReleaseRead(&cm_serverLock);
}
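/*
 * Editor's sketch (hypothetical call sites, not in the original source):
 * how the flag combinations above select which servers to probe.
 */
static void cm_CheckServersExampleSketch(cm_cell_t *cellp)
{
    /* probe only the down file servers, in all cells */
    cm_CheckServersSingular(CM_FLAG_CHECKDOWNSERVERS | CM_FLAG_CHECKFILESERVERS, NULL);

    /* probe every up and down server belonging to one cell */
    cm_CheckServersSingular(CM_FLAG_CHECKUPSERVERS | CM_FLAG_CHECKDOWNSERVERS, cellp);
}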
void cm_ForceNewConnectionsAllServers(void)
{
    cm_server_t *tsp;

    lock_ObtainRead(&cm_serverLock);
    for (tsp = cm_serversAllFirstp;
         tsp;
         tsp = (cm_server_t *)osi_QNext(&tsp->allq)) {
        cm_GetServerNoLock(tsp);
        lock_ReleaseRead(&cm_serverLock);

        cm_ForceNewConnections(tsp);

        lock_ObtainRead(&cm_serverLock);
        cm_PutServerNoLock(tsp);
    }
    lock_ReleaseRead(&cm_serverLock);
}
/* utility function to wakeup someone sleeping in SleepSched */
void osi_WakeupSpin(LONG_PTR sleepValue)
{
    LONG_PTR idx;
    CRITICAL_SECTION *csp;
    osi_sleepInfo_t *tsp;

    idx = osi_SLEEPHASH(sleepValue);
    csp = &osi_critSec[idx];
    EnterCriticalSection(csp);
    for (tsp = osi_sleepers[idx]; tsp; tsp = (osi_sleepInfo_t *) osi_QNext(&tsp->q)) {
        if ((!(tsp->states & (OSI_SLEEPINFO_DELETED | OSI_SLEEPINFO_SIGNALLED))) &&
            tsp->value == sleepValue) {
            _InterlockedOr(&tsp->states, OSI_SLEEPINFO_SIGNALLED);
            ReleaseSemaphore(tsp->sema, 1, NULL);
        }
    }
    LeaveCriticalSection(csp);
}
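/*
 * Editor's sketch (hypothetical call sites, not in the original source):
 * the sleeper and the waker must agree on sleepValue; osi_SLEEPHASH()
 * maps that value to the bucket the loop above scans.  Typical pairing,
 * per the osi_Sleep* routines later in this file:
 *
 *     osi_SleepSpin((LONG_PTR)&someObject, csp);   // waiter, csp held
 *     ...
 *     osi_WakeupSpin((LONG_PTR)&someObject);       // waker, any thread
 */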
/* we just panic'd. Log the error to all enabled log files.
 * Be careful not to wait for a lock.
 */
void osi_LogPanic(char *msgp, char *filep, size_t lineNumber)
{
    osi_log_t *tlp;

    for (tlp = osi_allLogsp; tlp; tlp = (osi_log_t *) osi_QNext(&tlp->q)) {
        if (!tlp->enabled)
            continue;

        /* otherwise, proceed */
        if (filep)
            osi_LogAdd(tlp, "**PANIC** \"%s\" (file %s:%d)",
                       (size_t)msgp, (size_t)filep, lineNumber, 0, 0);
        else
            osi_LogAdd(tlp, "**PANIC** \"%s\"", (size_t)msgp, 0, 0, 0, 0);

        /* should grab lock for this, but we're in panic, and better safe than
         * sorry.
         */
        tlp->enabled = 0;
    }
}
void cm_ServerClearRPCStats(void)
{
    cm_server_t *tsp;
    afs_uint16 port;

    lock_ObtainRead(&cm_serverLock);
    for (tsp = cm_serversAllFirstp;
         tsp;
         tsp = (cm_server_t *)osi_QNext(&tsp->allq)) {
        switch (tsp->type) {
        case CM_SERVER_VLDB:
            port = htons(7003);
            rx_ClearPeerRPCStats(opcode_VL_ProbeServer>>32, tsp->addr.sin_addr.s_addr, port);
            break;
        case CM_SERVER_FILE:
            port = htons(7000);
            rx_ClearPeerRPCStats(opcode_RXAFS_GetCapabilities>>32, tsp->addr.sin_addr.s_addr, port);
            rx_ClearPeerRPCStats(opcode_RXAFS_GetTime>>32, tsp->addr.sin_addr.s_addr, port);
            break;
        }
    }
    lock_ReleaseRead(&cm_serverLock);
}
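/*
 * Editor's note (assumption, not confirmed by this file): the ">>32"
 * above implies each opcode_* constant carries the rx statistics index in
 * its high 32 bits with the RPC opcode in the low bits, along the lines
 * of this hypothetical macro:
 *
 *     #define PACK_STAT_OPCODE(statIndex, op) \
 *         (((afs_uint64)(statIndex) << 32) | (afs_uint64)(op))
 */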
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
        }
    }
}
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
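/*
 * Editor's sketch (hypothetical call site, not in the original source):
 * osi_SleepM drops the mutex atomically with going to sleep and returns
 * with it NOT held, so callers must re-obtain the lock and re-test their
 * condition:
 *
 *     lock_ObtainMutex(&objp->mx);
 *     while (!ready(objp)) {
 *         osi_SleepM((LONG_PTR)objp, &objp->mx);   // returns without mx
 *         lock_ObtainMutex(&objp->mx);             // retake before retest
 *     }
 *     lock_ReleaseMutex(&objp->mx);
 */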
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    /* remove this thread's tid from the reader list, compacting the array */
    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
int lock_TryMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
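/*
 * Editor's sketch (hypothetical call site, not in the original source):
 * the try-lock idiom.  A zero return means the mutex was busy and nothing
 * was acquired:
 *
 *     if (lock_TryMutex(&objp->mx)) {
 *         ... short critical section ...
 *         lock_ReleaseMutex(&objp->mx);
 *     } else {
 *         ... fall back to a blocking path or retry later ...
 *     }
 */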
void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "read lock not held");

    /* remove this thread's tid from the reader list, compacting the array */
    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    lockp->readers--;

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        osi_assertx(lockp->readers >= 0, "read lock underflow");

        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
long cm_ValidateACLCache(void)
{
    long size = cm_data.stats * 2;
    long count;
    cm_aclent_t * aclp;

    if ((cm_data.aclLRUp == NULL && cm_data.aclLRUEndp != NULL) ||
        (cm_data.aclLRUp != NULL && cm_data.aclLRUEndp == NULL)) {
        afsi_log("cm_ValidateACLCache failure: inconsistent LRU pointers");
        fprintf(stderr, "cm_ValidateACLCache failure: inconsistent LRU pointers\n");
        return -9;
    }

    for ( aclp = cm_data.aclLRUp, count = 0;
          aclp;
          aclp = (cm_aclent_t *) osi_QNext(&aclp->q), count++ ) {

        if (aclp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC\n");
            return -1;
        }

        if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC\n");
            return -2;
        }

        if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC\n");
            return -3;
        }

        if ((count != 0 && aclp == cm_data.aclLRUp) || count > size) {
            afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUp list");
            fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUp list\n");
            return -4;
        }
    }

    for ( aclp = cm_data.aclLRUEndp, count = 0;
          aclp;
          aclp = (cm_aclent_t *) osi_QPrev(&aclp->q), count++ ) {

        if (aclp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC\n");
            return -5;
        }

        if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC\n");
            return -6;
        }

        if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC\n");
            return -7;
        }

        if ((count != 0 && aclp == cm_data.aclLRUEndp) || count > size) {
            afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list");
            fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list\n");
            return -8;
        }
    }

    return 0;
}
static void cm_CheckServersMulti(afs_uint32 flags, cm_cell_t *cellp)
{
    /*
     * The goal of this function is to simultaneously probe all of the
     * up/down servers (vldb/file) as specified by flags in the minimum
     * number of RPCs.  Effectively that means use one
     * multi_RXAFS_GetCapabilities() followed by possibly one
     * multi_RXAFS_GetTime() and one multi_VL_ProbeServer().
     *
     * To make this work we must construct the list of vldb
     * and file servers that are to be probed as well as the
     * associated data structures.
     */
    int srvAddrCount = 0;
    struct srvAddr **addrs = NULL;
    cm_conn_t **conns = NULL;
    struct rx_connection **rxconns = NULL;
    cm_req_t req;
    afs_int32 i, nconns = 0, maxconns;
    afs_int32 *conntimer, *results;
    Capabilities *caps = NULL;
    cm_server_t ** serversp, *tsp;
    afs_uint32 isDown, wasDown;
    afs_uint32 code;
    time_t start;
    char hoststr[16];

    cm_InitReq(&req);

    maxconns = max(cm_numFileServers, cm_numVldbServers);
    if (maxconns == 0)
        return;

    conns = (cm_conn_t **)malloc(maxconns * sizeof(cm_conn_t *));
    rxconns = (struct rx_connection **)malloc(maxconns * sizeof(struct rx_connection *));
    conntimer = (afs_int32 *)malloc(maxconns * sizeof(afs_int32));
    results = (afs_int32 *)malloc(maxconns * sizeof(afs_int32));
    serversp = (cm_server_t **)malloc(maxconns * sizeof(cm_server_t *));
    caps = (Capabilities *)malloc(maxconns * sizeof(Capabilities));
    memset(caps, 0, maxconns * sizeof(Capabilities));

    if ((flags & CM_FLAG_CHECKFILESERVERS) ||
        !(flags & (CM_FLAG_CHECKFILESERVERS|CM_FLAG_CHECKVLDBSERVERS)))
    {
        lock_ObtainRead(&cm_serverLock);
        for (nconns = 0, tsp = cm_serversAllFirstp;
             tsp != NULL && nconns < maxconns;
             tsp = (cm_server_t *)osi_QNext(&tsp->allq)) {
            if (tsp->type != CM_SERVER_FILE ||
                tsp->cellp == NULL ||           /* SetPref only */
                (cellp && cellp != tsp->cellp))
                continue;

            cm_GetServerNoLock(tsp);
            lock_ReleaseRead(&cm_serverLock);

            lock_ObtainMutex(&tsp->mx);
            isDown = tsp->flags & CM_SERVERFLAG_DOWN;

            if (tsp->pingCount > 0 ||
                !((isDown && (flags & CM_FLAG_CHECKDOWNSERVERS)) ||
                  (!isDown && (flags & CM_FLAG_CHECKUPSERVERS)))) {
                lock_ReleaseMutex(&tsp->mx);
                lock_ObtainRead(&cm_serverLock);
                cm_PutServerNoLock(tsp);
                continue;
            }

            InterlockedIncrement(&tsp->pingCount);
            lock_ReleaseMutex(&tsp->mx);

            if (cm_noIPAddr > 0)
                code = cm_ConnByServer(tsp, cm_rootUserp, FALSE, &conns[nconns]);
            else
                code = RX_CALL_DEAD;
            if (code) {
                lock_ObtainMutex(&tsp->mx);
                if (code == RX_CALL_DEAD)
                    cm_MarkServerDown(tsp, code, isDown);
                InterlockedDecrement(&tsp->pingCount);
                lock_ReleaseMutex(&tsp->mx);
                lock_ObtainRead(&cm_serverLock);
                cm_PutServerNoLock(tsp);
                continue;
            }

            lock_ObtainRead(&cm_serverLock);
            rxconns[nconns] = cm_GetRxConn(conns[nconns]);
            if (conntimer[nconns] = (isDown ? 1 : 0))   /* intentional assignment */
                rx_SetConnHardDeadTime(rxconns[nconns], 10);
            serversp[nconns] = tsp;
            nconns++;
        }
        lock_ReleaseRead(&cm_serverLock);

        if (nconns) {
            /* Perform the multi call */
            start = time(NULL);
            multi_Rx(rxconns, nconns)
            {
                multi_RXAFS_GetCapabilities(&caps[multi_i]);
                results[multi_i] = multi_error;
            } multi_End;
        }