unsigned long main_Mod2(void *parm) { long i; for(i=0; i<OSI_MOD2LOOPS; i++) { osi_Log0(main_logp, "mod2"); lock_ObtainMutex(&main_aMutex); lock_ObtainWrite(&main_bRWLock); a += 3; Sleep(0); b -= 3; osi_assert(a+b == 100); Sleep(0); lock_ReleaseWrite(&main_bRWLock); Sleep(0); lock_ReleaseMutex(&main_aMutex); Sleep(0); m2Loops = i; osi_Log4(main_logp, "mod2 done, %d %d %d %d", m2Loops, 2, 3, 4); } lock_ObtainWrite(&main_doneRWLock); done++; Sleep(0); lock_ReleaseWrite(&main_doneRWLock); return 0; }
/*
 * Return a reusable acl cache entry taken from the tail (least recently
 * used end) of the LRU queue.
 *
 * Must be called with cm_aclLock held for write.  If the chosen entry is
 * still attached to a different scache object, cm_aclLock is temporarily
 * dropped so that scache's rw lock can be obtained first (lock ordering),
 * then cm_aclLock is re-obtained — so the caller must tolerate cm_aclLock
 * being released and reacquired inside this call.
 *
 * Never returns NULL: panics if the LRU queue is empty or inconsistent.
 */
static cm_aclent_t *GetFreeACLEnt(cm_scache_t * scp)
{
    cm_aclent_t *aclp;
    cm_scache_t *ascp = 0;

    if (cm_data.aclLRUp == NULL)
        osi_panic("empty aclent LRU", __FILE__, __LINE__);

    if (cm_data.aclLRUEndp == NULL)
        osi_panic("inconsistent aclent LRUEndp == NULL", __FILE__, __LINE__);

    /* reclaim the least-recently-used entry */
    aclp = cm_data.aclLRUEndp;
    osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);

    if (aclp->backp && scp != aclp->backp) {
        /* entry belongs to another scache: lock that scache's rw lock
         * before cm_aclLock, per the lock hierarchy */
        ascp = aclp->backp;
        lock_ReleaseWrite(&cm_aclLock);
        lock_ObtainWrite(&ascp->rw);
        lock_ObtainWrite(&cm_aclLock);
    }
    /* detach the entry from its previous owner/user */
    CleanupACLEnt(aclp);

    if (ascp)
        lock_ReleaseWrite(&ascp->rw);
    return aclp;
}
/*
 * Invalidate the cached ACL entry for a particular user on this particular
 * vnode, and if an entry was removed notify the redirector so it drops its
 * cached credentials for the object.
 *
 * The scp must NOT be locked on entry; this function obtains and releases
 * scp->rw itself (and cm_aclLock nested inside it).
 */
void cm_InvalidateACLUser(cm_scache_t *scp, cm_user_t *userp)
{
    cm_aclent_t *aclp;
    cm_aclent_t **laclpp;
    int found = 0;
    int callback = 0;

    lock_ObtainWrite(&scp->rw);
    lock_ObtainWrite(&cm_aclLock);
    laclpp = &scp->randomACLp;
    /* walk the per-scache list with a pointer-to-link so the match can be
     * unlinked in place */
    for (aclp = *laclpp; aclp; laclpp = &aclp->nextp, aclp = *laclpp) {
        if (userp == aclp->userp) {     /* One for a given user/scache */
            *laclpp = aclp->nextp;
            cm_ReleaseUser(aclp->userp);
            aclp->userp = NULL;
            aclp->backp = (struct cm_scache *) 0;
            found = 1;
            break;  /* at most one entry per user/scache pair */
        }
    }
    lock_ReleaseWrite(&cm_aclLock);
    /* only bother the redirector if the object still has a callback */
    if (found)
        callback = cm_HaveCallback(scp);
    lock_ReleaseWrite(&scp->rw);

    if (found && callback && RDR_Initialized)
        RDR_InvalidateObject(scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique,
                             scp->fid.hash, scp->fileType, AFS_INVALIDATE_CREDS);
}
unsigned long main_Mod1(void *parm) { long i; for(i=0; i<OSI_MOD1LOOPS; i++) { lock_ObtainMutex(&main_aMutex); osi_Log0(main_logp, "mod1"); lock_ObtainWrite(&main_bRWLock); a -= 52; Sleep(0); b += 52; osi_assert(a+b == 100); Sleep(0); lock_ReleaseWrite(&main_bRWLock); Sleep(0); lock_ReleaseMutex(&main_aMutex); Sleep(0); m1Loops = i; osi_Log1(main_logp, "mod1 done, %d", m1Loops); } lock_ObtainWrite(&main_doneRWLock); done++; Sleep(0); lock_ReleaseWrite(&main_doneRWLock); return 0; }
/* called with locked scp; ensures that we have an ACL cache entry for the
 * user specified by the parameter "userp."
 * In pathological race conditions, this function may return success without
 * having loaded the entry properly (due to a racing callback revoke), so this
 * function must also be called in a while loop to make sure that we eventually
 * succeed.
 *
 * Returns 0 on success or a cm_* error code.  scp->rw is held for write on
 * entry and on exit, but may be dropped and re-obtained internally when the
 * ACL has to be fetched from the parent directory.
 */
long cm_GetAccessRights(struct cm_scache *scp, struct cm_user *userp, struct cm_req *reqp)
{
    long code;
    cm_fid_t tfid;
    cm_scache_t *aclScp = NULL;
    int got_cb = 0;     /* unused in this function */
    cm_volume_t * volp = cm_FindVolumeByFID(&scp->fid, userp, reqp);

    /* pretty easy: just force a pass through the fetch status code */

    osi_Log2(afsd_logp, "GetAccessRights scp 0x%p user 0x%p", scp, userp);

    /* first, start by finding out whether we have a directory or something
     * else, so we can find what object's ACL we need.  Directories (and
     * per-file-ACL or DFS volumes, or when the volume is unknown) carry
     * their own ACL; plain files inherit from the parent directory. */
    if (scp->fileType == CM_SCACHETYPE_DIRECTORY || cm_accessPerFileCheck || !volp ||
        (volp->flags & CM_VOLUMEFLAG_DFS_VOLUME)) {
        /* FORCECB forces a fresh status fetch (and thus fresh ACL info) */
        code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_FORCECB);
        if (!code)
            cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
        else
            osi_Log3(afsd_logp, "GetAccessRights syncop failure scp %x user %x code %x", scp, userp, code);
    } else {
        /* not a dir, use parent dir's acl */
        cm_SetFid(&tfid, scp->fid.cell, scp->fid.volume, scp->parentVnode, scp->parentUnique);
        /* drop our lock before acquiring the parent's scache */
        lock_ReleaseWrite(&scp->rw);
        code = cm_GetSCache(&tfid, NULL, &aclScp, userp, reqp);
        if (code) {
            /* restore the caller's lock state before returning */
            lock_ObtainWrite(&scp->rw);
            goto _done;
        }

        osi_Log2(afsd_logp, "GetAccessRights parent scp %x user %x", aclScp, userp);
        lock_ObtainWrite(&aclScp->rw);
        code = cm_SyncOp(aclScp, NULL, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_FORCECB);
        if (!code)
            cm_SyncOpDone(aclScp, NULL,
                          CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
        else
            osi_Log3(afsd_logp, "GetAccessRights parent syncop failure scp %x user %x code %x",
                     aclScp, userp, code);
        lock_ReleaseWrite(&aclScp->rw);
        cm_ReleaseSCache(aclScp);
        /* re-obtain scp->rw so the caller's locking contract holds */
        lock_ObtainWrite(&scp->rw);
    }

  _done:
    if (volp)
        cm_PutVolume(volp);
    return code;
}
/*
 * Add rights to an acl cache entry.  Do the right thing if not present,
 * including digging up an entry from the LRU queue.
 *
 * The scp must be locked when this function is called.
 * Returns 0 (always succeeds; GetFreeACLEnt panics rather than fail).
 */
long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 rights)
{
    struct cm_aclent *aclp;

    lock_ObtainWrite(&cm_aclLock);
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            /* existing entry: refresh the rights in place */
            aclp->randomAccess = rights;
            if (aclp->tgtLifetime == 0)
                /* BUG FIX: was cm_TGTLifeTime(pag) — 'pag' is not declared
                 * anywhere in this function; the new-entry path below uses
                 * the user's token lifetime, so do the same here. */
                aclp->tgtLifetime = cm_TGTLifeTime(userp);
            lock_ReleaseWrite(&cm_aclLock);
            return 0;
        }
    }

    /*
     * Didn't find the dude we're looking for, so take someone from the LRUQ
     * and reuse. But first try the free list and see if there's already
     * someone there.
     */
    aclp = GetFreeACLEnt(scp);           /* can't fail, panics instead */
    /* freshly reused entry becomes most-recently-used */
    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
    aclp->backp = scp;
    aclp->nextp = scp->randomACLp;
    scp->randomACLp = aclp;
    cm_HoldUser(userp);                  /* the cache entry owns a reference */
    aclp->userp = userp;
    aclp->randomAccess = rights;
    aclp->tgtLifetime = cm_TGTLifeTime(userp);
    lock_ReleaseWrite(&cm_aclLock);

    return 0;
}
/*
 * One-time initialization of the cell module.
 *
 * newFile != 0 means the cache file is fresh: reset the cell tables and
 * (when built with AFS_FREELANCE_CLIENT) create the dummy Freelance cell.
 * Otherwise the cell records were recovered from an existing cache file and
 * only their in-memory-only state (mutexes, vlserver lists) is rebuilt.
 *
 * Safe to call more than once; only the first call does any work.
 */
void cm_InitCell(int newFile, long maxCells)
{
    static osi_once_t once;

    if (osi_Once(&once)) {
        cm_cell_t * cellp;

        lock_InitializeRWLock(&cm_cellLock, "cell global lock", LOCK_HIERARCHY_CELL_GLOBAL);

        if ( newFile ) {
            /* fresh cache: wipe the global list and both hash tables */
            cm_data.allCellsp = NULL;
            cm_data.currentCells = 0;
            cm_data.maxCells = maxCells;
            memset(cm_data.cellNameHashTablep, 0, sizeof(cm_cell_t *) * cm_data.cellHashTableSize);
            memset(cm_data.cellIDHashTablep, 0, sizeof(cm_cell_t *) * cm_data.cellHashTableSize);

#ifdef AFS_FREELANCE_CLIENT
            /* Generate a dummy entry for the Freelance cell whether or not
             * freelance mode is being used in this session */
            cellp = &cm_data.cellBaseAddress[cm_data.currentCells++];
            memset(cellp, 0, sizeof(cm_cell_t));
            cellp->magic = CM_CELL_MAGIC;

            /* lock order: cell mutex before the global cell lock */
            lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL);
            lock_ObtainMutex(&cellp->mx);
            lock_ObtainWrite(&cm_cellLock);
            /* copy in name */
            strncpy(cellp->name, "Freelance.Local.Cell", CELL_MAXNAMELEN); /*safe*/
            cellp->name[CELL_MAXNAMELEN-1] = '\0';

            /* thread on global list */
            cellp->allNextp = cm_data.allCellsp;
            cm_data.allCellsp = cellp;

            cellp->cellID = AFS_FAKE_ROOT_CELL_ID;
            cellp->vlServersp = NULL;
            _InterlockedOr(&cellp->flags, CM_CELLFLAG_FREELANCE);

            cm_AddCellToNameHashTable(cellp);
            cm_AddCellToIDHashTable(cellp);
            lock_ReleaseWrite(&cm_cellLock);
            lock_ReleaseMutex(&cellp->mx);
#endif
        } else {
            /* recovered cache: re-init per-cell mutexes and mark vlserver
             * lists as needing a fresh lookup */
            lock_ObtainRead(&cm_cellLock);
            for (cellp = cm_data.allCellsp; cellp; cellp=cellp->allNextp) {
                lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL);
                cellp->vlServersp = NULL;
                _InterlockedOr(&cellp->flags, CM_CELLFLAG_VLSERVER_INVALID);
            }
            lock_ReleaseRead(&cm_cellLock);
        }
        osi_EndOnce(&once);
    }
}
/*
 * Return a cm_space_t to the global free list (cm_spaceListp) for reuse by
 * cm_GetSpace.  The block is pushed on the head of the list; it is never
 * actually freed back to the heap.
 */
void cm_FreeSpace(cm_space_t *tsp)
{
    lock_ObtainWrite(&cm_utilsLock);
    tsp->nextp = cm_spaceListp;
    cm_spaceListp = tsp;
    lock_ReleaseWrite(&cm_utilsLock);
}
/*
 * Queue a background request for the daemon pool: build a request record
 * holding references on the scache and user objects, push it on the head of
 * the single background queue, and wake a sleeping daemon.
 */
void cm_QueueBKGRequest(cm_scache_t *scp, cm_bkgProc_t *procp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4, cm_user_t *userp)
{
    cm_bkgRequest_t *request;

    /* NOTE(review): malloc result is unchecked; an allocation failure would
     * fault in the memset below. */
    request = malloc(sizeof(*request));
    memset(request, 0, sizeof(*request));

    /* the queued request owns a reference on both objects */
    cm_HoldSCache(scp);
    cm_HoldUser(userp);

    request->scp = scp;
    request->userp = userp;
    request->procp = procp;
    request->p1 = p1;
    request->p2 = p2;
    request->p3 = p3;
    request->p4 = p4;

    /* publish at the head of the queue; daemons consume from the tail */
    lock_ObtainWrite(&cm_daemonLock);
    cm_bkgQueueCount++;
    osi_QAdd((osi_queue_t **) &cm_bkgListp, &request->q);
    if (!cm_bkgListEndp)
        cm_bkgListEndp = request;
    lock_ReleaseWrite(&cm_daemonLock);

    osi_Wakeup((LONG_PTR) &cm_bkgListp);
}
/*
 * Scanner thread 2: spins checking the shared invariant a+b == 100 WITHOUT
 * holding the data locks (torn reads are expected and merely counted in
 * s2Events), until both modifier threads have finished (done >= 2).
 *
 * Note the lock discipline: the 'break' exits the loop with the read lock
 * on main_doneRWLock still held; it is released immediately after the loop.
 */
unsigned long main_Scan2(unsigned long parm)
{
    while (1) {
        osi_Log0(main_logp, "scan2");

        /* check to see if we're done */
        lock_ObtainRead(&main_doneRWLock);
        lock_AssertAny(&main_doneRWLock);
        if (done >= 2)
            break;      /* exits holding the read lock; released below */
        lock_ReleaseRead(&main_doneRWLock);

        /* check state for consistency without locks */
        if (a+b != 100)
            s2Events++;

        /* and record that we went around again */
        s2Loops++;

        /* give others a chance */
        Sleep(0);

        osi_Log3(main_logp, "scan2 done %d %d %d", s2Loops, 2, 3);
    }
    /* release the read lock still held from the break above */
    lock_ReleaseRead(&main_doneRWLock);

    /* record our own completion */
    lock_ObtainWrite(&main_doneRWLock);
    lock_AssertAny(&main_doneRWLock);
    done++;
    lock_ReleaseWrite(&main_doneRWLock);
    return 0;
}
/*
 * Queue a background request on one of the per-daemon queues.
 *
 * Requests are distributed across daemon pairs by fid hash; within a pair,
 * even IDs service fetches and odd IDs service stores, so fetch and store
 * traffic for the same object never compete for the same worker.
 *
 * If an identical request (same proc, params, scache and user) is already
 * queued, the new request is discarded and its references dropped.
 */
void cm_QueueBKGRequest(cm_scache_t *scp, cm_bkgProc_t *procp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4, cm_user_t *userp, cm_req_t *reqp)
{
    cm_bkgRequest_t *rp, *rpq;
    afs_uint32 daemonID;
    int duplicate = 0;

    /* build the request record; it owns a reference on scp and userp */
    rp = malloc(sizeof(*rp));
    memset(rp, 0, sizeof(*rp));

    cm_HoldSCache(scp);
    rp->scp = scp;
    cm_HoldUser(userp);
    rp->userp = userp;
    rp->procp = procp;
    rp->p1 = p1;
    rp->p2 = p2;
    rp->p3 = p3;
    rp->p4 = p4;
    rp->req = *reqp;

    /* Use separate queues for fetch and store operations */
    daemonID = scp->fid.hash % (cm_nDaemons/2) * 2;

    if (procp == cm_BkgStore)
        daemonID++;

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    /* Check to see if this is a duplicate request */
    for (rpq = cm_bkgListpp[daemonID]; rpq; rpq = (cm_bkgRequest_t *) osi_QNext(&rpq->q))
    {
        if ( rpq->p1 == p1 &&
             rpq->p3 == p3 &&
             rpq->procp == procp &&
             rpq->p2 == p2 &&
             rpq->p4 == p4 &&
             rpq->scp == scp &&
             rpq->userp == userp)
        {
            /* found a duplicate; the new request is dropped below */
            duplicate = 1;
            break;
        }
    }

    if (!duplicate) {
        cm_bkgQueueCountp[daemonID]++;
        osi_QAddH((osi_queue_t **) &cm_bkgListpp[daemonID], (osi_queue_t **)&cm_bkgListEndpp[daemonID], &rp->q);
    }
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

    if (duplicate) {
        /* undo the references taken above and discard the record */
        cm_ReleaseSCache(scp);
        cm_ReleaseUser(userp);
        free(rp);
    } else {
        osi_Wakeup((LONG_PTR) &cm_bkgListpp[daemonID]);
    }
}
/*
 * Invalidate ACL info for a user that has just obtained or lost tokens.
 *
 * Walks every scache hash chain and invalidates the user's cached ACL entry
 * on each scache.  cm_scacheLock must be dropped around the per-scp work
 * (scp->rw ranks above it); a NoLock hold on scp keeps the entry alive
 * across the gap.  NOTE(review): scp->nextp is re-read after cm_scacheLock
 * is re-obtained — this presumably relies on held entries not being
 * unlinked from their hash chain; confirm against cm_scache recycling.
 */
void cm_ResetACLCache(cm_user_t *userp)
{
    cm_scache_t *scp;
    int hash;

    lock_ObtainWrite(&cm_scacheLock);
    for (hash=0; hash < cm_data.scacheHashTableSize; hash++) {
        for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
            cm_HoldSCacheNoLock(scp);           /* keep scp alive while unlocked */
            lock_ReleaseWrite(&cm_scacheLock);
            lock_ObtainWrite(&scp->rw);
            cm_InvalidateACLUser(scp, userp);
            lock_ReleaseWrite(&scp->rw);
            lock_ObtainWrite(&cm_scacheLock);
            cm_ReleaseSCacheNoLock(scp);
        }
    }
    lock_ReleaseWrite(&cm_scacheLock);
}
/*
 * Invalidate the cached ACL entry for a particular user on this particular
 * vnode.  At most one entry per user/scache pair exists, so the walk stops
 * at the first match.
 *
 * The scp must be locked by the caller.
 */
void cm_InvalidateACLUser(cm_scache_t *scp, cm_user_t *userp)
{
    cm_aclent_t **linkp;
    cm_aclent_t *entry;

    lock_ObtainWrite(&cm_aclLock);
    linkp = &scp->randomACLp;
    while ((entry = *linkp) != NULL) {
        if (entry->userp == userp) {
            /* unlink the entry and drop its user reference */
            *linkp = entry->nextp;
            cm_ReleaseUser(entry->userp);
            entry->userp = NULL;
            entry->backp = (struct cm_scache *) 0;
            break;
        }
        linkp = &entry->nextp;
    }
    lock_ReleaseWrite(&cm_aclLock);
}
/*
 * Free all acl entries associated with an scache object.  The entries stay
 * on the LRU queue; we only drop their user references and clear the back
 * pointers, then empty the scache's list and reset its anyAccess bits.
 *
 * The scp must be locked or completely unreferenced (such as when called
 * while recycling the scp).
 */
void cm_FreeAllACLEnts(cm_scache_t *scp)
{
    cm_aclent_t *entry;
    cm_aclent_t *next;

    lock_ObtainWrite(&cm_aclLock);
    entry = scp->randomACLp;
    while (entry) {
        next = entry->nextp;    /* grab before clearing the entry */
        if (entry->userp) {
            cm_ReleaseUser(entry->userp);
            entry->userp = NULL;
        }
        entry->backp = (struct cm_scache *) 0;
        entry = next;
    }

    scp->randomACLp = (struct cm_aclent *) 0;
    scp->anyAccess = 0;         /* reset this, too */
    lock_ReleaseWrite(&cm_aclLock);
}
cm_space_t *cm_GetSpace(void) { cm_space_t *tsp; if (osi_Once(&cm_utilsOnce)) { lock_InitializeRWLock(&cm_utilsLock, "cm_utilsLock", LOCK_HIERARCHY_UTILS_GLOBAL); osi_EndOnce(&cm_utilsOnce); } lock_ObtainWrite(&cm_utilsLock); if (tsp = cm_spaceListp) { cm_spaceListp = tsp->nextp; } else tsp = (cm_space_t *) malloc(sizeof(cm_space_t)); (void) memset(tsp, 0, sizeof(cm_space_t)); lock_ReleaseWrite(&cm_utilsLock); return tsp; }
/*
 * Get an acl cache entry for a particular user and file, or return that it
 * doesn't exist.  Called with the scp write locked.
 *
 * Returns 0 and stores the cached rights in *rightsp on a hit; returns -1
 * (with *rightsp zeroed) on a miss or when the entry's token lifetime has
 * expired — expired entries are cleaned and parked at the LRU tail so they
 * are reused first, and the caller must refetch from the server.
 */
long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 *rightsp)
{
    cm_aclent_t *aclp;
    long retval = -1;
    time_t now = time(NULL);

    lock_AssertWrite(&scp->rw);
    lock_ObtainWrite(&cm_aclLock);
    *rightsp = 0;   /* get a new acl from server if we don't find a
                     * current entry
                     */
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            if (aclp->tgtLifetime && aclp->tgtLifetime <= now) {
                /* ticket expired */
                osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
                /* detach the stale entry from the user/scache */
                CleanupACLEnt(aclp);

                /* move to the tail of the LRU queue */
                osi_QAddT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
            } else {
                *rightsp = aclp->randomAccess;
                if (cm_data.aclLRUp != aclp) {
                    /* move to the head of the LRU queue */
                    osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
                    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
                }
                retval = 0;     /* success */
            }
            break;  /* at most one entry per user/scache pair */
        }
    }

    lock_ReleaseWrite(&cm_aclLock);
    return retval;
}
/* function called as callback proc from cm_SearchCellFile. Return 0 to
 * continue processing.
 *
 * Registers one vlserver address for the cell carried in the rock: reuses
 * an existing cm_server_t when one is already known for the address (e.g.
 * created by "fs setserverprefs"), otherwise creates one, then inserts a
 * server reference into the cell's rank-sorted vlserver list.
 *
 * At the present time the return value is ignored by the caller.
 */
long cm_AddCellProc(void *rockp, struct sockaddr_in *addrp, char *hostnamep, unsigned short ipRank)
{
    cm_server_t *tsp;
    cm_serverRef_t *tsrp;
    cm_cell_t *cellp;
    cm_cell_rock_t *cellrockp = (cm_cell_rock_t *)rockp;
    afs_uint32 probe;

    cellp = cellrockp->cellp;
    probe = !(cellrockp->flags & CM_FLAG_NOPROBE);

    /* if this server was previously created by fs setserverprefs */
    if ( tsp = cm_FindServer(addrp, CM_SERVER_VLDB)) {
        if ( !tsp->cellp )
            tsp->cellp = cellp;
        else if (tsp->cellp != cellp) {
            /* same address claimed by two cells: log it, keep the first */
            osi_Log3(afsd_logp, "found a vlserver %s associated with two cells named %s and %s",
                     osi_LogSaveString(afsd_logp,hostnamep),
                     osi_LogSaveString(afsd_logp,tsp->cellp->name),
                     osi_LogSaveString(afsd_logp,cellp->name));
        }
    }
    else
        tsp = cm_NewServer(addrp, CM_SERVER_VLDB, cellp, NULL, probe ? 0 : CM_FLAG_NOPROBE);

    if (ipRank)
        tsp->ipRank = ipRank;

    /* Insert the vlserver into a sorted list, sorted by server rank */
    tsrp = cm_NewServerRef(tsp, 0);
    cm_InsertServerList(&cellp->vlServersp, tsrp);

    /* drop the allocation reference */
    lock_ObtainWrite(&cm_serverLock);
    tsrp->refCount--;
    lock_ReleaseWrite(&cm_serverLock);
    return 0;
}
/*
 * Scanner thread 1: spins verifying the shared invariant a+b == 100 while
 * holding the proper locks (unlike main_Scan2, which reads unlocked), until
 * both modifier threads have finished (done >= 2).
 *
 * Note the lock discipline: the 'break' exits the loop with the read lock
 * on main_doneRWLock still held; it is released immediately after the loop.
 */
unsigned long main_Scan1(unsigned long parm)
{
    while (1) {
        osi_Log0(main_logp, "scan1");

        /* check to see if we're done */
        lock_ObtainRead(&main_doneRWLock);
        lock_AssertRead(&main_doneRWLock);
        if (done >= 2)
            break;      /* exits holding the read lock; released below */
        lock_ReleaseRead(&main_doneRWLock);

        /* check state for consistency */
        lock_ObtainMutex(&main_aMutex);
        lock_AssertMutex(&main_aMutex);
        Sleep(0);
        lock_ObtainRead(&main_bRWLock);
        Sleep(0);
        osi_assert(a+b == 100);
        lock_ReleaseRead(&main_bRWLock);
        Sleep(0);
        lock_ReleaseMutex(&main_aMutex);

        /* get a read lock here to test people getting stuck on RW lock alone */
        lock_ObtainRead(&main_bRWLock);
        Sleep(0);
        lock_ReleaseRead(&main_bRWLock);

        s1Loops++;

        osi_Log2(main_logp, "scan1 done %d %d", s1Loops, 2);
    }
    /* release the read lock still held from the break above */
    lock_ReleaseRead(&main_doneRWLock);

    /* record our own completion */
    lock_ObtainWrite(&main_doneRWLock);
    lock_AssertWrite(&main_doneRWLock);
    done++;
    lock_ReleaseWrite(&main_doneRWLock);
    return 0;
}
/*
 * Initialize the acl cache.  Called during system startup.
 *
 * newFile != 0 means a fresh cache file: zero the 'size' entries at
 * cm_data.aclBaseAddress and thread them all onto the LRU queue.
 * Otherwise the entries were recovered from an existing cache file and only
 * their in-memory-only state (user pointers, token lifetimes) is reset.
 *
 * Always returns 0.
 */
long cm_InitACLCache(int newFile, long size)
{
    cm_aclent_t *aclp;
    long i;
    static osi_once_t once;

    if (osi_Once(&once)) {
        lock_InitializeRWLock(&cm_aclLock, "cm_aclLock", LOCK_HIERARCHY_ACL_GLOBAL);
        osi_EndOnce(&once);
    }

    lock_ObtainWrite(&cm_aclLock);
    if ( newFile ) {
        cm_data.aclLRUp = cm_data.aclLRUEndp = NULL;
        aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
        memset(aclp, 0, size * sizeof(cm_aclent_t));

        /*
         * Put all of these guys on the LRU queue
         */
        for (i = 0; i < size; i++) {
            aclp->magic = CM_ACLENT_MAGIC;
            osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
            aclp++;
        }
    } else {
        /* recovered cache: drop stale user bindings and lifetimes */
        aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
        for (i = 0; i < size; i++) {
            aclp->userp = NULL;
            aclp->tgtLifetime = 0;
            aclp++;
        }
    }
    lock_ReleaseWrite(&cm_aclLock);
    return 0;
}
/*
 * Background worker daemon (single shared queue variant).
 *
 * Loops until daemon_ShutdownFlag is set: sleeps on the queue when empty,
 * scans the queue from the tail (oldest first) for a request whose server
 * is available and whose scache is not mid-store, runs it outside the
 * daemon lock, and either requeues it (transient errors on cm_BkgStore
 * requests) or drops it and its references.
 *
 * cm_daemonLock is held at the top of every loop iteration and released
 * around request execution.  Signals its per-daemon shutdown event on exit.
 */
void cm_BkgDaemon(void * parm)
{
    cm_bkgRequest_t *rp;
    afs_int32 code;
    char name[32] = "";
    long daemonID = (long)parm;

    snprintf(name, sizeof(name), "cm_BkgDaemon_ShutdownEvent%d", daemonID);
    cm_BkgDaemon_ShutdownEvent[daemonID] = thrd_CreateEvent(NULL, FALSE, FALSE, name);
    if ( GetLastError() == ERROR_ALREADY_EXISTS )
        afsi_log("Event Object Already Exists: %s", name);

    rx_StartClientThread();

    lock_ObtainWrite(&cm_daemonLock);
    while (daemon_ShutdownFlag == 0) {
        if (powerStateSuspended) {
            /* machine is suspending; idle until power state changes */
            Sleep(1000);
            continue;
        }
        if (!cm_bkgListEndp) {
            /* queue empty: sleep until cm_QueueBKGRequest wakes us */
            osi_SleepW((LONG_PTR)&cm_bkgListp, &cm_daemonLock);
            lock_ObtainWrite(&cm_daemonLock);
            continue;
        }

        /* we found a request */
        for (rp = cm_bkgListEndp; rp; rp = (cm_bkgRequest_t *) osi_QPrev(&rp->q))
        {
            /* oldest-first scan for a request we can service right now */
            if (cm_ServerAvailable(&rp->scp->fid, rp->userp) &&
                !(rp->scp->flags & CM_SCACHEFLAG_DATASTORING))
                break;
        }
        if (rp == NULL) {
            /* we couldn't find a request that we could process at the current time */
            lock_ReleaseWrite(&cm_daemonLock);
            Sleep(1000);
            lock_ObtainWrite(&cm_daemonLock);
            continue;
        }

        /* dequeue and run it without holding the daemon lock */
        osi_QRemoveHT((osi_queue_t **) &cm_bkgListp, (osi_queue_t **) &cm_bkgListEndp, &rp->q);
        osi_assertx(cm_bkgQueueCount-- > 0, "cm_bkgQueueCount 0");
        lock_ReleaseWrite(&cm_daemonLock);

        osi_Log1(afsd_logp,"cm_BkgDaemon processing request 0x%p", rp);

#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp,"cm_BkgDaemon (before) scp 0x%x ref %d",rp->scp, rp->scp->refCount);
#endif
        code = (*rp->procp)(rp->scp, rp->p1, rp->p2, rp->p3, rp->p4, rp->userp);
#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp,"cm_BkgDaemon (after) scp 0x%x ref %d",rp->scp, rp->scp->refCount);
#endif

        /*
         * Keep the following list synchronized with the
         * error code list in cm_BkgStore.
         * cm_SyncOpDone(CM_SCACHESYNC_ASYNCSTORE) will be called there unless
         * one of these errors has occurred.
         */
        switch ( code ) {
        case CM_ERROR_TIMEDOUT: /* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            /* transient failure: only store requests are retried */
            if (rp->procp == cm_BkgStore) {
                osi_Log2(afsd_logp, "cm_BkgDaemon re-queueing failed request 0x%p code 0x%x", rp, code);
                lock_ObtainWrite(&cm_daemonLock);
                cm_bkgQueueCount++;
                osi_QAddT((osi_queue_t **) &cm_bkgListp, (osi_queue_t **)&cm_bkgListEndp, &rp->q);
                break;
            } /* otherwise fall through */
        case 0:  /* success */
        default: /* other error */
            if (code == 0)
                osi_Log1(afsd_logp,"cm_BkgDaemon SUCCESS: request 0x%p", rp);
            else
                osi_Log2(afsd_logp,"cm_BkgDaemon FAILED: request dropped 0x%p code 0x%x", rp, code);
            /* drop the references the queued request held */
            cm_ReleaseUser(rp->userp);
            cm_ReleaseSCache(rp->scp);
            free(rp);
            lock_ObtainWrite(&cm_daemonLock);
        }
    }
    lock_ReleaseWrite(&cm_daemonLock);
    thrd_SetEvent(cm_BkgDaemon_ShutdownEvent[daemonID]);
}
/* called with scp write-locked, check to see if we have the ACL info we need
 * and can get it w/o blocking for any locks.
 *
 * Never drops the scp lock, but may fail if the access control info comes from
 * the parent directory, and the parent's scache entry can't be found, or it
 * can't be locked.  Thus, this must always be called in a while loop to stabilize
 * things, since we can always lose the race condition getting to the parent vnode.
 *
 * Returns 1 with *outRightsp filled in when an answer was determined, 0 when
 * the caller must retry (no parent scache, no callback, or no cached entry).
 */
int cm_HaveAccessRights(struct cm_scache *scp, struct cm_user *userp, afs_uint32 rights, afs_uint32 *outRightsp)
{
    cm_scache_t *aclScp;
    long code;
    cm_fid_t tfid;
    int didLock;
    /* NOTE(review): declared long but passed as afs_uint32* to
     * cm_FindACLCache — presumably same width on this platform; confirm. */
    long trights;
    int release = 0;    /* Used to avoid a call to cm_HoldSCache in the directory case */

    didLock = 0;
    if (scp->fileType == CM_SCACHETYPE_DIRECTORY || cm_accessPerFileCheck) {
        aclScp = scp;   /* not held, not released */
    } else {
        /* the ACL lives on the parent directory */
        cm_SetFid(&tfid, scp->fid.cell, scp->fid.volume, scp->parentVnode, scp->parentUnique);
        aclScp = cm_FindSCache(&tfid);
        if (!aclScp)
            return 0;
        if (aclScp != scp) {
            /* lock ordering is by vnode number: if the parent orders before
             * scp, scp->rw must be dropped before taking aclScp->rw */
            if (aclScp->fid.vnode < scp->fid.vnode)
                lock_ReleaseWrite(&scp->rw);
            lock_ObtainRead(&aclScp->rw);
            if (aclScp->fid.vnode < scp->fid.vnode)
                lock_ObtainWrite(&scp->rw);

            /* check that we have a callback, too */
            if (!cm_HaveCallback(aclScp)) {
                /* can't use it */
                lock_ReleaseRead(&aclScp->rw);
                cm_ReleaseSCache(aclScp);
                return 0;
            }
            didLock = 1;
        }
        release = 1;
    }

    lock_AssertAny(&aclScp->rw);

    /* now if rights is a subset of the public rights, we're done.
     * Otherwise, if we have an explicit acl entry, we're also in good shape,
     * and can definitively answer.
     */
#ifdef AFS_FREELANCE_CLIENT
    if (cm_freelanceEnabled &&
        aclScp->fid.cell==AFS_FAKE_ROOT_CELL_ID &&
        aclScp->fid.volume==AFS_FAKE_ROOT_VOL_ID)
    {
        /* the fake Freelance root grants whatever its anyAccess says */
        *outRightsp = aclScp->anyAccess;
    } else
#endif
    if ((~aclScp->anyAccess & rights) == 0) {
        /* every requested right is in the public (anyuser) rights */
        *outRightsp = rights;
    } else {
        /* we have to check the specific rights info */
        code = cm_FindACLCache(aclScp, userp, &trights);
        if (code) {
            /* no cached entry: report "retry" to the caller */
            code = 0;
            goto done;
        }
        *outRightsp = trights;
    }

    if (scp->fileType > 0 && scp->fileType != CM_SCACHETYPE_DIRECTORY) {
        /* check mode bits */
        if ((scp->unixModeBits & 0400) == 0) {
            osi_Log2(afsd_logp,"cm_HaveAccessRights UnixMode removing READ scp 0x%p unix 0%o",
                     scp, scp->unixModeBits);
            *outRightsp &= ~PRSFS_READ;
        }
        /* a WRITE|LOCK-only request bypasses the owner-write check */
        if ((scp->unixModeBits & 0200) == 0 && (rights != (PRSFS_WRITE | PRSFS_LOCK))) {
            osi_Log2(afsd_logp,"cm_HaveAccessRights UnixMode removing WRITE scp 0x%p unix 0%o",
                     scp, scp->unixModeBits);
            *outRightsp &= ~PRSFS_WRITE;
        }
        if ((scp->unixModeBits & 0200) == 0 && !cm_deleteReadOnly) {
            osi_Log2(afsd_logp,"cm_HaveAccessRights UnixMode removing DELETE scp 0x%p unix 0%o",
                     scp, scp->unixModeBits);
            *outRightsp &= ~PRSFS_DELETE;
        }
    }

    /* if the user can insert, we must assume they can read/write as well
     * because we do not have the ability to determine if the current user
     * is the owner of the file. We will have to make the call to the
     * file server and let the file server tell us if the request should
     * be denied.
     */
    if ((*outRightsp & PRSFS_INSERT) && (scp->creator == userp))
        *outRightsp |= PRSFS_READ | PRSFS_WRITE;

    /* if the user can obtain a write-lock, read-locks are implied */
    if (*outRightsp & PRSFS_WRITE)
        *outRightsp |= PRSFS_LOCK;

    code = 1;
    /* fall through */

  done:
    if (didLock)
        lock_ReleaseRead(&aclScp->rw);
    if (release)
        cm_ReleaseSCache(aclScp);
    return code;
}
/*
 * Background worker daemon (per-daemon queue variant; pthread entry point).
 *
 * Each daemon owns queue/lock/count slot daemonID.  Loops until
 * daemon_ShutdownFlag is set: sleeps on its queue when empty, scans from the
 * tail (oldest first) for a runnable request — deleted objects are failed
 * fast with CM_ERROR_BADFD, requests with active I/O that would block this
 * worker are skipped — runs it outside the daemon lock, and either requeues
 * it (transient errors on store/fetch requests) or drops it and its
 * references.  Signals its shutdown event and exits the thread on return.
 */
void * cm_BkgDaemon(void * vparm)
{
    cm_bkgRequest_t *rp;
    afs_int32 code;
    char name[32] = "";
    long daemonID = (long)(LONG_PTR)vparm;

    snprintf(name, sizeof(name), "cm_BkgDaemon_ShutdownEvent%u", daemonID);
    cm_BkgDaemon_ShutdownEvent[daemonID] = thrd_CreateEvent(NULL, FALSE, FALSE, name);
    if ( GetLastError() == ERROR_ALREADY_EXISTS )
        afsi_log("Event Object Already Exists: %s", name);

    rx_StartClientThread();

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    while (daemon_ShutdownFlag == 0) {
        int willBlock = 0;

        if (powerStateSuspended) {
            /* machine is suspending; idle until power state changes */
            Sleep(1000);
            continue;
        }

        if (!cm_bkgListEndpp[daemonID]) {
            /* queue empty: sleep until cm_QueueBKGRequest wakes us */
            osi_SleepW((LONG_PTR)&cm_bkgListpp[daemonID], &cm_daemonLockp[daemonID]);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
            continue;
        }

        /* we found a request */
        for (rp = cm_bkgListEndpp[daemonID]; rp; rp = (cm_bkgRequest_t *) osi_QPrev(&rp->q))
        {
            /* deleted objects are picked immediately so they can be failed */
            if (rp->scp->flags & CM_SCACHEFLAG_DELETED)
                break;

            /*
             * If the request has active I/O such that this worker would
             * be forced to block, leave the request in the queue and move
             * on to one that might be available for servicing.
             */
            if (cm_RequestWillBlock(rp)) {
                willBlock++;
                continue;
            }

            if (cm_ServerAvailable(&rp->scp->fid, rp->userp))
                break;
        }

        if (rp == NULL) {
            /*
             * Couldn't find a request that we could process at the
             * current time.  If there were requests that would cause
             * the worker to block, sleep for 100ms so it can promptly
             * respond when it is available.  Otherwise, sleep for 1s.
             *
             * This polling cycle needs to be replaced with a proper
             * producer/consumer dynamic worker pool.
             */
            osi_Log2(afsd_logp,"cm_BkgDaemon[%u] sleeping %dms all tasks would block",
                     daemonID, willBlock ? 100 : 1000);

            lock_ReleaseWrite(&cm_daemonLockp[daemonID]);
            Sleep(willBlock ? 100 : 1000);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
            continue;
        }

        /* dequeue and run it without holding the daemon lock */
        osi_QRemoveHT((osi_queue_t **) &cm_bkgListpp[daemonID], (osi_queue_t **) &cm_bkgListEndpp[daemonID], &rp->q);
        osi_assertx(cm_bkgQueueCountp[daemonID]-- > 0, "cm_bkgQueueCount 0");
        lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

        osi_Log2(afsd_logp,"cm_BkgDaemon[%u] processing request 0x%p", daemonID, rp);

        if (rp->scp->flags & CM_SCACHEFLAG_DELETED) {
            /* object was deleted; fail the request without calling the proc */
            osi_Log2(afsd_logp,"cm_BkgDaemon[%u] DELETED scp 0x%x", daemonID, rp->scp);
            code = CM_ERROR_BADFD;
        } else {
#ifdef DEBUG_REFCOUNT
            osi_Log3(afsd_logp,"cm_BkgDaemon[%u] (before) scp 0x%x ref %d", daemonID, rp->scp, rp->scp->refCount);
#endif
            code = (*rp->procp)(rp->scp, rp->p1, rp->p2, rp->p3, rp->p4, rp->userp, &rp->req);
#ifdef DEBUG_REFCOUNT
            osi_Log3(afsd_logp,"cm_BkgDaemon[%u] (after) scp 0x%x ref %d", daemonID, rp->scp, rp->scp->refCount);
#endif
        }

        /*
         * Keep the following list synchronized with the
         * error code list in cm_BkgStore.
         * cm_SyncOpDone(CM_SCACHESYNC_ASYNCSTORE) will be called there unless
         * one of these errors has occurred.
         */
        switch ( code ) {
        case CM_ERROR_TIMEDOUT: /* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            /* transient failure: store and redirector-fetch requests retry */
            if (rp->procp == cm_BkgStore ||
                rp->procp == RDR_BkgFetch) {
                osi_Log3(afsd_logp,
                         "cm_BkgDaemon[%u] re-queueing failed request 0x%p code 0x%x",
                         daemonID, rp, code);
                lock_ObtainWrite(&cm_daemonLockp[daemonID]);
                cm_bkgQueueCountp[daemonID]++;
                osi_QAddT((osi_queue_t **) &cm_bkgListpp[daemonID], (osi_queue_t **)&cm_bkgListEndpp[daemonID], &rp->q);
                break;
            } /* otherwise fall through */
        case 0:  /* success */
        default: /* other error */
            if (code == 0) {
                osi_Log2(afsd_logp,"cm_BkgDaemon[%u] SUCCESS: request 0x%p", daemonID, rp);
            } else {
                osi_Log3(afsd_logp,"cm_BkgDaemon[%u] FAILED: request dropped 0x%p code 0x%x",
                         daemonID, rp, code);
            }
            /* drop the references the queued request held */
            cm_ReleaseUser(rp->userp);
            cm_ReleaseSCache(rp->scp);
            free(rp);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
        }
    }
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);
    thrd_SetEvent(cm_BkgDaemon_ShutdownEvent[daemonID]);
    pthread_exit(NULL);
    return NULL;
}
/*
 * Look up (and with CM_FLAG_CREATE, create) the cell named 'namep'.
 *
 * Lookup order: exact (case-insensitive) match in the name hash table, then
 * prefix match over the global cell list.  On create, vlserver information
 * is sought from the registry, then the CellServDB file, then (if enabled)
 * DNS; a newly built cell that turns out to duplicate an existing one by
 * full name is freed and the existing cell returned.
 *
 * If newnamep is non-NULL it receives the cell's full name ('\0' on
 * failure).  A cell's linked-cell relationship is resolved last, possibly
 * recursing via cm_GetCell.  Returns the cell or NULL.
 *
 * Locking: juggles cm_cellLock (read/write/converted) and per-cell mutexes;
 * the hasWriteLock/hasMutex flags track what must be dropped at 'done'.
 */
cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags)
{
    cm_cell_t *cp, *cp2;
    long code;
    char fullname[CELL_MAXNAMELEN]="";
    char linkedName[CELL_MAXNAMELEN]="";
    char name[CELL_MAXNAMELEN]="";
    int hasWriteLock = 0;
    int hasMutex = 0;
    afs_uint32 hash;
    cm_cell_rock_t rock;
    size_t len;

    if (namep == NULL || !namep[0] || !strcmp(namep,CM_IOCTL_FILENAME_NOSLASH))
        return NULL;

    /*
     * Strip off any trailing dots at the end of the cell name.
     * Failure to do so results in an undesireable alias as the
     * result of DNS AFSDB record lookups where a trailing dot
     * has special meaning.
     */
    /* NOTE(review): strncpy does not NUL-terminate when strlen(namep) >=
     * CELL_MAXNAMELEN — presumably callers never pass such names; confirm. */
    strncpy(name, namep, CELL_MAXNAMELEN);
    for (len = strlen(namep); len > 0 && namep[len-1] == '.'; len--) {
        name[len-1] = '\0';
    }
    if (len == 0)
        return NULL;
    namep = name;

    hash = CM_CELL_NAME_HASH(namep);

    lock_ObtainRead(&cm_cellLock);
    /* exact match via the name hash table */
    for (cp = cm_data.cellNameHashTablep[hash]; cp; cp=cp->nameNextp) {
        if (cm_stricmp_utf8(namep, cp->name) == 0) {
            strncpy(fullname, cp->name, CELL_MAXNAMELEN);
            fullname[CELL_MAXNAMELEN-1] = '\0';
            break;
        }
    }

    if (!cp) {
        /* fall back to a prefix match over all known cells */
        for (cp = cm_data.allCellsp; cp; cp=cp->allNextp) {
            if (strnicmp(namep, cp->name, strlen(namep)) == 0) {
                strncpy(fullname, cp->name, CELL_MAXNAMELEN);
                fullname[CELL_MAXNAMELEN-1] = '\0';
                break;
            }
        }
    }

    if (cp) {
        lock_ReleaseRead(&cm_cellLock);
        cm_UpdateCell(cp, flags);
    } else if (flags & CM_FLAG_CREATE) {
        lock_ConvertRToW(&cm_cellLock);
        hasWriteLock = 1;

        /* when we dropped the lock the cell could have been added
         * to the list so check again while holding the write lock */
        for (cp = cm_data.cellNameHashTablep[hash]; cp; cp=cp->nameNextp) {
            if (cm_stricmp_utf8(namep, cp->name) == 0) {
                strncpy(fullname, cp->name, CELL_MAXNAMELEN);
                fullname[CELL_MAXNAMELEN-1] = '\0';
                break;
            }
        }

        if (cp)
            goto done;

        for (cp = cm_data.allCellsp; cp; cp=cp->allNextp) {
            if (strnicmp(namep, cp->name, strlen(namep)) == 0) {
                strncpy(fullname, cp->name, CELL_MAXNAMELEN);
                fullname[CELL_MAXNAMELEN-1] = '\0';
                break;
            }
        }

        if (cp) {
            /* found on the global list but not hashed: hash it now.
             * lock order requires the cell mutex before cm_cellLock. */
            lock_ReleaseWrite(&cm_cellLock);
            lock_ObtainMutex(&cp->mx);
            lock_ObtainWrite(&cm_cellLock);
            cm_AddCellToNameHashTable(cp);
            cm_AddCellToIDHashTable(cp);
            lock_ReleaseMutex(&cp->mx);
            goto done;
        }

        /* allocate a new cell record: reuse a freed one when possible */
        if ( cm_data.freeCellsp != NULL ) {
            cp = cm_data.freeCellsp;
            cm_data.freeCellsp = cp->freeNextp;

            /*
             * The magic, cellID, and mx fields are already set.
             */
        } else {
            if ( cm_data.currentCells >= cm_data.maxCells )
                osi_panic("Exceeded Max Cells", __FILE__, __LINE__);

            /* don't increment currentCells until we know that we
             * are going to keep this entry */
            cp = &cm_data.cellBaseAddress[cm_data.currentCells];
            memset(cp, 0, sizeof(cm_cell_t));
            cp->magic = CM_CELL_MAGIC;

            /* the cellID cannot be 0 */
            cp->cellID = ++cm_data.currentCells;

            /* otherwise we found the cell, and so we're nearly done */
            lock_InitializeMutex(&cp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL);
        }
        lock_ReleaseWrite(&cm_cellLock);
        hasWriteLock = 0;

        /* locate the cell's vlservers: registry, then CellServDB, then DNS */
        rock.cellp = cp;
        rock.flags = flags;
        code = cm_SearchCellRegistry(1, namep, fullname, linkedName, cm_AddCellProc, &rock);
        if (code && code != CM_ERROR_FORCE_DNS_LOOKUP)
            code = cm_SearchCellFileEx(namep, fullname, linkedName, cm_AddCellProc, &rock);
        if (code) {
            osi_Log4(afsd_logp,"in cm_GetCell_gen cm_SearchCellFileEx(%s) returns code= %d fullname= %s linkedName= %s",
                     osi_LogSaveString(afsd_logp,namep), code,
                     osi_LogSaveString(afsd_logp,fullname),
                     osi_LogSaveString(afsd_logp,linkedName));

            if (cm_dnsEnabled) {
                int ttl;

                code = cm_SearchCellByDNS(namep, fullname, &ttl, cm_AddCellProc, &rock);
                if ( code ) {
                    /* DNS failed too: discard the half-built cell */
                    osi_Log3(afsd_logp,"in cm_GetCell_gen cm_SearchCellByDNS(%s) returns code= %d fullname= %s",
                             osi_LogSaveString(afsd_logp,namep), code,
                             osi_LogSaveString(afsd_logp,fullname));
                    lock_ObtainMutex(&cp->mx);
                    lock_ObtainWrite(&cm_cellLock);
                    hasWriteLock = 1;
                    cm_RemoveCellFromIDHashTable(cp);
                    cm_RemoveCellFromNameHashTable(cp);
                    lock_ReleaseMutex(&cp->mx);
                    cm_FreeCell(cp);
                    cp = NULL;
                    goto done;
                } else {   /* got cell from DNS */
                    lock_ObtainMutex(&cp->mx);
                    hasMutex = 1;
                    _InterlockedOr(&cp->flags, CM_CELLFLAG_DNS);
                    _InterlockedAnd(&cp->flags, ~CM_CELLFLAG_VLSERVER_INVALID);
                    /* DNS data expires with the record's TTL */
                    cp->timeout = time(0) + ttl;
                }
            }
            else {
                /* no DNS available: discard the half-built cell */
                lock_ObtainMutex(&cp->mx);
                lock_ObtainWrite(&cm_cellLock);
                hasWriteLock = 1;
                cm_RemoveCellFromIDHashTable(cp);
                cm_RemoveCellFromNameHashTable(cp);
                lock_ReleaseMutex(&cp->mx);
                cm_FreeCell(cp);
                cp = NULL;
                goto done;
            }
        }
        else {
            lock_ObtainMutex(&cp->mx);
            hasMutex = 1;
            cp->timeout = time(0) + 7200;       /* two hour timeout */
        }

        /* we have now been given the fullname of the cell.  It may
         * be that we already have a cell with that name.  If so,
         * we should use it instead of completing the allocation
         * of a new cm_cell_t */
        lock_ObtainRead(&cm_cellLock);
        hash = CM_CELL_NAME_HASH(fullname);
        for (cp2 = cm_data.cellNameHashTablep[hash]; cp2; cp2=cp2->nameNextp) {
            if (cm_stricmp_utf8(fullname, cp2->name) == 0) {
                break;
            }
        }

        if (cp2) {
            /* duplicate by full name: free ours and adopt the existing one */
            if (!hasMutex) {
                lock_ObtainMutex(&cp->mx);
                hasMutex = 1;
            }
            lock_ConvertRToW(&cm_cellLock);
            hasWriteLock = 1;
            cm_RemoveCellFromIDHashTable(cp);
            cm_RemoveCellFromNameHashTable(cp);
            lock_ReleaseMutex(&cp->mx);
            hasMutex = 0;
            cm_FreeCell(cp);
            cp = cp2;
            goto done;
        }
        lock_ReleaseRead(&cm_cellLock);

        /* randomise among those vlservers having the same rank*/
        cm_RandomizeServer(&cp->vlServersp);

        if (!hasMutex)
            lock_ObtainMutex(&cp->mx);

        /* copy in name */
        strncpy(cp->name, fullname, CELL_MAXNAMELEN);
        cp->name[CELL_MAXNAMELEN-1] = '\0';

        strncpy(cp->linkedName, linkedName, CELL_MAXNAMELEN);
        cp->linkedName[CELL_MAXNAMELEN-1] = '\0';

        lock_ObtainWrite(&cm_cellLock);
        hasWriteLock = 1;
        cm_AddCellToNameHashTable(cp);
        cm_AddCellToIDHashTable(cp);
        lock_ReleaseMutex(&cp->mx);
        hasMutex = 0;

        /* append cell to global list */
        if (cm_data.allCellsp == NULL) {
            cm_data.allCellsp = cp;
        } else {
            for (cp2 = cm_data.allCellsp; cp2->allNextp; cp2=cp2->allNextp)
                ;
            cp2->allNextp = cp;
        }
        cp->allNextp = NULL;

    } else {
        /* not found and not asked to create */
        lock_ReleaseRead(&cm_cellLock);
    }
  done:
    if (hasMutex && cp)
        lock_ReleaseMutex(&cp->mx);
    if (hasWriteLock)
        lock_ReleaseWrite(&cm_cellLock);

    /* fullname is not valid if cp == NULL */
    if (newnamep) {
        if (cp) {
            strncpy(newnamep, fullname, CELL_MAXNAMELEN);
            newnamep[CELL_MAXNAMELEN-1]='\0';
        } else {
            newnamep[0] = '\0';
        }
    }

    /* resolve the linked-cell relationship, if any */
    if (cp && cp->linkedName[0]) {
        cm_cell_t * linkedCellp = NULL;

        if (!strcmp(cp->name, cp->linkedName)) {
            /* a cell cannot link to itself */
            cp->linkedName[0] = '\0';
        } else if (!(flags & CM_FLAG_NOMOUNTCHASE)) {
            linkedCellp = cm_GetCell(cp->linkedName, CM_FLAG_CREATE|CM_FLAG_NOPROBE|CM_FLAG_NOMOUNTCHASE);

            lock_ObtainWrite(&cm_cellLock);
            if (!linkedCellp ||
                (linkedCellp->linkedName[0] && strcmp(cp->name, linkedCellp->linkedName))) {
                /* link target missing or already linked elsewhere */
                cp->linkedName[0] = '\0';
            } else {
                /* make the link symmetric */
                strncpy(linkedCellp->linkedName, cp->name, CELL_MAXNAMELEN);
                linkedCellp->linkedName[CELL_MAXNAMELEN-1]='\0';
            }
            lock_ReleaseWrite(&cm_cellLock);
        }
    }
    return cp;
}
/*
 * raw_ReadData
 *
 * Copy up to 'length' bytes of file data, starting at *offsetp, from the
 * buffer cache into the caller's buffer 'bufferp'.  The request is clamped
 * at EOF (a request entirely past EOF reads 0 bytes).  On success *readp
 * holds the clamped byte count and *offsetp has been advanced past the
 * bytes copied.
 *
 * called with scp->rw lock held exclusive.  The lock is dropped and
 * re-acquired around buf_Get, so scp state may change across that call;
 * fileLength is captured before the loop and not re-read.
 */
afs_int32
raw_ReadData(
    cm_scache_t *scp,           /* scache entry to read; rw held by caller */
    osi_hyper_t *offsetp,       /* in/out: start offset, advanced as data is copied */
    afs_uint32 length,          /* bytes requested */
    char *bufferp,              /* destination buffer */
    afs_uint32 *readp,          /* out: bytes of the request satisfied */
    cm_user_t *userp,           /* credentials for sync/fetch operations */
    cm_req_t *reqp)             /* request tracking state */
{
    long code;
    cm_buf_t *bufp = NULL;      /* cache buffer currently held, or NULL */
    osi_hyper_t fileLength;     /* file length at start of the read */
    osi_hyper_t thyper;         /* scratch large-integer */
    osi_hyper_t lastByte;       /* offset just past the requested range */
    osi_hyper_t bufferOffset;   /* offset of the buffer held in bufp */
    long bufIndex, nbytes;
    int sequential = 0;         /* never set non-zero here, so the prefetch
                                 * at done: is currently never queued */

    /* start by looking up the file's end */
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
    if (code)
        goto done;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);

    /* now we have the entry locked, look up the length */
    fileLength = scp->length;

    /* adjust length down so that it won't go past EOF */
    thyper.LowPart = length;
    thyper.HighPart = 0;
    thyper = LargeIntegerAdd(*offsetp, thyper); /* where read should end */
    lastByte = thyper;
    if (LargeIntegerGreaterThan(thyper, fileLength)) {
        /* we'd read past EOF, so just stop at fileLength bytes.
         * Start by computing how many bytes remain in the file.
         */
        thyper = LargeIntegerSubtract(fileLength, *offsetp);

        /* if we are past EOF, read 0 bytes */
        if (LargeIntegerLessThanZero(thyper))
            length = 0;
        else
            length = thyper.LowPart;
    }

    *readp = length;

    /* now, copy the data one buffer at a time,
     * until we've filled the request packet */
    while (1) {
        /* if we've copied all the data requested, we're done
         * (length is unsigned, so this is really length == 0) */
        if (length <= 0)
            break;

        /* otherwise, load up a buffer of data by rounding the current
         * offset down to a cache-block boundary */
        thyper.HighPart = offsetp->HighPart;
        thyper.LowPart = offsetp->LowPart & ~(cm_data.blockSize-1);
        if (!bufp || !LargeIntegerEqualTo(thyper, bufferOffset)) {
            /* wrong buffer; drop the one we hold (if any) and fetch
             * the block containing the current offset */
            if (bufp) {
                buf_Release(bufp);
                bufp = NULL;
            }
            /* buf_Get may block; scp->rw must not be held across it */
            lock_ReleaseWrite(&scp->rw);

            code = buf_Get(scp, &thyper, reqp, 0, &bufp);

            lock_ObtainWrite(&scp->rw);
            if (code)
                goto done;
            bufferOffset = thyper;

            /* now get the data in the cache */
            while (1) {
                code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                                 CM_SCACHESYNC_NEEDCALLBACK |
                                 CM_SCACHESYNC_READ);
                if (code)
                    goto done;

                cm_SyncOpDone(scp, bufp, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_READ);

                /* buffer already current? then it is usable as-is */
                if (cm_HaveBuffer(scp, bufp, 0))
                    break;

                /* otherwise, load the buffer and try again */
                code = cm_GetBuffer(scp, bufp, NULL, userp, reqp);
                if (code)
                    break;
            }
            if (code) {
                buf_Release(bufp);
                bufp = NULL;
                goto done;
            }
        } /* if (wrong buffer) ... */

        /* now we have the right buffer loaded.  Copy out the
         * data from here to the user's buffer.
         */
        bufIndex = offsetp->LowPart & (cm_data.blockSize - 1);

        /* and figure out how many bytes we want from this buffer */
        nbytes = cm_data.blockSize - bufIndex; /* what remains in buffer */
        if (nbytes > length)
            nbytes = length; /* don't go past EOF */

        /* now copy the data */
        memcpy(bufferp, bufp->datap + bufIndex, nbytes);

        /* adjust lengthers, pointers, etc. */
        bufferp += nbytes;
        length -= nbytes;
        thyper.LowPart = nbytes;
        thyper.HighPart = 0;
        *offsetp = LargeIntegerAdd(thyper, *offsetp);
    } /* while 1 */

  done:
    if (bufp)
        buf_Release(bufp);

    /* dead today: 'sequential' is never set above -- TODO confirm whether
     * sequential-access detection was intentionally omitted for the raw path */
    if (code == 0 && sequential)
        cm_ConsiderPrefetch(scp, &lastByte, *readp, userp, reqp);

    return code;
}
/*
 * raw_WriteData
 *
 * Copy 'length' bytes from the caller's buffer into the file's cache
 * buffers starting at *offsetp, marking each touched buffer dirty, and
 * queue an asynchronous background store of the written region.  The file
 * is extended if the write reaches past the current EOF.  On return
 * *writtenp (if non-NULL) holds the bytes actually copied and *offsetp
 * has been advanced past them.
 *
 * Caller is assumed to hold scp->rw exclusive (matches raw_ReadData above
 * -- TODO confirm); the lock is dropped and re-acquired around buf_Get,
 * cm_GetBuffer, and cm_QueueBKGRequest.
 */
afs_uint32
raw_WriteData(
    cm_scache_t *scp,           /* scache entry to write; rw held by caller */
    osi_hyper_t *offsetp,       /* in/out: start offset, advanced as data is copied */
    afs_uint32 length,          /* bytes to write */
    char *bufferp,              /* source data */
    cm_user_t *userp,           /* credentials for sync/fetch/store ops */
    cm_req_t *reqp,             /* request tracking state */
    afs_uint32 *writtenp)       /* out (optional): bytes actually written */
{
    long code = 0;
    long written = 0;
    osi_hyper_t fileLength;     /* file's length at start of write */
    osi_hyper_t minLength;      /* don't read past this */
    afs_uint32 nbytes;          /* # of bytes to transfer this iteration */
    cm_buf_t *bufp = NULL;      /* cache buffer currently held, or NULL */
    osi_hyper_t thyper;         /* hyper tmp variable */
    osi_hyper_t bufferOffset;   /* offset of the buffer held in bufp */
    afs_uint32 bufIndex;        /* index in buffer where our data is */
    int doWriteBack = 0;
    osi_hyper_t writeBackOffset;/* offset of region to write back when I/O is done */
    afs_uint32 writeBackLength; /* length of that region */
    DWORD filter = 0;           /* FILE_NOTIFY_CHANGE_* bits; computed but not
                                 * consumed in this function -- presumably for a
                                 * change-notify call elsewhere, verify */

    /* start by looking up the file's end */
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK
                     | CM_SCACHESYNC_SETSTATUS
                     | CM_SCACHESYNC_GETSTATUS);
    if (code)
        goto done;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_SETSTATUS | CM_SCACHESYNC_GETSTATUS);

    /* now we have the entry locked, look up the length */
    fileLength = scp->length;
    minLength = fileLength;
    /* minLength = min(local length, server length): below here the server
     * has no useful data, so buffers there need not be fetched */
    if (LargeIntegerGreaterThan(minLength, scp->serverLength))
        minLength = scp->serverLength;

    /* adjust file length if we extend past EOF */
    thyper.LowPart = length;
    thyper.HighPart = 0;
    thyper = LargeIntegerAdd(*offsetp, thyper); /* where write should end */
    if (LargeIntegerGreaterThan(thyper, fileLength)) {
        /* we'd write past EOF, so extend the file */
        scp->mask |= CM_SCACHEMASK_LENGTH;
        scp->length = thyper;
        filter |= (FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_SIZE);
    } else
        filter |= FILE_NOTIFY_CHANGE_LAST_WRITE;

    /* always store the written range back asynchronously when done */
    doWriteBack = 1;
    writeBackOffset = *offsetp;
    writeBackLength = length;

    /* now, copy the data one buffer at a time, until we've filled the
     * request packet */
    while (1) {
        /* if we've copied all the data requested, we're done
         * (length is unsigned, so this is really length == 0) */
        if (length <= 0)
            break;

        /* otherwise, load up a buffer of data by rounding the current
         * offset down to a cache-block boundary */
        thyper.HighPart = offsetp->HighPart;
        thyper.LowPart = offsetp->LowPart & ~(cm_data.blockSize-1);
        if (!bufp || !LargeIntegerEqualTo(thyper, bufferOffset)) {
            /* wrong buffer; drop the one we hold (if any) */
            if (bufp) {
                lock_ReleaseMutex(&bufp->mx);
                buf_Release(bufp);
                bufp = NULL;
            }
            /* buf_Get may block; scp->rw must not be held across it */
            lock_ReleaseWrite(&scp->rw);

            code = buf_Get(scp, &thyper, reqp, 0, &bufp);

            /* bufp may be NULL if buf_Get failed */
            if (bufp)
                lock_ObtainMutex(&bufp->mx);
            lock_ObtainWrite(&scp->rw);
            if (code)
                goto done;

            bufferOffset = thyper;

            /* now get the data in the cache */
            while (1) {
                code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                                 CM_SCACHESYNC_NEEDCALLBACK
                                 | CM_SCACHESYNC_WRITE
                                 | CM_SCACHESYNC_BUFLOCKED);
                if (code)
                    goto done;

                cm_SyncOpDone(scp, bufp, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_WRITE | CM_SCACHESYNC_BUFLOCKED);

                /* If we're overwriting the entire buffer, or
                 * if we're writing at or past EOF, mark the
                 * buffer as current so we don't call
                 * cm_GetBuffer.  This skips the fetch from the
                 * server in those cases where we're going to
                 * obliterate all the data in the buffer anyway,
                 * or in those cases where there is no useful
                 * data at the server to start with.
                 *
                 * Use minLength instead of scp->length, since
                 * the latter has already been updated by this
                 * call.
                 *
                 * NOTE: relies on && binding tighter than ||, i.e.
                 * A || (B && C) -- intentional grouping.
                 */
                if (LargeIntegerGreaterThanOrEqualTo(bufp->offset, minLength)
                     || LargeIntegerEqualTo(*offsetp, bufp->offset)
                     && (length >= cm_data.blockSize
                          || LargeIntegerGreaterThanOrEqualTo(LargeIntegerAdd(*offsetp, ConvertLongToLargeInteger(length)), minLength))) {
                    /* partial overwrite of a never-fetched buffer: zero the
                     * parts our data won't cover before claiming currency */
                    if (length < cm_data.blockSize
                         && bufp->dataVersion == CM_BUF_VERSION_BAD)
                        memset(bufp->datap, 0, cm_data.blockSize);
                    bufp->dataVersion = scp->dataVersion;
                }

                if (cm_HaveBuffer(scp, bufp, 1))
                    break;

                /* otherwise, load the buffer and try again */
                lock_ReleaseMutex(&bufp->mx);
                code = cm_GetBuffer(scp, bufp, NULL, userp, reqp);
                lock_ReleaseWrite(&scp->rw);
                lock_ObtainMutex(&bufp->mx);
                lock_ObtainWrite(&scp->rw);
                if (code)
                    break;
            }
            if (code) {
                lock_ReleaseMutex(&bufp->mx);
                buf_Release(bufp);
                bufp = NULL;
                goto done;
            }
        } /* if (wrong buffer) ... */

        /* now we have the right buffer loaded.  Copy out the
         * data from here to the user's buffer.
         */
        bufIndex = offsetp->LowPart & (cm_data.blockSize - 1);

        /* and figure out how many bytes we want from this buffer */
        nbytes = cm_data.blockSize - bufIndex; /* what remains in buffer */
        if (nbytes > length)
            nbytes = length; /* don't go past end of request */

        /* now copy the data */
        memcpy(bufp->datap + bufIndex, bufferp, nbytes);
        buf_SetDirty(bufp, reqp, bufIndex, nbytes, userp);

        /* adjust lengthers, pointers, etc. */
        bufferp += nbytes;
        length -= nbytes;
        written += nbytes;
        thyper.LowPart = nbytes;
        thyper.HighPart = 0;
        *offsetp = LargeIntegerAdd(thyper, *offsetp);
    } /* while 1 */

  done:
    if (writtenp)
        *writtenp = written;

    if (bufp) {
        lock_ReleaseMutex(&bufp->mx);
        buf_Release(bufp);
    }

    if (code == 0 && doWriteBack) {
        /* queue a background store of the written region; ownership of
         * rockp transfers to the background daemon on success */
        rock_BkgStore_t *rockp = malloc(sizeof(*rockp));
        if (rockp) {
            code = cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_ASYNCSTORE);
            if (code == 0) {
                rockp->length = writeBackLength;
                rockp->offset = writeBackOffset;

                lock_ReleaseWrite(&scp->rw);
                cm_QueueBKGRequest(scp, cm_BkgStore, rockp, userp, reqp);
                lock_ObtainWrite(&scp->rw);

                /* rock is freed by cm_BkgDaemon */
            } else {
                free(rockp);
            }
        }
    }

    /* cm_SyncOpDone is called when cm_BkgStore completes */
    return code;
}
/*
 * WriteData
 *
 * Legacy (SMB-path) variant of raw_WriteData: copy 'count' bytes from
 * 'op' into the file's cache buffers starting at 'offset', marking each
 * touched buffer dirty.  The file is extended if the write reaches past
 * the current EOF.  If the write crosses a storeback-chunk boundary, the
 * previous chunk is queued for background store when the copy completes.
 * On success *writtenp holds the byte count the caller should report.
 *
 * Unlike raw_WriteData, this function obtains and releases scp->rw itself;
 * the scp must not be locked on entry.
 *
 * Fix: guard lock_ObtainMutex(&bufferp->mx) after buf_Get with a NULL
 * check, matching raw_WriteData -- previously a failed buf_Get (bufferp
 * left NULL) dereferenced a NULL pointer before the error was noticed.
 */
long WriteData(cm_scache_t *scp, osi_hyper_t offset, long count, char *op,
               cm_user_t *userp, long *writtenp)
{
    long code = 0;
    long written = 0;
    osi_hyper_t fileLength;     /* file's length at start of write */
    osi_hyper_t minLength;      /* don't read past this */
    afs_uint32 nbytes;          /* # of bytes to transfer this iteration */
    cm_buf_t *bufferp;          /* cache buffer currently held, or NULL */
    osi_hyper_t thyper;         /* hyper tmp variable */
    osi_hyper_t bufferOffset;   /* offset of the buffer held in bufferp */
    afs_uint32 bufIndex;        /* index in buffer where our data is */
    int doWriteBack;
    osi_hyper_t writeBackOffset;/* offset of region to write back when
                                 * I/O is done */
    DWORD filter = 0;           /* FILE_NOTIFY_CHANGE_* bits; computed but not
                                 * consumed here -- presumably for a
                                 * change-notify call elsewhere, verify */
    cm_req_t req;

    cm_InitReq(&req);

    bufferp = NULL;
    doWriteBack = 0;

    lock_ObtainWrite(&scp->rw);

    /* start by looking up the file's end */
    code = cm_SyncOp(scp, NULL, userp, &req, 0,
                     CM_SCACHESYNC_NEEDCALLBACK
                     | CM_SCACHESYNC_SETSTATUS
                     | CM_SCACHESYNC_GETSTATUS);
    if (code)
        goto done;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_SETSTATUS | CM_SCACHESYNC_GETSTATUS);

#if 0
    /* make sure we have a writable FD */
    if (!(fidp->flags & SMB_FID_OPENWRITE)) {
        code = CM_ERROR_BADFDOP;
        goto done;
    }
#endif

    /* now we have the entry locked, look up the length */
    fileLength = scp->length;
    minLength = fileLength;
    /* minLength = min(local length, server length): below here the server
     * has no useful data, so buffers there need not be fetched */
    if (LargeIntegerGreaterThan(minLength, scp->serverLength))
        minLength = scp->serverLength;

    /* adjust file length if we extend past EOF */
    thyper.LowPart = count;
    thyper.HighPart = 0;
    thyper = LargeIntegerAdd(offset, thyper); /* where write should end */
    if (LargeIntegerGreaterThan(thyper, fileLength)) {
        /* we'd write past EOF, so extend the file */
        scp->mask |= CM_SCACHEMASK_LENGTH;
        scp->length = thyper;
        filter |= (FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_SIZE);
    } else
        filter |= FILE_NOTIFY_CHANGE_LAST_WRITE;

    /* now, if the new position (thyper) and the old (offset) are in
     * different storeback windows, remember to store back the previous
     * storeback window when we're done with the write.
     */
    if ((thyper.LowPart & (-cm_chunkSize)) != (offset.LowPart & (-cm_chunkSize))) {
        /* they're different */
        doWriteBack = 1;
        writeBackOffset.HighPart = offset.HighPart;
        writeBackOffset.LowPart = offset.LowPart & (-cm_chunkSize);
    }

    *writtenp = count;

    /* now, copy the data one buffer at a time, until we've filled the
     * request packet */
    while (1) {
        /* if we've copied all the data requested, we're done */
        if (count <= 0)
            break;

        /* handle over quota or out of space */
        if (scp->flags & (CM_SCACHEFLAG_OVERQUOTA | CM_SCACHEFLAG_OUTOFSPACE)) {
            *writtenp = written;
            break;
        }

        /* otherwise, load up a buffer of data by rounding the current
         * offset down to a cache-block boundary */
        thyper.HighPart = offset.HighPart;
        thyper.LowPart = offset.LowPart & ~(buf_bufferSize-1);
        if (!bufferp || !LargeIntegerEqualTo(thyper, bufferOffset)) {
            /* wrong buffer; drop the one we hold (if any) */
            if (bufferp) {
                lock_ReleaseMutex(&bufferp->mx);
                buf_Release(bufferp);
                bufferp = NULL;
            }
            /* buf_Get may block; scp->rw must not be held across it */
            lock_ReleaseWrite(&scp->rw);

            code = buf_Get(scp, &thyper, &bufferp);

            /* FIX: bufferp may be NULL if buf_Get failed; the old code
             * unconditionally locked bufferp->mx here and crashed.
             * Guard as raw_WriteData does. */
            if (bufferp)
                lock_ObtainMutex(&bufferp->mx);
            lock_ObtainWrite(&scp->rw);
            if (code)
                goto done;

            bufferOffset = thyper;

            /* now get the data in the cache */
            while (1) {
                code = cm_SyncOp(scp, bufferp, userp, &req, 0,
                                 CM_SCACHESYNC_NEEDCALLBACK
                                 | CM_SCACHESYNC_WRITE
                                 | CM_SCACHESYNC_BUFLOCKED);
                if (code)
                    goto done;

                cm_SyncOpDone(scp, bufferp, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_WRITE | CM_SCACHESYNC_BUFLOCKED);

                /* If we're overwriting the entire buffer, or
                 * if we're writing at or past EOF, mark the
                 * buffer as current so we don't call
                 * cm_GetBuffer.  This skips the fetch from the
                 * server in those cases where we're going to
                 * obliterate all the data in the buffer anyway,
                 * or in those cases where there is no useful
                 * data at the server to start with.
                 *
                 * Use minLength instead of scp->length, since
                 * the latter has already been updated by this
                 * call.
                 *
                 * NOTE: relies on && binding tighter than ||, i.e.
                 * A || (B && C) -- intentional grouping.
                 */
                if (LargeIntegerGreaterThanOrEqualTo(bufferp->offset, minLength)
                     || LargeIntegerEqualTo(offset, bufferp->offset)
                     && (count >= buf_bufferSize
                          || LargeIntegerGreaterThanOrEqualTo(LargeIntegerAdd(offset, ConvertLongToLargeInteger(count)), minLength))) {
                    /* partial overwrite of a never-fetched buffer: zero the
                     * parts our data won't cover before claiming currency */
                    if (count < buf_bufferSize
                         && bufferp->dataVersion == -1)
                        memset(bufferp->datap, 0, buf_bufferSize);
                    bufferp->dataVersion = scp->dataVersion;
                }

                if (cm_HaveBuffer(scp, bufferp, 1))
                    break;

                /* otherwise, load the buffer and try again */
                lock_ReleaseMutex(&bufferp->mx);
                code = cm_GetBuffer(scp, bufferp, NULL, userp, &req);
                lock_ReleaseWrite(&scp->rw);
                lock_ObtainMutex(&bufferp->mx);
                lock_ObtainWrite(&scp->rw);
                if (code)
                    break;
            }
            if (code) {
                lock_ReleaseMutex(&bufferp->mx);
                buf_Release(bufferp);
                bufferp = NULL;
                goto done;
            }
        } /* if (wrong buffer) ... */

        /* now we have the right buffer loaded.  Copy out the
         * data from here to the user's buffer.
         */
        bufIndex = offset.LowPart & (buf_bufferSize - 1);

        /* and figure out how many bytes we want from this buffer */
        nbytes = buf_bufferSize - bufIndex; /* what remains in buffer */
        if (nbytes > count)
            nbytes = count; /* don't go past end of request */

        /* now copy the data */
        memcpy(bufferp->datap + bufIndex, op, nbytes);
        buf_SetDirty(bufferp, bufIndex, nbytes, userp);

        /* adjust counters, pointers, etc. */
        op += nbytes;
        count -= nbytes;
        written += nbytes;
        thyper.LowPart = nbytes;
        thyper.HighPart = 0;
        offset = LargeIntegerAdd(thyper, offset);
    } /* while 1 */

  done:
    lock_ReleaseWrite(&scp->rw);

    if (bufferp) {
        lock_ReleaseMutex(&bufferp->mx);
        buf_Release(bufferp);
    }

    if (code == 0 && doWriteBack) {
        lock_ObtainWrite(&scp->rw);
        code = cm_SyncOp(scp, NULL, userp, &req, 0, CM_SCACHESYNC_ASYNCSTORE);
        lock_ReleaseWrite(&scp->rw);
        if (code == 0)
            cm_QueueBKGRequest(scp, cm_BkgStore, writeBackOffset.LowPart,
                               writeBackOffset.HighPart, cm_chunkSize, 0, userp);
    }

    /* cm_SyncOpDone is called when cm_BkgStore completes */
    return code;
}