void shutdown_osinet(void) { AFS_STATCNT(shutdown_osinet); #ifndef AFS_PRIVATE_OSI_ALLOCSPACES if (afs_cold_shutdown) { struct osi_packet *tp; while ((tp = freePacketList)) { freePacketList = tp->next; afs_osi_Free(tp, AFS_LRALLOCSIZ); #ifdef KERNEL_HAVE_PIN unpin(tp, AFS_LRALLOCSIZ); #endif } while ((tp = freeSmallList)) { freeSmallList = tp->next; afs_osi_Free(tp, AFS_SMALLOCSIZ); #ifdef KERNEL_HAVE_PIN unpin(tp, AFS_SMALLOCSIZ); #endif } LOCK_INIT(&osi_fsplock, "osi_fsplock"); LOCK_INIT(&osi_flplock, "osi_flplock"); } #endif /* AFS_PRIVATE_OSI_ALLOCSPACES */ if (afs_stats_cmperf.LargeBlocksActive || afs_stats_cmperf.SmallBlocksActive) { afs_warn("WARNING: not all blocks freed: large %d small %d\n", afs_stats_cmperf.LargeBlocksActive, afs_stats_cmperf.SmallBlocksActive); } }
/*!
 * Called on shutdown, should deallocate memory, etc.
 *
 * Frees the cell LRU queue, the cell-name list, and (for non-memory
 * caches with AFS_CACHE_VNODE_PATH) the stashed cellname inode path,
 * then resets the cell table lock.
 */
void
shutdown_cell(void)
{
    struct afs_q *cq, *tq;
    struct cell *tc;

#ifdef AFS_CACHE_VNODE_PATH
    if (cacheDiskType != AFS_FCACHE_TYPE_MEM) {
	/* path string is only allocated for disk-backed caches */
	afs_osi_FreeStr(afs_cellname_inode.ufs);
    }
#endif
    /* reset the cell-table lock to its initial state */
    AFS_RWLOCK_INIT(&afs_xcell, "afs_xcell");

    /* walk the cell LRU; tq caches the next link because tc is freed
     * inside the loop body */
    for (cq = CellLRU.next; cq != &CellLRU; cq = tq) {
	tc = QTOC(cq);
	tq = QNext(cq);
	if (tc->cellName)
	    afs_osi_FreeStr(tc->cellName);
	afs_osi_Free(tc, sizeof(struct cell));
    }
    QInit(&CellLRU);

    {
	/* free the chain of remembered cell names */
	struct cell_name *cn = afs_cellname_head;

	while (cn) {
	    struct cell_name *next = cn->next;

	    afs_osi_FreeStr(cn->cellname);
	    afs_osi_Free(cn, sizeof(struct cell_name));
	    cn = next;
	}
    }
}
/* DARWIN uses locking, and so must provide its own */
/*
 * Free every event record hanging off the osi sleep/wakeup hash table.
 * Events with a nonzero refcount still have waiters; those are warned
 * about and deliberately leaked rather than freed while in use.
 */
void
shutdown_osisleep(void)
{
    afs_event_t *tmp;
    int i;

    for (i = 0; i < AFS_EVHASHSIZE; i++) {
	while ((tmp = afs_evhasht[i]) != NULL) {
	    afs_evhasht[i] = tmp->next;
	    if (tmp->refcount > 0) {
		/* still referenced: leak on purpose, freeing would be
		 * a use-after-free for the waiter */
		afs_warn("nonzero refcount in shutdown_osisleep()\n");
	    } else {
		/* release with the same allocator each platform used */
#if defined(AFS_AIX_ENV)
		xmfree(tmp);
#elif defined(AFS_FBSD_ENV)
		afs_osi_Free(tmp, sizeof(*tmp));
#elif defined(AFS_SGI_ENV) || defined(AFS_XBSD_ENV) || defined(AFS_SUN5_ENV)
		osi_FreeSmallSpace(tmp);
#elif defined(AFS_LINUX26_ENV)
		kfree(tmp);
#elif defined(AFS_LINUX20_ENV)
		osi_linux_free(tmp);
#endif
	    }
	}
    }
}
/** * Init a new dynroot volume. * @param Volume FID. * @return Volume or NULL if not found. */ static struct volume * afs_NewDynrootVolume(struct VenusFid *fid) { struct cell *tcell; struct volume *tv; struct vldbentry *tve; char *bp, tbuf[CVBS]; tcell = afs_GetCell(fid->Cell, READ_LOCK); if (!tcell) return NULL; tve = afs_osi_Alloc(sizeof(*tve)); osi_Assert(tve != NULL); if (!(tcell->states & CHasVolRef)) tcell->states |= CHasVolRef; bp = afs_cv2string(&tbuf[CVBS], fid->Fid.Volume); memset(tve, 0, sizeof(*tve)); strcpy(tve->name, "local-dynroot"); tve->volumeId[ROVOL] = fid->Fid.Volume; tve->flags = VLF_ROEXISTS; tv = afs_SetupVolume(0, bp, tve, tcell, 0, 0, 0); afs_PutCell(tcell, READ_LOCK); afs_osi_Free(tve, sizeof(*tve)); return tv; }
/**
 * Release all connections for unix user xu at server xs.
 *
 * Scans every address of the server for a connection vector owned by
 * the user with refCount zero, unlinks it in place, destroys each
 * activated Rx connection with the GLOCK dropped, and frees the vector.
 * There is at most one such vector per server, hence the break.
 *
 * @param xu the unix user whose connections are released
 * @param xs the server whose address list is scanned
 */
static void
release_conns_user_server(struct unixuser *xu, struct server *xs)
{
    int cix, glocked;
    struct srvAddr *sa;
    struct afs_conn *tc;
    struct sa_conn_vector *tcv, **lcv;

    for (sa = (xs)->addr; sa; sa = sa->next_sa) {
	lcv = &sa->conns;
	/* lcv always addresses the link that points at tcv so a match
	 * can be unlinked without re-walking the chain */
	for (tcv = *lcv; tcv; lcv = &tcv->next, tcv = *lcv) {
	    if (tcv->user == (xu) && tcv->refCount == 0) {
		*lcv = tcv->next;

		/* our old friend, the GLOCK: drop it around Rx calls */
		glocked = ISAFS_GLOCK();
		if (glocked)
		    AFS_GUNLOCK();
		for (cix = 0; cix < CVEC_LEN; ++cix) {
		    tc = &(tcv->cvec[cix]);
		    if (tc->activated) {
			/* cancel NAT keepalive, then tear down */
			rx_SetConnSecondsUntilNatPing(tc->id, 0);
			rx_DestroyConnection(tc->id);
		    }
		}
		if (glocked)
		    AFS_GLOCK();
		afs_osi_Free(tcv, sizeof(struct sa_conn_vector));
		break;		/* at most one instance per server */
	    }			/*Found unreferenced connection for user */
	}
    }				/*For each connection on the server */
}				/* release_conns_user_server */
static void release_conns_vector(struct sa_conn_vector *xcv) { int cix, glocked; struct afs_conn *tc; struct sa_conn_vector *tcv = NULL; struct sa_conn_vector **lcv = NULL; for (tcv = xcv; tcv; lcv = &tcv->next, tcv = *lcv) { *lcv = tcv->next; /* you know it, you love it, the GLOCK */ glocked = ISAFS_GLOCK(); if (glocked) AFS_GUNLOCK(); \ for(cix = 0; cix < CVEC_LEN; ++cix) { tc = &(tcv->cvec[cix]); if (tc->activated) { rx_SetConnSecondsUntilNatPing(tc->id, 0); rx_DestroyConnection(tc->id); } } if (glocked) AFS_GLOCK(); afs_osi_Free(tcv, sizeof(struct sa_conn_vector)); } } /* release_conns_vector */
int afs_MemCacheTruncate(struct osi_file *fP, int size) { struct memCacheEntry *mceP = (struct memCacheEntry *)fP; AFS_STATCNT(afs_MemCacheTruncate); ObtainWriteLock(&mceP->afs_memLock, 313); /* old directory entry; g.c. */ if (size == 0 && mceP->dataSize > memCacheBlkSize) { char *oldData = mceP->data; mceP->data = afs_osi_Alloc(memCacheBlkSize); if (mceP->data == NULL) { /* no available memory */ mceP->data = oldData; ReleaseWriteLock(&mceP->afs_memLock); afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n", memCacheBlkSize); } else { afs_osi_Free(oldData, mceP->dataSize); mceP->dataSize = memCacheBlkSize; } } if (size < mceP->size) mceP->size = size; ReleaseWriteLock(&mceP->afs_memLock); return 0; }
struct afspag_cell *afspag_GetCell(char *acell) { struct afspag_cell *tcell; ObtainWriteLock(&afs_xpagcell, 820); for (tcell = cells; tcell; tcell = tcell->next) { if (!strcmp(acell, tcell->cellname)) break; } if (!tcell) { tcell = (struct afspag_cell *)afs_osi_Alloc(sizeof(struct afspag_cell)); if (!tcell) goto out; tcell->cellname = (char *)afs_osi_Alloc(strlen(acell) + 1); if (!tcell->cellname) { afs_osi_Free(tcell, sizeof(struct afspag_cell)); tcell = 0; goto out; } strcpy(tcell->cellname, acell); tcell->cellnum = ++lastcell; tcell->next = cells; cells = tcell; if (!primary_cell) primary_cell = tcell; } out: ReleaseWriteLock(&afs_xpagcell); return tcell; }
/*
 * Tear down the in-memory cache: free every block's data buffer and
 * the block-descriptor array itself.  A no-op unless the cache is of
 * the memory type.  memCacheBlkSize is restored to the 8K default and
 * memMaxBlkNumber zeroed so the package can be reinitialized.
 */
void
shutdown_memcache(void)
{
    int i;

    if (cacheDiskType != AFS_FCACHE_TYPE_MEM)
	return;

    memCacheBlkSize = 8192;	/* back to the default block size */
    for (i = 0; i < memMaxBlkNumber; i++) {
	LOCK_INIT(&memCache[i].afs_memLock, "afs_memLock");
	afs_osi_Free(memCache[i].data, memCache[i].dataSize);
    }
    afs_osi_Free((char *)memCache,
		 memMaxBlkNumber * sizeof(struct memCacheEntry));
    memMaxBlkNumber = 0;
}
/*
 * Load the target string of a symlink / mount point into avc->linkData
 * from the UFS cache, if not already cached in core.
 *
 * @param avc  vcache of the link; linkData is filled in on success
 * @param areq request credentials/context
 * @return 0 on success, ENETDOWN/EIO/EFAULT on failure
 */
int
afs_UFSHandleLink(register struct vcache *avc, struct vrequest *areq)
{
    register struct dcache *tdc;
    register char *tp, *rbuf;
    void *tfile;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    register afs_int32 code;

    /* two different formats, one for links protected 644, have a "." at the
     * end of the file name, which we turn into a null.  Others, protected
     * 755, we add a null to the end of */
    AFS_STATCNT(afs_UFSHandleLink);
    if (!avc->linkData) {
	/* fetch the chunk holding the link contents */
	tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
	afs_Trace3(afs_iclSetp, CM_TRACE_UFSLINK, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_POINTER, tdc, ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(avc->f.m.Length));
	if (!tdc) {
	    if (AFS_IS_DISCONNECTED)
		return ENETDOWN;
	    else
		return EIO;
	}
	/* otherwise we have the data loaded, go for it */
	if (len > 1024) {
	    afs_PutDCache(tdc);
	    return EFAULT;
	}
	if (avc->f.m.Mode & 0111)
	    alen = len + 1;	/* regular link */
	else
	    alen = len;		/* mt point */
	/* NOTE(review): if len is 0 for a mount point, alen is 0 and
	 * rbuf[alen - 1] below writes before the buffer — confirm a
	 * zero-length target can never reach this point */
	rbuf = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	tlen = len;
	ObtainReadLock(&tdc->lock);
#if defined(LINUX_USE_FH)
	tfile = osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
#else
	tfile = osi_UFSOpen(tdc->f.inode);
#endif
	code = afs_osi_Read(tfile, -1, rbuf, tlen);
	osi_UFSClose(tfile);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
	rbuf[alen - 1] = '\0';	/* zap the "." / append the null */
	alen = strlen(rbuf) + 1;
	tp = afs_osi_Alloc(alen);	/* make room for terminating null */
	memcpy(tp, rbuf, alen);
	osi_FreeLargeSpace(rbuf);
	if (code != tlen) {
	    /* short or failed read: discard the copy */
	    afs_osi_Free(tp, alen);
	    return EIO;
	}
	avc->linkData = tp;
    }
    return 0;
}
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static.  We are about to change a file from
 * bypassing caching to normal caching.  Therefore, we want to
 * throw out any existing VM pages for the file.  We keep track of
 * the number of times we go back and forth from caching to bypass.
 *
 * @param avc    vcache to transition; may be NULL (no-op)
 * @param acred  credentials used for the smush
 * @param aflags TRANSChangeDesiredBit / TRANSSetManualBit
 */
void
afs_TransitionToCaching(struct vcache *avc, afs_ucred_t *acred, int aflags)
{
    int resetDesire = 0;
    int setManual = 0;

    if (!avc)
	return;

    if (aflags & TRANSChangeDesiredBit)
	resetDesire = 1;
    if (aflags & TRANSSetManualBit)
	setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif
    ObtainWriteLock(&avc->lock, 926);

    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (!(avc->cachingStates & FCSBypass))
	goto done;

    /* Ok, we actually do need to flush */
    ObtainWriteLock(&afs_xcbhash, 957);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat cache entry */
    ReleaseWriteLock(&afs_xcbhash);
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
	/* cached link target goes too */
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }

    avc->cachingStates &= ~(FCSBypass);	/* Reset the bypass flag */
    if (resetDesire)
	avc->cachingStates &= ~(FCSDesireBypass);
    if (setManual)
	avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
/**
 * RPC handler: return the client's @sys name list.
 *
 * Allocates and fills a_sysnames from afs_sysnamelist; on any
 * allocation failure the partial result is freed and UAENOMEM is
 * returned.
 *
 * Fixes: the error path released afs_xpagsys with ReleaseWriteLock
 * although the lock is obtained with ObtainReadLock; and i was read
 * uninitialized when the very first allocation failed.
 *
 * @param a_call     Rx call (unused here)
 * @param a_uid      uid the query is on behalf of (unused here)
 * @param a_sysnames out: allocated sysname list
 * @return 0 on success, UAENOMEM on allocation failure
 */
int
SPAGCB_GetSysName(struct rx_call *a_call, afs_int32 a_uid,
		  SysNameList *a_sysnames)
{
    int i = 0;			/* count of sysnames copied so far */

    RX_AFS_GLOCK();
    ObtainReadLock(&afs_xpagsys);
    memset(a_sysnames, 0, sizeof(struct SysNameList));

    a_sysnames->SysNameList_len = afs_sysnamecount;
    a_sysnames->SysNameList_val =
	afs_osi_Alloc(afs_sysnamecount * sizeof(SysNameEnt));
    if (!a_sysnames->SysNameList_val)
	goto out;

    for (i = 0; i < afs_sysnamecount; i++) {
	a_sysnames->SysNameList_val[i].sysname =
	    afs_osi_Alloc(strlen(afs_sysnamelist[i]) + 1);
	if (!a_sysnames->SysNameList_val[i].sysname)
	    goto out;
	strcpy(a_sysnames->SysNameList_val[i].sysname, afs_sysnamelist[i]);
    }

    ReleaseReadLock(&afs_xpagsys);
    RX_AFS_GUNLOCK();
    return 0;

  out:
    /* free whatever was allocated before the failure */
    if (a_sysnames->SysNameList_val) {
	while (i-- > 0) {
	    afs_osi_Free(a_sysnames->SysNameList_val[i].sysname,
			 strlen(a_sysnames->SysNameList_val[i].sysname) + 1);
	}
	afs_osi_Free(a_sysnames->SysNameList_val,
		     afs_sysnamecount * sizeof(SysNameEnt));
    }
    /* lock was taken for read above; release it as a read lock */
    ReleaseReadLock(&afs_xpagsys);
    RX_AFS_GUNLOCK();
    return UAENOMEM;
}
void afs_FreeOneToken(struct tokenJar *token) { if (token->next != NULL) osi_Panic("Freeing linked token"); switch (token->type) { case RX_SECIDX_KAD: if (token->content.rxkad.ticket != NULL) { memset(token->content.rxkad.ticket, 0, token->content.rxkad.ticketLen); afs_osi_Free(token->content.rxkad.ticket, token->content.rxkad.ticketLen); } break; default: break; } memset(token, 0, sizeof(struct tokenJar)); afs_osi_Free(token, sizeof(struct tokenJar)); }
/*
 * Tear down the exporter package: free every exporter on the
 * root_exported list and mark the package uninitialized.
 */
void
shutdown_exporter(void)
{
    struct afs_exporter *cur = root_exported;

    while (cur) {
	struct afs_exporter *nxt = cur->exp_next;	/* save before free */

	afs_osi_Free(cur, sizeof(struct afs_exporter));
	cur = nxt;
    }
    init_xexported = 0;
}
/*
 * Remove a temporary symlink entry from /afs.
 *
 * Only root may do this.  On success the matching entry is unlinked
 * and freed and the dynroot is invalidated so it gets rebuilt.
 *
 * @param avc   dynroot vcache (unused in the lookup itself)
 * @param acred caller credentials; must be root
 * @param aname symlink name to remove (case-insensitive match)
 * @return 0 on success, EPERM if not root, EROFS if aname is a real
 *         cell/alias, ENOENT otherwise
 */
int
afs_DynrootVOPRemove(struct vcache *avc, afs_ucred_t *acred, char *aname)
{
    struct afs_dynSymlink **tpps;
    struct afs_dynSymlink *tps;
    int found = 0;

    /* only root may remove dynroot symlinks */
#if defined(AFS_SUN510_ENV)
    if (crgetruid(acred))
#else
    if (afs_cr_uid(acred))
#endif
	return EPERM;

    ObtainWriteLock(&afs_dynSymlinkLock, 97);
    tpps = &afs_dynSymlinkBase;
    /* tpps tracks the link pointing at the current entry so a match
     * can be unlinked in place */
    while (*tpps) {
	tps = *tpps;
	if (afs_strcasecmp(aname, tps->name) == 0) {
	    afs_osi_Free(tps->name, strlen(tps->name) + 1);
	    afs_osi_Free(tps->target, strlen(tps->target) + 1);
	    *tpps = tps->next;
	    afs_osi_Free(tps, sizeof(*tps));
	    afs_dynSymlinkIndex++;	/* record the change */
	    found = 1;
	    break;
	}
	tpps = &(tps->next);
    }
    ReleaseWriteLock(&afs_dynSymlinkLock);

    if (found) {
	afs_DynrootInvalidate();
	return 0;
    }

    /* not one of ours: distinguish real cells from unknown names */
    if (afs_CellOrAliasExists(aname))
	return EROFS;
    else
	return ENOENT;
}
/*
 * Shut down the directory buffer package.
 *
 * Always flushes dirty buffers; on a cold shutdown additionally frees
 * the buffer pages and descriptor array and resets all package state.
 */
void
shutdown_bufferpackage(void)
{
    struct buffer *tp;
    int i;

    AFS_STATCNT(shutdown_bufferpackage);
    /* Free all allocated Buffers and associated buffer pages */
    DFlush();
    if (afs_cold_shutdown) {
	dinit_flag = 0;
	tp = Buffers;
	/* data pages were allocated NPB buffers at a time, so they are
	 * freed in the same NPB-sized strides */
	for (i = 0; i < nbuffers; i += NPB, tp += NPB) {
	    afs_osi_Free(tp->data, NPB * AFS_BUFFER_PAGESIZE);
	}
	afs_osi_Free(Buffers, nbuffers * sizeof(struct buffer));
	nbuffers = 0;
	timecounter = 1;	/* restart the pseudo-clock */
	for (i = 0; i < PHSIZE; i++)
	    phTable[i] = 0;	/* clear the page hash table */
	memset(&afs_bufferLock, 0, sizeof(afs_lock_t));
    }
}
/*
 * Load the target string of a symlink / mount point into avc->linkData
 * from the memory cache, if not already cached in core.
 *
 * @param avc  vcache of the link; linkData is filled in on success
 * @param areq request credentials/context
 * @return 0 on success, EIO/EFAULT on failure
 */
int
afs_MemHandleLink(struct vcache *avc, struct vrequest *areq)
{
    struct dcache *tdc;
    char *tp, *rbuf;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    afs_int32 code;

    AFS_STATCNT(afs_MemHandleLink);
    /* two different formats, one for links protected 644, have a "." at
     * the end of the file name, which we turn into a null.  Others,
     * protected 755, we add a null to the end of */
    if (!avc->linkData) {
	void *addr;
	/* fetch the chunk holding the link contents */
	tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
	if (!tdc) {
	    return EIO;
	}
	/* otherwise we have the data loaded, go for it */
	if (len > 1024) {
	    afs_PutDCache(tdc);
	    return EFAULT;
	}

	if (avc->f.m.Mode & 0111)
	    alen = len + 1;	/* regular link */
	else
	    alen = len;		/* mt point */
	/* NOTE(review): if len is 0 for a mount point, alen is 0 and
	 * rbuf[alen - 1] below writes before the buffer — confirm a
	 * zero-length target can never reach this point */
	rbuf = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	ObtainReadLock(&tdc->lock);
	addr = afs_MemCacheOpen(&tdc->f.inode);
	tlen = len;
	code = afs_MemReadBlk(addr, 0, rbuf, tlen);
	afs_MemCacheClose(addr);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
	rbuf[alen - 1] = 0;	/* zap the "." / append the null */
	alen = strlen(rbuf) + 1;
	tp = afs_osi_Alloc(alen);	/* make room for terminating null */
	osi_Assert(tp != NULL);
	memcpy(tp, rbuf, alen);
	osi_FreeLargeSpace(rbuf);
	if (code != len) {
	    /* short or failed read: discard the copy */
	    afs_osi_Free(tp, alen);
	    return EIO;
	}
	avc->linkData = tp;
    }
    return 0;
}
static void afs_clear_inode(struct inode *ip) { struct vcache *vcp = VTOAFS(ip); if (vcp->vlruq.prev || vcp->vlruq.next) osi_Panic("inode freed while on LRU"); if (vcp->hnext) osi_Panic("inode freed while still hashed"); #if !defined(STRUCT_SUPER_OPERATIONS_HAS_ALLOC_INODE) afs_osi_Free(ip->u.generic_ip, sizeof(struct vcache)); #endif }
/*!
 *
 * Extend a cache file
 *
 * \param avc pointer to vcache to extend data for
 * \param alen Length to extend file to
 * \param areq
 *
 * \note avc must be write locked. May release and reobtain avc and GLOCK
 */
int
afs_ExtendSegments(struct vcache *avc, afs_size_t alen,
		   struct vrequest *areq)
{
    afs_size_t offset, toAdd;
    struct osi_file *tfile;
    afs_int32 code = 0;
    struct dcache *tdc;
    void *zeros;

    /* one page of zeroes, written repeatedly to fill the gap */
    zeros = afs_osi_Alloc(AFS_PAGESIZE);
    if (zeros == NULL)
	return ENOMEM;
    memset(zeros, 0, AFS_PAGESIZE);

    while (avc->f.m.Length < alen) {
	/* get (or create) the chunk covering the current end of file */
	tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length,
					 alen - avc->f.m.Length, areq, 0);
	if (!tdc) {
	    code = EIO;
	    break;
	}

	/* grow by at most the remainder of this chunk */
	toAdd = alen - avc->f.m.Length;
	offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
	if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
	    toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
	}

	tfile = afs_CFileOpen(&tdc->f.inode);
	/* zero-fill page by page up to the new valid position.
	 * NOTE(review): the return of afs_CFileWrite is kept in code
	 * but not checked inside this loop; a failing write still
	 * advances validPos — confirm whether errors should abort */
	while (tdc->validPos < avc->f.m.Length + toAdd) {
	    afs_size_t towrite;

	    towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
	    if (towrite > AFS_PAGESIZE)
		towrite = AFS_PAGESIZE;

	    code = afs_CFileWrite(tfile,
				  tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
				  zeros, towrite);
	    tdc->validPos += towrite;
	}
	afs_CFileClose(tfile);
	afs_AdjustSize(tdc, offset + toAdd);
	avc->f.m.Length += toAdd;
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    afs_osi_Free(zeros, AFS_PAGESIZE);
    return code;
}
/**
 * Reset volume info for the specified volume structure.  Mark volume
 * to be rechecked next time, mark every server slot not-busy, and drop
 * the cached volume name.
 * @param tv volume to reset
 */
void
afs_ResetVolumeInfo(struct volume *tv)
{
    int slot;

    AFS_STATCNT(afs_ResetVolumeInfo);
    ObtainWriteLock(&tv->lock, 117);
    tv->states |= VRecheck;

    for (slot = 0; slot < MAXHOSTS; slot++)
	tv->status[slot] = not_busy;

    if (tv->name != NULL) {
	afs_osi_Free(tv->name, strlen(tv->name) + 1);
	tv->name = NULL;
    }
    ReleaseWriteLock(&tv->lock);
}
/**
 * RPC handler: return the name and server list of the cell at the
 * given index in the local CellServDB.
 *
 * Fix: on name-allocation failure the old code unconditionally freed
 * a possibly-NULL server list and left a dangling serverList_val with
 * a stale length; the free is now guarded and the list cleared.
 *
 * @param a_call  Rx call
 * @param a_index cell index to look up
 * @param a_name  out: allocated cell name ("" if index not found)
 * @param a_hosts out: allocated host-address list (host byte order)
 * @return 0 on success, ENOMEM on allocation failure
 */
int
SRXAFSCB_GetCellServDB(struct rx_call *a_call, afs_int32 a_index,
		       char **a_name, serverList *a_hosts)
{
    afs_int32 i, j = 0;
    struct cell *tcell;
    char *t_name, *p_name = NULL;

    RX_AFS_GLOCK();
    AFS_STATCNT(SRXAFSCB_GetCellServDB);

    tcell = afs_GetCellByIndex(a_index, READ_LOCK);
    if (!tcell) {
	i = 0;
	a_hosts->serverList_val = 0;
	a_hosts->serverList_len = 0;
    } else {
	p_name = tcell->cellName;
	/* count the populated host slots */
	for (j = 0; j < AFSMAXCELLHOSTS && tcell->cellHosts[j]; j++);
	i = strlen(p_name);
	a_hosts->serverList_val = afs_osi_Alloc(j * sizeof(afs_int32));
	osi_Assert(a_hosts->serverList_val != NULL);
	a_hosts->serverList_len = j;
	for (j = 0; j < AFSMAXCELLHOSTS && tcell->cellHosts[j]; j++)
	    a_hosts->serverList_val[j] =
		ntohl(tcell->cellHosts[j]->addr->sa_ip);
	afs_PutCell(tcell, READ_LOCK);
    }

    t_name = afs_osi_Alloc(i + 1);
    if (t_name == NULL) {
	/* free the host list only if one was allocated, and leave no
	 * dangling pointer / stale length for the caller */
	if (a_hosts->serverList_val) {
	    afs_osi_Free(a_hosts->serverList_val, (j * sizeof(afs_int32)));
	    a_hosts->serverList_val = 0;
	    a_hosts->serverList_len = 0;
	}
	RX_AFS_GUNLOCK();
	return ENOMEM;
    }

    t_name[i] = '\0';
    if (p_name)
	memcpy(t_name, p_name, i);

    RX_AFS_GUNLOCK();

    *a_name = t_name;
    return 0;
}
static void afs_evict_inode(struct inode *ip) { struct vcache *vcp = VTOAFS(ip); if (vcp->vlruq.prev || vcp->vlruq.next) osi_Panic("inode freed while on LRU"); if (vcp->hnext) osi_Panic("inode freed while still hashed"); truncate_inode_pages(&ip->i_data, 0); end_writeback(ip); #if !defined(STRUCT_SUPER_OPERATIONS_HAS_ALLOC_INODE) afs_osi_Free(ip->u.generic_ip, sizeof(struct vcache)); #endif }
/*XXX: this extends a block arbitrarily to support big directories */
/*
 * Scatter-write an iovec into a memcache block at the given offset,
 * growing the block's buffer if the write extends past its current
 * allocation.
 *
 * @param mceP   memcache block
 * @param offset byte offset within the block to start writing
 * @param iov    source iovec array
 * @param nio    number of iovec entries
 * @param size   total bytes to write
 * @return bytes written, or -ENOMEM if the buffer could not be grown
 */
int
afs_MemWritevBlk(struct memCacheEntry *mceP, int offset,
		 struct iovec *iov, int nio, int size)
{
    int i;
    int bytesWritten;
    int bytesToWrite;

    AFS_STATCNT(afs_MemWriteBlk);
    ObtainWriteLock(&mceP->afs_memLock, 561);
    if (offset + size > mceP->dataSize) {
	/* grow the buffer; keep the old one if allocation fails */
	char *oldData = mceP->data;

	mceP->data = afs_osi_Alloc(size + offset);
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;	/* revert back change that was made */
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n",
		     size + offset);
	    return -ENOMEM;
	}

	/* may overlap, but this is OK */
	AFS_GUNLOCK();
	memcpy(mceP->data, oldData, mceP->size);
	AFS_GLOCK();
	afs_osi_Free(oldData, mceP->dataSize);
	mceP->dataSize = size + offset;
    }
    AFS_GUNLOCK();
    /* zero-fill any gap between current end of data and the offset */
    if (mceP->size < offset)
	memset(mceP->data + mceP->size, 0, offset - mceP->size);
    for (bytesWritten = 0, i = 0; i < nio && size > 0; i++) {
	bytesToWrite = (size < iov[i].iov_len) ? size : iov[i].iov_len;
	memcpy(mceP->data + offset, iov[i].iov_base, bytesToWrite);
	offset += bytesToWrite;
	bytesWritten += bytesToWrite;
	size -= bytesToWrite;
    }
    mceP->size = (offset < mceP->size) ? mceP->size : offset;
    AFS_GLOCK();
    ReleaseWriteLock(&mceP->afs_memLock);
    return bytesWritten;
}
/*
 * Initialize the in-memory cache: allocate the block-descriptor array
 * and one data buffer per block, then register each block as a cache
 * file.
 *
 * @param blkCount number of cache blocks
 * @param blkSize  block size in bytes (0 keeps the current default)
 * @param flags    unused here
 * @return 0 on success, ENOMEM if a data buffer could not be allocated
 *         (already-allocated buffers are freed; the descriptor array
 *         itself is kept)
 */
int
afs_InitMemCache(int blkCount, int blkSize, int flags)
{
    int index;

    AFS_STATCNT(afs_InitMemCache);
    if (blkSize)
	memCacheBlkSize = blkSize;

    memMaxBlkNumber = blkCount;
    memCache = afs_osi_Alloc(memMaxBlkNumber * sizeof(struct memCacheEntry));
    osi_Assert(memCache != NULL);

    for (index = 0; index < memMaxBlkNumber; index++) {
	char *blk;

	(memCache + index)->size = 0;
	(memCache + index)->dataSize = memCacheBlkSize;
	LOCK_INIT(&((memCache + index)->afs_memLock), "afs_memLock");
	blk = afs_osi_Alloc(memCacheBlkSize);
	if (blk == NULL)
	    goto nomem;
	(memCache + index)->data = blk;
	memset((memCache + index)->data, 0, memCacheBlkSize);
    }
#if defined(AFS_HAVE_VXFS)
    afs_InitDualFSCacheOps((struct vnode *)0);
#endif
    /* register each block as a cache file slot */
    for (index = 0; index < blkCount; index++)
	afs_InitCacheFile(NULL, 0);
    return 0;

  nomem:
    afs_warn("afsd: memCache allocation failure at %d KB.\n",
	     (index * memCacheBlkSize) / 1024);
    /* roll back the data buffers allocated so far */
    while (--index >= 0) {
	afs_osi_Free((memCache + index)->data, memCacheBlkSize);
	(memCache + index)->data = NULL;
    }
    return ENOMEM;
}
static void afs_RebuildDynrootMount(void) { int i; int curChunk, curPage; char *newDir; struct DirHeader *dirHeader; newDir = afs_osi_Alloc(AFS_PAGESIZE); /* * Now actually construct the directory. */ curChunk = 13; curPage = 0; dirHeader = (struct DirHeader *)newDir; dirHeader->header.pgcount = 0; dirHeader->header.tag = htons(1234); dirHeader->header.freecount = 0; dirHeader->header.freebitmap[0] = 0xff; dirHeader->header.freebitmap[1] = 0x1f; for (i = 2; i < EPP / 8; i++) dirHeader->header.freebitmap[i] = 0; dirHeader->alloMap[0] = EPP - DHE - 1; for (i = 1; i < MAXPAGES; i++) dirHeader->alloMap[i] = EPP; for (i = 0; i < NHASHENT; i++) dirHeader->hashTable[i] = 0; /* Install "." and ".." */ afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, ".", 1); afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, "..", 1); ObtainWriteLock(&afs_dynrootDirLock, 549); if (afs_dynrootMountDir) afs_osi_Free(afs_dynrootMountDir, afs_dynrootMountDirLen); afs_dynrootMountDir = newDir; afs_dynrootMountDirLen = AFS_PAGESIZE; ReleaseWriteLock(&afs_dynrootDirLock); }
/**
 * Reset volume info for the specified volume structure. Mark volume
 * to be rechecked next time.
 * @param tv volume to reset; its lock is taken for write here
 */
void
afs_ResetVolumeInfo(struct volume *tv)
{
    int i;

    AFS_STATCNT(afs_ResetVolumeInfo);
    ObtainWriteLock(&tv->lock, 117);
    tv->states |= VRecheck;

    /* the hard-mount code in afs_Analyze may not be able to reset this flag
     * when VRecheck is set, so clear it here to ensure it gets cleared. */
    tv->states &= ~VHardMount;

    for (i = 0; i < AFS_MAXHOSTS; i++)
	tv->status[i] = not_busy;

    /* drop the cached volume name; it is re-resolved on recheck */
    if (tv->name) {
	afs_osi_Free(tv->name, strlen(tv->name) + 1);
	tv->name = NULL;
    }
    ReleaseWriteLock(&tv->lock);
}
/*
 * Write the contents of a uio into a memcache block, growing the
 * block's buffer if the write extends past its current allocation.
 *
 * @param ainode cache inode identifying the memcache block
 * @param uioP   source uio describing offset and data
 * @return result of AFS_UIOMOVE, or -ENOMEM if the buffer could not
 *         be grown
 */
int
afs_MemWriteUIO(afs_dcache_id_t *ainode, struct uio *uioP)
{
    struct memCacheEntry *mceP =
	(struct memCacheEntry *)afs_MemCacheOpen(ainode);
    afs_int32 code;

    AFS_STATCNT(afs_MemWriteUIO);
    ObtainWriteLock(&mceP->afs_memLock, 312);
    if (AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP) > mceP->dataSize) {
	/* grow the buffer; keep the old one if allocation fails */
	char *oldData = mceP->data;

	mceP->data = afs_osi_Alloc(AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP));
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;	/* revert back change that was made */
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%ld bytes)\n",
		     (long)(AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP)));
	    return -ENOMEM;
	}
	AFS_GUNLOCK();
	memcpy(mceP->data, oldData, mceP->size);
	AFS_GLOCK();
	afs_osi_Free(oldData, mceP->dataSize);
	mceP->dataSize = AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP);
    }
    /* zero-fill any gap between current end of data and the offset */
    if (mceP->size < AFS_UIO_OFFSET(uioP))
	memset(mceP->data + mceP->size, 0,
	       (int)(AFS_UIO_OFFSET(uioP) - mceP->size));

    AFS_UIOMOVE(mceP->data + AFS_UIO_OFFSET(uioP), AFS_UIO_RESID(uioP),
		UIO_WRITE, uioP, code);

    if (AFS_UIO_OFFSET(uioP) > mceP->size)
	mceP->size = AFS_UIO_OFFSET(uioP);

    ReleaseWriteLock(&mceP->afs_memLock);
    return code;
}
/**
 * Reset volume name to volume id mapping cache.
 *
 * Optionally (AFS_VOLCHECK_EXPIRED/FORCE) resets expired or all RO
 * volumes and remembers their ids so that the matching mount points
 * can be invalidated; optionally (AFS_VOLCHECK_BUSY) clears busy
 * status; then walks the vcache hash invalidating mount-point and
 * callback state as needed.
 *
 * @param flags AFS_VOLCHECK_* bits selecting the work to do
 */
void
afs_CheckVolumeNames(int flags)
{
    afs_int32 i, j;
    struct volume *tv;
    unsigned int now;
    struct vcache *tvc;
    afs_int32 *volumeID, *cellID, vsize, nvols;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif
    AFS_STATCNT(afs_CheckVolumeNames);

    nvols = 0;
    volumeID = cellID = NULL;
    vsize = 0;
    ObtainReadLock(&afs_xvolume);
    if (flags & AFS_VOLCHECK_EXPIRED) {
	/*
	 * allocate space to hold the volumeIDs and cellIDs, only if
	 * we will be invalidating the mountpoints later on
	 */
	for (i = 0; i < NVOLS; i++)
	    for (tv = afs_volumes[i]; tv; tv = tv->next)
		++vsize;

	/* one allocation carries both arrays; cellID aliases its
	 * second half.  NOTE(review): if this allocation fails but
	 * volumes expire below, nvols still grows while the arrays
	 * stay NULL — inVolList is then called with a NULL list;
	 * confirm inVolList tolerates that */
	volumeID = afs_osi_Alloc(2 * vsize * sizeof(*volumeID));
	cellID = (volumeID) ? volumeID + vsize : 0;
    }

    now = osi_Time();
    for (i = 0; i < NVOLS; i++) {
	for (tv = afs_volumes[i]; tv; tv = tv->next) {
	    if (flags & AFS_VOLCHECK_EXPIRED) {
		if (((tv->expireTime < (now + 10)) && (tv->states & VRO))
		    || (flags & AFS_VOLCHECK_FORCE)) {
		    afs_ResetVolumeInfo(tv);	/* also resets status */
		    if (volumeID) {
			volumeID[nvols] = tv->volume;
			cellID[nvols] = tv->cell;
		    }
		    ++nvols;
		    continue;
		}
	    }
	    /* ??? */
	    if (flags & (AFS_VOLCHECK_BUSY | AFS_VOLCHECK_FORCE)) {
		for (j = 0; j < AFS_MAXHOSTS; j++)
		    tv->status[j] = not_busy;
	    }

	}
    }
    ReleaseReadLock(&afs_xvolume);

    /* next ensure all mt points are re-evaluated */
    if (nvols || (flags & (AFS_VOLCHECK_FORCE | AFS_VOLCHECK_MTPTS))) {
      loop:
	ObtainReadLock(&afs_xvcache);
	for (i = 0; i < VCSIZE; i++) {
	    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {

		/* if the volume of "mvid" of the vcache entry is among the
		 * ones we found earlier, then we re-evaluate it.  Also, if the
		 * force bit is set or we explicitly asked to reevaluate the
		 * mt-pts, we clean the cmvalid bit */
		if ((flags & (AFS_VOLCHECK_FORCE | AFS_VOLCHECK_MTPTS))
		    || (tvc->mvid
			&& inVolList(tvc->mvid, nvols, volumeID, cellID)))
		    tvc->f.states &= ~CMValid;

		/* If the volume that this file belongs to was reset earlier,
		 * then we should remove its callback.
		 * Again, if forced, always do it.
		 */
		if ((tvc->f.states & CRO)
		    && (inVolList(&tvc->f.fid, nvols, volumeID, cellID)
			|| (flags & AFS_VOLCHECK_FORCE))) {

		    /* entry still initializing: wait and restart the scan */
		    if (tvc->f.states & CVInit) {
			ReleaseReadLock(&afs_xvcache);
			afs_osi_Sleep(&tvc->f.states);
			goto loop;
		    }
#ifdef AFS_DARWIN80_ENV
		    if (tvc->f.states & CDeadVnode) {
			ReleaseReadLock(&afs_xvcache);
			afs_osi_Sleep(&tvc->f.states);
			goto loop;
		    }
		    tvp = AFSTOV(tvc);
		    if (vnode_get(tvp))
			continue;
		    if (vnode_ref(tvp)) {
			AFS_GUNLOCK();
			/* AFSTOV(tvc) may be NULL */
			vnode_put(tvp);
			AFS_GLOCK();
			continue;
		    }
#else
		    AFS_FAST_HOLD(tvc);
#endif
		    ReleaseReadLock(&afs_xvcache);

		    ObtainWriteLock(&afs_xcbhash, 485);
		    /* LOCKXXX: We aren't holding tvc write lock? */
		    afs_DequeueCallback(tvc);
		    tvc->f.states &= ~CStatd;
		    ReleaseWriteLock(&afs_xcbhash);
		    if (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR))
			osi_dnlc_purgedp(tvc);

#ifdef AFS_DARWIN80_ENV
		    vnode_put(AFSTOV(tvc));
		    /* our tvc ptr is still good until now */
		    AFS_FAST_RELE(tvc);
		    ObtainReadLock(&afs_xvcache);
#else
		    ObtainReadLock(&afs_xvcache);

		    /* our tvc ptr is still good until now */
		    AFS_FAST_RELE(tvc);
#endif
		}
	    }
	}
	osi_dnlc_purge();	/* definitely overkill, but it's safer this way. */
	ReleaseReadLock(&afs_xvcache);
    }

    if (volumeID)
	afs_osi_Free(volumeID, 2 * vsize * sizeof(*volumeID));

}				/*afs_CheckVolumeNames */
/**
 * UFS specific version of afs_GetVolSlot
 *
 * Returns a volume structure, either from the free list or by evicting
 * the least-recently-accessed unreferenced volume, writing the evicted
 * volume's persistent info to the volumeinfo file first.  On any error
 * the eviction is rolled back.
 *
 * @return a volume slot, or NULL on failure
 */
struct volume *
afs_UFSGetVolSlot(void)
{
    struct volume *tv = NULL, **lv;
    struct osi_file *tfile;
    afs_int32 i = -1, code;
    afs_int32 bestTime;
    struct volume *bestVp, *oldLp = NULL, **bestLp = NULL;
    char *oldname = NULL;
    afs_int32 oldvtix = -2;	/* Initialize to a value that doesn't occur */

    AFS_STATCNT(afs_UFSGetVolSlot);
    if (!afs_freeVolList) {
	/* get free slot */
	bestTime = 0x7fffffff;
	bestVp = 0;
	bestLp = 0;
	/* scan every hash chain for the oldest unreferenced volume;
	 * bestLp remembers the link pointing at it so it can be
	 * unlinked below */
	for (i = 0; i < NVOLS; i++) {
	    lv = &afs_volumes[i];
	    for (tv = *lv; tv; lv = &tv->next, tv = *lv) {
		if (tv->refCount == 0) {	/* is this one available? */
		    if (tv->accessTime < bestTime) {	/* best one available? */
			bestTime = tv->accessTime;
			bestLp = lv;
			bestVp = tv;
		    }
		}
	    }
	}
	if (!bestVp) {
	    afs_warn("afs_UFSGetVolSlot: no vol slots available\n");
	    goto error;
	}
	tv = bestVp;

	/* remember enough state to undo the eviction on error */
	oldLp = *bestLp;
	*bestLp = tv->next;

	oldname = tv->name;
	tv->name = NULL;

	oldvtix = tv->vtix;
	/* now write out volume structure to file */
	if (tv->vtix < 0) {
	    tv->vtix = afs_volCounter++;
	    /* now put on hash chain */
	    i = FVHash(tv->cell, tv->volume);
	    staticFVolume.next = fvTable[i];
	    fvTable[i] = tv->vtix;
	} else {
	    /*
	     * Haul the guy in from disk so we don't overwrite hash table
	     * next chain
	     */
	    if (afs_FVIndex != tv->vtix) {
		tfile = osi_UFSOpen(&volumeInode);
		code =
		    afs_osi_Read(tfile, sizeof(struct fvolume) * tv->vtix,
				 &staticFVolume, sizeof(struct fvolume));
		osi_UFSClose(tfile);
		if (code != sizeof(struct fvolume)) {
		    afs_warn("afs_UFSGetVolSlot: error %d reading volumeinfo\n",
			     (int)code);
		    goto error;
		}
		afs_FVIndex = tv->vtix;
	    }
	}
	afs_FVIndex = tv->vtix;
	/* copy the persistent fields into the on-disk record */
	staticFVolume.volume = tv->volume;
	staticFVolume.cell = tv->cell;
	staticFVolume.mtpoint = tv->mtpoint;
	staticFVolume.dotdot = tv->dotdot;
	staticFVolume.rootVnode = tv->rootVnode;
	staticFVolume.rootUnique = tv->rootUnique;
	tfile = osi_UFSOpen(&volumeInode);
	code =
	    afs_osi_Write(tfile, sizeof(struct fvolume) * afs_FVIndex,
			  &staticFVolume, sizeof(struct fvolume));
	osi_UFSClose(tfile);
	if (code != sizeof(struct fvolume)) {
	    afs_warn("afs_UFSGetVolSlot: error %d writing volumeinfo\n",
		     (int)code);
	    goto error;
	}
	if (oldname) {
	    afs_osi_Free(oldname, strlen(oldname) + 1);
	    oldname = NULL;
	}
    } else {
	/* fast path: pop the free list */
	tv = afs_freeVolList;
	afs_freeVolList = tv->next;
    }
    return tv;

  error:
    /* roll back the partial eviction so tv stays usable in the table */
    if (tv) {
	if (oldvtix == -2) {
	    afs_warn("afs_UFSGetVolSlot: oldvtix is uninitialized\n");
	    return NULL;
	}
	if (oldname) {
	    tv->name = oldname;
	    oldname = NULL;
	}
	if (oldvtix < 0) {
	    afs_volCounter--;
	    fvTable[i] = staticFVolume.next;
	}
	if (bestLp) {
	    *bestLp = oldLp;
	}
	tv->vtix = oldvtix;
	/* we messed with staticFVolume, so make sure someone else
	 * doesn't think it's fine to use */
	afs_FVIndex = -1;
    }
    return NULL;
}				/*afs_UFSGetVolSlot */
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static.  We are about to change a file from
 * normal caching to bypass it's caching.  Therefore, we want to
 * free up any cache space in use by the file, and throw out any
 * existing VM pages for the file.  We keep track of the number of
 * times we go back and forth from caching to bypass.
 *
 * @param avc    vcache to transition; may be NULL (no-op)
 * @param acred  credentials used for store/smush
 * @param aflags TRANSChangeDesiredBit / TRANSSetManualBit
 */
void
afs_TransitionToBypass(struct vcache *avc, afs_ucred_t *acred, int aflags)
{
    afs_int32 code;
    struct vrequest treq;
    int setDesire = 0;
    int setManual = 0;

    if (!avc)
	return;

    if (aflags & TRANSChangeDesiredBit)
	setDesire = 1;
    if (aflags & TRANSSetManualBit)
	setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif
    ObtainWriteLock(&avc->lock, 925);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (avc->cachingStates & FCSBypass)
	goto done;

    /* If we never cached this, just change state */
    if (setDesire && (!(avc->cachingStates & FCSBypass))) {
	avc->cachingStates |= FCSBypass;
	goto done;
    }
    /* cg2v, try to store any chunks not written 20071204 */
    if (avc->execsOrWriters > 0) {
	code = afs_InitReq(&treq, acred);
	if (!code)
	    code = afs_StoreAllSegments(avc, &treq, AFS_SYNC | AFS_LASTSTORE);
    }
#if 0
    /* also cg2v, don't dequeue the callback */
    ObtainWriteLock(&afs_xcbhash, 956);
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
#endif
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
	/* cached link target goes too */
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }

    avc->cachingStates |= FCSBypass;	/* Set the bypass flag */
    if (setDesire)
	avc->cachingStates |= FCSDesireBypass;
    if (setManual)
	avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}