/*!
 * \brief Load the list of cells from the given inode.
 *
 * Reads back the records previously written to the cell-name cache file:
 * each record is (magic, cellnum, clen, clen name bytes).  Parsing stops
 * quietly at the first short read, bad magic, implausible length, or
 * duplicate name/id — everything read up to that point is kept.
 *
 * Caller state touched: takes and releases afs_xcell (write), resets
 * afs_cellnum_next/afs_cellname_dirty, and records the inode in
 * afs_cellname_inode on success paths that open it.
 *
 * \param inode       Source inode of the cell-name cache file.
 * \param lookupcode  Result of looking up that inode; nonzero aborts the
 *                    load and is returned to the caller.
 * \return 0 for success.  < 0 (or a positive errno such as EIO) for error.
 */
int
afs_cellname_init(afs_dcache_id_t *inode, int lookupcode)
{
    struct osi_file *tfile;
    int cc, off = 0;

    ObtainWriteLock(&afs_xcell, 692);

    afs_cellnum_next = 1;
    afs_cellname_dirty = 0;

    /* A memory cache has no backing file to read cell names from. */
    if (cacheDiskType == AFS_FCACHE_TYPE_MEM) {
	ReleaseWriteLock(&afs_xcell);
	return 0;
    }

    if (lookupcode) {
	ReleaseWriteLock(&afs_xcell);
	return lookupcode;
    }

    tfile = osi_UFSOpen(inode);
    if (!tfile) {
	ReleaseWriteLock(&afs_xcell);
	return EIO;
    }

    afs_copy_inode(&afs_cellname_inode, inode);
    afs_cellname_inode_set = 1;

    while (1) {
	afs_int32 cellnum, clen, magic;
	char *cellname;

	cc = afs_osi_Read(tfile, off, &magic, sizeof(magic));
	if (cc != sizeof(magic))
	    break;
	if (magic != AFS_CELLINFO_MAGIC)
	    break;
	off += cc;

	cc = afs_osi_Read(tfile, off, &cellnum, sizeof(cellnum));
	if (cc != sizeof(cellnum))
	    break;
	off += cc;

	cc = afs_osi_Read(tfile, off, &clen, sizeof(clen));
	if (cc != sizeof(clen))
	    break;
	off += cc;

	/* clen comes from on-disk data that may be corrupt.  A negative
	 * value would be converted to a huge size in afs_osi_Alloc(clen + 1);
	 * also cap the upper bound, since real cell names are far shorter
	 * than 1K.  Treat anything outside the range as end-of-data. */
	if (clen <= 0 || clen > 1024)
	    break;

	cellname = afs_osi_Alloc(clen + 1);
	if (!cellname)
	    break;

	cc = afs_osi_Read(tfile, off, cellname, clen);
	if (cc != clen) {
	    afs_osi_Free(cellname, clen + 1);
	    break;
	}
	off += cc;
	cellname[clen] = '\0';

	/* Stop on the first duplicate: the file is append-only, so a
	 * repeated name or id means we have walked past valid data. */
	if (afs_cellname_lookup_name(cellname)
	    || afs_cellname_lookup_id(cellnum)) {
	    afs_osi_Free(cellname, clen + 1);
	    break;
	}

	/* afs_cellname_new() makes its own copy; we free ours below. */
	afs_cellname_new(cellname, cellnum);
	afs_osi_Free(cellname, clen + 1);
    }

    osi_UFSClose(tfile);
    ReleaseWriteLock(&afs_xcell);
    return 0;
}
/*!
 * \brief Claim a buffer slot and bind it to page \a apage of dcache
 *        entry \a adc, flushing the slot's old contents first if dirty.
 *
 * Picks the least-recently-used unlocked buffer (seeded by \a lp, a
 * pointer to a fairly-old buffer, when usable); if every buffer is
 * locked, grows the pool by NPB slots up to afs_max_buffers.
 *
 * \param adc   dcache entry that becomes the new owner (lp->fid = adc->index).
 * \param apage Page number within that entry.
 * \param lp    Candidate old buffer to seed the LRU scan, or NULL.
 * \return The claimed buffer, or 0 when all buffers are locked and the
 *         pool cannot grow further (callers of DRead handle a 0 return).
 *
 * NOTE(review): caller is presumably expected to hold whatever lock
 * serializes access to Buffers/nbuffers/timecounter — confirm against
 * the callers; nothing in this function takes a global buffer lock.
 */
static struct buffer *
afs_newslot(struct dcache *adc, afs_int32 apage, struct buffer *lp)
{
    /* Find a usable buffer slot */
    afs_int32 i;
    afs_int32 lt = 0;		/* access time of best candidate so far */
    struct buffer *tp;
    struct osi_file *tfile;

    AFS_STATCNT(afs_newslot);
    /* we take a pointer here to a buffer which was at the end of an
     * LRU hash chain.  Odds are, it's one of the older buffers, not
     * one of the newer.  Having an older buffer to start with may
     * permit us to avoid a few of the assignments in the "typical
     * case" for loop below.
     */
    if (lp && (lp->lockers == 0)) {
	lt = lp->accesstime;
    } else {
	lp = NULL;		/* seed unusable (absent or locked) */
    }

    /* timecounter might have wrapped, if machine is very very busy
     * and stays up for a long time.  Timecounter mustn't wrap twice
     * (positive->negative->positive) before calling newslot, but that
     * would require 2 billion consecutive cache hits...  Anyway, the
     * penalty is only that the cache replacement policy will be
     * almost MRU for the next ~2 billion DReads...  newslot doesn't
     * get called nearly as often as DRead, so in order to avoid the
     * performance penalty of using the hypers, it's worth doing the
     * extra check here every time.  It's probably cheaper than doing
     * hcmp, anyway.  There is a little performance hit resulting from
     * resetting all the access times to 0, but it only happens once
     * every month or so, and the access times will rapidly sort
     * themselves back out after just a few more DReads.
     */
    if (timecounter < 0) {
	/* wrap recovery: restart the clock and zero every access time */
	timecounter = 1;
	tp = Buffers;
	for (i = 0; i < nbuffers; i++, tp++) {
	    tp->accesstime = 0;
	    if (!lp && !tp->lockers)	/* one is as good as the rest, I guess */
		lp = tp;
	}
    } else {
	/* this is the typical case: linear LRU scan over all buffers,
	 * keeping the unlocked buffer with the smallest access time */
	tp = Buffers;
	for (i = 0; i < nbuffers; i++, tp++) {
	    if (tp->lockers == 0) {
		if (!lp || tp->accesstime < lt) {
		    lp = tp;
		    lt = tp->accesstime;
		}
	    }
	}
    }

    if (lp == 0) {
	/* No unlocked buffers.
	 * If still possible, allocate a new increment */
	if (nbuffers + NPB > afs_max_buffers) {
	    /* There are no unlocked buffers -- this used to panic, but that
	     * seems extreme.  To the best of my knowledge, all the callers
	     * of DRead are prepared to handle a zero return.  Some of them
	     * just panic directly, but not all of them. */
	    afs_warn("afs: all buffers locked\n");
	    return 0;
	}

	/* Allocate one fresh NPB-page data area.  The global BufferData
	 * pointer is repointed at the newest chunk; older buffers keep
	 * their data pointers into the chunks allocated earlier. */
	BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
	osi_Assert(BufferData != NULL);
	for (i = 0; i < NPB; i++) {
	    /* Fill in each buffer with an empty indication. */
	    tp = &Buffers[i + nbuffers];
	    tp->fid = NULLIDX;
	    afs_reset_inode(&tp->inode);
	    tp->accesstime = 0;
	    tp->lockers = 0;
	    tp->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
	    tp->hashIndex = 0;
	    tp->dirty = 0;
	    AFS_RWLOCK_INIT(&tp->lock, "buffer lock");
	}
	lp = &Buffers[nbuffers];	/* claim the first new slot */
	nbuffers += NPB;
    }

    if (lp->dirty) {
	/* Write the victim's page back to its old file before reuse.
	 * see DFlush for rationale for not getting and locking the dcache */
	tfile = afs_CFileOpen(&lp->inode);
	afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
		       AFS_BUFFER_PAGESIZE);
	lp->dirty = 0;
	afs_CFileClose(tfile);
	AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
    }

    /* Now fill in the header. */
    lp->fid = adc->index;
    afs_copy_inode(&lp->inode, &adc->f.inode);
    lp->page = apage;
    lp->accesstime = timecounter++;
    FixupBucket(lp);		/* move to the right hash bucket */

    return lp;
}