static int afs_DisconCreateSymlink(struct vcache *avc, char *aname, struct vrequest *areq) { struct dcache *tdc; struct osi_file *tfile; afs_size_t offset, len; tdc = afs_GetDCache(avc, 0, areq, &offset, &len, 0); if (!tdc) { /* printf("afs_DisconCreateSymlink: can't get new dcache for symlink.\n"); */ return ENETDOWN; } len = strlen(aname); avc->f.m.Length = len; ObtainWriteLock(&tdc->lock, 720); afs_AdjustSize(tdc, len); tdc->validPos = len; tfile = afs_CFileOpen(&tdc->f.inode); afs_CFileWrite(tfile, 0, aname, len); afs_CFileClose(tfile); ReleaseWriteLock(&tdc->lock); return 0; }
static void DFlushBuffer(struct buffer *ab) { struct osi_file *tfile; tfile = afs_CFileOpen(&ab->inode); afs_CFileWrite(tfile, ab->page * AFS_BUFFER_PAGESIZE, ab->data, AFS_BUFFER_PAGESIZE); ab->dirty = 0; /* Clear the dirty flag */ afs_CFileClose(tfile); }
/*! * * Extend a cache file * * \param avc pointer to vcache to extend data for * \param alen Length to extend file to * \param areq * * \note avc must be write locked. May release and reobtain avc and GLOCK */ int afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq) { afs_size_t offset, toAdd; struct osi_file *tfile; afs_int32 code = 0; struct dcache *tdc; void *zeros; zeros = afs_osi_Alloc(AFS_PAGESIZE); if (zeros == NULL) return ENOMEM; memset(zeros, 0, AFS_PAGESIZE); while (avc->f.m.Length < alen) { tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0); if (!tdc) { code = EIO; break; } toAdd = alen - avc->f.m.Length; offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk); if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) { toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; } tfile = afs_CFileOpen(&tdc->f.inode); while(tdc->validPos < avc->f.m.Length + toAdd) { afs_size_t towrite; towrite = (avc->f.m.Length + toAdd) - tdc->validPos; if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE; code = afs_CFileWrite(tfile, tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk), zeros, towrite); tdc->validPos += towrite; } afs_CFileClose(tfile); afs_AdjustSize(tdc, offset + toAdd ); avc->f.m.Length += toAdd; ReleaseWriteLock(&tdc->lock); afs_PutDCache(tdc); } afs_osi_Free(zeros, AFS_PAGESIZE); return code; }
/* lp is pointer to a fairly-old buffer */ static struct buffer * afs_newslot(struct dcache *adc, afs_int32 apage, struct buffer *lp) { /* Find a usable buffer slot */ afs_int32 i; afs_int32 lt = 0; struct buffer *tp; struct osi_file *tfile; AFS_STATCNT(afs_newslot); /* we take a pointer here to a buffer which was at the end of an * LRU hash chain. Odds are, it's one of the older buffers, not * one of the newer. Having an older buffer to start with may * permit us to avoid a few of the assignments in the "typical * case" for loop below. */ if (lp && (lp->lockers == 0)) { lt = lp->accesstime; } else { lp = NULL; } /* timecounter might have wrapped, if machine is very very busy * and stays up for a long time. Timecounter mustn't wrap twice * (positive->negative->positive) before calling newslot, but that * would require 2 billion consecutive cache hits... Anyway, the * penalty is only that the cache replacement policy will be * almost MRU for the next ~2 billion DReads... newslot doesn't * get called nearly as often as DRead, so in order to avoid the * performance penalty of using the hypers, it's worth doing the * extra check here every time. It's probably cheaper than doing * hcmp, anyway. There is a little performance hit resulting from * resetting all the access times to 0, but it only happens once * every month or so, and the access times will rapidly sort * themselves back out after just a few more DReads. */ if (timecounter < 0) { timecounter = 1; tp = Buffers; for (i = 0; i < nbuffers; i++, tp++) { tp->accesstime = 0; if (!lp && !tp->lockers) /* one is as good as the rest, I guess */ lp = tp; } } else { /* this is the typical case */ tp = Buffers; for (i = 0; i < nbuffers; i++, tp++) { if (tp->lockers == 0) { if (!lp || tp->accesstime < lt) { lp = tp; lt = tp->accesstime; } } } } if (lp == 0) { /* No unlocked buffers. If still possible, allocate a new increment */ if (nbuffers + NPB > afs_max_buffers) { /* There are no unlocked buffers -- this used to panic, but that * seems extreme. To the best of my knowledge, all the callers * of DRead are prepared to handle a zero return. Some of them * just panic directly, but not all of them. */ afs_warn("afs: all buffers locked\n"); return 0; } BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB); osi_Assert(BufferData != NULL); for (i = 0; i< NPB; i++) { /* Fill in each buffer with an empty indication. */ tp = &Buffers[i + nbuffers]; tp->fid = NULLIDX; afs_reset_inode(&tp->inode); tp->accesstime = 0; tp->lockers = 0; tp->data = &BufferData[AFS_BUFFER_PAGESIZE * i]; tp->hashIndex = 0; tp->dirty = 0; AFS_RWLOCK_INIT(&tp->lock, "buffer lock"); } lp = &Buffers[nbuffers]; nbuffers += NPB; } if (lp->dirty) { /* see DFlush for rationale for not getting and locking the dcache */ tfile = afs_CFileOpen(&lp->inode); afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data, AFS_BUFFER_PAGESIZE); lp->dirty = 0; afs_CFileClose(tfile); AFS_STATS(afs_stats_cmperf.bufFlushDirty++); } /* Now fill in the header. */ lp->fid = adc->index; afs_copy_inode(&lp->inode, &adc->f.inode); lp->page = apage; lp->accesstime = timecounter++; FixupBucket(lp); /* move to the right hash bucket */ return lp; }