/*
 * SGI background daemon: flushes modified dcache entries queued on
 * afs_sgibklist.  Runs forever; sleeps on afs_sgibksync until a dcache
 * is queued, writes it out under the AFS global lock, then wakes any
 * waiters on afs_sgibkwait.  Never returns.
 */
int
afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;

    /* first invocation initializes the sync variables and the spinlock */
    if (afs_sgibklock == NULL) {
	SV_INIT(&afs_sgibksync, "bksync", 0, 0);
	SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
	SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
	/* wait for something to do */
	SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
	osi_Assert(afs_sgibklist);

	/* XX will probably need to generalize to real list someday */
	s = SPLOCK(afs_sgibklock);
	while (afs_sgibklist) {
	    tdc = afs_sgibklist;
	    afs_sgibklist = NULL;
	    /* drop the spinlock across the (potentially sleeping) write */
	    SPUNLOCK(afs_sgibklock, s);
	    AFS_GLOCK();
	    tdc->dflags &= ~DFEntryMod;
	    /* NOTE(review): the write-out happens inside osi_Assert(); if
	     * osi_Assert were ever compiled away the flush would be lost --
	     * confirm osi_Assert is unconditionally enabled in this tree. */
	    osi_Assert(afs_WriteDCache(tdc, 1) == 0);
	    AFS_GUNLOCK();
	    s = SPLOCK(afs_sgibklock);
	}

	/* done all the work - wake everyone up */
	while (SV_SIGNAL(&afs_sgibkwait));
    }
}
/*
 * Look up a protection-db entry by id.
 *
 * @param[in] at   ubik transaction
 * @param[in] aid  id to find (PRBADID and 0 are rejected)
 *
 * @return disk address of the entry, or 0 if not found / on read error
 */
afs_int32
FindByID(struct ubik_trans *at, afs_int32 aid)
{
    /* returns address of entry if found, 0 otherwise */
    afs_int32 code;
    afs_int32 i;
    struct prentry tentry;
    afs_int32 entry;

    if ((aid == PRBADID) || (aid == 0))
	return 0;
    i = IDHash(aid);
    entry = ntohl(cheader.idHash[i]);
    /* Walk the hash chain.  The original special-cased the first link,
     * duplicating the loop body verbatim; a single loop is equivalent. */
    while (entry != 0) {
	memset(&tentry, 0, sizeof(tentry));
	code = pr_ReadEntry(at, 0, entry, &tentry);
	if (code != 0)
	    return 0;
	if (aid == tentry.id)
	    return entry;
	/* guard against a self-referential link (corrupt db) looping forever */
	osi_Assert(entry != tentry.nextID);
	entry = tentry.nextID;
    }
    return 0;
}
/** * start a worker thread. * * @param[in] pool thread pool object * @param[inout] worker_out address in which to store worker thread object pointer * * @return operation status * @retval 0 success * @retval ENOMEM out of memory */ static int _afs_tp_worker_start(struct afs_thread_pool * pool, struct afs_thread_pool_worker ** worker_out) { int ret = 0; pthread_attr_t attrs; struct afs_thread_pool_worker * worker; ret = _afs_tp_worker_alloc(worker_out); if (ret) { goto error; } worker = *worker_out; worker->pool = pool; worker->req_shutdown = 0; osi_Assert(pthread_attr_init(&attrs) == 0); osi_Assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0); ret = pthread_create(&worker->tid, &attrs, &_afs_tp_worker_run, worker); error: return ret; }
afs_int32 FindByName(struct ubik_trans *at, char aname[PR_MAXNAMELEN], struct prentry *tentryp) { /* ditto */ afs_int32 code; afs_int32 i; afs_int32 entry; i = NameHash(aname); entry = ntohl(cheader.nameHash[i]); if (entry == 0) return entry; memset(tentryp, 0, sizeof(struct prentry)); code = pr_ReadEntry(at, 0, entry, tentryp); if (code != 0) return 0; if ((strncmp(aname, tentryp->name, PR_MAXNAMELEN)) == 0) return entry; osi_Assert(entry != tentryp->nextName); entry = tentryp->nextName; while (entry != 0) { memset(tentryp, 0, sizeof(struct prentry)); code = pr_ReadEntry(at, 0, entry, tentryp); if (code != 0) return 0; if ((strncmp(aname, tentryp->name, PR_MAXNAMELEN)) == 0) return entry; osi_Assert(entry != tentryp->nextName); entry = tentryp->nextName; } return 0; }
/* Copy an inode handle: take an additional reference and return the same
 * handle.  The caller must already hold a reference (asserted below). */
IHandle_t *
ih_copy(IHandle_t * ihP)
{
    IH_LOCK;
    osi_Assert(ih_Inited);
    /* refcnt must be held by the caller; we only add one more */
    osi_Assert(ihP->ih_refcnt > 0);
    ihP->ih_refcnt++;
    IH_UNLOCK;
    return ihP;
}
/*
 * Actually close the file descriptor handle and return it to
 * the free list.
 */
int
fd_reallyclose(FdHandle_t * fdP)
{
    FD_t closeFd;
    IHandle_t *ihP;

    if (!fdP)
	return 0;

    IH_LOCK;
    osi_Assert(ih_Inited);
    osi_Assert(fdInUseCount > 0);
    osi_Assert(fdP->fd_status == FD_HANDLE_INUSE
	       || fdP->fd_status == FD_HANDLE_CLOSING);

    ihP = fdP->fd_ih;
    closeFd = fdP->fd_fd;
    fdP->fd_refcnt--;

    if (fdP->fd_refcnt == 0) {
	/* last user: unlink from the inode's fd list and move the handle
	 * onto the avail list, clearing its fields for reuse */
	DLL_DELETE(fdP, ihP->ih_fdhead, ihP->ih_fdtail, fd_ihnext, fd_ihprev);
	DLL_INSERT_TAIL(fdP, fdAvailHead, fdAvailTail, fd_next, fd_prev);
	fdP->fd_status = FD_HANDLE_AVAIL;
	fdP->fd_refcnt = 0;
	fdP->fd_ih = NULL;
	fdP->fd_fd = INVALID_FD;
    }

    /* All the file descriptor handles have been closed; reset
     * the IH_REALLY_CLOSED flag indicating that ih_reallyclose
     * has completed its job.
     */
    if (!ihP->ih_fdhead) {
	ihP->ih_flags &= ~IH_REALLY_CLOSED;
    }

    if (fdP->fd_refcnt == 0) {
	/* drop IH_LOCK around the close: OS_CLOSE may block */
	IH_UNLOCK;
	OS_CLOSE(closeFd);
	IH_LOCK;
	fdInUseCount -= 1;
    }

    /* If this is not the only reference to the Inode then we can decrement
     * the reference count, otherwise we need to call ih_release.
     */
    if (ihP->ih_refcnt > 1)
	ihP->ih_refcnt--;
    else
	_ih_release_r(ihP);

    IH_UNLOCK;
    return 0;
}
/* Allocate a chunk of inode handles */ void iHandleAllocateChunk(void) { int i; IHandle_t *ihP; osi_Assert(ihAvailHead == NULL); ihP = malloc(I_HANDLE_MALLOCSIZE * sizeof(IHandle_t)); osi_Assert(ihP != NULL); for (i = 0; i < I_HANDLE_MALLOCSIZE; i++) { ihP[i].ih_refcnt = 0; DLL_INSERT_TAIL(&ihP[i], ihAvailHead, ihAvailTail, ih_next, ih_prev); } }
/**
 * get a socket descriptor of the appropriate domain.
 *
 * @param[in]  endpoint  pointer to sync endpoint object
 *
 * @return socket descriptor
 *
 * @post socket of domain specified in endpoint structure is created and
 *       returned to caller.
 */
osi_socket
SYNC_getSock(SYNC_endpoint_t * endpoint)
{
    osi_socket sd;

    /* Fix: the socket() call was previously made inside osi_Assert();
     * if asserts are ever compiled out the side effect disappears and
     * sd is returned uninitialized.  Perform the call unconditionally
     * and assert only on the result. */
    sd = socket(endpoint->domain, SOCK_STREAM, 0);
    osi_Assert(sd >= 0);
    return sd;
}
/** * low-level thread entry point. * * @param[in] rock opaque pointer to thread worker object * * @return opaque return pointer from pool entry function * * @internal */ static void * _afs_tp_worker_run(void * rock) { struct afs_thread_pool_worker * worker = rock; struct afs_thread_pool * pool = worker->pool; /* register worker with pool */ MUTEX_ENTER(&pool->lock); queue_Append(&pool->thread_list, worker); pool->nthreads++; MUTEX_EXIT(&pool->lock); /* call high-level entry point */ worker->ret = (*pool->entry)(pool, worker, pool->work_queue, pool->rock); /* adjust pool live thread count */ MUTEX_ENTER(&pool->lock); osi_Assert(pool->nthreads); queue_Remove(worker); pool->nthreads--; if (!pool->nthreads) { CV_BROADCAST(&pool->shutdown_cv); pool->state = AFS_TP_STATE_STOPPED; } MUTEX_EXIT(&pool->lock); _afs_tp_worker_free(worker); return NULL; }
/* Open a file for buffered I/O.  The stdio-style mode string is mapped
 * onto open(2) flags via a lookup table; unknown modes assert. */
StreamHandle_t *
stream_open(const char *filename, const char *mode)
{
    static const struct {
	const char *mode;
	int flags;
    } mode_tbl[] = {
	{ "r",  O_RDONLY },
	{ "r+", O_RDWR },
	{ "w",  O_WRONLY | O_TRUNC | O_CREAT },
	{ "w+", O_RDWR | O_TRUNC | O_CREAT },
	{ "a",  O_WRONLY | O_APPEND | O_CREAT },
	{ "a+", O_RDWR | O_APPEND | O_CREAT },
    };
    FD_t fd = INVALID_FD;
    size_t i;
    int matched = 0;

    for (i = 0; i < sizeof(mode_tbl) / sizeof(mode_tbl[0]); i++) {
	if (strcmp(mode, mode_tbl[i].mode) == 0) {
	    fd = OS_OPEN(filename, mode_tbl[i].flags, 0);
	    matched = 1;
	    break;
	}
    }
    if (!matched) {
	osi_Assert(FALSE);	/* not implemented */
    }

    if (fd == INVALID_FD) {
	return NULL;
    }
    return stream_fdopen(fd);
}
/**
 * Init a new dynroot volume.
 * @param Volume FID.
 * @return Volume or NULL if not found.
 */
static struct volume *
afs_NewDynrootVolume(struct VenusFid *fid)
{
    struct cell *tcell;
    struct volume *tv;
    struct vldbentry *tve;
    char *bp, tbuf[CVBS];

    tcell = afs_GetCell(fid->Cell, READ_LOCK);
    if (!tcell)
	return NULL;
    /* the vldbentry is large: heap-allocate it rather than using stack */
    tve = afs_osi_Alloc(sizeof(*tve));
    osi_Assert(tve != NULL);
    if (!(tcell->states & CHasVolRef))
	tcell->states |= CHasVolRef;

    /* render the volume id as a decimal string for afs_SetupVolume */
    bp = afs_cv2string(&tbuf[CVBS], fid->Fid.Volume);

    /* build a synthetic VLDB entry describing the dynroot as a
     * read-only volume with the requested id */
    memset(tve, 0, sizeof(*tve));
    strcpy(tve->name, "local-dynroot");
    tve->volumeId[ROVOL] = fid->Fid.Volume;
    tve->flags = VLF_ROEXISTS;

    tv = afs_SetupVolume(0, bp, tve, tcell, 0, 0, 0);
    afs_PutCell(tcell, READ_LOCK);
    afs_osi_Free(tve, sizeof(*tve));
    return tv;
}
/* Allocate a fresh directory buffer slot for (adc, page) without reading
 * its current contents from disk, extending the chunk if needed.
 * Returns 0 on success, EIO if no slot could be obtained.
 * Caller holds adc->lock write-locked. */
int
DNew(struct dcache *adc, int page, struct DirBuffer *entry)
{
    /* Same as read, only do *not* even try to read the page, since it
     * probably doesn't exist. */
    struct buffer *tb;
    AFS_STATCNT(DNew);

    ObtainWriteLock(&afs_bufferLock, 264);
    if ((tb = afs_newslot(adc, page, NULL)) == 0) {
	ReleaseWriteLock(&afs_bufferLock);
	return EIO;
    }
    /* extend the chunk, if needed */
    /* Do it now, not in DFlush or afs_newslot when the data is written out,
     * since now our caller has adc->lock writelocked, and we can't acquire
     * that lock (or even map from a fid to a dcache) in afs_newslot or
     * DFlush due to lock hierarchy issues */
    if ((page + 1) * AFS_BUFFER_PAGESIZE > adc->f.chunkBytes) {
	afs_AdjustSize(adc, (page + 1) * AFS_BUFFER_PAGESIZE);
	/* NOTE(review): the dcache write-out happens inside osi_Assert();
	 * if osi_Assert were ever compiled away the write would be lost --
	 * confirm osi_Assert is unconditionally enabled in this tree. */
	osi_Assert(afs_WriteDCache(adc, 1) == 0);
    }
    /* pin the buffer (lockers++) before dropping afs_bufferLock so it
     * cannot be reclaimed; tb->lock is held only around the increment */
    ObtainWriteLock(&tb->lock, 265);
    tb->lockers++;
    ReleaseWriteLock(&afs_bufferLock);
    ReleaseWriteLock(&tb->lock);
    entry->buffer = tb;
    entry->data = tb->data;

    return 0;
}
/*
 * Allocate and register a new exporter at the tail of the root_exported
 * list.
 *
 * @param[in] size   allocation size; 0 means sizeof(struct afs_exporter)
 * @param[in] ops    operation vector for the exporter
 * @param[in] state  initial exp_states value
 * @param[in] type   exporter type tag
 * @param[in] data   opaque per-exporter data pointer
 *
 * @return the newly allocated exporter (never NULL; allocation is asserted)
 */
struct afs_exporter *
exporter_add(afs_int32 size, struct exporterops *ops, afs_int32 state,
	     afs_int32 type, char *data)
{
    struct afs_exporter *ex, *op;
    afs_int32 length;

    AFS_STATCNT(exporter_add);
    if (!init_xexported) {
	init_xexported = 1;
	LOCK_INIT(&afs_xexp, "afs_xexp");
    }
    length = (size ? size : sizeof(struct afs_exporter));
    ex = afs_osi_Alloc(length);
    osi_Assert(ex != NULL);
    memset(ex, 0, length);

    /* Fix: fully initialize the new exporter BEFORE linking it into
     * root_exported.  The original set exp_op/exp_states/exp_data/exp_type
     * only after releasing afs_xexp, so a concurrent list walker could
     * observe a half-initialized entry. */
    ex->exp_next = 0;
    ex->exp_op = ops;
    ex->exp_states = state;
    ex->exp_data = data;
    ex->exp_type = type;

    ObtainWriteLock(&afs_xexp, 308);
    /* find the current tail of the list */
    for (op = root_exported; op; op = op->exp_next) {
	if (!op->exp_next)
	    break;
    }
    if (op)
	op->exp_next = ex;
    else
	root_exported = ex;
    ReleaseWriteLock(&afs_xexp);

    return ex;
}
/* Allocate a chunk of stream handles */ void streamHandleAllocateChunk(void) { int i; StreamHandle_t *streamP; osi_Assert(streamAvailHead == NULL); streamP = (StreamHandle_t *) malloc(STREAM_HANDLE_MALLOCSIZE * sizeof(StreamHandle_t)); osi_Assert(streamP != NULL); for (i = 0; i < STREAM_HANDLE_MALLOCSIZE; i++) { streamP[i].str_fd = INVALID_FD; DLL_INSERT_TAIL(&streamP[i], streamAvailHead, streamAvailTail, str_next, str_prev); } }
/* Release an Inode handle. All cached file descriptors for this
 * inode are closed when the last reference to this handle is released.
 * Caller holds IH_LOCK (hence the _r suffix). */
static int
_ih_release_r(IHandle_t * ihP)
{
    int ihash;

    if (!ihP)
	return 0;

    osi_Assert(ihP->ih_refcnt > 0);

    /* other holders remain: just drop our reference */
    if (ihP->ih_refcnt > 1) {
	ihP->ih_refcnt--;
	return 0;
    }

    /* last reference: unhash the handle, close its cached descriptors,
     * and return it to the avail list for reuse */
    ihash = IH_HASH(ihP->ih_dev, ihP->ih_vid, ihP->ih_ino);
    DLL_DELETE(ihP, ihashTable[ihash].ihash_head,
	       ihashTable[ihash].ihash_tail, ih_next, ih_prev);

    ih_fdclose(ihP);

    ihP->ih_refcnt--;

    DLL_INSERT_TAIL(ihP, ihAvailHead, ihAvailTail, ih_next, ih_prev);

    return 0;
}
/* Parse the common volume-operation command arguments (-volumeid,
 * -vnodeid, -unique, -partition) into a freshly calloc'd state->vop.
 * Always returns 0; missing required arguments only print a warning
 * and leave the zeroed default in place. */
static int
vn_prolog(struct cmd_syndesc * as, struct fssync_state * state)
{
    struct cmd_item *ti;

    state->vop = (struct volop_state *) calloc(1, sizeof(struct volop_state));
    osi_Assert(state->vop != NULL);

    if ((ti = as->parms[CUSTOM_PARMS_OFFSET].items)) {	/* -volumeid */
	state->vop->volume = atoi(ti->data);
    } else {
	/* NOTE(review): only warns; continues with volume == 0 from calloc */
	fprintf(stderr, "required argument -volumeid not given\n");
    }

    if ((ti = as->parms[CUSTOM_PARMS_OFFSET+1].items)) {	/* -vnodeid */
	state->vop->vnode = atoi(ti->data);
    } else {
	fprintf(stderr, "required argument -vnodeid not given\n");
    }

    if ((ti = as->parms[CUSTOM_PARMS_OFFSET+2].items)) {	/* -unique */
	state->vop->unique = atoi(ti->data);
    } else {
	state->vop->unique = 0;
    }

    /* NOTE(review): this index uses COMMON_VOLOP_PARMS_OFFSET+3 while the
     * three above use CUSTOM_PARMS_OFFSET+n -- confirm both macros address
     * the same region of the parameter table. */
    if ((ti = as->parms[COMMON_VOLOP_PARMS_OFFSET+3].items)) {	/* -partition */
	strlcpy(state->vop->partName, ti->data, sizeof(state->vop->partName));
    } else {
	memset(state->vop->partName, 0, sizeof(state->vop->partName));
    }

    return 0;
}
/* Close all cached file descriptors for this inode. */
int
ih_reallyclose(IHandle_t * ihP)
{
    if (!ihP)
	return 0;

    IH_LOCK;
    ihP->ih_refcnt++;	/* must not disappear over unlock */
    if (ihP->ih_synced) {
	FdHandle_t *fdP;
	/* a sync was requested: flush file data to disk before closing.
	 * IH_LOCK is dropped because IH_OPEN/OS_SYNC may block. */
	ihP->ih_synced = 0;
	IH_UNLOCK;

	fdP = IH_OPEN(ihP);
	if (fdP) {
	    OS_SYNC(fdP->fd_fd);
	    FDH_CLOSE(fdP);
	}

	IH_LOCK;
    }

    osi_Assert(ihP->ih_refcnt > 0);

    /* close every cached descriptor belonging to this inode */
    ih_fdclose(ihP);

    /* drop the reference taken above; releasing the last one recycles
     * the handle */
    if (ihP->ih_refcnt > 1)
	ihP->ih_refcnt--;
    else
	_ih_release_r(ihP);

    IH_UNLOCK;
    return 0;
}
/* Free a buffered I/O handle: flush any pending write data, optionally
 * close the underlying descriptor, and return the handle to the free
 * list.  Returns 0 on success, -1 if the flush or close failed. */
int
stream_close(StreamHandle_t * streamP, int reallyClose)
{
    int retval = 0;
    ssize_t nbytes;

    osi_Assert(streamP != NULL);

    /* flush buffered write data, advancing the file offset on success */
    if (streamP->str_direction == STREAM_DIRECTION_WRITE
	&& streamP->str_bufoff > 0) {
	nbytes = OS_PWRITE(streamP->str_fd, streamP->str_buffer,
			   streamP->str_bufoff, streamP->str_fdoff);
	if (nbytes < 0)
	    retval = -1;
	else
	    streamP->str_fdoff += nbytes;
    }

    if (reallyClose && OS_CLOSE(streamP->str_fd) < 0)
	retval = -1;
    streamP->str_fd = INVALID_FD;

    /* hand the handle back to the avail list */
    IH_LOCK;
    DLL_INSERT_TAIL(streamP, streamAvailHead, streamAvailTail,
		    str_next, str_prev);
    IH_UNLOCK;

    return retval;
}
/**
 * lock a file on disk for the process.
 *
 * @param[in] lf        the struct VLockFile representing the file to lock
 * @param[in] offset    the offset in the file to lock
 * @param[in] locktype  READ_LOCK or WRITE_LOCK
 * @param[in] nonblock  0 to wait for conflicting locks to clear before
 *                      obtaining the lock; 1 to fail immediately if a
 *                      conflicting lock is held by someone else
 *
 * @return operation status
 *  @retval 0 success
 *  @retval EBUSY someone else is holding a conflicting lock and nonblock=1
 *          was specified
 *  @retval EIO error acquiring file lock
 *
 * @note DAFS only
 *
 * @note do not try to lock/unlock the same offset in the same file from
 * different threads; use VGetDiskLock to protect threads from each other in
 * addition to other processes
 */
int
VLockFileLock(struct VLockFile *lf, afs_uint32 offset, int locktype,
	      int nonblock)
{
    int rc;

    osi_Assert(locktype == READ_LOCK || locktype == WRITE_LOCK);

    /* lazily open the lock file on first use, under the mutex */
    MUTEX_ENTER(&lf->mutex);
    if (lf->fd == INVALID_FD) {
	lf->fd = _VOpenPath(lf->path);
	if (lf->fd == INVALID_FD) {
	    MUTEX_EXIT(&lf->mutex);
	    return EIO;
	}
    }
    lf->refcount++;
    MUTEX_EXIT(&lf->mutex);

    /* take the byte-range lock outside the mutex; it may block */
    rc = _VLockFd(lf->fd, offset, locktype, nonblock);

    if (rc) {
	/* failed: drop our reference, closing the fd if we were the
	 * last holder */
	MUTEX_ENTER(&lf->mutex);
	if (--lf->refcount < 1) {
	    _VCloseFd(lf->fd);
	    lf->fd = INVALID_FD;
	}
	MUTEX_EXIT(&lf->mutex);
    }

    return rc;
}
/**
 * append to a node list object.
 *
 * @param[in] list   list object
 * @param[in] node   node object
 * @param[in] state  new node state
 *
 * @return operation status
 *  @retval 0 success
 *  @retval AFS_WQ_ERROR raced to enqueue node
 *
 * @pre
 *   - node lock held
 *   - node is not on a list
 *   - node is either not busy, or it is marked as busy by the calling thread
 *
 * @post
 *   - enqueued on list
 *   - node lock dropped
 *
 * @internal
 */
static int
_afs_wq_node_list_enqueue(struct afs_work_queue_node_list * list,
			  struct afs_work_queue_node * node,
			  afs_wq_work_state_t state)
{
    int code, ret = 0;

    if (node->qidx != AFS_WQ_NODE_LIST_NONE) {
	/* raced */
	ret = AFS_WQ_ERROR;
	goto error;
    }

    /* deal with lock inversion */
    code = MUTEX_TRYENTER(&list->lock);
    if (!code) {
	/* contended: mark the node BUSY so no one else moves it, drop the
	 * node lock, then take the locks in list-then-node order */
	_afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_BUSY);
	MUTEX_EXIT(&node->lock);
	MUTEX_ENTER(&list->lock);
	MUTEX_ENTER(&node->lock);

	/* assert state of the world (we set busy, so this should never happen) */
	osi_Assert(queue_IsNotOnQueue(node));
    }

    if (list->shutdown) {
	ret = AFS_WQ_ERROR;
	goto error_unlock;
    }

    osi_Assert(node->qidx == AFS_WQ_NODE_LIST_NONE);

    if (queue_IsEmpty(&list->list)) {
	/* wakeup a dequeue thread */
	CV_SIGNAL(&list->cv);
    }
    queue_Append(&list->list, node);
    node->qidx = list->qidx;
    _afs_wq_node_state_change(node, state);

 error_unlock:
    MUTEX_EXIT(&node->lock);
    MUTEX_EXIT(&list->lock);

 error:
    return ret;
}
/*
 * Return a file descriptor handle to the cache
 */
int
fd_close(FdHandle_t * fdP)
{
    IHandle_t *ihP;

    if (!fdP)
	return 0;

    IH_LOCK;
    osi_Assert(ih_Inited);
    osi_Assert(fdInUseCount > 0);
    osi_Assert(fdP->fd_status == FD_HANDLE_INUSE
	       || fdP->fd_status == FD_HANDLE_CLOSING);

    ihP = fdP->fd_ih;

    /* Call fd_reallyclose to really close the unused file handles if
     * the previous attempt to close (ih_reallyclose()) all file handles
     * failed (this is determined by checking the ihandle for the flag
     * IH_REALLY_CLOSED) or we have too many open files.
     */
    if (fdP->fd_status == FD_HANDLE_CLOSING
	|| ihP->ih_flags & IH_REALLY_CLOSED || fdInUseCount > fdCacheSize) {
	IH_UNLOCK;
	return fd_reallyclose(fdP);
    }

    fdP->fd_refcnt--;
    if (fdP->fd_refcnt == 0) {
	/* Put this descriptor back into the cache */
	fdP->fd_status = FD_HANDLE_OPEN;
	DLL_INSERT_TAIL(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
    }

    /* If this is not the only reference to the Inode then we can decrement
     * the reference count, otherwise we need to call ih_release. */
    if (ihP->ih_refcnt > 1)
	ihP->ih_refcnt--;
    else
	_ih_release_r(ihP);

    IH_UNLOCK;
    return 0;
}
/** * put back a reference to a work node. * * @param[in] node work queue node * @param[in] drop drop node->lock * * @post if refcount reaches zero, node is deallocated. * * @return operation status * @retval 0 success * * @pre node->lock held * * @internal */ static int _afs_wq_node_put_r(struct afs_work_queue_node * node, int drop) { afs_uint32 refc; osi_Assert(node->refcount > 0); refc = --node->refcount; if (drop) { MUTEX_EXIT(&node->lock); } if (!refc) { osi_Assert(node->qidx == AFS_WQ_NODE_LIST_NONE); _afs_wq_node_free(node); } return 0; }
/* Load and cache the target of a symlink (or mount point) into
 * avc->linkData.  Returns 0 on success, ENETDOWN when disconnected,
 * EIO on read failure, EFAULT if the link is implausibly long. */
int
afs_UFSHandleLink(struct vcache *avc, struct vrequest *areq)
{
    struct dcache *tdc;
    char *tp, *rbuf;
    void *tfile;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    afs_int32 code;

    /* two different formats, one for links protected 644, have a "." at the
     * end of the file name, which we turn into a null. Others, protected
     * 755, we add a null to the end of */
    AFS_STATCNT(afs_UFSHandleLink);
    if (!avc->linkData) {
	tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
	afs_Trace3(afs_iclSetp, CM_TRACE_UFSLINK, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_POINTER, tdc, ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(avc->f.m.Length));
	if (!tdc) {
	    if (AFS_IS_DISCONNECTED)
		return ENETDOWN;
	    else
		return EIO;
	}
	/* otherwise we have the data loaded, go for it */
	if (len > 1024) {
	    afs_PutDCache(tdc);
	    return EFAULT;
	}
	if (avc->f.m.Mode & 0111)
	    alen = len + 1;	/* regular link */
	else
	    alen = len;		/* mt point */
	/* NOTE(review): assumes AFS_LRALLOCSIZ >= 1025 so rbuf can hold
	 * len (<= 1024) bytes plus the terminator -- confirm. */
	rbuf = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	tlen = len;
	/* read the raw link contents out of the cache file */
	ObtainReadLock(&tdc->lock);
	tfile = osi_UFSOpen(&tdc->f.inode);
	code = afs_osi_Read(tfile, -1, rbuf, tlen);
	osi_UFSClose(tfile);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
	/* for mode-644 links this overwrites the trailing "."; for mount
	 * points it appends the terminating NUL (alen == len) */
	rbuf[alen - 1] = '\0';
	alen = strlen(rbuf) + 1;
	tp = afs_osi_Alloc(alen);	/* make room for terminating null */
	osi_Assert(tp != NULL);
	memcpy(tp, rbuf, alen);
	osi_FreeLargeSpace(rbuf);
	if (code != tlen) {
	    /* short or failed read: discard the copy and report I/O error */
	    afs_osi_Free(tp, alen);
	    return EIO;
	}
	avc->linkData = tp;
    }
    return 0;
}
/* Allocate a chunk of file descriptor handles */ void fdHandleAllocateChunk(void) { int i; FdHandle_t *fdP; osi_Assert(fdAvailHead == NULL); fdP = malloc(FD_HANDLE_MALLOCSIZE * sizeof(FdHandle_t)); osi_Assert(fdP != NULL); for (i = 0; i < FD_HANDLE_MALLOCSIZE; i++) { fdP[i].fd_status = FD_HANDLE_AVAIL; fdP[i].fd_refcnt = 0; fdP[i].fd_ih = NULL; fdP[i].fd_fd = INVALID_FD; fdP[i].fd_ihnext = NULL; fdP[i].fd_ihprev = NULL; DLL_INSERT_TAIL(&fdP[i], fdAvailHead, fdAvailTail, fd_next, fd_prev); } }
/**
 * free a cache entry.
 *
 * @param[in] entry  cache entry; its refcnt must already be zero
 *
 * @return operation status
 *  @retval 0 success
 *
 * @internal
 */
static int
_VVGC_entry_free(VVGCache_entry_t * entry)
{
    /* freeing a referenced entry would be a use-after-free in waiting */
    osi_Assert(entry->refcnt == 0);
    free(entry);
    return 0;
}
/**
 * initialize a struct VDiskLock.
 *
 * Zeroes the structure, sets up its rwlock/mutex/condvar, and associates
 * it with the given lock file at the given byte offset.
 *
 * @param[in] dl      struct VDiskLock to initialize
 * @param[in] lf      the struct VLockFile to associate with this disk lock
 * @param[in] offset  byte offset within lf covered by this lock
 */
void
VDiskLockInit(struct VDiskLock *dl, struct VLockFile *lf, afs_uint32 offset)
{
    osi_Assert(lf);

    /* start from a clean slate, then record the association */
    memset(dl, 0, sizeof(*dl));
    dl->lockfile = lf;
    dl->offset = offset;

    /* set up the synchronization primitives */
    Lock_Init(&dl->rwlock);
    MUTEX_INIT(&dl->mutex, "disklock", MUTEX_DEFAULT, 0);
    CV_INIT(&dl->cv, "disklock cv", CV_DEFAULT, 0);
}
/* Remove the entry with id aid from the id hash chain, returning its
 * disk address through *loc.  Returns PRSUCCESS (also when the entry
 * is already absent), PRINCONSISTENT for bad ids, PRDBFAIL on db I/O
 * failure. */
afs_int32
RemoveFromIDHash(struct ubik_trans *tt, afs_int32 aid, afs_int32 *loc)	/* ??? in case ID hashed twice ??? */
{
    /* remove entry designated by aid from id hash table */
    afs_int32 code;
    afs_int32 current, trail, i;
    struct prentry tentry;
    struct prentry bentry;

    if ((aid == PRBADID) || (aid == 0))
	return PRINCONSISTENT;
    i = IDHash(aid);
    current = ntohl(cheader.idHash[i]);
    memset(&tentry, 0, sizeof(tentry));
    memset(&bentry, 0, sizeof(bentry));
    trail = 0;
    if (current == 0)
	return PRSUCCESS;	/* already gone */
    code = pr_ReadEntry(tt, 0, current, &tentry);
    if (code)
	return PRDBFAIL;
    /* walk the chain with a trailing pointer; the assert guards against a
     * self-referential link (corrupt db) looping forever */
    while (aid != tentry.id) {
	osi_Assert(trail != current);
	trail = current;
	current = tentry.nextID;
	if (current == 0)
	    break;
	code = pr_ReadEntry(tt, 0, current, &tentry);
	if (code)
	    return PRDBFAIL;
    }
    if (current == 0)
	return PRSUCCESS;	/* we didn't find him, so he's already gone */
    if (trail == 0) {
	/* it's the first entry! */
	cheader.idHash[i] = htonl(tentry.nextID);
	/* NOTE(review): 72 + HASHSIZE * 4 + i * 4 is presumably the byte
	 * offset of cheader.idHash[i] in the on-disk header -- confirm
	 * against the prdb header layout. */
	code =
	    pr_Write(tt, 0, 72 + HASHSIZE * 4 + i * 4,
		     (char *)&cheader.idHash[i], sizeof(cheader.idHash[i]));
	if (code)
	    return PRDBFAIL;
    } else {
	/* splice the entry out by rewriting its predecessor's link */
	code = pr_ReadEntry(tt, 0, trail, &bentry);
	if (code)
	    return PRDBFAIL;
	bentry.nextID = tentry.nextID;
	code = pr_WriteEntry(tt, 0, trail, &bentry);
	if (code)
	    return PRDBFAIL;
    }
    *loc = current;
    return PRSUCCESS;
}
/* Initialize the venus buffer system: allocate abuffers directory
 * buffers (rounded up to a multiple of NPB) and their backing pages,
 * and reset the page hash table.  Idempotent via dinit_flag. */
void
DInit(int abuffers)
{
    /* Initialize the venus buffer system. */
    int i;
    struct buffer *tb;

    AFS_STATCNT(DInit);
    if (dinit_flag)
	return;
    dinit_flag = 1;
    /* round up to next multiple of NPB, since we allocate multiple pages per chunk
     * (works because NPB is a power of two: set the low bits, then add 1) */
    abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
    afs_max_buffers = abuffers << 2;	/* possibly grow up to 4 times as big */
    LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
    Buffers = afs_osi_Alloc(afs_max_buffers * sizeof(struct buffer));
    osi_Assert(Buffers != NULL);
    timecounter = 1;
    afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
    for (i = 0; i < PHSIZE; i++)
	phTable[i] = 0;
    for (i = 0; i < abuffers; i++) {
	if ((i & (NPB - 1)) == 0) {
	    /* time to allocate a fresh buffer */
	    BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
	    osi_Assert(BufferData != NULL);
	}
	/* Fill in each buffer with an empty indication. */
	tb = &Buffers[i];
	tb->fid = NULLIDX;
	afs_reset_inode(&tb->inode);
	tb->accesstime = 0;
	tb->lockers = 0;
	/* point this buffer's data at its slot in the current NPB-page chunk */
	tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
	tb->hashIndex = 0;
	tb->dirty = 0;
	AFS_RWLOCK_INIT(&tb->lock, "buffer lock");
    }
    return;
}
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held. It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    /* drop AFS locks before calling into the IRIX VM layer */
    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();

    /* current remapf restriction - cannot have VOP_RWLOCK */
    osi_Assert(OSI_GET_LOCKID() != avc->vc_rwlockid);

    /* unmap any mapped regions of a regular file before tossing its pages */
    if (((vnode_t *) avc)->v_type == VREG && AFS_VN_MAPPED(((vnode_t *) avc)))
	remapf(((vnode_t *) avc), 0, 0);

    /* discard all pages for the whole file */
    PTOSSVP(AFSTOV(avc), (off_t) 0, (off_t) MAXLONG);

    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 62);
}
/* Remove the entry named aname from the name hash chain, returning its
 * disk address through *loc.  Returns PRSUCCESS (also when the entry is
 * already absent) or PRDBFAIL on db I/O failure. */
afs_int32
RemoveFromNameHash(struct ubik_trans *tt, char *aname, afs_int32 *loc)
{
    /* remove from name hash */
    afs_int32 code;
    afs_int32 current, trail, i;
    struct prentry tentry;
    struct prentry bentry;

    i = NameHash(aname);
    current = ntohl(cheader.nameHash[i]);
    memset(&tentry, 0, sizeof(tentry));
    memset(&bentry, 0, sizeof(bentry));
    trail = 0;
    if (current == 0)
	return PRSUCCESS;	/* already gone */
    code = pr_ReadEntry(tt, 0, current, &tentry);
    if (code)
	return PRDBFAIL;
    /* walk the chain with a trailing pointer; the assert guards against a
     * self-referential link (corrupt db) looping forever */
    while (strcmp(aname, tentry.name)) {
	osi_Assert(trail != current);
	trail = current;
	current = tentry.nextName;
	if (current == 0)
	    break;
	code = pr_ReadEntry(tt, 0, current, &tentry);
	if (code)
	    return PRDBFAIL;
    }
    if (current == 0)
	return PRSUCCESS;	/* we didn't find him, already gone */
    if (trail == 0) {
	/* it's the first entry! */
	cheader.nameHash[i] = htonl(tentry.nextName);
	/* NOTE(review): 72 + i * 4 is presumably the byte offset of
	 * cheader.nameHash[i] in the on-disk header -- confirm against
	 * the prdb header layout. */
	code =
	    pr_Write(tt, 0, 72 + i * 4, (char *)&cheader.nameHash[i],
		     sizeof(cheader.nameHash[i]));
	if (code)
	    return PRDBFAIL;
    } else {
	/* splice the entry out by rewriting its predecessor's link */
	code = pr_ReadEntry(tt, 0, trail, &bentry);
	if (code)
	    return PRDBFAIL;
	bentry.nextName = tentry.nextName;
	code = pr_WriteEntry(tt, 0, trail, &bentry);
	if (code)
	    return PRDBFAIL;
    }
    *loc = current;
    return PRSUCCESS;
}