/* Free a buffered I/O handle */
int
stream_close(StreamHandle_t * streamP, int reallyClose)
{
    ssize_t rc;
    int retval = 0;

    osi_Assert(streamP != NULL);
    if (streamP->str_direction == STREAM_DIRECTION_WRITE
        && streamP->str_bufoff > 0) {
        rc = OS_PWRITE(streamP->str_fd, streamP->str_buffer,
                       streamP->str_bufoff, streamP->str_fdoff);
        if (rc < 0) {
            retval = -1;
        } else {
            streamP->str_fdoff += rc;
        }
    }
    if (reallyClose) {
        rc = OS_CLOSE(streamP->str_fd);
        if (rc < 0) {
            retval = -1;
        }
    }
    streamP->str_fd = INVALID_FD;

    IH_LOCK;
    DLL_INSERT_TAIL(streamP, streamAvailHead, streamAvailTail,
                    str_next, str_prev);
    IH_UNLOCK;

    return retval;
}
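/*
 * Usage sketch (not part of the original source): stream_close() first
 * flushes any buffered write data, then optionally closes the underlying
 * descriptor before returning the StreamHandle_t to the avail list.  The
 * helper below is illustrative only and assumes the stream was produced by
 * a stream-open routine that is not shown in this section.
 */
static int
example_stream_flush_and_close(StreamHandle_t * streamP)
{
    /* A negative return means either the final flush or OS_CLOSE failed. */
    if (stream_close(streamP, 1 /* reallyClose */) < 0)
        return -1;
    return 0;
}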
/* Release an Inode handle. All cached file descriptors for this
 * inode are closed when the last reference to this handle is released
 */
static int
_ih_release_r(IHandle_t * ihP)
{
    int ihash;

    if (!ihP)
        return 0;

    osi_Assert(ihP->ih_refcnt > 0);

    if (ihP->ih_refcnt > 1) {
        ihP->ih_refcnt--;
        return 0;
    }

    ihash = IH_HASH(ihP->ih_dev, ihP->ih_vid, ihP->ih_ino);
    DLL_DELETE(ihP, ihashTable[ihash].ihash_head,
               ihashTable[ihash].ihash_tail, ih_next, ih_prev);

    ih_fdclose(ihP);

    ihP->ih_refcnt--;

    DLL_INSERT_TAIL(ihP, ihAvailHead, ihAvailTail, ih_next, ih_prev);

    return 0;
}
/*
 * Actually close the file descriptor handle and return it to
 * the free list.
 */
int
fd_reallyclose(FdHandle_t * fdP)
{
    FD_t closeFd;
    IHandle_t *ihP;

    if (!fdP)
        return 0;

    IH_LOCK;
    osi_Assert(ih_Inited);
    osi_Assert(fdInUseCount > 0);
    osi_Assert(fdP->fd_status == FD_HANDLE_INUSE
               || fdP->fd_status == FD_HANDLE_CLOSING);

    ihP = fdP->fd_ih;
    closeFd = fdP->fd_fd;
    fdP->fd_refcnt--;

    if (fdP->fd_refcnt == 0) {
        DLL_DELETE(fdP, ihP->ih_fdhead, ihP->ih_fdtail, fd_ihnext, fd_ihprev);
        DLL_INSERT_TAIL(fdP, fdAvailHead, fdAvailTail, fd_next, fd_prev);

        fdP->fd_status = FD_HANDLE_AVAIL;
        fdP->fd_refcnt = 0;
        fdP->fd_ih = NULL;
        fdP->fd_fd = INVALID_FD;
    }

    /* All the file descriptor handles have been closed; reset
     * the IH_REALLY_CLOSED flag indicating that ih_reallyclose
     * has completed its job.
     */
    if (!ihP->ih_fdhead) {
        ihP->ih_flags &= ~IH_REALLY_CLOSED;
    }

    if (fdP->fd_refcnt == 0) {
        IH_UNLOCK;
        OS_CLOSE(closeFd);
        IH_LOCK;
        fdInUseCount -= 1;
    }

    /* If this is not the only reference to the Inode then we can decrement
     * the reference count, otherwise we need to call ih_release.
     */
    if (ihP->ih_refcnt > 1)
        ihP->ih_refcnt--;
    else
        _ih_release_r(ihP);

    IH_UNLOCK;

    return 0;
}
/*
 * Insert a login token into the cache. If the user already has an entry,
 * then overwrite the old entry.
 */
int
weblog_login_store(char *user, char *cell, char *cksum, char *token,
                   int tokenLen, afs_uint32 expiration)
{
    int index;
    long curTime;
    struct weblog_login *loginP, *tmpP, loginTmp;

    int parseToken(char *tokenBuf);

    /*
     * Search the hash chain for a matching entry, free
     * expired entries as we search
     */
    index = weblog_login_hash(user, cell);
    curTime = time(NULL);

    loginP = weblog_login_cache[index].head;
    while (loginP != NULL) {
        if (strcmp(loginP->username, user) == 0
            && strcmp(loginP->cellname, cell) == 0) {
            break;
        }
        if (loginP->expiration < curTime) {
            tmpP = loginP;
            loginP = tmpP->next;
            DLL_DELETE(tmpP, weblog_login_cache[index].head,
                       weblog_login_cache[index].tail, next, prev);
            free(tmpP);
            continue;
        }
        loginP = loginP->next;
    }

    if (loginP == NULL) {
        loginP = (struct weblog_login *)malloc(sizeof(struct weblog_login));
        strcpy(&loginP->username[0], user);
        strcpy(&loginP->cellname[0], cell);
    } else {
        DLL_DELETE(loginP, weblog_login_cache[index].head,
                   weblog_login_cache[index].tail, next, prev);
    }

    memcpy((void *)&loginP->cksum[0], (void *)cksum, SHA_HASH_BYTES);
    loginP->expiration = expiration;
    loginP->tokenLen = getTokenLen(token);
    memcpy((void *)&loginP->token[0], (void *)token, MAXBUFF);

    DLL_INSERT_TAIL(loginP, weblog_login_cache[index].head,
                    weblog_login_cache[index].tail, next, prev);
    return 0;
}
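/*
 * Usage sketch (not part of the original source): caching a token for a
 * user.  weblog_login_store() compares its expiration argument against the
 * current time, so the caller passes an absolute expiry; the conversion
 * from a relative lifetime below, and the variable names, are assumptions
 * for illustration only.
 */
static void
example_cache_token(char *user, char *cell, char *cksum,
                    char *tokenBuf, int tokenLen, afs_uint32 lifetime)
{
    afs_uint32 expires = (afs_uint32) time(NULL) + lifetime;

    /* Overwrites any existing cache entry for this user/cell pair. */
    (void)weblog_login_store(user, cell, cksum, tokenBuf, tokenLen, expires);
}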
/* Allocate a chunk of inode handles */
void
iHandleAllocateChunk(void)
{
    int i;
    IHandle_t *ihP;

    osi_Assert(ihAvailHead == NULL);
    ihP = malloc(I_HANDLE_MALLOCSIZE * sizeof(IHandle_t));
    osi_Assert(ihP != NULL);
    for (i = 0; i < I_HANDLE_MALLOCSIZE; i++) {
        ihP[i].ih_refcnt = 0;
        DLL_INSERT_TAIL(&ihP[i], ihAvailHead, ihAvailTail, ih_next, ih_prev);
    }
}
/* Allocate a chunk of stream handles */
void
streamHandleAllocateChunk(void)
{
    int i;
    StreamHandle_t *streamP;

    osi_Assert(streamAvailHead == NULL);
    streamP = (StreamHandle_t *)
        malloc(STREAM_HANDLE_MALLOCSIZE * sizeof(StreamHandle_t));
    osi_Assert(streamP != NULL);
    for (i = 0; i < STREAM_HANDLE_MALLOCSIZE; i++) {
        streamP[i].str_fd = INVALID_FD;
        DLL_INSERT_TAIL(&streamP[i], streamAvailHead, streamAvailTail,
                        str_next, str_prev);
    }
}
/* Allocate a chunk of file descriptor handles */
void
fdHandleAllocateChunk(void)
{
    int i;
    FdHandle_t *fdP;

    assert(fdAvailHead == NULL);
    fdP = (FdHandle_t *) malloc(FD_HANDLE_MALLOCSIZE * sizeof(FdHandle_t));
    assert(fdP != NULL);
    for (i = 0; i < FD_HANDLE_MALLOCSIZE; i++) {
        fdP[i].fd_status = FD_HANDLE_AVAIL;
        fdP[i].fd_ih = NULL;
        fdP[i].fd_fd = INVALID_FD;
        DLL_INSERT_TAIL(&fdP[i], fdAvailHead, fdAvailTail, fd_next, fd_prev);
    }
}
/*
 * Return a file descriptor handle to the cache
 */
int
fd_close(FdHandle_t * fdP)
{
    IHandle_t *ihP;

    if (!fdP)
        return 0;

    IH_LOCK;
    osi_Assert(ih_Inited);
    osi_Assert(fdInUseCount > 0);
    osi_Assert(fdP->fd_status == FD_HANDLE_INUSE
               || fdP->fd_status == FD_HANDLE_CLOSING);

    ihP = fdP->fd_ih;

    /* Call fd_reallyclose to really close the unused file handles if
     * the previous attempt to close (ih_reallyclose()) all file handles
     * failed (this is determined by checking the ihandle for the flag
     * IH_REALLY_CLOSED) or we have too many open files.
     */
    if (fdP->fd_status == FD_HANDLE_CLOSING
        || ihP->ih_flags & IH_REALLY_CLOSED || fdInUseCount > fdCacheSize) {
        IH_UNLOCK;
        return fd_reallyclose(fdP);
    }

    fdP->fd_refcnt--;
    if (fdP->fd_refcnt == 0) {
        /* Put this descriptor back into the cache */
        fdP->fd_status = FD_HANDLE_OPEN;
        DLL_INSERT_TAIL(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
    }

    /* If this is not the only reference to the Inode then we can decrement
     * the reference count, otherwise we need to call ih_release.
     */
    if (ihP->ih_refcnt > 1)
        ihP->ih_refcnt--;
    else
        _ih_release_r(ihP);

    IH_UNLOCK;

    return 0;
}
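/*
 * Usage sketch (not part of the original source): choosing between the two
 * close paths defined above.  fd_close() normally just returns the
 * descriptor to the LRU cache for reuse, while fd_reallyclose() guarantees
 * that the OS-level descriptor is closed.  The deletion scenario is
 * illustrative only.
 */
static void
example_release_descriptor(FdHandle_t * fdP, int fileWasDeleted)
{
    if (fileWasDeleted)
        (void)fd_reallyclose(fdP);      /* close the OS descriptor now */
    else
        (void)fd_close(fdP);            /* keep it cached for later reuse */
}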
/* Initialize an inode handle */
IHandle_t *
ih_init(int dev, int vid, Inode ino)
{
    int ihash = IH_HASH(dev, vid, ino);
    IHandle_t *ihP;

    if (!ih_PkgDefaultsSet) {
        ih_PkgDefaults();
    }

    IH_LOCK;
    if (!ih_Inited) {
        ih_Initialize();
    }

    /* Do we already have a handle for this Inode? */
    for (ihP = ihashTable[ihash].ihash_head; ihP; ihP = ihP->ih_next) {
        if (ihP->ih_ino == ino && ihP->ih_vid == vid && ihP->ih_dev == dev) {
            ihP->ih_refcnt++;
            IH_UNLOCK;
            return ihP;
        }
    }

    /* Allocate and initialize a new Inode handle */
    if (ihAvailHead == NULL) {
        iHandleAllocateChunk();
    }
    ihP = ihAvailHead;
    osi_Assert(ihP->ih_refcnt == 0);
    DLL_DELETE(ihP, ihAvailHead, ihAvailTail, ih_next, ih_prev);
    ihP->ih_dev = dev;
    ihP->ih_vid = vid;
    ihP->ih_ino = ino;
    ihP->ih_flags = 0;
    ihP->ih_synced = 0;
    ihP->ih_refcnt = 1;
    DLL_INIT_LIST(ihP->ih_fdhead, ihP->ih_fdtail);
    DLL_INSERT_TAIL(ihP, ihashTable[ihash].ihash_head,
                    ihashTable[ihash].ihash_tail, ih_next, ih_prev);
    IH_UNLOCK;
    return ihP;
}
/* Close all unused file descriptors associated with the inode
 * handle. Called with IH_LOCK held. May drop and reacquire
 * IH_LOCK. Sets the IH_REALLY_CLOSED flag in the inode handle
 * if it fails to close all file handles.
 */
static int
ih_fdclose(IHandle_t * ihP)
{
    int closeCount, closedAll;
    FdHandle_t *fdP, *head, *tail, *next;

    osi_Assert(ihP->ih_refcnt > 0);

    closedAll = 1;
    DLL_INIT_LIST(head, tail);
    ihP->ih_flags &= ~IH_REALLY_CLOSED;

    /*
     * Remove the file descriptors for this Inode from the LRU queue
     * and the IHandle queue and put them on a temporary queue so we
     * can drop the lock before we close the files.
     */
    for (fdP = ihP->ih_fdhead; fdP != NULL; fdP = next) {
        next = fdP->fd_ihnext;
        osi_Assert(fdP->fd_ih == ihP);
        osi_Assert(fdP->fd_status == FD_HANDLE_OPEN
                   || fdP->fd_status == FD_HANDLE_INUSE
                   || fdP->fd_status == FD_HANDLE_CLOSING);
        if (fdP->fd_status == FD_HANDLE_OPEN) {
            /* Note that FdHandle_t's do not count against the parent
             * IHandle_t ref count when they are FD_HANDLE_OPEN. So, we don't
             * need to dec the parent IHandle_t ref count for each one we pull
             * off here.
             */
            DLL_DELETE(fdP, ihP->ih_fdhead, ihP->ih_fdtail, fd_ihnext,
                       fd_ihprev);
            DLL_DELETE(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
            DLL_INSERT_TAIL(fdP, head, tail, fd_next, fd_prev);
        } else {
            closedAll = 0;
            fdP->fd_status = FD_HANDLE_CLOSING;
            ihP->ih_flags |= IH_REALLY_CLOSED;
        }
    }

    /* If the ihandle reference count is 1, we should have
     * closed all file descriptors.
     */
    if (ihP->ih_refcnt == 1 || closedAll) {
        osi_Assert(closedAll);
        osi_Assert(!ihP->ih_fdhead);
        osi_Assert(!ihP->ih_fdtail);
    }

    if (head == NULL) {
        return 0;               /* No file descriptors closed */
    }

    IH_UNLOCK;
    /*
     * Close the file descriptors
     */
    closeCount = 0;
    for (fdP = head; fdP != NULL; fdP = fdP->fd_next) {
        OS_CLOSE(fdP->fd_fd);
        fdP->fd_status = FD_HANDLE_AVAIL;
        fdP->fd_refcnt = 0;
        fdP->fd_fd = INVALID_FD;
        fdP->fd_ih = NULL;
        closeCount++;
    }

    IH_LOCK;
    osi_Assert(fdInUseCount >= closeCount);
    fdInUseCount -= closeCount;

    /*
     * Append the temporary queue to the list of available descriptors
     */
    if (fdAvailHead == NULL) {
        fdAvailHead = head;
        fdAvailTail = tail;
    } else {
        fdAvailTail->fd_next = head;
        head->fd_prev = fdAvailTail;
        fdAvailTail = tail;
    }

    return 0;
}
/*
 * Get a file descriptor handle given an Inode handle
 */
FdHandle_t *
ih_open(IHandle_t * ihP)
{
    FdHandle_t *fdP;
    FD_t fd;
    FD_t closeFd;

    if (!ihP)                   /* XXX should log here in the fileserver */
        return NULL;

    IH_LOCK;

    /* Do we already have an open file handle for this Inode? */
    for (fdP = ihP->ih_fdtail; fdP != NULL; fdP = fdP->fd_ihprev) {
        if (fdP->fd_status == FD_HANDLE_CLOSING) {
            /* The handle was open when an IH_REALLYCLOSE was issued, so we
             * cannot reuse it; it will be closed soon. */
            continue;
        }
#ifndef HAVE_PIO
        /*
         * If we don't have positional i/o, don't try to share fds, since
         * we can't do so in a threadsafe way.
         */
        if (fdP->fd_status == FD_HANDLE_INUSE) {
            continue;
        }
        osi_Assert(fdP->fd_status == FD_HANDLE_OPEN);
#else /* HAVE_PIO */
        osi_Assert(fdP->fd_status != FD_HANDLE_AVAIL);
#endif /* HAVE_PIO */

        fdP->fd_refcnt++;
        if (fdP->fd_status == FD_HANDLE_OPEN) {
            fdP->fd_status = FD_HANDLE_INUSE;
            DLL_DELETE(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
        }
        ihP->ih_refcnt++;
        IH_UNLOCK;
        return fdP;
    }

    /*
     * Try to open the Inode, return NULL on error.
     */
    fdInUseCount += 1;
    IH_UNLOCK;
 ih_open_retry:
    fd = OS_IOPEN(ihP);
    IH_LOCK;
    if (fd == INVALID_FD && (errno != EMFILE || fdLruHead == NULL)) {
        fdInUseCount -= 1;
        IH_UNLOCK;
        return NULL;
    }

    /* fdCacheSize limits the size of the descriptor cache, but
     * we permit the number of open files to exceed fdCacheSize.
     * We only recycle open file descriptors when the number
     * of open files reaches the size of the cache */
    if ((fdInUseCount > fdCacheSize || fd == INVALID_FD)
        && fdLruHead != NULL) {
        fdP = fdLruHead;
        osi_Assert(fdP->fd_status == FD_HANDLE_OPEN);
        DLL_DELETE(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
        DLL_DELETE(fdP, fdP->fd_ih->ih_fdhead, fdP->fd_ih->ih_fdtail,
                   fd_ihnext, fd_ihprev);
        closeFd = fdP->fd_fd;
        if (fd == INVALID_FD) {
            fdCacheSize--;      /* reduce in order to not run into here too often */
            DLL_INSERT_TAIL(fdP, fdAvailHead, fdAvailTail, fd_next, fd_prev);
            fdP->fd_status = FD_HANDLE_AVAIL;
            fdP->fd_ih = NULL;
            fdP->fd_fd = INVALID_FD;
            IH_UNLOCK;
            OS_CLOSE(closeFd);
            goto ih_open_retry;
        }
    } else {
        if (fdAvailHead == NULL) {
            fdHandleAllocateChunk();
        }
        fdP = fdAvailHead;
        osi_Assert(fdP->fd_status == FD_HANDLE_AVAIL);
        DLL_DELETE(fdP, fdAvailHead, fdAvailTail, fd_next, fd_prev);
        closeFd = INVALID_FD;
    }

    fdP->fd_status = FD_HANDLE_INUSE;
    fdP->fd_fd = fd;
    fdP->fd_ih = ihP;
    fdP->fd_refcnt++;

    ihP->ih_refcnt++;

    /* Add this handle to the Inode's list of open descriptors */
    DLL_INSERT_TAIL(fdP, ihP->ih_fdhead, ihP->ih_fdtail, fd_ihnext,
                    fd_ihprev);

    if (closeFd != INVALID_FD) {
        IH_UNLOCK;
        OS_CLOSE(closeFd);
        IH_LOCK;
        fdInUseCount -= 1;
    }

    IH_UNLOCK;
    return fdP;
}
/*
 * Get a file descriptor handle given an Inode handle
 */
FdHandle_t *
ih_open(IHandle_t * ihP)
{
    FdHandle_t *fdP;
    FD_t fd;
    FD_t closeFd;

    if (!ihP)                   /* XXX should log here in the fileserver */
        return NULL;

    IH_LOCK;

    /* Do we already have an open file handle for this Inode? */
    for (fdP = ihP->ih_fdtail; fdP != NULL; fdP = fdP->fd_ihprev) {
        if (fdP->fd_status != FD_HANDLE_INUSE) {
            assert(fdP->fd_status == FD_HANDLE_OPEN);
            fdP->fd_status = FD_HANDLE_INUSE;
            DLL_DELETE(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
            ihP->ih_refcnt++;
            IH_UNLOCK;
            (void)FDH_SEEK(fdP, 0, SEEK_SET);
            return fdP;
        }
    }

    /*
     * Try to open the Inode, return NULL on error.
     */
    fdInUseCount += 1;
    IH_UNLOCK;
 ih_open_retry:
    fd = OS_IOPEN(ihP);
    IH_LOCK;
    if (fd == INVALID_FD && (errno != EMFILE || fdLruHead == NULL)) {
        fdInUseCount -= 1;
        IH_UNLOCK;
        return NULL;
    }

    /* fdCacheSize limits the size of the descriptor cache, but
     * we permit the number of open files to exceed fdCacheSize.
     * We only recycle open file descriptors when the number
     * of open files reaches the size of the cache */
    if ((fdInUseCount > fdCacheSize || fd == INVALID_FD)
        && fdLruHead != NULL) {
        fdP = fdLruHead;
        assert(fdP->fd_status == FD_HANDLE_OPEN);
        DLL_DELETE(fdP, fdLruHead, fdLruTail, fd_next, fd_prev);
        DLL_DELETE(fdP, fdP->fd_ih->ih_fdhead, fdP->fd_ih->ih_fdtail,
                   fd_ihnext, fd_ihprev);
        closeFd = fdP->fd_fd;
        if (fd == INVALID_FD) {
            fdCacheSize--;      /* reduce in order to not run into here too often */
            DLL_INSERT_TAIL(fdP, fdAvailHead, fdAvailTail, fd_next, fd_prev);
            fdP->fd_status = FD_HANDLE_AVAIL;
            fdP->fd_ih = NULL;
            fdP->fd_fd = INVALID_FD;
            IH_UNLOCK;
            OS_CLOSE(closeFd);
            goto ih_open_retry;
        }
    } else {
        if (fdAvailHead == NULL) {
            fdHandleAllocateChunk();
        }
        fdP = fdAvailHead;
        assert(fdP->fd_status == FD_HANDLE_AVAIL);
        DLL_DELETE(fdP, fdAvailHead, fdAvailTail, fd_next, fd_prev);
        closeFd = INVALID_FD;
    }

    fdP->fd_status = FD_HANDLE_INUSE;
    fdP->fd_fd = fd;
    fdP->fd_ih = ihP;

    ihP->ih_refcnt++;

    /* Add this handle to the Inode's list of open descriptors */
    DLL_INSERT_TAIL(fdP, ihP->ih_fdhead, ihP->ih_fdtail, fd_ihnext,
                    fd_ihprev);

    if (closeFd != INVALID_FD) {
        IH_UNLOCK;
        OS_CLOSE(closeFd);
        IH_LOCK;
        fdInUseCount -= 1;
    }

    IH_UNLOCK;
    return fdP;
}
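/*
 * Usage sketch (not part of the original source): the typical lifecycle of
 * the handles above.  ih_init() finds or allocates the IHandle_t, ih_open()
 * reuses or opens a cached descriptor, and fd_close() returns it to the
 * cache.  OS_PREAD and ih_release() (the public counterpart of
 * _ih_release_r()) are assumed here and not shown in this section.
 */
static int
example_read_inode(int dev, int vid, Inode ino, char *buf, int len)
{
    IHandle_t *ihP;
    FdHandle_t *fdP;
    ssize_t nbytes;

    ihP = ih_init(dev, vid, ino);
    if (!ihP)
        return -1;

    fdP = ih_open(ihP);
    if (!fdP) {
        ih_release(ihP);        /* assumed unlocked wrapper around _ih_release_r */
        return -1;
    }

    nbytes = OS_PREAD(fdP->fd_fd, buf, len, 0); /* assumed positional read */

    fd_close(fdP);              /* drops the reference taken by ih_open */
    ih_release(ihP);            /* drops the reference taken by ih_init */

    return (nbytes == len) ? 0 : -1;
}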