Example #1
/*!
 * Print list of disconnected files.
 *
 * \note Call with afs_DDirtyVCListLock read locked.
 */
void
afs_DbgDisconFiles(void)
{
    struct vcache *tvc;
    struct afs_q *q;
    int i = 0;

    afs_warn("List of dirty files: \n");

    ObtainReadLock(&afs_disconDirtyLock);
    for (q = QPrev(&afs_disconDirty); q != &afs_disconDirty; q = QPrev(q)) {
        tvc = QEntry(q, struct vcache, dirtyq);

	afs_warn("Cell=%u Volume=%u VNode=%u Unique=%u\n",
		tvc->f.fid.Cell,
		tvc->f.fid.Fid.Volume,
		tvc->f.fid.Fid.Vnode,
		tvc->f.fid.Fid.Unique);

	i++;
	if (i >= 30)
	    osi_Panic("afs_DbgDisconFiles: loop in dirty list\n");
    }
    ReleaseReadLock(&afs_disconDirtyLock);
}
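
The traversal above walks a circular doubly linked list with AFS's queue macros; QEntry recovers the enclosing vcache from the embedded afs_q link. A minimal sketch of that container-of idiom, with simplified names that are not the actual OpenAFS definitions:

/* Hypothetical reconstruction of the QEntry idiom: given a pointer
 * to an embedded afs_q link, recover the structure that contains it. */
#include <stddef.h>

struct afs_q {
    struct afs_q *next;
    struct afs_q *prev;
};

#define QPrev(q)  ((q)->prev)
#define QEntry(q, structure, member) \
    ((structure *)((char *)(q) - offsetof(structure, member)))
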
Example #2
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking:  the vcache entry's lock is held.  It may be dropped and 
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    int islocked;

    vp = AFSTOV(avc);

    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	return;
    }
    VI_UNLOCK(vp);

    islocked = islocked_vnode(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	lock_vnode(vp, LK_UPGRADE);
    } else if (!islocked)
	lock_vnode(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC?  OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do.  (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().)  -GAW
	 */

	/*
	 * Dunno.  We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now.  And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems.  Matt.
	 */

	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
    }

    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }
    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_DOWNGRADE);
    else if (!islocked)
	unlock_vnode(vp);
}
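
Stripped of the page-cleaning work, osi_VM_TryToSmush follows a save/restore discipline for the vnode lock. A distilled sketch of just that pattern, reusing the wrapper names from the example above (assumed FreeBSD-style lock semantics):

/* Take the vnode lock exclusively, then put it back the way the
 * caller had it: shared stays shared, unlocked stays unlocked. */
static void
with_exclusive_lock(struct vnode *vp)
{
    int islocked = islocked_vnode(vp);	/* remember caller's state */

    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_UPGRADE);	/* shared -> exclusive */
    else if (!islocked)
	lock_vnode(vp, LK_EXCLUSIVE);	/* unlocked -> exclusive */

    /* ... work requiring the exclusive lock ... */

    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_DOWNGRADE);	/* back to shared */
    else if (!islocked)
	unlock_vnode(vp);		/* back to unlocked */
}
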
Example #3
void
cleanup_module(void)
{
#if defined(AFS_CACHE_BYPASS)
    afs_warn("Cache bypass patched libafs module cleaning up.\n");
#endif

    afs_shutdown_pagecopy();

#ifdef LINUX_KEYRING_SUPPORT
    osi_keyring_shutdown();
#endif
    osi_sysctl_clean();
#ifndef LINUX_KEYRING_SUPPORT
    osi_syscall_clean();
#endif
    unregister_filesystem(&afs_fs_type);

    afs_destroy_inodecache();
    osi_linux_free_afs_memory();

#ifdef AFS_LINUX24_ENV
    osi_ioctl_clean();
    osi_proc_clean();
#endif

    return;
}
Example #4
int
afs_MemCacheTruncate(struct osi_file *fP, int size)
{
    struct memCacheEntry *mceP = (struct memCacheEntry *)fP;
    AFS_STATCNT(afs_MemCacheTruncate);

    ObtainWriteLock(&mceP->afs_memLock, 313);
    /* old directory entry; g.c. */
    if (size == 0 && mceP->dataSize > memCacheBlkSize) {
	char *oldData = mceP->data;
	mceP->data = afs_osi_Alloc(memCacheBlkSize);
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n",
		     memCacheBlkSize);
	} else {
	    afs_osi_Free(oldData, mceP->dataSize);
	    mceP->dataSize = memCacheBlkSize;
	}
    }

    if (size < mceP->size)
	mceP->size = size;

    ReleaseWriteLock(&mceP->afs_memLock);
    return 0;
}
Example #5
void
shutdown_osinet(void)
{
    AFS_STATCNT(shutdown_osinet);
#ifndef AFS_PRIVATE_OSI_ALLOCSPACES
    if (afs_cold_shutdown) {
	struct osi_packet *tp;

	while ((tp = freePacketList)) {
	    freePacketList = tp->next;
	    afs_osi_Free(tp, AFS_LRALLOCSIZ);
#ifdef  KERNEL_HAVE_PIN
	    unpin(tp, AFS_LRALLOCSIZ);
#endif
	}

	while ((tp = freeSmallList)) {
	    freeSmallList = tp->next;
	    afs_osi_Free(tp, AFS_SMALLOCSIZ);
#ifdef  KERNEL_HAVE_PIN
	    unpin(tp, AFS_SMALLOCSIZ);
#endif
	}
	LOCK_INIT(&osi_fsplock, "osi_fsplock");
	LOCK_INIT(&osi_flplock, "osi_flplock");
    }
#endif /* AFS_PRIVATE_OSI_ALLOCSPACES */
    if (afs_stats_cmperf.LargeBlocksActive ||
	afs_stats_cmperf.SmallBlocksActive)
    {
	afs_warn("WARNING: not all blocks freed: large %d small %d\n",
		 afs_stats_cmperf.LargeBlocksActive,
		 afs_stats_cmperf.SmallBlocksActive);
    }
}
Example #6
struct file *
afs_linux_raw_open(afs_dcache_id_t *ainode)
{
    struct inode *tip = NULL;
    struct dentry *dp = NULL;
    struct file* filp;

    dp = afs_get_dentry_from_fh(afs_cacheSBp, ainode, cache_fh_len, cache_fh_type,
		afs_fh_acceptable);
    if ((!dp) || IS_ERR(dp))
           osi_Panic("Can't get dentry\n");
    tip = dp->d_inode;
    tip->i_flags |= S_NOATIME;	/* Disable updating access times. */

#if defined(STRUCT_TASK_STRUCT_HAS_CRED)
    /* Use stashed credentials - prevent selinux/apparmor problems  */
    filp = afs_dentry_open(dp, afs_cacheMnt, O_RDWR, cache_creds);
    if (IS_ERR(filp))
	filp = afs_dentry_open(dp, afs_cacheMnt, O_RDWR, current_cred());
#else
    filp = dentry_open(dget(dp), mntget(afs_cacheMnt), O_RDWR);
#endif
    if (IS_ERR(filp)) {
	afs_warn("afs: Cannot open cache file (code %d). Trying to continue, "
                 "but AFS accesses may return errors or panic the system\n",
                 (int) PTR_ERR(filp));
        filp = NULL;
    }

    dput(dp);

    return filp;
}
Example #7
/* DARWIN uses locking, and so must provide its own version of this routine. */
void
shutdown_osisleep(void)
{
    afs_event_t *tmp;
    int i;

    for (i=0;i<AFS_EVHASHSIZE;i++) {
	while ((tmp = afs_evhasht[i]) != NULL) {
	    afs_evhasht[i] = tmp->next;
	    if (tmp->refcount > 0) {
		afs_warn("nonzero refcount in shutdown_osisleep()\n");
	    } else {
#if defined(AFS_AIX_ENV)
		xmfree(tmp);
#elif defined(AFS_FBSD_ENV)
		afs_osi_Free(tmp, sizeof(*tmp));
#elif defined(AFS_SGI_ENV) || defined(AFS_XBSD_ENV) || defined(AFS_SUN5_ENV)
		osi_FreeSmallSpace(tmp);
#elif defined(AFS_LINUX26_ENV)
		kfree(tmp);
#elif defined(AFS_LINUX20_ENV)
		osi_linux_free(tmp);
#endif
	    }
	}
    }
}
Example #8
int
init_module(void)
{
    int err;
    AFS_RWLOCK_INIT(&afs_xosi, "afs_xosi");

#if !defined(AFS_LINUX24_ENV)
    /* obtain PAGE_OFFSET value */
    afs_linux_page_offset = get_page_offset();

#ifndef AFS_S390_LINUX22_ENV
    if (afs_linux_page_offset == 0) {
	/* couldn't obtain page offset so can't continue */
	printf("afs: Unable to obtain PAGE_OFFSET. Exiting..");
	return -EIO;
    }
#endif /* AFS_S390_LINUX22_ENV */
#endif /* !defined(AFS_LINUX24_ENV) */

    osi_Init();

#ifndef LINUX_KEYRING_SUPPORT
    err = osi_syscall_init();
    if (err)
	return err;
#endif
    err = afs_init_inodecache();
    if (err) {
#ifndef LINUX_KEYRING_SUPPORT
	osi_syscall_clean();
#endif
	return err;
    }
    err = register_filesystem(&afs_fs_type);
    if (err) {
	afs_destroy_inodecache();
#ifndef LINUX_KEYRING_SUPPORT
	osi_syscall_clean();
#endif
	return err;
    }

    osi_sysctl_init();
#ifdef LINUX_KEYRING_SUPPORT
    osi_keyring_init();
#endif
#ifdef AFS_LINUX24_ENV
    osi_proc_init();
    osi_ioctl_init();
#endif
#if defined(AFS_CACHE_BYPASS)
    afs_warn("Cache bypass patched libafs module init.\n");
#endif
    afs_init_pagecopy();

    return 0;
}
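
The error paths above unwind everything initialized so far, in reverse order, at each failure site. A hypothetical goto-based equivalent in the usual Linux kernel style, ignoring the LINUX_KEYRING_SUPPORT and AFS_LINUX24_ENV conditionals (a sketch, not the module's actual code):

static int
init_module_sketch(void)
{
    int err;

    err = osi_syscall_init();
    if (err)
	goto out;
    err = afs_init_inodecache();
    if (err)
	goto out_syscall;
    err = register_filesystem(&afs_fs_type);
    if (err)
	goto out_inodecache;
    return 0;

out_inodecache:
    afs_destroy_inodecache();
out_syscall:
    osi_syscall_clean();
out:
    return err;
}
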
Example #9
/**
 * Find a connection with call slots available, allocating one
 * if nothing is available and an unused slot can be found
 * @param xcv  A connection vector
 * @param create  If set, a new connection may be created
 */
static struct afs_conn *
find_preferred_connection(struct sa_conn_vector *xcv, int create)
{
    afs_int32 cix, bix;
    struct afs_conn *tc = NULL;

    bix = -1;
    for(cix = 0; cix < CVEC_LEN; ++cix) {
        tc = &(xcv->cvec[cix]);
        if (!tc->id) {
            if (create) {
                tc->parent = xcv;
                tc->forceConnectFS = 1;
                tc->activated = 1;
                bix = cix;
                break;
            } /* create */
        } else {
            if (tc->refCount < (RX_MAXCALLS-1)) {
                bix = cix;
                goto f_conn;
            } else if (cix == (CVEC_LEN-1))
                conn_vec_select_conn(xcv, bix, tc);
        } /* tc->id */
    } /* for cix < CVEC_LEN */

    if (bix < 0) {
        afs_warn("find_preferred_connection: no connection and !create\n");
        tc = NULL;
        goto out;
    }

f_conn:
    tc->refCount++;
    xcv->refCount++;

#if REPORT_CONNECTIONS_ISSUED
    afs_warn("Issuing conn %d refCount=%d parent refCount=%d\n", bix,
             tc->refCount, xcv->refCount);
#endif

out:
    return (tc);

}        /* find_preferred_connection */
Example #10
void
osi_StopListener(void)
{
    struct proc *p;

    /*
     * Have to drop global lock to safely do this.
     * soclose() is currently protected by Giant,
     * but pfind and psignal are MPSAFE.
     */
    int haveGlock = ISAFS_GLOCK();
    if (haveGlock)
	AFS_GUNLOCK();
    soshutdown(rx_socket, 2);
#ifndef AFS_FBSD70_ENV
    soclose(rx_socket);
#endif
    p = pfind(rxk_ListenerPid);
    afs_warn("osi_StopListener: rxk_ListenerPid %lx\n", p);
    if (p)
	psignal(p, SIGUSR1);
#ifdef AFS_FBSD50_ENV
    PROC_UNLOCK(p);
#endif
#ifdef AFS_FBSD70_ENV
    {
      /* Avoid destroying socket until osi_NetReceive has
       * had a chance to clean up */
      int tries;
      struct mtx s_mtx;

      MUTEX_INIT(&s_mtx, "rx_shutdown_mutex", MUTEX_DEFAULT, 0);
      MUTEX_ENTER(&s_mtx);
      tries = 3;
      while ((tries > 0) && (!so_is_disconn(rx_socket))) {
	msleep(&osi_StopListener, &s_mtx, PSOCK | PCATCH,
	       "rx_shutdown_timedwait", 1 * hz);
	--tries;
      }
      if (so_is_disconn(rx_socket))
	soclose(rx_socket);
      MUTEX_EXIT(&s_mtx);
      MUTEX_DESTROY(&s_mtx);
    }
#endif
    if (haveGlock)
	AFS_GLOCK();
}
Example #11
/*XXX: this extends a block arbitrarily to support big directories */
int
afs_MemWritevBlk(struct memCacheEntry *mceP, int offset,
		 struct iovec *iov, int nio, int size)
{
    int i;
    int bytesWritten;
    int bytesToWrite;
    AFS_STATCNT(afs_MemWriteBlk);
    ObtainWriteLock(&mceP->afs_memLock, 561);
    if (offset + size > mceP->dataSize) {
	char *oldData = mceP->data;

	mceP->data = afs_osi_Alloc(size + offset);
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;	/* revert back change that was made */
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n",
		     size + offset);
	    return -ENOMEM;
	}

	/* may overlap, but this is OK */
	AFS_GUNLOCK();
	memcpy(mceP->data, oldData, mceP->size);
	AFS_GLOCK();
	afs_osi_Free(oldData, mceP->dataSize);
	mceP->dataSize = size + offset;
    }
    AFS_GUNLOCK();
    if (mceP->size < offset)
	memset(mceP->data + mceP->size, 0, offset - mceP->size);
    for (bytesWritten = 0, i = 0; i < nio && size > 0; i++) {
	bytesToWrite = (size < iov[i].iov_len) ? size : iov[i].iov_len;
	memcpy(mceP->data + offset, iov[i].iov_base, bytesToWrite);
	offset += bytesToWrite;
	bytesWritten += bytesToWrite;
	size -= bytesToWrite;
    }
    mceP->size = (offset < mceP->size) ? mceP->size : offset;
    AFS_GLOCK();

    ReleaseWriteLock(&mceP->afs_memLock);
    return bytesWritten;
}
Example #12
int
afs_InitMemCache(int blkCount, int blkSize, int flags)
{
    int index;

    AFS_STATCNT(afs_InitMemCache);
    if (blkSize)
	memCacheBlkSize = blkSize;

    memMaxBlkNumber = blkCount;
    memCache =
	afs_osi_Alloc(memMaxBlkNumber * sizeof(struct memCacheEntry));
    osi_Assert(memCache != NULL);

    for (index = 0; index < memMaxBlkNumber; index++) {
	char *blk;
	(memCache + index)->size = 0;
	(memCache + index)->dataSize = memCacheBlkSize;
	LOCK_INIT(&((memCache + index)->afs_memLock), "afs_memLock");
	blk = afs_osi_Alloc(memCacheBlkSize);
	if (blk == NULL)
	    goto nomem;
	(memCache + index)->data = blk;
	memset((memCache + index)->data, 0, memCacheBlkSize);
    }
#if defined(AFS_HAVE_VXFS)
    afs_InitDualFSCacheOps((struct vnode *)0);
#endif
    for (index = 0; index < blkCount; index++)
	afs_InitCacheFile(NULL, 0);
    return 0;

  nomem:
    afs_warn("afsd:  memCache allocation failure at %d KB.\n",
	     (index * memCacheBlkSize) / 1024);
    while (--index >= 0) {
	afs_osi_Free((memCache + index)->data, memCacheBlkSize);
	(memCache + index)->data = NULL;
    }
    return ENOMEM;

}
Example #13
void
osi_AttachVnode(struct vcache *avc, int seq) {
    struct vnode *vp;
    struct thread *p = curthread;

    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();
#if defined(AFS_FBSD60_ENV)
    if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
#else
    if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#endif
	panic("afs getnewvnode");	/* can't happen */
#ifdef AFS_FBSD70_ENV
    /* XXX verified on 80--TODO check on 7x */
    if (!vp->v_mount) {
        ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* !glocked */
        insmntque(vp, afs_globalVFS);
        MA_VOP_UNLOCK(vp, 0, p);
    }
#endif
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache,339);
    if (avc->v != NULL) {
        /* I'd like to know if this ever happens...
         * We don't drop global for the rest of this function,
         * so if we do lose the race, the other thread should
         * have found the same vnode and finished initializing
         * the vcache entry.  Is it conceivable that this vcache
         * entry could be recycled during this interval?  If so,
         * then there probably needs to be some sort of additional
         * mutual exclusion (an Embryonic flag would suffice).
         * -GAW */
	afs_warn("afs_NewVCache: lost the race\n");
	return;
    }
    avc->v = vp;
    avc->v->v_data = avc;
    lockinit(&avc->rwlock, PINOD, "vcache", 0, 0);
}
Example #14
int
afs_MemWriteUIO(afs_dcache_id_t *ainode, struct uio *uioP)
{
    struct memCacheEntry *mceP =
	(struct memCacheEntry *)afs_MemCacheOpen(ainode);
    afs_int32 code;

    AFS_STATCNT(afs_MemWriteUIO);
    ObtainWriteLock(&mceP->afs_memLock, 312);
    if (AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP) > mceP->dataSize) {
	char *oldData = mceP->data;

	mceP->data = afs_osi_Alloc(AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP));
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;	/* revert back change that was made */
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%ld bytes)\n",
		     (long)(AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP)));
	    return -ENOMEM;
	}

	AFS_GUNLOCK();
	memcpy(mceP->data, oldData, mceP->size);
	AFS_GLOCK();

	afs_osi_Free(oldData, mceP->dataSize);
	mceP->dataSize = AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP);
    }
    if (mceP->size < AFS_UIO_OFFSET(uioP))
	memset(mceP->data + mceP->size, 0,
	       (int)(AFS_UIO_OFFSET(uioP) - mceP->size));
    AFS_UIOMOVE(mceP->data + AFS_UIO_OFFSET(uioP), AFS_UIO_RESID(uioP), UIO_WRITE,
		uioP, code);
    if (AFS_UIO_OFFSET(uioP) > mceP->size)
	mceP->size = AFS_UIO_OFFSET(uioP);

    ReleaseWriteLock(&mceP->afs_memLock);
    return code;
}
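
Examples #4, #11 and #14 all repeat the same grow-copy-swap sequence on the memCacheEntry buffer. A hypothetical helper that distills it (the real code inlines this each time, and #11/#14 additionally drop the global lock around the memcpy):

/* Grow mceP's buffer to newSize. Allocate first and swap only on
 * success, so a failed allocation leaves the entry untouched. */
static int
mce_grow(struct memCacheEntry *mceP, int newSize)
{
    char *newData = afs_osi_Alloc(newSize);

    if (newData == NULL)
	return -ENOMEM;		/* caller keeps the old buffer */
    memcpy(newData, mceP->data, mceP->size);
    afs_osi_Free(mceP->data, mceP->dataSize);
    mceP->data = newData;
    mceP->dataSize = newSize;
    return 0;
}
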
Example #15
void
shutdown_osisleep(void) {
    struct afs_event *evp, *nevp, **pevpp;
    int i;
    for (i=0; i < AFS_EVHASHSIZE; i++) {
	evp = afs_evhasht[i];
	pevpp = &afs_evhasht[i];
	while (evp) {
	    EVTLOCK_LOCK(evp);
	    nevp = evp->next;
	    if (evp->refcount == 0) {
		EVTLOCK_DESTROY(evp);
		*pevpp = evp->next;
		osi_FreeSmallSpace(evp);
		afs_evhashcnt--;
	    } else {
		afs_warn("nonzero refcount in shutdown_osisleep()\n");
		EVTLOCK_UNLOCK(evp);
		pevpp = &evp->next;
	    }
	    evp = nevp;
	}
    }
}
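
Unlike Example #7, which always pops from the head of each bucket, this variant keeps a pointer-to-pointer (pevpp) into the chain so busy events can be skipped without a special case for the list head. The same unlink pattern in isolation (a generic sketch, not OpenAFS code):

/* Remove matching nodes from a singly linked list. *pp always names
 * the link that points at the current node, so unlinking works the
 * same whether the node is the head or mid-chain. */
struct node {
    struct node *next;
};

static void
remove_matching(struct node **head, int (*dead)(struct node *))
{
    struct node **pp = head;
    struct node *n;

    while ((n = *pp) != NULL) {
	if (dead(n)) {
	    *pp = n->next;	/* unlink; no head special case */
	    /* free n here */
	} else {
	    pp = &n->next;	/* advance the link pointer */
	}
    }
}
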
Example #16
/* Cannot have static linkage--called from BPrefetch (afs_daemons) */
afs_int32
afs_PrefetchNoCache(struct vcache *avc,
		    afs_ucred_t *acred,
		    struct nocache_read_request *bparms)
{
    struct uio *auio;
#ifndef UKERNEL
    struct iovec *iovecp;
#endif
    struct vrequest *areq;
    afs_int32 code = 0;
    struct rx_connection *rxconn;
#ifdef AFS_64BIT_CLIENT
    afs_int32 length_hi, bytes, locked;
#endif

    struct afs_conn *tc;
    struct rx_call *tcall;
    struct tlocal1 {
	struct AFSVolSync tsync;
	struct AFSFetchStatus OutStatus;
	struct AFSCallBack CallBack;
    };
    struct tlocal1 *tcallspec;

    auio = bparms->auio;
    areq = bparms->areq;
#ifndef UKERNEL
    iovecp = auio->uio_iov;
#endif

    tcallspec = osi_Alloc(sizeof(struct tlocal1));
    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
	if (tc) {
	    avc->callback = tc->parent->srvr->server;
	    tcall = rx_NewCall(rxconn);
#ifdef AFS_64BIT_CLIENT
	    if (!afs_serverHasNo64Bit(tc)) {
		code = StartRXAFS_FetchData64(tcall,
					      (struct AFSFid *) &avc->f.fid.Fid,
					      auio->uio_offset,
					      bparms->length);
		if (code == 0) {
		    COND_GUNLOCK(locked);
		    bytes = rx_Read(tcall, (char *)&length_hi,
				    sizeof(afs_int32));
		    COND_RE_GLOCK(locked);

		    if (bytes != sizeof(afs_int32)) {
			length_hi = 0;
			code = rx_Error(tcall);
			COND_GUNLOCK(locked);
			code = rx_EndCall(tcall, code);
			COND_RE_GLOCK(locked);
			tcall = NULL;
		    }
		}
	    } /* afs_serverHasNo64Bit */
	    if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
		if (auio->uio_offset > 0x7FFFFFFF) {
		    code = EFBIG;
		} else {
		    afs_int32 pos;
		    pos = auio->uio_offset;
		    COND_GUNLOCK(locked);
		    if (!tcall)
			tcall = rx_NewCall(rxconn);
		    code = StartRXAFS_FetchData(tcall,
					(struct AFSFid *) &avc->f.fid.Fid,
					pos, bparms->length);
		    COND_RE_GLOCK(locked);
		}
		afs_serverSetNo64Bit(tc);
	    }
#else
	    code = StartRXAFS_FetchData(tcall,
			                (struct AFSFid *) &avc->f.fid.Fid,
					auio->uio_offset, bparms->length);
#endif
	    if (code == 0) {
		code = afs_NoCacheFetchProc(tcall, avc, auio,
				            1 /* release_pages */,
					    bparms->length);
	    } else {
		afs_warn("BYPASS: StartRXAFS_FetchData failed: %d\n", code);
		unlock_and_release_pages(auio);
		goto done;
	    }
	    if (code == 0) {
		code = EndRXAFS_FetchData(tcall, &tcallspec->OutStatus,
					  &tcallspec->CallBack,
					  &tcallspec->tsync);
	    } else {
		afs_warn("BYPASS: NoCacheFetchProc failed: %d\n", code);
	    }
	    code = rx_EndCall(tcall, code);
	} else {
	    afs_warn("BYPASS: No connection.\n");
	    code = -1;
	    unlock_and_release_pages(auio);
	    goto done;
	}
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
						 AFS_STATS_FS_RPCIDX_FETCHDATA,
						 SHARED_LOCK,0));
done:
    /*
     * Copy appropriate fields into vcache
     */

    if (!code)
	afs_ProcessFS(avc, &tcallspec->OutStatus, areq);

    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(tcallspec, sizeof(struct tlocal1));
    osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL
    /* in UKERNEL, the "pages" are passed in */
    osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(auio, sizeof(struct uio));
#endif
    return code;
}
Example #17
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc,
		struct nocache_read_request *bparms,
		afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt;
    struct brequest *breq;
    struct vrequest *areq;

    /* the receiver will free this */
    areq = osi_Alloc(sizeof(struct vrequest));

    if (avc->vc_error) {
	code = EIO;
	afs_warn("afs_ReadNoCache VCache Error!\n");
	goto cleanup;
    }
    if ((code = afs_InitReq(areq, acred))) {
	afs_warn("afs_ReadNoCache afs_InitReq error!\n");
	goto cleanup;
    }

    AFS_GLOCK();
    code = afs_VerifyVCache(avc, areq);
    AFS_GUNLOCK();

    if (code) {
	code = afs_CheckCode(code, areq, 11);	/* failed to get it */
	afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
	goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    bcnt = 1;
    AFS_GLOCK();
    while(bcnt < 20) {
	breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
			  bparms, (void *)0, (void *)0);
	if(breq != 0) {
	    code = 0;
	    break;
	}
	afs_osi_Wait(10 * bcnt, 0, 0);
	bcnt++;		/* wait a little longer on each retry */
    }
    AFS_GUNLOCK();

    if(!breq) {
    	code = EBUSY;
	goto cleanup;
    }

    return code;

cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory.
     */
    unlock_and_release_pages(bparms->auio);
    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(bparms->auio->uio_iov,
	     bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
Example #18
/* no-cache prefetch routine */
static afs_int32
afs_NoCacheFetchProc(struct rx_call *acall,
                     struct vcache *avc,
		     struct uio *auio,
                     afs_int32 release_pages,
		     afs_int32 size)
{
    afs_int32 length;
    afs_int32 code;
    int moredata, iovno, iovoff, iovmax, result, locked;
    struct iovec *ciov;
    struct iovec *rxiov;
    int nio = 0;
    bypass_page_t pp;

    int curpage, bytes;
    int pageoff;

    rxiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
    ciov = auio->uio_iov;
    pp = (bypass_page_t) ciov->iov_base;
    iovmax = auio->uio_iovcnt - 1;
    iovno = iovoff = result = 0;

    do {
	COND_GUNLOCK(locked);
	code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
	COND_RE_GLOCK(locked);
	if (code != sizeof(afs_int32)) {
	    result = EIO;
	    afs_warn("Preread error. code: %d instead of %d\n",
		code, (int)sizeof(afs_int32));
	    unlock_and_release_pages(auio);
	    goto done;
	} else
	    length = ntohl(length);

	if (length > size) {
	    result = EIO;
	    afs_warn("Preread error. Got length %d, which is greater than size %d\n",
	             length, size);
	    unlock_and_release_pages(auio);
	    goto done;
	}

	/* If we get a 0 length reply, time to clean up and return */
	if (length == 0) {
	    unlock_and_release_pages(auio);
	    result = 0;
	    goto done;
	}

	/*
	 * The fetch protocol is extended for the AFS/DFS translator
	 * to allow multiple blocks of data, each with its own length,
	 * to be returned. As long as the top bit is set, there are more
	 * blocks expected.
	 *
	 * We do not do this for AFS file servers because they sometimes
	 * return large negative numbers as the transfer size.
	 */
	if (avc->f.states & CForeign) {
	    moredata = length & 0x80000000;
	    length &= ~0x80000000;
	} else {
	    moredata = 0;
	}

	for (curpage = 0; curpage <= iovmax; curpage++) {
	    pageoff = 0;
	    /* properly, this should track uio_resid, not a fixed page size! */
	    while (pageoff < auio->uio_iov[curpage].iov_len) {
		/* If no more iovs, issue new read. */
		if (iovno >= nio) {
		    COND_GUNLOCK(locked);
		    bytes = rx_Readv(acall, rxiov, &nio, RX_MAXIOVECS, length);
		    COND_RE_GLOCK(locked);
		    if (bytes < 0) {
		        afs_warn("afs_NoCacheFetchProc: rx_Read error. Return code was %d\n", bytes);
			result = bytes;
			unlock_and_release_pages(auio);
			goto done;
		    } else if (bytes == 0) {
			/* we failed to read the full length */
			result = EIO;
			afs_warn("afs_NoCacheFetchProc: rx_Read returned zero. Aborting.\n");
			unlock_and_release_pages(auio);
			goto done;
		    }
		    size -= bytes;
		    auio->uio_resid -= bytes;
		    iovno = 0;
		}
		pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
		if (pageoff + (rxiov[iovno].iov_len - iovoff) <= auio->uio_iov[curpage].iov_len) {
		    /* Copy entire (or rest of) current iovec into current page */
		    if (pp)
			afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 0);
		    length -= (rxiov[iovno].iov_len - iovoff);
		    pageoff += rxiov[iovno].iov_len - iovoff;
		    iovno++;
		    iovoff = 0;
		} else {
		    /* Copy only what's needed to fill current page */
		    if (pp)
			afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 1);
		    length -= (auio->uio_iov[curpage].iov_len - pageoff);
		    iovoff += auio->uio_iov[curpage].iov_len - pageoff;
		    pageoff = auio->uio_iov[curpage].iov_len;
		}

		/* we filled a page, or this is the last page.  conditionally release it */
		if (pp && ((pageoff == auio->uio_iov[curpage].iov_len &&
			    release_pages) || (length == 0 && iovno >= nio)))
		    release_full_page(pp, pageoff);

		if (length == 0 && iovno >= nio)
		    goto done;
	    }
	}
    } while (moredata);

done:
    osi_FreeSmallSpace(rxiov);
    return result;
}
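
The comment in the middle of the loop describes the extended AFS/DFS fetch framing: each block is preceded by a 32-bit length word whose top bit means more blocks follow. The decoding step in isolation (an illustrative helper, not part of OpenAFS):

/* Read and decode one block-length word from the call. Returns 0 on
 * success, filling in the block length and the continuation flag. */
static int
read_block_length(struct rx_call *acall, afs_int32 *lenp, int *morep)
{
    afs_int32 wire;

    if (rx_Read(acall, (char *)&wire, sizeof(wire)) != sizeof(wire))
	return EIO;			/* short read: protocol error */
    wire = ntohl(wire);
    *morep = (wire & 0x80000000) != 0;	/* translator continuation bit */
    *lenp = wire & ~0x80000000;		/* byte count of this block */
    return 0;
}
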
Example #19
/*
 * Inform dynroot that a new vnode is being created.  Return value
 * is non-zero if this vnode is handled by dynroot, in which case
 * FetchStatus will be filled in.
 */
int
afs_DynrootNewVnode(struct vcache *avc, struct AFSFetchStatus *status)
{
    char *bp, tbuf[CVBS];

    if (_afs_IsDynrootFid(&avc->f.fid)) {
	if (!afs_dynrootEnable)
	    return 0;
	afs_GetDynroot(0, 0, status);
	afs_PutDynroot();
	return 1;
    }

    if (afs_IsDynrootMount(avc)) {
	afs_GetDynrootMount(0, 0, status);
	afs_PutDynroot();
	return 1;
    }

    /*
     * Check if this is an entry under /afs, e.g. /afs/cellname.
     */
    if (avc->f.fid.Cell == afs_dynrootCell
	&& avc->f.fid.Fid.Volume == AFS_DYNROOT_VOLUME) {

	struct cell *c;
	struct cell_alias *ca;
	int namelen, linklen, cellidx, rw;

	memset(status, 0, sizeof(struct AFSFetchStatus));

	status->FileType = SymbolicLink;
	status->LinkCount = 1;
	status->DataVersion = 1;
	status->CallerAccess = PRSFS_LOOKUP | PRSFS_READ;
	status->AnonymousAccess = PRSFS_LOOKUP | PRSFS_READ;
	status->ParentVnode = 1;
	status->ParentUnique = 1;

	if (VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode) == VN_TYPE_SYMLINK) {
	    struct afs_dynSymlink *ts;
	    int index = VNUM_TO_VNID(avc->f.fid.Fid.Vnode);

	    ObtainReadLock(&afs_dynSymlinkLock);
	    ts = afs_dynSymlinkBase;
	    while (ts) {
		if (ts->index == index)
		    break;
		ts = ts->next;
	    }

	    if (ts) {
		linklen = strlen(ts->target);
		avc->linkData = afs_osi_Alloc(linklen + 1);
		strcpy(avc->linkData, ts->target);

		status->Length = linklen;
		status->UnixModeBits = 0755;
	    }
	    ReleaseReadLock(&afs_dynSymlinkLock);

	    return ts ? 1 : 0;
	}

	if (VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode) != VN_TYPE_CELL
	    && VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode) != VN_TYPE_ALIAS
	    && VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode) != VN_TYPE_MOUNT) {
	    afs_warn("dynroot vnode inconsistency, unknown VNTYPE %d\n",
		     VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode));
	    return 0;
	}

	cellidx = VNUM_TO_CIDX(avc->f.fid.Fid.Vnode);
	rw = VNUM_TO_RW(avc->f.fid.Fid.Vnode);

	if (VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode) == VN_TYPE_ALIAS) {
	    char *realName;

	    ca = afs_GetCellAlias(cellidx);
	    if (!ca) {
		afs_warn("dynroot vnode inconsistency, can't find alias %d\n",
			 cellidx);
		return 0;
	    }

	    /*
	     * linkData needs to contain the name of the cell
	     * we're aliasing for.
	     */
	    realName = ca->cell;
	    if (!realName) {
		afs_warn("dynroot: alias %s missing real cell name\n",
			 ca->alias);
		avc->linkData = afs_strdup("unknown");
		linklen = 7;
	    } else {
		int namelen = strlen(realName);
		linklen = rw + namelen;
		avc->linkData = afs_osi_Alloc(linklen + 1);
		strcpy(avc->linkData, rw ? "." : "");
		afs_strcat(avc->linkData, realName);
	    }

	    status->UnixModeBits = 0755;
	    afs_PutCellAlias(ca);

	} else if (VNUM_TO_VNTYPE(avc->f.fid.Fid.Vnode) == VN_TYPE_MOUNT) {
	    c = afs_GetCellByIndex(cellidx, READ_LOCK);
	    if (!c) {
		afs_warn("dynroot vnode inconsistency, can't find cell %d\n",
			 cellidx);
		return 0;
	    }

	    /*
	     * linkData needs to contain "%cell:volumeid"
	     */
	    namelen = strlen(c->cellName);
	    bp = afs_cv2string(&tbuf[CVBS], avc->f.fid.Fid.Unique);
	    linklen = 2 + namelen + strlen(bp);
	    avc->linkData = afs_osi_Alloc(linklen + 1);
	    strcpy(avc->linkData, "%");
	    afs_strcat(avc->linkData, c->cellName);
	    afs_strcat(avc->linkData, ":");
	    afs_strcat(avc->linkData, bp);

	    status->UnixModeBits = 0644;
	    status->ParentVnode = AFS_DYNROOT_MOUNT_VNODE;
	    afs_PutCell(c, READ_LOCK);

	} else {
	    c = afs_GetCellByIndex(cellidx, READ_LOCK);
	    if (!c) {
		afs_warn("dynroot vnode inconsistency, can't find cell %d\n",
			 cellidx);
		return 0;
	    }

	    /*
	     * linkData needs to contain "#cell:root.cell" or "%cell:root.cell"
	     */
	    namelen = strlen(c->cellName);
	    linklen = 1 + namelen + 10;
	    avc->linkData = afs_osi_Alloc(linklen + 1);
	    strcpy(avc->linkData, rw ? "%" : "#");
	    afs_strcat(avc->linkData, c->cellName);
	    afs_strcat(avc->linkData, ":root.cell");

	    status->UnixModeBits = 0644;
	    afs_PutCell(c, READ_LOCK);
	}

	status->Length = linklen;
	return 1;
    }

    return 0;
}
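
The mount-point branch builds link data of the form "#cell:root.cell" (or "%cell:root.cell" for the read-write path), which is where linklen = 1 + namelen + 10 comes from: one prefix character, the cell name, and the 10 bytes of ":root.cell". A hypothetical snprintf equivalent of the strcpy/afs_strcat sequence above:

	    /* Sketch only; the kernel module uses explicit concatenation. */
	    linklen = 1 + strlen(c->cellName) + 10;
	    avc->linkData = afs_osi_Alloc(linklen + 1);
	    snprintf(avc->linkData, linklen + 1, "%s%s:root.cell",
		     rw ? "%" : "#", c->cellName);
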
Example #20
void
afs_CheckCallbacks(unsigned int secs)
{
    struct vcache *tvc;
    register struct afs_q *tq;
    struct afs_q *uq;
    afs_uint32 now;
    struct volume *tvp;
    register int safety;

    ObtainWriteLock(&afs_xcbhash, 85);	/* pretty likely I'm going to remove something */
    now = osi_Time();
    for (safety = 0, tq = cbHashT[base].head.prev;
	 (safety <= CBQ_LIMIT) && (tq != &(cbHashT[base].head));
	 tq = uq, safety++) {

	uq = QPrev(tq);
	tvc = CBQTOV(tq);
	if (tvc->cbExpires < now + secs) {	/* race #1 here */
	    /* Get the volume, and if its callback expiration time is more than secs
	     * seconds into the future, update this vcache entry and requeue it below
	     */
	    if ((tvc->f.states & CRO)
		&& (tvp = afs_FindVolume(&(tvc->f.fid), READ_LOCK))) {
		if (tvp->expireTime > now + secs) {
		    tvc->cbExpires = tvp->expireTime;	/* XXX race here */
		} else {
		    int i;
		    for (i = 0; i < MAXHOSTS && tvp->serverHost[i]; i++) {
			if (!(tvp->serverHost[i]->flags & SRVR_ISDOWN)) {
			    /* What about locking xvcache or vrefcount++ or
			     * write locking tvc? */
			    QRemove(tq);
			    tvc->f.states &= ~(CStatd | CMValid | CUnique);
                            if (!(tvc->f.states & (CVInit|CVFlushed)) &&
                                (tvc->f.fid.Fid.Vnode & 1 ||
                                 (vType(tvc) == VDIR)))
				osi_dnlc_purgedp(tvc);
			    tvc->dchint = NULL;	/*invalidate em */
			    afs_ResetVolumeInfo(tvp);
			    break;
			}
		    }
		}
		afs_PutVolume(tvp, READ_LOCK);
	    } else {
		/* Do I need to worry about things like execsorwriters?
		 * What about locking xvcache or vrefcount++ or write locking tvc?
		 */
		QRemove(tq);
		tvc->f.states &= ~(CStatd | CMValid | CUnique);
                if (!(tvc->f.states & (CVInit|CVFlushed)) &&
                    (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
		    osi_dnlc_purgedp(tvc);
	    }
	}

	if ((tvc->cbExpires > basetime) && CBHash(tvc->cbExpires - basetime)) {
	    /* it's been renewed on us.  Have to be careful not to put it back
	     * into this slot, or we may never get out of here.
	     */
	    int slot;
	    slot = (CBHash(tvc->cbExpires - basetime) + base) % CBHTSIZE;
	    if (slot != base) {
		if (QPrev(tq))
		    QRemove(&(tvc->callsort));
		QAdd(&(cbHashT[slot].head), &(tvc->callsort));
		/* XXX remember to update volume expiration time */
		/* -- not needed for correctness, though */
	    }
	}
    }

    if (safety > CBQ_LIMIT) {
	afs_stats_cmperf.cbloops++;
	if (afs_paniconwarn)
	    osi_Panic("CheckCallbacks");

	afs_warn("AFS Internal Error (minor): please contact AFS Product Support.\n");
	ReleaseWriteLock(&afs_xcbhash);
	afs_FlushCBs();
	return;
    } else
	ReleaseWriteLock(&afs_xcbhash);


/* XXX future optimization:
   if this item has been recently accessed, queue up a stat for it.
   {
   struct dcache * adc;

   ObtainReadLock(&afs_xdcache);
   if ((adc = tvc->quick.dc) && (adc->stamp == tvc->quick.stamp)
   && (afs_indexTimes[adc->index] > afs_indexCounter - 20)) {
   queue up the stat request
   }
   ReleaseReadLock(&afs_xdcache);
   }
   */

    return;
}				/* afs_CheckCallbacks */
Example #21
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    if (!afs_usednlc)
      return 0;

    dnlcHash(ts, key);		/* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE)
      return 0;
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);

    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
	 tnc = tnc->next, safety++) {
	if ( /* (tnc->key == key)  && */ (tnc->dirp == adp)
	    && (!strcmp((char *)tnc->name, aname))) {
	    tvc = tnc->vp;
	    break;
	} else if (tnc->next == nameHash[skey]) {	/* end of list */
	    break;
	} else if (safety > NCSIZE) {
	    afs_warn("DNLC cycle");
	    dnlcstats.cycles++;
	    ReleaseReadLock(&afs_xdnlc);
	    ReleaseReadLock(&afs_xvcache);
	    osi_dnlc_purge();
	    return (0);
	}
    }

    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
	ReleaseReadLock(&afs_xvcache);
	dnlcstats.misses++;
    } else {
	if ((tvc->f.states & CVInit)
#ifdef  AFS_DARWIN80_ENV
	    ||(tvc->f.states & CDeadVnode)
#endif
	    )
	{
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    return 0;
	}
#if defined(AFS_DARWIN80_ENV)
	tvp = AFSTOV(tvc);
	if (vnode_get(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    return 0;
	}
	if (vnode_ref(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    AFS_GUNLOCK();
	    vnode_put(tvp);
	    AFS_GLOCK();
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    return 0;
	}
#else
	osi_vnhold(tvc, 0);
#endif
	ReleaseReadLock(&afs_xvcache);

    }

    return tvc;
}
Example #22
int
afs_UFSRead(register struct vcache *avc, struct uio *auio,
            struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp,
            int noLock)
{
    afs_size_t totalLength;
    afs_size_t transferLength;
    afs_size_t filePos;
    afs_size_t offset, len, tlen;
    afs_int32 trimlen;
    struct dcache *tdc = 0;
    afs_int32 error;
#ifdef AFS_DARWIN80_ENV
    uio_t tuiop=NULL;
#else
    struct uio tuio;
    struct uio *tuiop = &tuio;
    struct iovec *tvec;
#endif
    struct osi_file *tfile;
    afs_int32 code;
    int trybusy = 1;
    struct vrequest treq;

    AFS_STATCNT(afs_UFSRead);
    if (avc && avc->vc_error)
        return EIO;

    AFS_DISCON_LOCK();

    /* check that we have the latest status info in the vnode cache */
    if ((code = afs_InitReq(&treq, acred)))
        return code;
    if (!noLock) {
        if (!avc)
            osi_Panic("null avc in afs_UFSRead");
        else {
            code = afs_VerifyVCache(avc, &treq);
            if (code) {
                code = afs_CheckCode(code, &treq, 11);	/* failed to get it */
                AFS_DISCON_UNLOCK();
                return code;
            }
        }
    }
#ifndef	AFS_VM_RDWR_ENV
    if (AFS_NFSXLATORREQ(acred)) {
        if (!afs_AccessOK
                (avc, PRSFS_READ, &treq,
                 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
            AFS_DISCON_UNLOCK();
            return afs_CheckCode(EACCES, &treq, 12);
        }
    }
#endif

#ifndef AFS_DARWIN80_ENV
    tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
#endif
    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
               totalLength, ICL_TYPE_OFFSET,
               ICL_HANDLE_OFFSET(avc->f.m.Length));
    error = 0;
    transferLength = 0;
    if (!noLock)
        ObtainReadLock(&avc->lock);
#if	defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
    if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
        hset(avc->flushDV, avc->f.m.DataVersion);
    }
#endif

    if (filePos >= avc->f.m.Length) {
        if (len > AFS_ZEROS)
            len = sizeof(afs_zeros);	/* and in 0 buffer */
        len = 0;
#ifdef AFS_DARWIN80_ENV
        trimlen = len;
        tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
        afsio_copy(auio, &tuio, tvec);
        trimlen = len;
        afsio_trim(&tuio, trimlen);
#endif
        AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
    }

    while (avc->f.m.Length > 0 && totalLength > 0) {
        /* read all of the cached info */
        if (filePos >= avc->f.m.Length)
            break;		/* all done */
        if (noLock) {
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);
            }
            tdc = afs_FindDCache(avc, filePos);
            if (tdc) {
                ObtainReadLock(&tdc->lock);
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            }
        } else {
            /* a tricky question: does the presence of the DFFetching flag
             * mean that we're fetching the latest version of the file?  No.
             * The server could update the file as soon as the fetch responsible
             * for the setting of the DFFetching flag completes.
             *
             * However, the presence of the DFFetching flag (visible under
             * a dcache read lock since it is set and cleared only under a
             * dcache write lock) means that we're fetching as good a version
             * as was known to this client at the time of the last call to
             * afs_VerifyVCache, since the latter updates the stat cache's
             * m.DataVersion field under a vcache write lock, and from the
             * time that the DFFetching flag goes on in afs_GetDCache (before
             * the fetch starts), to the time it goes off (after the fetch
             * completes), afs_GetDCache keeps at least a read lock on the
             * vcache entry.
             *
             * This means that if the DFFetching flag is set, we can use that
             * data for any reads that must come from the current version of
             * the file (current == m.DataVersion).
             *
             * Another way of looking at this same point is this: if we're
             * fetching some data and then try do an afs_VerifyVCache, the
             * VerifyVCache operation will not complete until after the
             * DFFetching flag is turned off and the dcache entry's f.versionNo
             * field is updated.
             *
             * Note, by the way, that if DFFetching is set,
             * m.DataVersion > f.versionNo (the latter is not updated until
             * after the fetch completes).
             */
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);	/* before reusing tdc */
            }
            tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
#ifdef AFS_DISCON_ENV
            if (!tdc) {
                printf("Network down in afs_read");
                error = ENETDOWN;
                break;
            }
#endif /* AFS_DISCON_ENV */

            ObtainReadLock(&tdc->lock);
            /* now, first try to start transfer, if we'll need the data.  If
             * data already coming, we don't need to do this, obviously.  Type
             * 2 requests never return a null dcache entry, btw. */
            if (!(tdc->dflags & DFFetching)
                    && !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
                /* have cache entry, it is not coming in now, and we'll need new data */
tagain:
                if (trybusy && !afs_BBusy()) {
                    struct brequest *bp;
                    /* daemon is not busy */
                    ObtainSharedLock(&tdc->mflock, 667);
                    if (!(tdc->mflags & DFFetchReq)) {
                        UpgradeSToWLock(&tdc->mflock, 668);
                        tdc->mflags |= DFFetchReq;
                        bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
                                        (afs_size_t) filePos, (afs_size_t) 0,
                                        tdc);
                        if (!bp) {
                            /* Bkg table full; retry deadlocks */
                            tdc->mflags &= ~DFFetchReq;
                            trybusy = 0;	/* Avoid bkg daemon since they're too busy */
                            ReleaseWriteLock(&tdc->mflock);
                            goto tagain;
                        }
                        ConvertWToSLock(&tdc->mflock);
                    }
                    code = 0;
                    ConvertSToRLock(&tdc->mflock);
                    while (!code && tdc->mflags & DFFetchReq) {
                        afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
                                   ICL_TYPE_STRING, __FILE__, ICL_TYPE_INT32,
                                   __LINE__, ICL_TYPE_POINTER, tdc,
                                   ICL_TYPE_INT32, tdc->dflags);
                        /* don't need waiting flag on this one */
                        ReleaseReadLock(&tdc->mflock);
                        ReleaseReadLock(&tdc->lock);
                        ReleaseReadLock(&avc->lock);
                        code = afs_osi_SleepSig(&tdc->validPos);
                        ObtainReadLock(&avc->lock);
                        ObtainReadLock(&tdc->lock);
                        ObtainReadLock(&tdc->mflock);
                    }
                    ReleaseReadLock(&tdc->mflock);
                    if (code) {
                        error = code;
                        break;
                    }
                }
            }
            /* now data may have started flowing in (if DFFetching is on).  If
             * data is now streaming in, then wait for some interesting stuff.
             */
            code = 0;
            while (!code && (tdc->dflags & DFFetching)
                    && tdc->validPos <= filePos) {
                /* too early: wait for DFFetching flag to vanish,
                 * or data to appear */
                afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
                           __FILE__, ICL_TYPE_INT32, __LINE__,
                           ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
                           tdc->dflags);
                ReleaseReadLock(&tdc->lock);
                ReleaseReadLock(&avc->lock);
                code = afs_osi_SleepSig(&tdc->validPos);
                ObtainReadLock(&avc->lock);
                ObtainReadLock(&tdc->lock);
            }
            if (code) {
                error = code;
                break;
            }
            /* fetching flag gone, data is here, or we never tried
             * (BBusy for instance) */
            if (tdc->dflags & DFFetching) {
                /* still fetching, some new data is here:
                 * compute length and offset */
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            } else {
                /* no longer fetching, verify data version (avoid new
                 * GetDCache call) */
                if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
                        && ((len = tdc->validPos - filePos) > 0)) {
                    offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                } else {
                    /* don't have current data, so get it below */
                    afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
                               ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
                               ICL_TYPE_HYPER, &avc->f.m.DataVersion,
                               ICL_TYPE_HYPER, &tdc->f.versionNo);
                    ReleaseReadLock(&tdc->lock);
                    afs_PutDCache(tdc);
                    tdc = NULL;
                }
            }

            if (!tdc) {
                /* If we get here, it was not possible to start the
                 * background daemon. With flag == 1 afs_GetDCache
                 * does the FetchData rpc synchronously.
                 */
                ReleaseReadLock(&avc->lock);
                tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
                ObtainReadLock(&avc->lock);
                if (tdc)
                    ObtainReadLock(&tdc->lock);
            }
        }

        if (!tdc) {
            error = EIO;
            break;
        }
        len = tdc->validPos - filePos;
        afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD, ICL_TYPE_POINTER, tdc,
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
        if (len > totalLength)
            len = totalLength;	/* will read len bytes */
        if (len <= 0) {		/* shouldn't get here if DFFetching is on */
            afs_Trace4(afs_iclSetp, CM_TRACE_VNODEREAD2, ICL_TYPE_POINTER,
                       tdc, ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tdc->validPos),
                       ICL_TYPE_INT32, tdc->f.chunkBytes, ICL_TYPE_INT32,
                       tdc->dflags);
            /* read past the end of a chunk, may not be at next chunk yet, and yet
             * also not at eof, so may have to supply fake zeros */
            len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;	/* bytes left in chunk addr space */
            if (len > totalLength)
                len = totalLength;	/* and still within xfr request */
            tlen = avc->f.m.Length - offset;	/* and still within file */
            if (len > tlen)
                len = tlen;
            if (len > AFS_ZEROS)
                len = sizeof(afs_zeros);	/* and in 0 buffer */
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
#endif
            AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
            if (code) {
                error = code;
                break;
            }
        } else {
            /* get the data from the file */
#ifdef IHINT
            if (tfile = tdc->ihint) {
                if (tdc->f.inode != tfile->inum) {
                    afs_warn("afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
                             tdc, tdc->f.inode, tfile->inum);
                    osi_UFSClose(tfile);
                    tdc->ihint = tfile = 0;
                    nihints--;
                }
            }
            if (tfile != 0) {
                usedihint++;
            } else
#endif /* IHINT */
#if defined(LINUX_USE_FH)
                tfile = (struct osi_file *)osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
#else
                tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
#endif
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
            uio_setoffset(tuiop, offset);
#else
            /* mung uio structure to be right for this transfer */
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
            tuio.afsio_offset = offset;
#endif

#if defined(AFS_AIX41_ENV)
            AFS_GUNLOCK();
            code =
                VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
                          NULL, afs_osi_credp);
            AFS_GLOCK();
#elif defined(AFS_AIX32_ENV)
            code =
                VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
            /* Flush all JFS pages now for big performance gain in big file cases
             * If we do something like this, must check to be sure that AFS file
             * isn't mmapped... see afs_gn_map() for why.
             */
            /*
            	  if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
             many different ways to do similar things:
               so far, the best performing one is #2, but #1 might match it if we
               straighten out the confusion regarding which pages to flush.  It
               really does matter.
               1.	    vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
               2.	    vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
            			(len + PAGESIZE-1)/PAGESIZE);
               3.	    vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
               4.  	    vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
            	    tfile->vnode->v_gnode->gn_seg = NULL;
               5.       deletep
               6.       ipgrlse
               7.       ifreeseg
                      Unfortunately, this seems to cause frequent "cache corruption" episodes.
               	    vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
            			(len + PAGESIZE-1)/PAGESIZE);
            	  }
            */
#elif defined(AFS_AIX_ENV)
            code =
                VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t) & offset,
                          &tuio, NULL, NULL, -1);
#elif defined(AFS_SUN5_ENV)
            AFS_GUNLOCK();
#ifdef AFS_SUN510_ENV
            {
                caller_context_t ct;

                VOP_RWLOCK(tfile->vnode, 0, &ct);
                code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
                VOP_RWUNLOCK(tfile->vnode, 0, &ct);
            }
#else
            VOP_RWLOCK(tfile->vnode, 0);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_RWUNLOCK(tfile->vnode, 0);
#endif
            AFS_GLOCK();
#elif defined(AFS_SGI_ENV)
            AFS_GUNLOCK();
            AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
            AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp,
                         code);
            AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
            AFS_GLOCK();
#elif defined(AFS_OSF_ENV)
            tuio.uio_rw = UIO_READ;
            AFS_GUNLOCK();
            VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp, code);
            AFS_GLOCK();
#elif defined(AFS_HPUX100_ENV)
            AFS_GUNLOCK();
            code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, afs_osi_credp);
            AFS_GLOCK();
#elif defined(AFS_LINUX20_ENV)
            AFS_GUNLOCK();
            code = osi_rdwr(tfile, &tuio, UIO_READ);
            AFS_GLOCK();
#elif defined(AFS_DARWIN80_ENV)
            AFS_GUNLOCK();
            code = VNOP_READ(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
            AFS_GLOCK();
#elif defined(AFS_DARWIN_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0, current_proc());
            AFS_GLOCK();
#elif defined(AFS_FBSD80_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0);
            AFS_GLOCK();
#elif defined(AFS_FBSD50_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0, curthread);
            AFS_GLOCK();
#elif defined(AFS_XBSD_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0, curproc);
            AFS_GLOCK();
#else
            code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, afs_osi_credp);
#endif

#ifdef IHINT
            if (!tdc->ihint && nihints < maxIHint) {
                tdc->ihint = tfile;
                nihints++;
            } else
#endif /* IHINT */
                osi_UFSClose(tfile);

            if (code) {
                error = code;
                break;
            }
        }
        /* otherwise we've read some, fixup length, etc and continue with next seg */
        len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
        trimlen = len;
        afsio_skip(auio, trimlen);	/* update input uio structure */
        totalLength -= len;
        transferLength += len;
        filePos += len;
        if (len <= 0)
            break;		/* surprise eof */
#ifdef AFS_DARWIN80_ENV
        if (tuiop) {
            uio_free(tuiop);
            tuiop = 0;
        }
#endif
    }

    /* if we make it here with tdc non-zero, then it is the last chunk we
     * dealt with, and we have to release it when we're done.  We hold on
     * to it in case we need to do a prefetch, obviously.
     */
    if (tdc) {
        ReleaseReadLock(&tdc->lock);
#if !defined(AFS_VM_RDWR_ENV)
        /* try to queue prefetch, if needed */
        if (!noLock) {
            if (!(tdc->mflags & DFNextStarted))
                afs_PrefetchChunk(avc, tdc, acred, &treq);
        }
#endif
        afs_PutDCache(tdc);
    }
    if (!noLock)
        ReleaseReadLock(&avc->lock);

#ifdef AFS_DARWIN80_ENV
    if (tuiop)
        uio_free(tuiop);
#else
    osi_FreeSmallSpace(tvec);
#endif
    AFS_DISCON_UNLOCK();
    error = afs_CheckCode(error, &treq, 13);
    return error;
}
Example #23
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    int LRUme;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc, *tnc1 = 0;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    ma_critical_enter();

    if (!afs_usednlc) {
      ma_critical_exit();
      return 0;
    }

    dnlcHash(ts, key);		/* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
      ma_critical_exit();
      return 0;
    }
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);

    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
	 tnc = tnc->next, safety++) {
	if ( /* (tnc->key == key)  && */ (tnc->dirp == adp)
	    && (!strcmp((char *)tnc->name, aname))) {
	    tvc = tnc->vp;
	    tnc1 = tnc;
	    break;
	} else if (tnc->next == nameHash[skey]) {	/* end of list */
	    break;
	} else if (safety > NCSIZE) {
	    afs_warn("DNLC cycle");
	    dnlcstats.cycles++;
	    ReleaseReadLock(&afs_xdnlc);
	    ReleaseReadLock(&afs_xvcache);
	    osi_dnlc_purge();
	    ma_critical_exit();
	    return (0);
	}
    }

    LRUme = 0;			/* (tnc != nameHash[skey]); */
    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
	ReleaseReadLock(&afs_xvcache);
	dnlcstats.misses++;
    } else {
	if ((tvc->f.states & CVInit)
#ifdef  AFS_DARWIN80_ENV
	    ||(tvc->f.states & CDeadVnode)
#endif
	    )      
	{
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    ma_critical_exit();
	    return 0;
	}
#if defined(AFS_DARWIN80_ENV)
	tvp = AFSTOV(tvc);
	if (vnode_get(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    ma_critical_exit();
	    return 0;
	}
	if (vnode_ref(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    AFS_GUNLOCK();
	    vnode_put(tvp);
	    AFS_GLOCK();
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    ma_critical_exit();
	    return 0;
	}
#elif defined(AFS_FBSD_ENV)
	/* can't sleep in a critical section */
	ma_critical_exit();
	osi_vnhold(tvc, 0);
	ma_critical_enter();
#else
	osi_vnhold(tvc, 0);
#endif
	ReleaseReadLock(&afs_xvcache);

#ifdef	notdef
	/* 
	 * XX If LRUme ever is non-zero change the if statement around because
	 * aix's cc with optimizer on won't necessarily check things in order XX
	 */
	if (LRUme && (0 == NBObtainWriteLock(&afs_xdnlc))) {
	    /* don't block to do this */
	    /* tnc might have been moved during race condition, */
	    /* but it's always in a legit hash chain when a lock is granted, 
	     * or else it's on the freelist so prev == NULL, 
	     * so at worst this is redundant */
	    /* Now that we've got it held, and a lock on the dnlc, we 
	     * should check to be sure that there was no race, and 
	     * bail out if there was. */
	    if (tnc->prev) {
		/* special case for only two elements on list - relative ordering
		 * doesn't change */
		if (tnc->prev != tnc->next) {
		    /* remove from old location */
		    tnc->prev->next = tnc->next;
		    tnc->next->prev = tnc->prev;
		    /* insert into new location */
		    tnc->next = nameHash[skey];
		    tnc->prev = tnc->next->prev;
		    tnc->next->prev = tnc;
		    tnc->prev->next = tnc;
		}
		nameHash[skey] = tnc;
	    }
	    ReleaseWriteLock(&afs_xdnlc);
	}
#endif
    }

    ma_critical_exit();
    return tvc;
}
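The lookup above walks a circular hash chain (the last node's next pointer wraps back to the bucket head) and uses a safety counter to escape if corruption has turned the chain into a detached cycle. Below is a minimal user-space sketch of that traversal pattern; struct entry, chain_lookup, and the NHSIZE/NCSIZE values are hypothetical stand-ins, not the OpenAFS definitions.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define NHSIZE 256		/* stand-in hash table size (power of two) */
#define NCSIZE 1024		/* stand-in cap on cache entries */

struct entry {
    struct entry *next;		/* circular: last node points back to head */
    const void *dirp;		/* parent directory key */
    char name[64];
    void *payload;
};

static struct entry *name_hash[NHSIZE];

/* Walk one circular chain; return the payload, or NULL on a miss.
 * The safety counter catches a corrupted chain that cycles without
 * ever passing through the bucket head again. */
void *
chain_lookup(const void *dirp, const char *name, unsigned skey)
{
    struct entry *e = name_hash[skey];
    int safety = 0;

    for (; e; e = e->next, safety++) {
	if (e->dirp == dirp && strcmp(e->name, name) == 0)
	    return e->payload;			/* hit */
	if (e->next == name_hash[skey])
	    return NULL;			/* wrapped: end of chain */
	if (safety > NCSIZE) {
	    fprintf(stderr, "cache chain cycle detected\n");
	    return NULL;			/* caller should purge */
	}
    }
    return NULL;				/* empty bucket */
}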
Exemple #24
0
/* This function always holds the GLOCK whilst it is running. The caller
 * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
 * whilst we are sleeping, and regains it when we're woken up.
 */
void
afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck, last5MinCheck;
    afs_uint32 lastCBSlotBump;
    char cs_warned = 0;

    AFS_STATCNT(afs_Daemon);

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);

#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
        osi_Panic("vfs context already initialized");
    while (afs_osi_ctxtp && vfs_context_ref)
        afs_osi_Sleep(&afs_osi_ctxtp);
    if (afs_osi_ctxtp && !vfs_context_ref)
        vfs_context_rele(afs_osi_ctxtp);
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;
#endif
    now = osi_Time();
    lastCBSlotBump = now;

    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);

    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
	afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

	/* things to do every 20 seconds or less - required by protocol spec */
	if (afs_nfsexporter)
	    afs_FlushActiveVcaches(0);	/* flush NFS writes */
	afs_FlushVCBs(1);	/* flush queued callbacks */

	afs_MaybeWakeupTruncateDaemon();	/* free cache space if we have to */
	rx_CheckPackets();	/* Does RX need more packets? */

	now = osi_Time();
	if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependent */
	    lastCBSlotBump = now;
	    if (afs_BumpBase()) {
		afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
	    }
	}

	if (last1MinCheck + 60 < now) {
	    /* things to do every minute */
	    DFlush();		/* write out dir buffers */
	    afs_WriteThroughDSlots();	/* write through cacheinfo entries */
	    ObtainWriteLock(&afs_xvcache, 736);
	    afs_FlushReclaimedVcaches();
	    ReleaseWriteLock(&afs_xvcache);
	    afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
#if 0
	    afs_StoreDirtyVcaches();
#endif
	    afs_CheckRXEpoch();
	    last1MinCheck = now;
	}

	if (last3MinCheck + 180 < now) {
	    afs_CheckTokenCache();	/* check for access cache resets due to expired
					 * tickets */
	    last3MinCheck = now;
	}

	if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
	    /* start with trying to drop us back to our base usage */
	    int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);

	    if (anumber > 0) {
		ObtainWriteLock(&afs_xvcache, 734);
		afs_ShakeLooseVCaches(anumber);
		ReleaseWriteLock(&afs_xvcache);
	    }
	    last5MinCheck = now;
	}

	if (!afs_CheckServerDaemonStarted) {
	    /* Do the check here if the correct afsd is not installed. */
	    if (!cs_warned) {
		cs_warned = 1;
		afs_warn("Please install afsd with check server daemon.\n");
	    }
	    if (lastNMinCheck + afs_probe_interval < now) {
		/* only check down servers */
		afs_CheckServers(1, NULL);
		lastNMinCheck = now;
	    }
	}
	if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
	    extern int rxi_GetcbiInfo(void);
#endif
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
	    if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#else /* AFS_USERSPACE_IP_ADDR */
	    if (rxi_GetIFInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#endif /* else AFS_USERSPACE_IP_ADDR */
	    if (!afs_CheckServerDaemonStarted)
		afs_CheckServers(0, NULL);
	    afs_GCUserData(0);	/* gc old conns */
	    /* This is probably the wrong way of doing GC for the various exporters but it will suffice for a while */
	    for (exporter = root_exported; exporter;
		 exporter = exporter->exp_next) {
		(void)EXP_GC(exporter, 0);	/* Generalize params */
	    }
	    {
		static int cnt = 0;
		if (++cnt < 12) {
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY);
		} else {
		    cnt = 0;
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY |
					 AFS_VOLCHECK_MTPTS);
		}
	    }
	    last10MinCheck = now;
	}
	if (last60MinCheck + 3600 < now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
		       3600);
	    afs_CheckRootVolume();
#if AFS_GCPAGS
	    if (afs_gcpags == AFS_GCPAGS_OK) {
		afs_int32 didany;
		afs_GCPAGs(&didany);
	    }
#endif
	    last60MinCheck = now;
	}
	if (afs_initState < 300) {	/* while things ain't rosy */
	    code = afs_CheckRootVolume();
	    if (code == 0)
		afs_initState = 300;	/* succeeded */
	    if (afs_initState < 200)
		afs_initState = 200;	/* tried once */
	    afs_osi_Wakeup(&afs_initState);
	}

	/* 18285 is because we're trying to divide evenly into 128, that is,
	 * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
	 * changes, should probably change this interval, too.
	 * Some of the preceding actions may take quite some time, so we
	 * might not want to wait the entire interval */
	now = 18285 - (osi_Time() - now);
	if (now > 0) {
	    afs_osi_Wait(now, &AFS_WaitHandler, 0);
	}

	if (afs_termState == AFSOP_STOP_AFS) {
	    if (afs_CheckServerDaemonStarted)
		afs_termState = AFSOP_STOP_CS;
	    else
		afs_termState = AFSOP_STOP_TRUNCDAEMON;
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}
    }
}
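The randomized offsets at startup spread each client's first periodic check across its interval, so a fleet of simultaneously booted machines doesn't hit the VL servers in lockstep. A self-contained sketch of the same "lastCheck + period < now" scheme with staggered initialization (hypothetical names; the simulation loop stands in for the daemon's sleep/wake cycle):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* One timer: fires once at least 'period' seconds have elapsed since
 * 'last'.  Seeding 'last' with a random offset inside the period
 * staggers the first firing uniformly across clients. */
struct periodic {
    long last;
    long period;
};

static void
periodic_init(struct periodic *p, long now, long period)
{
    p->period = period;
    /* start somewhere in (now - period, now]: the first fire lands
     * uniformly within the next 'period' seconds */
    p->last = now - (rand() % period);
}

static int
periodic_due(struct periodic *p, long now)
{
    if (p->last + p->period < now) {
	p->last = now;
	return 1;
    }
    return 0;
}

int
main(void)
{
    struct periodic one_min, three_min;
    long now = (long)time(NULL);
    long t;

    srand((unsigned)now);
    periodic_init(&one_min, now, 60);
    periodic_init(&three_min, now, 180);

    /* simulate ten minutes of daemon wakeups, one per "second" */
    for (t = now; t < now + 600; t++) {
	if (periodic_due(&one_min, t))
	    printf("t=%ld: minute check fires\n", t - now);
	if (periodic_due(&three_min, t))
	    printf("t=%ld: three-minute check fires\n", t - now);
    }
    return 0;
}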
Exemple #25
0
/* lp points to a fairly old buffer */
static struct buffer *
afs_newslot(struct dcache *adc, afs_int32 apage, struct buffer *lp)
{
    /* Find a usable buffer slot */
    afs_int32 i;
    afs_int32 lt = 0;
    struct buffer *tp;
    struct osi_file *tfile;

    AFS_STATCNT(afs_newslot);
    /* we take a pointer here to a buffer which was at the end of an
     * LRU hash chain.  Odds are, it's one of the older buffers, not
     * one of the newer.  Having an older buffer to start with may
     * permit us to avoid a few of the assignments in the "typical
     * case" for loop below.
     */
    if (lp && (lp->lockers == 0)) {
	lt = lp->accesstime;
    } else {
	lp = NULL;
    }

    /* timecounter might have wrapped, if machine is very very busy
     * and stays up for a long time.  Timecounter mustn't wrap twice
     * (positive->negative->positive) before calling newslot, but that
     * would require 2 billion consecutive cache hits... Anyway, the
     * penalty is only that the cache replacement policy will be
     * almost MRU for the next ~2 billion DReads...  newslot doesn't
     * get called nearly as often as DRead, so in order to avoid the
     * performance penalty of using the hypers, it's worth doing the
     * extra check here every time.  It's probably cheaper than doing
     * hcmp, anyway.  There is a little performance hit resulting from
     * resetting all the access times to 0, but it only happens once
     * every month or so, and the access times will rapidly sort
     * themselves back out after just a few more DReads.
     */
    if (timecounter < 0) {
	timecounter = 1;
	tp = Buffers;
	for (i = 0; i < nbuffers; i++, tp++) {
	    tp->accesstime = 0;
	    if (!lp && !tp->lockers)	/* one is as good as the rest, I guess */
		lp = tp;
	}
    } else {
	/* this is the typical case */
	tp = Buffers;
	for (i = 0; i < nbuffers; i++, tp++) {
	    if (tp->lockers == 0) {
		if (!lp || tp->accesstime < lt) {
		    lp = tp;
		    lt = tp->accesstime;
		}
	    }
	}
    }

    if (lp == 0) {
	/* No unlocked buffers. If still possible, allocate a new increment */
	if (nbuffers + NPB > afs_max_buffers) {
	    /* There are no unlocked buffers -- this used to panic, but that
	     * seems extreme.  To the best of my knowledge, all the callers
	     * of DRead are prepared to handle a zero return.  Some of them
	     * just panic directly, but not all of them. */
	    afs_warn("afs: all buffers locked\n");
	    return 0;
	}

	BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
	osi_Assert(BufferData != NULL);
	for (i = 0; i < NPB; i++) {
	    /* Fill in each buffer with an empty indication. */
	    tp = &Buffers[i + nbuffers];
	    tp->fid = NULLIDX;
	    afs_reset_inode(&tp->inode);
	    tp->accesstime = 0;
	    tp->lockers = 0;
	    tp->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
	    tp->hashIndex = 0;
	    tp->dirty = 0;
	    AFS_RWLOCK_INIT(&tp->lock, "buffer lock");
	}
	lp = &Buffers[nbuffers];
	nbuffers += NPB;
    }

    if (lp->dirty) {
	/* see DFlush for rationale for not getting and locking the dcache */
	tfile = afs_CFileOpen(&lp->inode);
	afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
		       AFS_BUFFER_PAGESIZE);
	lp->dirty = 0;
	afs_CFileClose(tfile);
	AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
    }

    /* Now fill in the header. */
    lp->fid = adc->index;
    afs_copy_inode(&lp->inode, &adc->f.inode);
    lp->page = apage;
    lp->accesstime = timecounter++;
    FixupBucket(lp);		/* move to the right hash bucket */

    return lp;
}
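The slot hunt above is a linear scan for the unlocked buffer with the smallest access time, plus a one-time stamp reset when the logical clock wraps negative. A compact stand-alone version of that replacement policy (hypothetical struct buf and lru_victim; not the kernel types):

#include <stddef.h>

struct buf {
    int lockers;		/* pin count; only unpinned buffers are victims */
    long accesstime;		/* logical clock stamp from last use */
};

static long logical_clock = 1;

/* Pick the least-recently-used unpinned buffer, or NULL if all are
 * pinned.  If the logical clock has wrapped negative, reset every
 * stamp: replacement briefly degrades toward MRU, then the access
 * times sort themselves back out, as the comment in afs_newslot
 * argues. */
struct buf *
lru_victim(struct buf *bufs, int n)
{
    struct buf *lp = NULL;
    long lt = 0;
    int i;

    if (logical_clock < 0) {
	logical_clock = 1;
	for (i = 0; i < n; i++) {
	    bufs[i].accesstime = 0;
	    if (!lp && bufs[i].lockers == 0)	/* one is as good as any */
		lp = &bufs[i];
	}
	return lp;
    }
    /* typical case: scan for the smallest access time */
    for (i = 0; i < n; i++) {
	if (bufs[i].lockers == 0 && (!lp || bufs[i].accesstime < lt)) {
	    lp = &bufs[i];
	    lt = bufs[i].accesstime;
	}
    }
    return lp;
}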
Exemple #26
0
/**
 * UFS-specific version of afs_GetVolSlot.
 * @return a free volume slot, or NULL if none could be obtained
 */
struct volume *
afs_UFSGetVolSlot(void)
{
    struct volume *tv = NULL, **lv;
    struct osi_file *tfile;
    afs_int32 i = -1, code;
    afs_int32 bestTime;
    struct volume *bestVp, *oldLp = NULL, **bestLp = NULL;
    char *oldname = NULL;
    afs_int32 oldvtix = -2; /* Initialize to a value that doesn't occur */

    AFS_STATCNT(afs_UFSGetVolSlot);
    if (!afs_freeVolList) {
	/* get free slot */
	bestTime = 0x7fffffff;
	bestVp = 0;
	bestLp = 0;
	for (i = 0; i < NVOLS; i++) {
	    lv = &afs_volumes[i];
	    for (tv = *lv; tv; lv = &tv->next, tv = *lv) {
		if (tv->refCount == 0) {	/* is this one available? */
		    if (tv->accessTime < bestTime) {	/* best one available? */
			bestTime = tv->accessTime;
			bestLp = lv;
			bestVp = tv;
		    }
		}
	    }
	}
	if (!bestVp) {
	    afs_warn("afs_UFSGetVolSlot: no vol slots available\n");
	    goto error;
	}
	tv = bestVp;

	oldLp = *bestLp;
	*bestLp = tv->next;

	oldname = tv->name;
	tv->name = NULL;

	oldvtix = tv->vtix;
	/* now write out volume structure to file */
	if (tv->vtix < 0) {
	    tv->vtix = afs_volCounter++;
	    /* now put on hash chain */
	    i = FVHash(tv->cell, tv->volume);
	    staticFVolume.next = fvTable[i];
	    fvTable[i] = tv->vtix;
	} else {
	    /*
	     * Haul the entry in from disk so we don't overwrite the
	     * hash table's next chain
	     */
	    if (afs_FVIndex != tv->vtix) {
		tfile = osi_UFSOpen(&volumeInode);
		code =
		    afs_osi_Read(tfile, sizeof(struct fvolume) * tv->vtix,
				 &staticFVolume, sizeof(struct fvolume));
		osi_UFSClose(tfile);
		if (code != sizeof(struct fvolume)) {
		    afs_warn("afs_UFSGetVolSlot: error %d reading volumeinfo\n",
		             (int)code);
		    goto error;
		}
		afs_FVIndex = tv->vtix;
	    }
	}
	afs_FVIndex = tv->vtix;
	staticFVolume.volume = tv->volume;
	staticFVolume.cell = tv->cell;
	staticFVolume.mtpoint = tv->mtpoint;
	staticFVolume.dotdot = tv->dotdot;
	staticFVolume.rootVnode = tv->rootVnode;
	staticFVolume.rootUnique = tv->rootUnique;
	tfile = osi_UFSOpen(&volumeInode);
	code =
	    afs_osi_Write(tfile, sizeof(struct fvolume) * afs_FVIndex,
			  &staticFVolume, sizeof(struct fvolume));
	osi_UFSClose(tfile);
	if (code != sizeof(struct fvolume)) {
	    afs_warn("afs_UFSGetVolSlot: error %d writing volumeinfo\n",
	             (int)code);
	    goto error;
	}
	if (oldname) {
	    afs_osi_Free(oldname, strlen(oldname) + 1);
	    oldname = NULL;
	}
    } else {
	tv = afs_freeVolList;
	afs_freeVolList = tv->next;
    }
    return tv;

 error:
    if (tv) {
	if (oldvtix == -2) {
	    afs_warn("afs_UFSGetVolSlot: oldvtix is uninitialized\n");
	    return NULL;
	}
	if (oldname) {
	    tv->name = oldname;
	    oldname = NULL;
	}
	if (oldvtix < 0) {
	    afs_volCounter--;
	    fvTable[i] = staticFVolume.next;
	}
	if (bestLp) {
	    *bestLp = oldLp;
	}
	tv->vtix = oldvtix;
	/* we messed with staticFVolume, so make sure someone else
	 * doesn't think it's fine to use */
	afs_FVIndex = -1;
    }
    return NULL;
}				/*afs_UFSGetVolSlot */
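Worth noting is the rollback discipline above: everything the slot recycling clobbers (name, vtix, hash linkage) is saved up front so the error path can restore the victim exactly as found. A miniature sketch of the same save-then-rollback idiom (hypothetical struct slot and recycle_slot):

#include <stddef.h>
#include <string.h>

struct slot {
    char *name;
    int vtix;
};

/* Repurpose 'victim' for a new use; on failure, restore it untouched.
 * Mirrors the save-then-rollback shape of afs_UFSGetVolSlot. */
int
recycle_slot(struct slot *victim, const char *newname, char *buf,
	     size_t buflen)
{
    char *oldname = victim->name;	/* save everything we will clobber */
    int oldvtix = victim->vtix;

    victim->name = NULL;
    victim->vtix = -1;

    if (strlen(newname) + 1 > buflen)
	goto error;			/* simulated failure mid-recycle */

    memcpy(buf, newname, strlen(newname) + 1);
    victim->name = buf;
    victim->vtix = 0;
    return 0;

 error:
    victim->name = oldname;		/* put the old identity back */
    victim->vtix = oldvtix;
    return -1;
}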
Exemple #27
0
/**
 * Look up a volume by id and cell, or set up a new volume entry from
 * the given VLDB record if none is cached.
 *
 * @param volid Volume ID. If it's 0, get it from the name.
 * @param aname Volume name.
 * @param ve Volume entry.
 * @param tcell The cell containing this volume.
 * @param agood If non-zero, remember the volume name in the entry.
 * @param type Type of VLDB entry: 0 (vldbentry), 1 (nvldbentry), or 2 (uvldbentry).
 * @param areq Request.
 * @return Volume or NULL if failure.
 */
static struct volume *
afs_SetupVolume(afs_int32 volid, char *aname, void *ve, struct cell *tcell,
		afs_int32 agood, afs_int32 type, struct vrequest *areq)
{
    struct volume *tv;
    struct vldbentry *ove = (struct vldbentry *)ve;
    struct nvldbentry *nve = (struct nvldbentry *)ve;
    struct uvldbentry *uve = (struct uvldbentry *)ve;

    int whichType;		/* which type of volume to look for */
    int i, j, err = 0;

    if (!volid) {
	int len;
	/* special hint from file server to use vlserver */
	len = strlen(aname);
	if (len >= 8 && strcmp(aname + len - 7, ".backup") == 0)
	    whichType = BACKVOL;
	else if (len >= 10 && strcmp(aname + len - 9, ".readonly") == 0)
	    whichType = ROVOL;
	else
	    whichType = RWVOL;

	/* figure out which one we're really interested in (a set is returned) */
	volid = afs_vtoi(aname);
	if (volid == 0) {
	    if (type == 2) {
		volid = uve->volumeId[whichType];
	    } else if (type == 1) {
		volid = nve->volumeId[whichType];
	    } else {
		volid = ove->volumeId[whichType];
	    }
	} /* end of if (volid == 0) */
    } /* end of if (!volid) */


    ObtainWriteLock(&afs_xvolume, 108);
    i = VHash(volid);
    for (tv = afs_volumes[i]; tv; tv = tv->next) {
	if (tv->volume == volid && tv->cell == tcell->cellNum) {
	    break;
	}
    }
    if (!tv) {
	struct fvolume *tf = 0;

	tv = afs_GetVolSlot();
	if (!tv) {
	    ReleaseWriteLock(&afs_xvolume);
	    return NULL;
	}
	memset(tv, 0, sizeof(struct volume));

	for (j = fvTable[FVHash(tcell->cellNum, volid)]; j != 0; j = tf->next) {
	    if (afs_FVIndex != j) {
		struct osi_file *tfile;
	        tfile = osi_UFSOpen(&volumeInode);
		err =
		    afs_osi_Read(tfile, sizeof(struct fvolume) * j,
				 &staticFVolume, sizeof(struct fvolume));
		osi_UFSClose(tfile);
		if (err != sizeof(struct fvolume)) {
		    afs_warn("afs_SetupVolume: error %d reading volumeinfo\n",
			     (int)err);
		    /* put tv back on the free list; the data in it is not valid */
		    tv->next = afs_freeVolList;
		    afs_freeVolList = tv;
		    /* staticFVolume contents are not valid */
		    afs_FVIndex = -1;
		    ReleaseWriteLock(&afs_xvolume);
		    return NULL;
		}
		afs_FVIndex = j;
	    }
	    tf = &staticFVolume;
	    if (tf->cell == tcell->cellNum && tf->volume == volid)
		break;
	}

	tv->cell = tcell->cellNum;
	AFS_RWLOCK_INIT(&tv->lock, "volume lock");
	tv->next = afs_volumes[i];	/* thread into list */
	afs_volumes[i] = tv;
	tv->volume = volid;

	if (tf && (j != 0)) {
	    tv->vtix = afs_FVIndex;
	    tv->mtpoint = tf->mtpoint;
	    tv->dotdot = tf->dotdot;
	    tv->rootVnode = tf->rootVnode;
	    tv->rootUnique = tf->rootUnique;
	} else {
	    tv->vtix = -1;
	    tv->rootVnode = tv->rootUnique = 0;
	    afs_GetDynrootMountFid(&tv->dotdot);
	    afs_GetDynrootMountFid(&tv->mtpoint);
	    tv->mtpoint.Fid.Vnode =
		VNUM_FROM_TYPEID(VN_TYPE_MOUNT, tcell->cellIndex << 2);
	    tv->mtpoint.Fid.Unique = volid;
	}
    }
    tv->refCount++;
    tv->states &= ~VRecheck;	/* just checked it */
    tv->accessTime = osi_Time();
    ReleaseWriteLock(&afs_xvolume);
    if (type == 2) {
	LockAndInstallUVolumeEntry(tv, uve, tcell->cellNum, tcell, areq);
    } else if (type == 1) {
	LockAndInstallNVolumeEntry(tv, nve, tcell->cellNum);
    } else {
	LockAndInstallVolumeEntry(tv, ove, tcell->cellNum);
    }
    if (agood) {
	if (!tv->name) {
	    tv->name = afs_osi_Alloc(strlen(aname) + 1);
	    osi_Assert(tv->name != NULL);
	    strcpy(tv->name, aname);
	}
    }
    for (i = 0; i < NMAXNSERVERS; i++) {
	tv->status[i] = not_busy;
    }
    ReleaseWriteLock(&tv->lock);
    return tv;
}
Exemple #28
0
int
osi_dnlc_enter(struct vcache *adp, char *aname, struct vcache *avc,
	       afs_hyper_t * avno)
{
    struct nc *tnc;
    unsigned int key, skey;
    char *ts = aname;
    int safety;

    if (!afs_usednlc)
	return 0;

    TRACE(osi_dnlc_enterT, 0);
    dnlcHash(ts, key);		/* leaves ts pointing at the trailing NUL */
    if (ts - aname >= AFSNCNAMESIZE) {
	return 0;
    }
    skey = key & (NHSIZE - 1);
    dnlcstats.enters++;

  retry:
    ObtainWriteLock(&afs_xdnlc, 222);

    /* Only cache entries from the latest version of the directory */
    if (!(adp->f.states & CStatd) || !hsame(*avno, adp->f.m.DataVersion)) {
	ReleaseWriteLock(&afs_xdnlc);
	return 0;
    }

    /*
     * Make sure each directory entry gets cached no more than once.
     */
    for (tnc = nameHash[skey], safety = 0; tnc; tnc = tnc->next, safety++) {
	if ((tnc->dirp == adp) && (!strcmp((char *)tnc->name, aname))) {
	    /* duplicate entry */
	    break;
	} else if (tnc->next == nameHash[skey]) {	/* end of list */
	    tnc = NULL;
	    break;
	} else if (safety > NCSIZE) {
	    afs_warn("DNLC cycle");
	    dnlcstats.cycles++;
	    ReleaseWriteLock(&afs_xdnlc);
	    osi_dnlc_purge();
	    goto retry;
	}
    }

    if (tnc == NULL) {
	tnc = GetMeAnEntry();

	tnc->dirp = adp;
	tnc->vp = avc;
	tnc->key = key;
	memcpy((char *)tnc->name, aname, ts - aname + 1);	/* include the trailing NUL */

	InsertEntry(tnc);
    } else {
	/* duplicate */
	tnc->vp = avc;
    }
    ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
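Both DNLC routines depend on the dnlcHash macro's side effect: it leaves ts on the name's trailing NUL, so ts - aname doubles as the length guard against AFSNCNAMESIZE. A toy stand-in with the same contract (the mixing step is illustrative, not the actual OpenAFS hash):

#include <stdio.h>

#define AFSNCNAMESIZE 36	/* stand-in for the real cap on cached names */

/* Rolling hash with the same contract as the dnlcHash macro: hash the
 * bytes of *tsp's string and leave *tsp pointing at the trailing NUL,
 * so (*tsp - start) is the name length. */
static unsigned
name_hash(const char **tsp)
{
    unsigned key = 0;

    for (; **tsp != '\0'; (*tsp)++)
	key = (key << 2) + (unsigned char)**tsp;	/* toy mixing step */
    return key;
}

int
main(void)
{
    const char *name = "README";
    const char *ts = name;
    unsigned key = name_hash(&ts);

    if (ts - name >= AFSNCNAMESIZE) {
	puts("name too long to cache");
	return 0;
    }
    printf("key=%u len=%ld\n", key, (long)(ts - name));
    return 0;
}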