Example #1
void
afs_osi_Sleep(void *event)
{
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
#ifdef AFS_DARWIN80_ENV
    AFS_ASSERT_GLOCK();
    AFS_GUNLOCK();
#endif
    seq = evp->seq;
    while (seq == evp->seq) {
#ifdef AFS_DARWIN80_ENV
	evp->owner = 0;
	msleep(event, evp->lck, PVFS, "afs_osi_Sleep", NULL);
	evp->owner = current_thread();
#else
	AFS_ASSERT_GLOCK();
	AFS_GUNLOCK();
	/* this is probably safe for all versions, but testing is hard */
	sleep(event, PVFS);
	AFS_GLOCK();
#endif
    }
    relevent(evp);
#ifdef AFS_DARWIN80_ENV
    AFS_GLOCK();
#endif
}
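
The seq loop above only exits once another thread advances evp->seq before issuing the wakeup. As a point of reference, here is a minimal sketch of that wake side, reconstructed only from the seq/refcount fields and helpers used in these snippets; the real per-port afs_osi_Wakeup differs in its wakeup primitive and return handling:

int
afs_osi_Wakeup(void *event)
{
    struct afs_event *evp;

    AFS_ASSERT_GLOCK();
    evp = afs_getevent(event);
    if (evp->refcount > 1) {        /* somebody is actually sleeping on this event */
        evp->seq++;                 /* breaks the while (seq == evp->seq) loop */
        wakeup(event);              /* placeholder for the platform wakeup primitive */
    }
    relevent(evp);
    return 0;
}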
Example #2
/* afs_osi_Wait
 * Waits for data on ahandle, or ams ms later.  ahandle may be null.
 * Returns 0 if timeout and EINTR if signalled.
 */
int
afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
{
    int code;
    afs_int32 endTime, tid;

    AFS_STATCNT(osi_Wait);
    endTime = osi_Time() + (ams / 1000);
    if (ahandle)
	ahandle->proc = (caddr_t) thread_self();
    do {
	AFS_ASSERT_GLOCK();
	code = 0;
	code = afs_osi_TimedSleep(&waitV, ams, aintok);

	if (code)
	    break;		/* if something happened, quit now */
	/* if we were cancelled, quit now */
	if (ahandle && (ahandle->proc == (caddr_t) 0)) {
	    /* we've been signalled */
	    break;
	}
    } while (osi_Time() < endTime);
    return code;
}
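
For context, afs_osi_Wait() is normally used as a bounded delay inside a background loop: the caller sleeps up to ams milliseconds but is released early if someone wakes &waitV or clears ahandle->proc (the "cancelled" check above). A hypothetical caller sketch; the daemon flag, work function, and handle name are illustrative only:

static struct afs_osi_WaitHandle exampleHandle;     /* illustrative */

static void
ExampleDaemon(void)
{
    while (daemon_running) {                        /* illustrative flag */
        do_periodic_work();                         /* illustrative */
        /* sleep ~30 s, returning early on wakeup or cancellation */
        afs_osi_Wait(30 * 1000, &exampleHandle, 0);
    }
}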
Example #3
/* afs_osi_SleepSig
 *
 * Waits for an event to be notified, returning early if a signal
 * is received.  Returns EINTR if signaled, and 0 otherwise.
 */
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, retval;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    seq = evp->seq;
    retval = 0;

    add_wait_queue(&evp->cond, &wait);
    while (seq == evp->seq) {
	set_current_state(TASK_INTERRUPTIBLE);
	AFS_ASSERT_GLOCK();
	AFS_GUNLOCK();
	schedule();
#ifdef CONFIG_PM
	if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
            test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
            test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	    refrigerator(PF_FREEZE);
#else
	    refrigerator();
#endif
#endif
	AFS_GLOCK();
	if (signal_pending(current)) {
	    retval = EINTR;
	    break;
	}
    }
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);
    return retval;
}
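
Because afs_osi_SleepSig() can return EINTR without the awaited condition ever becoming true, callers normally re-test their predicate in a loop and decide whether to propagate the interrupt. A hypothetical caller sketch; the queue type and predicate are illustrative only:

static int
WaitForWork(struct example_queue *q)    /* illustrative type */
{
    int code;

    AFS_ASSERT_GLOCK();
    while (example_queue_IsEmpty(q)) {  /* illustrative predicate */
        code = afs_osi_SleepSig(q);     /* sleep on the queue's address */
        if (code == EINTR)
            return EINTR;               /* interrupted by a signal */
    }
    return 0;
}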
Example #4
/* Get and initialize event structure corresponding to lwp event (i.e. address)
 * */
static afs_event_t *
afs_getevent(char *event)
{
    afs_event_t *evp, *oevp, *newp = 0;
    int hashcode;

    AFS_ASSERT_GLOCK();
    hashcode = afs_evhash(event);
    evp = afs_evhasht[hashcode];
    while (evp) {
	EVTLOCK_LOCK(evp);
	if (evp->event == event) {
	    evp->refcount++;
	    return evp;
	}
	if (evp->refcount == 0)
	    newp = evp;
	EVTLOCK_UNLOCK(evp);
	evp = evp->next;
    }
    if (!newp) {
	newp = (afs_event_t *) osi_AllocSmallSpace(sizeof(afs_event_t));
	afs_evhashcnt++;
	newp->next = afs_evhasht[hashcode];
	afs_evhasht[hashcode] = newp;
	newp->seq = 0;
	EVTLOCK_INIT(newp);
    }
    EVTLOCK_LOCK(newp);
    newp->event = event;
    newp->refcount = 1;
    return newp;
}
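
Every successful afs_getevent() in this port returns with the event's refcount raised and its EVTLOCK held, so each call must be paired with relevent(). A minimal sketch of what that release helper is assumed to do, inferred only from how it is used in these snippets:

/* assumed counterpart to afs_getevent(): drop the reference and the per-event lock */
static void
relevent(afs_event_t *evp)
{
    evp->refcount--;
    EVTLOCK_UNLOCK(evp);
}

In the ports without EVTLOCK (Examples #7 through #9) the same helper would presumably just drop the refcount.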
Example #5
/* allocate space for sender */
void *
osi_AllocLargeSpace(size_t size)
{
    struct osi_packet *tp;

    AFS_ASSERT_GLOCK();

    AFS_STATCNT(osi_AllocLargeSpace);
    if (size > AFS_LRALLOCSIZ)
	osi_Panic("osi_AllocLargeSpace: size=%d\n", (int)size);
    afs_stats_cmperf.LargeBlocksActive++;
    if (!freePacketList) {
	char *p;

	afs_stats_cmperf.LargeBlocksAlloced++;
	p = afs_osi_Alloc(AFS_LRALLOCSIZ);
#ifdef  KERNEL_HAVE_PIN
	/*
	 * Need to pin this memory since under heavy conditions this memory
	 * could be swapped out; the problem is that we could be inside rx where
	 * interrupts are disabled and thus we would panic if we don't pin it.
	 */
	pin(p, AFS_LRALLOCSIZ);
#endif
	return p;
    }
    ObtainWriteLock(&osi_flplock, 324);
    tp = freePacketList;
    if (tp)
	freePacketList = tp->next;
    ReleaseWriteLock(&osi_flplock);
    return (char *)tp;
}
Example #6
/*!
 * \brief Query the AFSDB handler and wait for response.
 * \param  acellName
 * \return 0 for success; ENOENT on error.
 */
static int
afs_GetCellHostsAFSDB(char *acellName)
{
    AFS_ASSERT_GLOCK();
    if (!afsdb_handler_running)
	return ENOENT;

    ObtainWriteLock(&afsdb_client_lock, 685);
    ObtainWriteLock(&afsdb_req_lock, 686);

    afsdb_req.cellname = acellName;

    afsdb_req.complete = 0;
    afsdb_req.pending = 1;
    afs_osi_Wakeup(&afsdb_req);
    ConvertWToRLock(&afsdb_req_lock);

    while (afsdb_handler_running && !afsdb_req.complete) {
	ReleaseReadLock(&afsdb_req_lock);
	afs_osi_Sleep(&afsdb_req);
	ObtainReadLock(&afsdb_req_lock);
    }

    ReleaseReadLock(&afsdb_req_lock);
    ReleaseWriteLock(&afsdb_client_lock);

    if (afsdb_req.cellname) {
	return 0;
    } else
	return ENOENT;
}
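
afs_GetCellHostsAFSDB() is the requesting half of a sleep/wakeup handshake: it publishes the cell name, wakes the handler, then sleeps on &afsdb_req until complete is set or the handler stops. A rough sketch of the answering half, built only from the fields and primitives visible in this snippet; this is not the real afs_AFSDBHandler and it glosses over the handler's actual locking and upcall machinery:

/* hypothetical handler loop (heavily simplified) */
static void
ExampleAFSDBHandler(void)
{
    while (afsdb_handler_running) {
        ObtainWriteLock(&afsdb_req_lock, 0);        /* lock id 0 is a placeholder */
        if (afsdb_req.pending) {
            if (resolve_cell(afsdb_req.cellname))   /* illustrative resolver */
                afsdb_req.cellname = NULL;          /* failure, per the caller's final check */
            afsdb_req.pending = 0;
            afsdb_req.complete = 1;
            afs_osi_Wakeup(&afsdb_req);             /* unblock afs_GetCellHostsAFSDB() */
        }
        ReleaseWriteLock(&afsdb_req_lock);
        afs_osi_Sleep(&afsdb_req);                  /* wait for the next request */
    }
}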
Example #7
/* Get and initialize event structure corresponding to lwp event (i.e. address)
 * */
static afs_event_t *
afs_getevent(char *event)
{
    afs_event_t *evp, *newp = 0;
    int hashcode;

    AFS_ASSERT_GLOCK();
    hashcode = afs_evhash(event);
    evp = afs_evhasht[hashcode];
    while (evp) {
	if (evp->event == event) {
	    evp->refcount++;
	    return evp;
	}
	if (evp->refcount == 0)
	    newp = evp;
	evp = evp->next;
    }
    if (!newp)
	return NULL;

    newp->event = event;
    newp->refcount = 1;
    return newp;
}
Example #8
/* Get and initialize event structure corresponding to lwp event (i.e. address)
 * */
static afs_event_t *
afs_getevent(char *event)
{
    afs_event_t *evp, *newp = 0;
    int hashcode;

    AFS_ASSERT_GLOCK();
    hashcode = afs_evhash(event);
    evp = afs_evhasht[hashcode];
    while (evp) {
	if (evp->event == event) {
	    evp->refcount++;
	    return evp;
	}
	if (evp->refcount == 0)
	    newp = evp;
	evp = evp->next;
    }
    if (!newp) {
	newp = (afs_event_t *) xmalloc(sizeof(afs_event_t), 5, pinned_heap);
	afs_evhashcnt++;
	newp->next = afs_evhasht[hashcode];
	afs_evhasht[hashcode] = newp;
	newp->cond = EVENT_NULL;
	newp->seq = 0;
    }
    newp->event = event;
    newp->refcount = 1;
    return newp;
}
Example #9
/* Get and initialize event structure corresponding to lwp event (i.e. address)
 * */
static afs_event_t *
afs_getevent(char *event)
{
    afs_event_t *evp, *newp = 0;
    int hashcode;

    AFS_ASSERT_GLOCK();
    hashcode = afs_evhash(event);
    evp = afs_evhasht[hashcode];
    while (evp) {
	if (evp->event == event) {
	    evp->refcount++;
	    return evp;
	}
	if (evp->refcount == 0)
	    newp = evp;
	evp = evp->next;
    }
    if (!newp) {
	newp = osi_AllocSmallSpace(sizeof(afs_event_t));
	afs_evhashcnt++;
	newp->next = afs_evhasht[hashcode];
	afs_evhasht[hashcode] = newp;
	cv_init(&newp->cond, "event cond var", CV_DEFAULT, NULL);
	newp->seq = 0;
    }
    newp->event = event;
    newp->refcount = 1;
    return newp;
}
Example #10
/* afs_osi_TimedSleep
 * 
 * Arguments:
 * event - event to sleep on
 * ams --- max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout and EINTR if signalled.
 */
int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    struct afs_event *evp;
    clock_t ticks;

    ticks = (ams * afs_hz) / 1000;
#if defined(AFS_SUN510_ENV)
    ticks = ticks + ddi_get_lbolt();
#else
    ticks = ticks + lbolt;
#endif

    evp = afs_getevent(event);

    AFS_ASSERT_GLOCK();
    if (aintok) {
	if (cv_timedwait_sig(&evp->cond, &afs_global_lock, ticks) == 0)
	    code = EINTR;
    } else {
	cv_timedwait(&evp->cond, &afs_global_lock, ticks);
    }

    relevent(evp);
    return code;
}
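
As a concrete check of the conversion above: with afs_hz == 100 (a common tick rate), a 250 ms sleep becomes (250 * 100) / 1000 = 25 ticks, and adding the current lbolt (or ddi_get_lbolt()) value turns that into the absolute wakeup time that cv_timedwait() and cv_timedwait_sig() expect.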
Example #11
void
afs_osi_Sleep(void *event)
{
    AFS_ASSERT_GLOCK();
    AFS_GUNLOCK();
    tsleep(event, PVFS, "afsslp", 0);
    AFS_GLOCK();
}
Example #12
void
osi_FreeSmallSpace(void *adata)
{

    AFS_ASSERT_GLOCK();

    AFS_STATCNT(osi_FreeSmallSpace);
    afs_stats_cmperf.SmallBlocksActive--;
    ObtainWriteLock(&osi_fsplock, 323);
    ((struct osi_packet *)adata)->next = freeSmallList;
    freeSmallList = adata;
    ReleaseWriteLock(&osi_fsplock);
}
Example #13
void
osi_FreeLargeSpace(void *adata)
{

    AFS_ASSERT_GLOCK();

    AFS_STATCNT(osi_FreeLargeSpace);
    afs_stats_cmperf.LargeBlocksActive--;
    ObtainWriteLock(&osi_flplock, 322);
    ((struct osi_packet *)adata)->next = freePacketList;
    freePacketList = adata;
    ReleaseWriteLock(&osi_flplock);
}
Example #14
void
afs_osi_Sleep(void *event)
{
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	cv_wait(&evp->cond, &afs_global_lock);
    }
    relevent(evp);
}
Example #15
static void
afs_addevent(char *event)
{
    int hashcode;
    afs_event_t *newp;

    AFS_ASSERT_GLOCK();
    hashcode = afs_evhash(event);
    newp = kzalloc(sizeof(afs_event_t), GFP_NOFS);
    afs_evhashcnt++;
    newp->next = afs_evhasht[hashcode];
    afs_evhasht[hashcode] = newp;
    init_waitqueue_head(&newp->cond);
    newp->event = &dummyV;	/* Dummy address for new events */
}
Example #16
void
afs_osi_Sleep(void *event)
{
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	assert_wait((vm_offset_t) (&evp->cond), 0);
	AFS_GUNLOCK();
	thread_block();
	AFS_GLOCK();
    }
    relevent(evp);
}
Example #17
void
afs_osi_Sleep(void *event)
{
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	e_assert_wait(&evp->cond, 0);
	AFS_GUNLOCK();
	e_block_thread();
	AFS_GLOCK();
    }
    relevent(evp);
}
Example #18
static void
afs_addevent(char *event)
{
    int hashcode;
    afs_event_t *newp;

    AFS_ASSERT_GLOCK();
    hashcode = afs_evhash(event);
    newp = osi_linux_alloc(sizeof(afs_event_t), 0);
    afs_evhashcnt++;
    newp->next = afs_evhasht[hashcode];
    afs_evhasht[hashcode] = newp;
    init_waitqueue_head(&newp->cond);
    newp->seq = 0;
    newp->event = &dummyV;	/* Dummy address for new events */
    newp->refcount = 0;
}
Example #19
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, code = 0;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	if (cv_wait_sig(&evp->cond, &afs_global_lock) == 0) {
	    code = EINTR;
	    break;
	}
    }
    relevent(evp);
    return code;
}
Example #20
/* afs_osi_SleepSig
 *
 * Waits for an event to be notified, returning early if a signal
 * is received.  Returns EINTR if signaled, and 0 otherwise.
 */
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, retval;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    seq = evp->seq;
    retval = 0;

    add_wait_queue(&evp->cond, &wait);
    while (seq == evp->seq) {
	set_current_state(TASK_INTERRUPTIBLE);
	AFS_ASSERT_GLOCK();
	AFS_GUNLOCK();
	schedule();
	try_to_freeze();

	AFS_GLOCK();
	if (signal_pending(current)) {
	    retval = EINTR;
	    break;
	}
    }
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);
    return retval;
}
Example #21
/* afs_osi_Wait
 * Waits for data on ahandle, or ams ms later.  ahandle may be null.
 * Returns 0 if timeout and EINTR if signalled.
 */
int
afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
{
    int timo, code = 0;
    struct timeval atv, now, endTime;

    AFS_STATCNT(osi_Wait);

    atv.tv_sec = ams / 1000;
    atv.tv_usec = (ams % 1000) * 1000;
    getmicrotime(&now);
    timeradd(&atv, &now, &endTime);

    if (ahandle)
	ahandle->proc = (caddr_t) curproc;
    AFS_ASSERT_GLOCK();
    AFS_GUNLOCK();

    do {
	timersub(&endTime, &now, &atv);
	timo = atv.tv_sec * hz + atv.tv_usec * hz / 1000000 + 1;
	if (aintok) {
	    code = tsleep(&waitV, PCATCH | PVFS, "afs_W1", timo);
	    if (code)
		code = (code == EWOULDBLOCK) ? 0 : EINTR;
	} else
	    tsleep(&waitV, PVFS, "afs_W2", timo);

	/* if we were cancelled, quit now */
	if (ahandle && (ahandle->proc == NULL)) {
	    /* we've been signalled */
	    break;
	}
	getmicrotime(&now);
    } while (timercmp(&now, &endTime, <));

    AFS_GLOCK();
    return code;
}
Example #22
/* afs_osi_Wait
 * Waits for data on ahandle, or ams ms later.  ahandle may be null.
 * Returns 0 if timeout and EINTR if signalled.
 */
int
afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
{
    afs_int32 endTime;
    int code;

    AFS_STATCNT(osi_Wait);
    endTime = osi_Time() + (ams / 1000);
    if (ahandle)
	ahandle->proc = (caddr_t) current;

    do {
	AFS_ASSERT_GLOCK();
	code = afs_osi_TimedSleep(&waitV, ams, 1);
	if (code)
	    break;
	if (ahandle && (ahandle->proc == (caddr_t) 0)) {
	    /* we've been signalled */
	    break;
	}
    } while (osi_Time() < endTime);
    return code;
}
Example #23
static int
ClearCallBack(struct rx_connection *a_conn,
	      struct AFSFid *a_fid)
{
    struct vcache *tvc;
    int i;
    struct VenusFid localFid;
    struct volume *tv;
#ifdef AFS_DARWIN80_ENV
    vnode_t vp;
#endif

    AFS_STATCNT(ClearCallBack);

    AFS_ASSERT_GLOCK();

    /*
     * XXXX Don't hold any server locks here because of callback protocol XXX
     */
    localFid.Cell = 0;
    localFid.Fid.Volume = a_fid->Volume;
    localFid.Fid.Vnode = a_fid->Vnode;
    localFid.Fid.Unique = a_fid->Unique;

    /*
     * Volume ID of zero means don't do anything.
     */
    if (a_fid->Volume != 0) {
	if (a_fid->Vnode == 0) {
		struct afs_q *tq, *uq;
	    /*
	     * Clear callback for the whole volume.  Zip through the
	     * hash chain, nullifying entries whose volume ID matches.
	     */
loop1:
		ObtainReadLock(&afs_xvcache);
		i = VCHashV(&localFid);
		for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
		    uq = QPrev(tq);
		    tvc = QTOVH(tq);
		    if (tvc->f.fid.Fid.Volume == a_fid->Volume) {
			tvc->callback = NULL;
			if (!localFid.Cell)
			    localFid.Cell = tvc->f.fid.Cell;
			tvc->dchint = NULL;	/* invalidate hints */
			if (tvc->f.states & CVInit) {
			    ReleaseReadLock(&afs_xvcache);
			    afs_osi_Sleep(&tvc->f.states);
			    goto loop1;
			}
#if     defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)  || defined(AFS_HPUX_ENV) || defined(AFS_LINUX20_ENV)
			AFS_FAST_HOLD(tvc);
#else
#ifdef AFS_DARWIN80_ENV
			if (tvc->f.states & CDeadVnode) {
			    if (!(tvc->f.states & CBulkFetching)) {
				ReleaseReadLock(&afs_xvcache);
				afs_osi_Sleep(&tvc->f.states);
				goto loop1;
			    }
			}
			vp = AFSTOV(tvc);
			if (vnode_get(vp))
			    continue;
			if (vnode_ref(vp)) {
			    AFS_GUNLOCK();
			    vnode_put(vp);
			    AFS_GLOCK();
			    continue;
			}
			if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
			    AFS_GUNLOCK();
			    vnode_recycle(AFSTOV(tvc));
			    AFS_GLOCK();
			}
#else
			AFS_FAST_HOLD(tvc);
#endif
#endif
			ReleaseReadLock(&afs_xvcache);
			ObtainWriteLock(&afs_xcbhash, 449);
			afs_DequeueCallback(tvc);
			tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
			afs_allCBs++;
			if (tvc->f.fid.Fid.Vnode & 1)
			    afs_oddCBs++;
			else
			    afs_evenCBs++;
			ReleaseWriteLock(&afs_xcbhash);
			if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
			    osi_dnlc_purgedp(tvc);
			afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
				   ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
				   tvc->f.states, ICL_TYPE_INT32,
				   a_fid->Volume);
#ifdef AFS_DARWIN80_ENV
			vnode_put(AFSTOV(tvc));
#endif
			ObtainReadLock(&afs_xvcache);
			uq = QPrev(tq);
			AFS_FAST_RELE(tvc);
		    } else if ((tvc->f.states & CMValid)
			       && (tvc->mvid->Fid.Volume == a_fid->Volume)) {
			tvc->f.states &= ~CMValid;
			if (!localFid.Cell)
			    localFid.Cell = tvc->mvid->Cell;
		    }
		}
		ReleaseReadLock(&afs_xvcache);

	    /*
	     * XXXX Don't hold any locks here XXXX
	     */
	    tv = afs_FindVolume(&localFid, 0);
	    if (tv) {
		afs_ResetVolumeInfo(tv);
		afs_PutVolume(tv, 0);
		/* invalidate mtpoint? */
	    }
	} /*Clear callbacks for whole volume */
	else {
	    /*
	     * Clear callbacks just for the one file.
	     */
	    struct vcache *uvc;
	    afs_allCBs++;
	    if (a_fid->Vnode & 1)
		afs_oddCBs++;	/*Could do this on volume basis, too */
	    else
		afs_evenCBs++;	/*A particular fid was specified */
loop2:
	    ObtainReadLock(&afs_xvcache);
	    i = VCHash(&localFid);
	    for (tvc = afs_vhashT[i]; tvc; tvc = uvc) {
		uvc = tvc->hnext;
		if (tvc->f.fid.Fid.Vnode == a_fid->Vnode
		    && tvc->f.fid.Fid.Volume == a_fid->Volume
		    && tvc->f.fid.Fid.Unique == a_fid->Unique) {
		    tvc->callback = NULL;
		    tvc->dchint = NULL;	/* invalidate hints */
		    if (tvc->f.states & CVInit) {
			ReleaseReadLock(&afs_xvcache);
			afs_osi_Sleep(&tvc->f.states);
			goto loop2;
		    }
#if     defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)  || defined(AFS_HPUX_ENV) || defined(AFS_LINUX20_ENV)
		    AFS_FAST_HOLD(tvc);
#else
#ifdef AFS_DARWIN80_ENV
		    if (tvc->f.states & CDeadVnode) {
			if (!(tvc->f.states & CBulkFetching)) {
			    ReleaseReadLock(&afs_xvcache);
			    afs_osi_Sleep(&tvc->f.states);
			    goto loop2;
			}
		    }
		    vp = AFSTOV(tvc);
		    if (vnode_get(vp))
			continue;
		    if (vnode_ref(vp)) {
			AFS_GUNLOCK();
			vnode_put(vp);
			AFS_GLOCK();
			continue;
		    }
		    if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
			AFS_GUNLOCK();
			vnode_recycle(AFSTOV(tvc));
			AFS_GLOCK();
		    }
#else
		    AFS_FAST_HOLD(tvc);
#endif
#endif
		    ReleaseReadLock(&afs_xvcache);
		    ObtainWriteLock(&afs_xcbhash, 450);
		    afs_DequeueCallback(tvc);
		    tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
		    ReleaseWriteLock(&afs_xcbhash);
		    if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
			osi_dnlc_purgedp(tvc);
		    afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
			       ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
			       tvc->f.states, ICL_TYPE_LONG, 0);
#ifdef CBDEBUG
		    lastCallBack_vnode = a_fid->Vnode;
		    lastCallBack_dv = tvc->mstat.DataVersion.low;
		    osi_GetuTime(&lastCallBack_time);
#endif /* CBDEBUG */
#ifdef AFS_DARWIN80_ENV
		    vnode_put(AFSTOV(tvc));
#endif
		    ObtainReadLock(&afs_xvcache);
		    uvc = tvc->hnext;
		    AFS_FAST_RELE(tvc);
		}
	    }			/*Walk through hash table */
	    ReleaseReadLock(&afs_xvcache);
	}			/*Clear callbacks for one file */
    }				/*Fid has non-zero volume ID */

    /*
     * Always return a predictable value.
     */
    return (0);

}				/*ClearCallBack */
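
Note how ClearCallBack() reuses the sleep primitive from the earlier examples: when it finds a vcache that is still initializing (CVInit) or being torn down (CDeadVnode), it drops afs_xvcache, sleeps on the address &tvc->f.states, and restarts the scan. The matching wake presumably comes from the vcache setup/teardown path once those state bits change; a hypothetical fragment sketch of that side, not the actual vcache code:

/* hypothetical: end of vcache initialization */
tvc->f.states &= ~CVInit;
afs_osi_Wakeup(&tvc->f.states);    /* release threads parked in loops like loop1/loop2 above */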