Example #1
0
/*
 * Make sure dynroot initialization has been done.
 */
int
afs_InitDynroot(void)
{
    if (afs_dynrootInit)
	return 0;
    AFS_RWLOCK_INIT(&afs_dynrootDirLock, "afs_dynrootDirLock");
    AFS_RWLOCK_INIT(&afs_dynSymlinkLock, "afs_dynSymlinkLock");
    afs_dynrootInit = 1;
    return afs_dynrootCellInit();
}
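
The guard at the top of afs_InitDynroot only works because the flag becomes non-zero once setup has run; with that in place, repeat calls are cheap no-ops. A minimal standalone sketch of the same run-once idiom, with hypothetical names (module_inited, module_init):

#include <stdio.h>

/* Hypothetical module state, mirroring the afs_dynrootInit guard above. */
static int module_inited = 0;

static int
module_init(void)
{
    if (module_inited)
	return 0;		/* repeat calls become cheap no-ops */
    /* ... one-time setup (lock creation, table allocation) ... */
    module_inited = 1;		/* must become non-zero, or the guard never fires */
    return 0;
}

int
main(void)
{
    module_init();
    module_init();		/* second call returns immediately */
    printf("inited = %d\n", module_inited);
    return 0;
}
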
Example #2
0
/*!
 * Perform whatever initialization is necessary.
 */
void
afs_CellInit(void)
{
    AFS_RWLOCK_INIT(&afs_xcell, "afs_xcell");
    AFS_RWLOCK_INIT(&afsdb_client_lock, "afsdb_client_lock");
    AFS_RWLOCK_INIT(&afsdb_req_lock, "afsdb_req_lock");
    QInit(&CellLRU);

    afs_cellindex = 0;
    afs_cellalias_index = 0;
}
Example #3
0
void
osi_PrePopulateVCache(struct vcache *avc)
{
    memset(avc, 0, sizeof(struct vcache));

    AFS_RWLOCK_INIT(&avc->vlock, "vcache vlock");

    rw_init(&avc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);

#if defined(AFS_SUN55_ENV)
    /* This is required if the kaio (kernel asynchronous io)
     ** module is installed. Inside the kernel, the function
     ** check_vp() (common/os/aio.c) checks to see if the kernel has
     ** to provide asynchronous io for this vnode. This
     ** function extracts the device number by following the
     ** v_data field of the vnode. If we do not set this field
     ** then the system panics. The value of the v_data field
     ** is not really important for AFS vnodes because the kernel
     ** does not do asynchronous io for regular files. Hence,
     ** for the time being, we fill up the v_data field with the
     ** vnode pointer itself. */
    avc->v.v_data = (char *)avc;
#endif /* AFS_SUN55_ENV */

#if defined(AFS_BOZONLOCK_ENV)
    afs_BozonInit(&avc->pvnLock, avc);
#endif
}
Example #4
0
/*!
 * Called on shutdown, should deallocate memory, etc.
 */
void
shutdown_cell(void)
{
    struct afs_q *cq, *tq;
    struct cell *tc;

#ifdef AFS_CACHE_VNODE_PATH
    if (cacheDiskType != AFS_FCACHE_TYPE_MEM) {
	afs_osi_FreeStr(afs_cellname_inode.ufs);
    }
#endif
    AFS_RWLOCK_INIT(&afs_xcell, "afs_xcell");

    for (cq = CellLRU.next; cq != &CellLRU; cq = tq) {
	tc = QTOC(cq);
	tq = QNext(cq);
	if (tc->cellName)
	    afs_osi_FreeStr(tc->cellName);
	afs_osi_Free(tc, sizeof(struct cell));
    }
    QInit(&CellLRU);

    {
	struct cell_name *cn = afs_cellname_head;

	while (cn) {
	    struct cell_name *next = cn->next;

	    afs_osi_FreeStr(cn->cellname);
	    afs_osi_Free(cn, sizeof(struct cell_name));
	    cn = next;
	}
    }
}
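
Both teardown loops in shutdown_cell use the save-next idiom: the successor pointer is captured before the current node is freed, so the traversal never dereferences freed memory. A self-contained sketch of the idiom on a plain singly linked list (struct node and free_chain are hypothetical names):

#include <stdlib.h>

struct node {
    struct node *next;
    char *name;
};

/* Free an entire chain safely: read n->next before n itself is freed. */
static void
free_chain(struct node *head)
{
    struct node *n = head;

    while (n) {
	struct node *next = n->next;	/* save the successor first */
	free(n->name);
	free(n);
	n = next;			/* the freed node is never touched again */
    }
}

int
main(void)
{
    struct node *a = calloc(1, sizeof(*a));
    struct node *b = calloc(1, sizeof(*b));

    if (a && b) {
	a->next = b;
	free_chain(a);		/* frees b, then... no: b's successor is saved first */
    }
    return 0;
}
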
Example #5
0
int
init_module(void)
{
    int err;
    AFS_RWLOCK_INIT(&afs_xosi, "afs_xosi");

#if !defined(AFS_LINUX24_ENV)
    /* obtain PAGE_OFFSET value */
    afs_linux_page_offset = get_page_offset();

#ifndef AFS_S390_LINUX22_ENV
    if (afs_linux_page_offset == 0) {
	/* couldn't obtain page offset so can't continue */
	printf("afs: Unable to obtain PAGE_OFFSET. Exiting..");
	return -EIO;
    }
#endif /* AFS_S390_LINUX22_ENV */
#endif /* !defined(AFS_LINUX24_ENV) */

    osi_Init();

#ifndef LINUX_KEYRING_SUPPORT
    err = osi_syscall_init();
    if (err)
	return err;
#endif
    err = afs_init_inodecache();
    if (err) {
#ifndef LINUX_KEYRING_SUPPORT
	osi_syscall_clean();
#endif
	return err;
    }
    err = register_filesystem(&afs_fs_type);
    if (err) {
	afs_destroy_inodecache();
#ifndef LINUX_KEYRING_SUPPORT
	osi_syscall_clean();
#endif
	return err;
    }

    osi_sysctl_init();
#ifdef LINUX_KEYRING_SUPPORT
    osi_keyring_init();
#endif
#ifdef AFS_LINUX24_ENV
    osi_proc_init();
    osi_ioctl_init();
#endif
#if defined(AFS_CACHE_BYPASS)
    afs_warn("Cache bypass patched libafs module init.\n");
#endif
    afs_init_pagecopy();

    return 0;
}
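
Each failure branch in init_module must undo exactly the steps that already succeeded, which is why osi_syscall_clean() and afs_destroy_inodecache() are repeated in the later branches. A common way to centralize that unwind is the staged-goto idiom; a hedged sketch with hypothetical step names standing in for the real init calls:

#include <stdio.h>

/* Hypothetical init steps standing in for osi_syscall_init(),
 * afs_init_inodecache(), register_filesystem() above. */
static int  step_a(void) { return 0; }
static int  step_b(void) { return 0; }
static int  step_c(void) { return -1; }	/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int
module_start(void)
{
    int err;

    err = step_a();
    if (err)
	return err;
    err = step_b();
    if (err)
	goto fail_b;
    err = step_c();
    if (err)
	goto fail_c;
    return 0;

  fail_c:
    undo_b();			/* unwind strictly in reverse order of setup */
  fail_b:
    undo_a();
    return err;
}

int
main(void)
{
    return module_start() ? 1 : 0;
}
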
Example #6
0
/*!
 * Perform whatever initialization is necessary.
 */
void
afs_CellInit(void)
{
    static char CellInit_done = 0;

    if (CellInit_done)
	return;

    CellInit_done = 1;

    AFS_RWLOCK_INIT(&afs_xcell, "afs_xcell");
    AFS_RWLOCK_INIT(&afsdb_client_lock, "afsdb_client_lock");
    AFS_RWLOCK_INIT(&afsdb_req_lock, "afsdb_req_lock");
    QInit(&CellLRU);

    afs_cellindex = 0;
    afs_cellalias_index = 0;
}
Example #7
0
void
DInit(int abuffers)
{
    /* Initialize the venus buffer system. */
    register int i;
    register struct buffer *tb;
#if defined(AFS_USEBUFFERS)
    struct buf *tub;		/* unix buffer for allocation */
#endif

    AFS_STATCNT(DInit);
    if (dinit_flag)
	return;
    dinit_flag = 1;
#if defined(AFS_USEBUFFERS)
    /* round up to next multiple of NPB, since we allocate multiple pages per chunk */
    abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
#endif
    LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
    Buffers =
	(struct buffer *)afs_osi_Alloc(abuffers * sizeof(struct buffer));
#if !defined(AFS_USEBUFFERS)
    BufferData = (char *)afs_osi_Alloc(abuffers * AFS_BUFFER_PAGESIZE);
#endif
    timecounter = 1;
    afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
    for (i = 0; i < PHSIZE; i++)
	phTable[i] = 0;
    for (i = 0; i < abuffers; i++) {
#if defined(AFS_USEBUFFERS)
	if ((i & (NPB - 1)) == 0) {
	    /* time to allocate a fresh buffer */
	    tub = geteblk(AFS_BUFFER_PAGESIZE * NPB);
	    BufferData = (char *)tub->b_un.b_addr;
	}
#endif
	/* Fill in each buffer with an empty indication. */
	tb = &Buffers[i];
	tb->fid = NULLIDX;
	tb->inode = 0;
	tb->accesstime = 0;
	tb->lockers = 0;
#if defined(AFS_USEBUFFERS)
	if ((i & (NPB - 1)) == 0)
	    tb->bufp = tub;
	else
	    tb->bufp = 0;
	tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
#else
	tb->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
#endif
	tb->hashIndex = 0;
	tb->dirty = 0;
	AFS_RWLOCK_INIT(&tb->lock, "buffer lock");
    }
    return;
}
Example #8
0
void
DInit(int abuffers)
{
    /* Initialize the venus buffer system. */
    int i;
    struct buffer *tb;

    AFS_STATCNT(DInit);
    if (dinit_flag)
	return;
    dinit_flag = 1;
    /* round up to next multiple of NPB, since we allocate multiple pages per chunk */
    abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
    afs_max_buffers = abuffers << 2;		/* possibly grow up to 4 times as big */
    LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
    Buffers = afs_osi_Alloc(afs_max_buffers * sizeof(struct buffer));
    osi_Assert(Buffers != NULL);
    timecounter = 1;
    afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
    for (i = 0; i < PHSIZE; i++)
	phTable[i] = 0;
    for (i = 0; i < abuffers; i++) {
	if ((i & (NPB - 1)) == 0) {
	    /* time to allocate a fresh buffer */
	    BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
	    osi_Assert(BufferData != NULL);
	}
	/* Fill in each buffer with an empty indication. */
	tb = &Buffers[i];
	tb->fid = NULLIDX;
	afs_reset_inode(&tb->inode);
	tb->accesstime = 0;
	tb->lockers = 0;
	tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
	tb->hashIndex = 0;
	tb->dirty = 0;
	AFS_RWLOCK_INIT(&tb->lock, "buffer lock");
    }
    return;
}
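
The round-up in both versions of DInit, ((abuffers - 1) | (NPB - 1)) + 1, relies on NPB being a power of two: OR-ing with NPB - 1 sets all the low bits, and the final + 1 carries into the next multiple. A small self-checking sketch of that identity (the NPB value here is assumed for illustration):

#include <assert.h>
#include <stdio.h>

#define NPB 4	/* assumed power of two, as in the buffer code above */

/* Round n up to the next multiple of NPB (NPB must be a power of two). */
static int
round_up(int n)
{
    return ((n - 1) | (NPB - 1)) + 1;
}

int
main(void)
{
    int n;

    for (n = 1; n <= 16; n++)
	assert(round_up(n) % NPB == 0 && round_up(n) >= n
	       && round_up(n) - n < NPB);
    printf("round_up(5) = %d\n", round_up(5));	/* prints 8 */
    return 0;
}
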
Example #9
0
/**
 * Look up or create the volume structure for the given cell and volume
 * ID, then install the VLDB entry into it.
 *
 * @param volid Volume ID. If it's 0, get it from the name.
 * @param aname Volume name.
 * @param ve Volume entry (a vldbentry, nvldbentry, or uvldbentry, per type).
 * @param tcell The cell containing this volume.
 * @param agood If non-zero, record the volume name in the entry.
 * @param type Type of VLDB entry passed in ve: 0 (vldbentry), 1 (nvldbentry), or 2 (uvldbentry).
 * @param areq Request.
 * @return Volume or NULL if failure.
 */
static struct volume *
afs_SetupVolume(afs_int32 volid, char *aname, void *ve, struct cell *tcell,
		afs_int32 agood, afs_int32 type, struct vrequest *areq)
{
    struct volume *tv;
    struct vldbentry *ove = (struct vldbentry *)ve;
    struct nvldbentry *nve = (struct nvldbentry *)ve;
    struct uvldbentry *uve = (struct uvldbentry *)ve;

    int whichType;		/* which type of volume to look for */
    int i, j, err = 0;

    if (!volid) {
	int len;
	/* special hint from file server to use vlserver */
	len = strlen(aname);
	if (len >= 8 && strcmp(aname + len - 7, ".backup") == 0)
	    whichType = BACKVOL;
	else if (len >= 10 && strcmp(aname + len - 9, ".readonly") == 0)
	    whichType = ROVOL;
	else
	    whichType = RWVOL;

	/* figure out which one we're really interested in (a set is returned) */
	volid = afs_vtoi(aname);
	if (volid == 0) {
	    if (type == 2) {
		volid = uve->volumeId[whichType];
	    } else if (type == 1) {
		volid = nve->volumeId[whichType];
	    } else {
		volid = ove->volumeId[whichType];
	    }
	} /* end of if (volid == 0) */
    } /* end of if (!volid) */


    ObtainWriteLock(&afs_xvolume, 108);
    i = VHash(volid);
    for (tv = afs_volumes[i]; tv; tv = tv->next) {
	if (tv->volume == volid && tv->cell == tcell->cellNum) {
	    break;
	}
    }
    if (!tv) {
	struct fvolume *tf = 0;

	tv = afs_GetVolSlot();
	if (!tv) {
	    ReleaseWriteLock(&afs_xvolume);
	    return NULL;
	}
	memset(tv, 0, sizeof(struct volume));

	for (j = fvTable[FVHash(tcell->cellNum, volid)]; j != 0; j = tf->next) {
	    if (afs_FVIndex != j) {
		struct osi_file *tfile;
	        tfile = osi_UFSOpen(&volumeInode);
		err =
		    afs_osi_Read(tfile, sizeof(struct fvolume) * j,
				 &staticFVolume, sizeof(struct fvolume));
		osi_UFSClose(tfile);
                if (err != sizeof(struct fvolume)) {
                    afs_warn("afs_SetupVolume: error %d reading volumeinfo\n",
                             (int)err);
                    /* put tv back on the free list; the data in it is not valid */
                    tv->next = afs_freeVolList;
                    afs_freeVolList = tv;
                    /* staticFVolume contents are not valid */
                    afs_FVIndex = -1;
                    ReleaseWriteLock(&afs_xvolume);
                    return NULL;
                }
		afs_FVIndex = j;
	    }
	    tf = &staticFVolume;
	    if (tf->cell == tcell->cellNum && tf->volume == volid)
		break;
	}

        tv->cell = tcell->cellNum;
        AFS_RWLOCK_INIT(&tv->lock, "volume lock");
        tv->next = afs_volumes[i];      /* thread into list */
        afs_volumes[i] = tv;
        tv->volume = volid;

	if (tf && (j != 0)) {
	    tv->vtix = afs_FVIndex;
	    tv->mtpoint = tf->mtpoint;
	    tv->dotdot = tf->dotdot;
	    tv->rootVnode = tf->rootVnode;
	    tv->rootUnique = tf->rootUnique;
	} else {
	    tv->vtix = -1;
	    tv->rootVnode = tv->rootUnique = 0;
            afs_GetDynrootMountFid(&tv->dotdot);
            afs_GetDynrootMountFid(&tv->mtpoint);
            tv->mtpoint.Fid.Vnode =
              VNUM_FROM_TYPEID(VN_TYPE_MOUNT, tcell->cellIndex << 2);
            tv->mtpoint.Fid.Unique = volid;
	}
    }
    tv->refCount++;
    tv->states &= ~VRecheck;	/* just checked it */
    tv->accessTime = osi_Time();
    ReleaseWriteLock(&afs_xvolume);
    if (type == 2) {
	LockAndInstallUVolumeEntry(tv, uve, tcell->cellNum, tcell, areq);
    } else if (type == 1)
	LockAndInstallNVolumeEntry(tv, nve, tcell->cellNum);
    else
	LockAndInstallVolumeEntry(tv, ove, tcell->cellNum);
    if (agood) {
	if (!tv->name) {
	    tv->name = afs_osi_Alloc(strlen(aname) + 1);
	    osi_Assert(tv->name != NULL);
	    strcpy(tv->name, aname);
	}
    }
    for (i = 0; i < NMAXNSERVERS; i++) {
	tv->status[i] = not_busy;
    }
    ReleaseWriteLock(&tv->lock);
    return tv;
}
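
afs_SetupVolume follows the usual lookup-or-create shape: hash the key, walk the chain under a write lock, and allocate and thread a new entry at the chain head only when the walk finds nothing. Stripped of the AFS specifics, the skeleton looks like the sketch below (table size, types, and names are hypothetical; locking is elided):

#include <stdlib.h>

#define HSIZE 64

struct entry {
    struct entry *next;
    unsigned key;
    int refcount;
};

static struct entry *table[HSIZE];

static unsigned
hash(unsigned key)
{
    return key % HSIZE;
}

/* Find the entry for key, creating it if absent; the caller is assumed
 * to hold the table's write lock for the whole operation. */
static struct entry *
lookup_or_create(unsigned key)
{
    unsigned i = hash(key);
    struct entry *e;

    for (e = table[i]; e; e = e->next)
	if (e->key == key)
	    break;
    if (!e) {
	e = calloc(1, sizeof(*e));
	if (!e)
	    return NULL;
	e->key = key;
	e->next = table[i];	/* thread onto the chain head */
	table[i] = e;
    }
    e->refcount++;
    return e;
}

int
main(void)
{
    struct entry *e = lookup_or_create(42);
    return e ? 0 : 1;
}
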
Example #10
0
/*!
 * Create or update a cell entry.
 * \param acellName Name of cell.
 * \param acellHosts Array of hosts that this cell has.
 * \param aflags Cell flags.
 * \param linkedcname Name of the cell to link to, used when aflags includes CLinkedCell.
 * \param fsport File server port.
 * \param vlport Volume server port.
 * \param timeout Cell timeout value, 0 means static AFSDB entry.
 * \return 0 on success; EEXIST, EINVAL, or ENOENT on failure.
 */
afs_int32
afs_NewCell(char *acellName, afs_int32 * acellHosts, int aflags,
	    char *linkedcname, u_short fsport, u_short vlport, int timeout)
{
    struct cell *tc, *tcl = 0;
    afs_int32 i, newc = 0, code = 0;

    AFS_STATCNT(afs_NewCell);

    ObtainWriteLock(&afs_xcell, 103);

    tc = afs_FindCellByName_nl(acellName, READ_LOCK);
    if (tc) {
	aflags &= ~CNoSUID;
    } else {
	tc = afs_osi_Alloc(sizeof(struct cell));
	osi_Assert(tc != NULL);
	memset(tc, 0, sizeof(*tc));
	tc->cellName = afs_strdup(acellName);
	tc->fsport = AFS_FSPORT;
	tc->vlport = AFS_VLPORT;
	AFS_MD5_String(tc->cellHandle, tc->cellName, strlen(tc->cellName));
	AFS_RWLOCK_INIT(&tc->lock, "cell lock");
	newc = 1;
	aflags |= CNoSUID;
    }
    ObtainWriteLock(&tc->lock, 688);

    /* If the cell we've found has the correct name but no timeout,
     * and we're called with a non-zero timeout, bail out:  never
     * override static configuration entries with AFSDB ones.
     * One exception: if the original cell entry had no servers,
     * it must get servers from AFSDB.
     */
    if (timeout && !tc->timeout && tc->cellHosts[0]) {
	code = EEXIST;		/* This code is checked for in afs_LookupAFSDB */
	goto bad;
    }

    /* we don't want to keep pinging old vlservers which were down,
     * since they don't matter any more.  It's easier to do this than
     * to remove the server from its various hash tables. */
    for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
	if (!tc->cellHosts[i])
	    break;
	tc->cellHosts[i]->flags &= ~SRVR_ISDOWN;
	tc->cellHosts[i]->flags |= SRVR_ISGONE;
    }

    if (fsport)
	tc->fsport = fsport;
    if (vlport)
	tc->vlport = vlport;

    if (aflags & CLinkedCell) {
	if (!linkedcname) {
	    code = EINVAL;
	    goto bad;
	}
	tcl = afs_FindCellByName_nl(linkedcname, READ_LOCK);
	if (!tcl) {
	    code = ENOENT;
	    goto bad;
	}
	if (tcl->lcellp) {	/* XXX Overwriting if one existed before! XXX */
	    tcl->lcellp->lcellp = (struct cell *)0;
	    tcl->lcellp->states &= ~CLinkedCell;
	}
	tc->lcellp = tcl;
	tcl->lcellp = tc;
    }
    tc->states |= aflags;
    tc->timeout = timeout;

    memset(tc->cellHosts, 0, sizeof(tc->cellHosts));
    for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
	/* Get server for each host and link this cell in.*/
	struct server *ts;
	afs_uint32 temp = acellHosts[i];
	if (!temp)
	    break;
	ts = afs_GetServer(&temp, 1, 0, tc->vlport, WRITE_LOCK, NULL, 0);
	ts->cell = tc;
	ts->flags &= ~SRVR_ISGONE;
	/* Set the server as a host of the new cell. */
	tc->cellHosts[i] = ts;
	afs_PutServer(ts, WRITE_LOCK);
    }
    afs_SortServers(tc->cellHosts, AFS_MAXCELLHOSTS);	/* randomize servers */

    /* New cell: Build and add to LRU cell queue. */
    if (newc) {
	struct cell_name *cn;

	cn = afs_cellname_lookup_name(acellName);
	if (!cn)
	    cn = afs_cellname_new(acellName, 0);

	tc->cnamep = cn;
	tc->cellNum = cn->cellnum;
	tc->cellIndex = afs_cellindex++;
	afs_stats_cmperf.numCellsVisible++;
	QAdd(&CellLRU, &tc->lruq);
    }

    ReleaseWriteLock(&tc->lock);
    ReleaseWriteLock(&afs_xcell);
    afs_PutCell(tc, 0);
    if (!(aflags & CHush))
	afs_DynrootInvalidate();
    return 0;

  bad:
    if (newc) {
	afs_osi_FreeStr(tc->cellName);
	afs_osi_Free(tc, sizeof(struct cell));
    }
    ReleaseWriteLock(&tc->lock);
    ReleaseWriteLock(&afs_xcell);
    return code;
}
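
The precedence rule described in the comment inside afs_NewCell compresses to a single test: an AFSDB update (non-zero timeout) is rejected exactly when the existing entry is static (timeout of 0) and already has at least one server. A sketch of just that predicate, with hypothetical names:

#include <stdio.h>

/* Returns 1 if a new entry with new_timeout may replace an existing
 * entry; static entries (timeout 0) that already have servers win. */
static int
may_override(int new_timeout, int old_timeout, int old_has_servers)
{
    if (new_timeout && !old_timeout && old_has_servers)
	return 0;	/* AFSDB must not clobber a populated static entry */
    return 1;
}

int
main(void)
{
    printf("%d\n", may_override(3600, 0, 1));	/* 0: static entry kept   */
    printf("%d\n", may_override(3600, 0, 0));	/* 1: static but no hosts */
    printf("%d\n", may_override(0, 0, 1));	/* 1: static vs static    */
    return 0;
}
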
Example #11
0
/* lp is a pointer to a fairly old buffer */
static struct buffer *
afs_newslot(struct dcache *adc, afs_int32 apage, struct buffer *lp)
{
    /* Find a usable buffer slot */
    afs_int32 i;
    afs_int32 lt = 0;
    struct buffer *tp;
    struct osi_file *tfile;

    AFS_STATCNT(afs_newslot);
    /* we take a pointer here to a buffer which was at the end of an
     * LRU hash chain.  Odds are, it's one of the older buffers, not
     * one of the newer.  Having an older buffer to start with may
     * permit us to avoid a few of the assignments in the "typical
     * case" for loop below.
     */
    if (lp && (lp->lockers == 0)) {
	lt = lp->accesstime;
    } else {
	lp = NULL;
    }

    /* timecounter might have wrapped, if machine is very very busy
     * and stays up for a long time.  Timecounter mustn't wrap twice
     * (positive->negative->positive) before calling newslot, but that
     * would require 2 billion consecutive cache hits... Anyway, the
     * penalty is only that the cache replacement policy will be
     * almost MRU for the next ~2 billion DReads...  newslot doesn't
     * get called nearly as often as DRead, so in order to avoid the
     * performance penalty of using the hypers, it's worth doing the
     * extra check here every time.  It's probably cheaper than doing
     * hcmp, anyway.  There is a little performance hit resulting from
     * resetting all the access times to 0, but it only happens once
     * every month or so, and the access times will rapidly sort
     * themselves back out after just a few more DReads.
     */
    if (timecounter < 0) {
	timecounter = 1;
	tp = Buffers;
	for (i = 0; i < nbuffers; i++, tp++) {
	    tp->accesstime = 0;
	    if (!lp && !tp->lockers)	/* one is as good as the rest, I guess */
		lp = tp;
	}
    } else {
	/* this is the typical case */
	tp = Buffers;
	for (i = 0; i < nbuffers; i++, tp++) {
	    if (tp->lockers == 0) {
		if (!lp || tp->accesstime < lt) {
		    lp = tp;
		    lt = tp->accesstime;
		}
	    }
	}
    }

    if (lp == 0) {
	/* No unlocked buffers. If still possible, allocate a new increment */
	if (nbuffers + NPB > afs_max_buffers) {
	    /* There are no unlocked buffers -- this used to panic, but that
	     * seems extreme.  To the best of my knowledge, all the callers
	     * of DRead are prepared to handle a zero return.  Some of them
	     * just panic directly, but not all of them. */
	    afs_warn("afs: all buffers locked\n");
	    return 0;
	}

	BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
	osi_Assert(BufferData != NULL);
	for (i = 0; i < NPB; i++) {
	    /* Fill in each buffer with an empty indication. */
	    tp = &Buffers[i + nbuffers];
	    tp->fid = NULLIDX;
	    afs_reset_inode(&tp->inode);
	    tp->accesstime = 0;
	    tp->lockers = 0;
	    tp->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
	    tp->hashIndex = 0;
	    tp->dirty = 0;
	    AFS_RWLOCK_INIT(&tp->lock, "buffer lock");
	}
	lp = &Buffers[nbuffers];
	nbuffers += NPB;
    }

    if (lp->dirty) {
	/* see DFlush for rationale for not getting and locking the dcache */
        tfile = afs_CFileOpen(&lp->inode);
	afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
		       AFS_BUFFER_PAGESIZE);
	lp->dirty = 0;
	afs_CFileClose(tfile);
	AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
    }

    /* Now fill in the header. */
    lp->fid = adc->index;
    afs_copy_inode(&lp->inode, &adc->f.inode);
    lp->page = apage;
    lp->accesstime = timecounter++;
    FixupBucket(lp);		/* move to the right hash bucket */

    return lp;
}
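
In the typical case, afs_newslot picks the unlocked buffer with the smallest accesstime via one linear scan. The selection logic in isolation, with the struct reduced to the two fields that matter (names hypothetical):

#include <stddef.h>

struct buf {
    int lockers;	/* nonzero while someone holds the buffer */
    int accesstime;	/* pseudo-time of last use */
};

/* Return the least-recently-used unlocked buffer, or NULL if all
 * n buffers are locked (the case afs_newslot grows the pool for). */
static struct buf *
pick_lru(struct buf *bufs, int n)
{
    struct buf *best = NULL;
    int i;

    for (i = 0; i < n; i++) {
	if (bufs[i].lockers != 0)
	    continue;		/* skip busy buffers */
	if (!best || bufs[i].accesstime < best->accesstime)
	    best = &bufs[i];
    }
    return best;
}

int
main(void)
{
    struct buf bufs[3] = { {1, 5}, {0, 9}, {0, 2} };
    struct buf *lp = pick_lru(bufs, 3);
    return lp == &bufs[2] ? 0 : 1;	/* lowest unlocked accesstime wins */
}
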
Example #12
0
void
afspag_Init(afs_int32 nfs_server_addr)
{
    struct clientcred ccred;
    struct rmtbulk idata, odata;
    afs_int32 code, err, addr, obuf;
    int i;

    afs_uuid_create(&afs_cb_interface.uuid);

    AFS_GLOCK();

    afs_InitStats();
    rx_Init(htons(7001));

    AFS_STATCNT(afs_ResourceInit);
    AFS_RWLOCK_INIT(&afs_xuser, "afs_xuser");
    AFS_RWLOCK_INIT(&afs_xpagcell, "afs_xpagcell");
    AFS_RWLOCK_INIT(&afs_xpagsys, "afs_xpagsys");
    AFS_RWLOCK_INIT(&afs_icl_lock, "afs_icl_lock");

    afs_resourceinit_flag = 1;
    afs_nfs_server_addr = nfs_server_addr;
    for (i = 0; i < MAXNUMSYSNAMES; i++) {
	afs_sysnamelist[i] = afs_osi_Alloc(MAXSYSNAME);
        osi_Assert(afs_sysnamelist[i] != NULL);
    }
    afs_sysname = afs_sysnamelist[0];
    strcpy(afs_sysname, SYS_NAME);
    afs_sysnamecount = 1;
    afs_sysnamegen++;

    srv_secobj = rxnull_NewServerSecurityObject();
    stats_svc = rx_NewService(0, RX_STATS_SERVICE_ID, "rpcstats", &srv_secobj,
			      1, RXSTATS_ExecuteRequest);
    pagcb_svc = rx_NewService(0, PAGCB_SERVICEID, "pagcb", &srv_secobj,
			      1, PAGCB_ExecuteRequest);
    rx_StartServer(0);

    clt_secobj = rxnull_NewClientSecurityObject();
    rmtsys_conn = rx_NewConnection(nfs_server_addr, htons(7009),
				   RMTSYS_SERVICEID, clt_secobj, 0);

#ifdef RXK_LISTENER_ENV
    afs_start_thread(rxk_Listener,       "Rx Listener");
#endif
    afs_start_thread((void *)rx_ServerProc, "Rx Server Thread");
    afs_start_thread(afs_rxevent_daemon, "Rx Event Daemon");
    afs_start_thread(afs_Daemon,         "AFS PAG Daemon");

    afs_icl_InitLogs();

    AFS_GUNLOCK();

    /* If it's reachable, tell the translator to nuke our creds.
     * We should be more aggressive about making sure this gets done,
     * even if the translator is unreachable when we boot.
     */
    addr = obuf = err = 0;
    idata.rmtbulk_len = sizeof(addr);
    idata.rmtbulk_val = (char *)&addr;
    odata.rmtbulk_len = sizeof(obuf);
    odata.rmtbulk_val = (char *)&obuf;
    memset(&ccred, 0, sizeof(ccred));
    code = RMTSYS_Pioctl(rmtsys_conn, &ccred, NIL_PATHP, 0x4F01, 0,
                         &idata, &odata, &err);
}				/* afspag_Init */