struct afspag_cell *afspag_GetCell(char *acell) { struct afspag_cell *tcell; ObtainWriteLock(&afs_xpagcell, 820); for (tcell = cells; tcell; tcell = tcell->next) { if (!strcmp(acell, tcell->cellname)) break; } if (!tcell) { tcell = (struct afspag_cell *)afs_osi_Alloc(sizeof(struct afspag_cell)); if (!tcell) goto out; tcell->cellname = (char *)afs_osi_Alloc(strlen(acell) + 1); if (!tcell->cellname) { afs_osi_Free(tcell, sizeof(struct afspag_cell)); tcell = 0; goto out; } strcpy(tcell->cellname, acell); tcell->cellnum = ++lastcell; tcell->next = cells; cells = tcell; if (!primary_cell) primary_cell = tcell; } out: ReleaseWriteLock(&afs_xpagcell); return tcell; }
void DInit(int abuffers) { /* Initialize the venus buffer system. */ register int i; register struct buffer *tb; #if defined(AFS_USEBUFFERS) struct buf *tub; /* unix buffer for allocation */ #endif AFS_STATCNT(DInit); if (dinit_flag) return; dinit_flag = 1; #if defined(AFS_USEBUFFERS) /* round up to next multiple of NPB, since we allocate multiple pages per chunk */ abuffers = ((abuffers - 1) | (NPB - 1)) + 1; #endif LOCK_INIT(&afs_bufferLock, "afs_bufferLock"); Buffers = (struct buffer *)afs_osi_Alloc(abuffers * sizeof(struct buffer)); #if !defined(AFS_USEBUFFERS) BufferData = (char *)afs_osi_Alloc(abuffers * AFS_BUFFER_PAGESIZE); #endif timecounter = 1; afs_stats_cmperf.bufAlloced = nbuffers = abuffers; for (i = 0; i < PHSIZE; i++) phTable[i] = 0; for (i = 0; i < abuffers; i++) { #if defined(AFS_USEBUFFERS) if ((i & (NPB - 1)) == 0) { /* time to allocate a fresh buffer */ tub = geteblk(AFS_BUFFER_PAGESIZE * NPB); BufferData = (char *)tub->b_un.b_addr; } #endif /* Fill in each buffer with an empty indication. */ tb = &Buffers[i]; tb->fid = NULLIDX; tb->inode = 0; tb->accesstime = 0; tb->lockers = 0; #if defined(AFS_USEBUFFERS) if ((i & (NPB - 1)) == 0) tb->bufp = tub; else tb->bufp = 0; tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))]; #else tb->data = &BufferData[AFS_BUFFER_PAGESIZE * i]; #endif tb->hashIndex = 0; tb->dirty = 0; AFS_RWLOCK_INIT(&tb->lock, "buffer lock"); } return; }
/* allocate space for sender */
/*
 * Return a block of AFS_SMALLOCSIZ bytes, either freshly allocated or
 * recycled from the freeSmallList pool.  Panics if more than
 * AFS_SMALLOCSIZ bytes are requested.
 *
 * NOTE(review): the emptiness peek at freeSmallList below is done
 * without osi_fsplock held; the locked re-read (and NULL test of tp)
 * afterwards is what keeps this safe if another thread drains the list
 * in between — though in that window a NULL pointer is returned rather
 * than falling back to a fresh allocation.  Confirm callers cope.
 */
void *
osi_AllocSmallSpace(size_t size)
{
    struct osi_packet *tp;
    AFS_STATCNT(osi_AllocSmallSpace);
    if (size > AFS_SMALLOCSIZ)
	osi_Panic("osi_AllocSmallS: size=%d\n", (int)size);

    if (!freeSmallList) {
	/* Pool looks empty: carve a brand-new block. */
	afs_stats_cmperf.SmallBlocksAlloced++;
	afs_stats_cmperf.SmallBlocksActive++;
	tp = afs_osi_Alloc(AFS_SMALLOCSIZ);
	/* NOTE(review): tp is not NULL-checked before pin()/return —
	 * confirm callers tolerate a NULL result. */
#ifdef KERNEL_HAVE_PIN
	pin((char *)tp, AFS_SMALLOCSIZ);
#endif
	return (char *)tp;
    }
    afs_stats_cmperf.SmallBlocksActive++;
    ObtainWriteLock(&osi_fsplock, 327);
    tp = freeSmallList;
    if (tp)
	freeSmallList = tp->next;
    ReleaseWriteLock(&osi_fsplock);
    return (char *)tp;
}
/*
 * Allocate and register a new exporter structure at the tail of the
 * root_exported list.
 *
 * size  - bytes to allocate (0 means sizeof(struct afs_exporter))
 * ops   - operation vector for this exporter
 * state - initial exp_states value
 * type  - exporter type tag
 * data  - opaque client data stored in exp_data
 *
 * Returns the new exporter; asserts on allocation failure.
 *
 * Fixes: the allocation result was memset without a NULL check, and the
 * exporter's fields were filled in only after the node had been linked
 * into the list and the lock released.  Now the record is fully built
 * before it is published.
 */
struct afs_exporter *
exporter_add(afs_int32 size, struct exporterops *ops, afs_int32 state,
	     afs_int32 type, char *data)
{
    struct afs_exporter *ex, *op;
    afs_int32 length;

    AFS_STATCNT(exporter_add);
    if (!init_xexported) {
	init_xexported = 1;
	LOCK_INIT(&afs_xexp, "afs_xexp");
    }

    length = (size ? size : sizeof(struct afs_exporter));
    ex = (struct afs_exporter *)afs_osi_Alloc(length);
    osi_Assert(ex != NULL);
    memset(ex, 0, length);

    /* Fully initialize the record *before* publishing it on the list so
     * no other thread can observe a half-built exporter. */
    ex->exp_next = 0;
    ex->exp_op = ops;
    ex->exp_states = state;
    ex->exp_data = data;
    ex->exp_type = type;

    ObtainWriteLock(&afs_xexp, 308);
    /* Walk to the current tail of the list. */
    for (op = root_exported; op; op = op->exp_next) {
	if (!op->exp_next)
	    break;
    }
    if (op)
	op->exp_next = ex;
    else
	root_exported = ex;
    ReleaseWriteLock(&afs_xexp);
    return ex;
}
int afs_MemCacheTruncate(struct osi_file *fP, int size) { struct memCacheEntry *mceP = (struct memCacheEntry *)fP; AFS_STATCNT(afs_MemCacheTruncate); ObtainWriteLock(&mceP->afs_memLock, 313); /* old directory entry; g.c. */ if (size == 0 && mceP->dataSize > memCacheBlkSize) { char *oldData = mceP->data; mceP->data = afs_osi_Alloc(memCacheBlkSize); if (mceP->data == NULL) { /* no available memory */ mceP->data = oldData; ReleaseWriteLock(&mceP->afs_memLock); afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n", memCacheBlkSize); } else { afs_osi_Free(oldData, mceP->dataSize); mceP->dataSize = memCacheBlkSize; } } if (size < mceP->size) mceP->size = size; ReleaseWriteLock(&mceP->afs_memLock); return 0; }
/* allocate space for sender */
/*
 * Return a block of AFS_LRALLOCSIZ bytes, either freshly allocated or
 * recycled from the freePacketList pool.  Panics if more than
 * AFS_LRALLOCSIZ bytes are requested.  Must be called with the AFS
 * global lock held (enforced by AFS_ASSERT_GLOCK).
 */
void *
osi_AllocLargeSpace(size_t size)
{
    struct osi_packet *tp;
    AFS_ASSERT_GLOCK();
    AFS_STATCNT(osi_AllocLargeSpace);
    if (size > AFS_LRALLOCSIZ)
	osi_Panic("osi_AllocLargeSpace: size=%d\n", (int)size);
    /* Counted as active before we know whether the allocation succeeds. */
    afs_stats_cmperf.LargeBlocksActive++;
    if (!freePacketList) {
	char *p;

	/* Pool looks empty (unlocked peek): carve a brand-new block. */
	afs_stats_cmperf.LargeBlocksAlloced++;
	p = afs_osi_Alloc(AFS_LRALLOCSIZ);
	/* NOTE(review): p is not NULL-checked before pin()/return —
	 * confirm callers tolerate a NULL result. */
#ifdef KERNEL_HAVE_PIN
	/*
	 * Need to pin this memory since under heavy conditions this memory
	 * could be swapped out; the problem is that we could inside rx where
	 * interrupts are disabled and thus we would panic if we don't pin it.
	 */
	pin(p, AFS_LRALLOCSIZ);
#endif
	return p;
    }
    ObtainWriteLock(&osi_flplock, 324);
    tp = freePacketList;
    if (tp)
	freePacketList = tp->next;
    ReleaseWriteLock(&osi_flplock);
    return (char *)tp;
}
/** * Init a new dynroot volume. * @param Volume FID. * @return Volume or NULL if not found. */ static struct volume * afs_NewDynrootVolume(struct VenusFid *fid) { struct cell *tcell; struct volume *tv; struct vldbentry *tve; char *bp, tbuf[CVBS]; tcell = afs_GetCell(fid->Cell, READ_LOCK); if (!tcell) return NULL; tve = afs_osi_Alloc(sizeof(*tve)); osi_Assert(tve != NULL); if (!(tcell->states & CHasVolRef)) tcell->states |= CHasVolRef; bp = afs_cv2string(&tbuf[CVBS], fid->Fid.Volume); memset(tve, 0, sizeof(*tve)); strcpy(tve->name, "local-dynroot"); tve->volumeId[ROVOL] = fid->Fid.Volume; tve->flags = VLF_ROEXISTS; tv = afs_SetupVolume(0, bp, tve, tcell, 0, 0, 0); afs_PutCell(tcell, READ_LOCK); afs_osi_Free(tve, sizeof(*tve)); return tv; }
/*
 * Read the contents of a symlink (or mount point) out of the UFS disk
 * cache and stash a NUL-terminated copy in avc->linkData.
 *
 * avc  - vcache of the link; avc->linkData is filled in on success.
 * areq - request credentials used to fetch the data.
 *
 * Returns 0 on success (or if linkData was already cached), EFAULT for
 * oversized links, ENETDOWN when disconnected, EIO on fetch/read errors.
 */
int
afs_UFSHandleLink(register struct vcache *avc, struct vrequest *areq)
{
    register struct dcache *tdc;
    register char *tp, *rbuf;
    void *tfile;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    register afs_int32 code;

    /* two different formats, one for links protected 644, have a "." at the
     * end of the file name, which we turn into a null.  Others, protected
     * 755, we add a null to the end of */
    AFS_STATCNT(afs_UFSHandleLink);
    if (!avc->linkData) {
	/* Not cached in memory yet: pull chunk 0 from the disk cache. */
	tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
	afs_Trace3(afs_iclSetp, CM_TRACE_UFSLINK, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_POINTER, tdc, ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(avc->f.m.Length));
	if (!tdc) {
	    if (AFS_IS_DISCONNECTED)
		return ENETDOWN;
	    else
		return EIO;
	}
	/* otherwise we have the data loaded, go for it */
	if (len > 1024) {
	    afs_PutDCache(tdc);
	    return EFAULT;
	}
	if (avc->f.m.Mode & 0111)
	    alen = len + 1;	/* regular link */
	else
	    alen = len;		/* mt point */
	rbuf = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	/* NOTE(review): rbuf is not NULL-checked before being written —
	 * confirm osi_AllocLargeSpace cannot return NULL here. */
	tlen = len;
	ObtainReadLock(&tdc->lock);
#if defined(LINUX_USE_FH)
	tfile = osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
#else
	tfile = osi_UFSOpen(tdc->f.inode);
#endif
	code = afs_osi_Read(tfile, -1, rbuf, tlen);
	osi_UFSClose(tfile);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
	/* For 644-style links this overwrites the trailing "."; for mount
	 * points it terminates the buffer in place. */
	rbuf[alen - 1] = '\0';
	alen = strlen(rbuf) + 1;
	tp = afs_osi_Alloc(alen);	/* make room for terminating null */
	memcpy(tp, rbuf, alen);
	osi_FreeLargeSpace(rbuf);
	/* Short or failed read: discard the copy and report EIO. */
	if (code != tlen) {
	    afs_osi_Free(tp, alen);
	    return EIO;
	}
	avc->linkData = tp;
    }
    return 0;
}
/*
 * RPC handler: return the name and server-address list of the
 * a_index'th cell in this client's CellServDB.
 *
 * a_call  - incoming rx call (framework parameter, not used directly).
 * a_index - index of the cell to look up.
 * a_name  - out: allocated cell name ("" when the index is invalid).
 * a_hosts - out: allocated list of server addresses in host byte order.
 *
 * Returns 0 on success, ENOMEM if the name buffer cannot be allocated.
 */
int
SRXAFSCB_GetCellServDB(struct rx_call *a_call, afs_int32 a_index,
		       char **a_name, serverList * a_hosts)
{
    afs_int32 i, j = 0;
    struct cell *tcell;
    char *t_name, *p_name = NULL;

    RX_AFS_GLOCK();
    AFS_STATCNT(SRXAFSCB_GetCellServDB);

    tcell = afs_GetCellByIndex(a_index, READ_LOCK);

    if (!tcell) {
	/* Unknown index: hand back an empty name and empty host list. */
	i = 0;
	a_hosts->serverList_val = 0;
	a_hosts->serverList_len = 0;
    } else {
	p_name = tcell->cellName;
	/* Count populated host slots (j) to size the allocation. */
	for (j = 0; j < AFSMAXCELLHOSTS && tcell->cellHosts[j]; j++);
	i = strlen(p_name);
	a_hosts->serverList_val = afs_osi_Alloc(j * sizeof(afs_int32));
	osi_Assert(a_hosts->serverList_val != NULL);
	a_hosts->serverList_len = j;
	for (j = 0; j < AFSMAXCELLHOSTS && tcell->cellHosts[j]; j++)
	    a_hosts->serverList_val[j] =
		ntohl(tcell->cellHosts[j]->addr->sa_ip);
	afs_PutCell(tcell, READ_LOCK);
    }

    t_name = afs_osi_Alloc(i + 1);
    if (t_name == NULL) {
	/* NOTE(review): in the !tcell case serverList_val is NULL here —
	 * confirm afs_osi_Free() tolerates a NULL pointer / zero size. */
	afs_osi_Free(a_hosts->serverList_val, (j * sizeof(afs_int32)));
	RX_AFS_GUNLOCK();
	return ENOMEM;
    }

    t_name[i] = '\0';
    if (p_name)
	memcpy(t_name, p_name, i);

    RX_AFS_GUNLOCK();

    *a_name = t_name;
    return 0;
}
/* Thin malloc shim for the crypto code: defer to afs_osi_Alloc(). */
void *
_afscrypto_malloc(size_t len)
{
    return afs_osi_Alloc(len);
}
/*
 * Allocate a fresh vcache entry with its vnode pointer cleared.
 * Returns the new vcache; asserts on allocation failure.
 *
 * Fix: the allocation result was dereferenced (tvc->v = NULL) without a
 * NULL check.
 */
struct vcache *
osi_NewVnode(void)
{
    struct vcache *tvc;

    tvc = afs_osi_Alloc(sizeof(struct vcache));
    osi_Assert(tvc != NULL);

    tvc->v = NULL;		/* important to clean this, or use memset 0 */

    return tvc;
}
/*
 * RPC handler: return the client's @sys name list to the caller.
 *
 * a_call     - incoming rx call (framework parameter).
 * a_uid      - requesting uid (unused here).
 * a_sysnames - out: allocated list of sysname strings.
 *
 * Returns 0 on success, UAENOMEM after freeing any partial allocation.
 *
 * Fix: afs_xpagsys is obtained as a READ lock, but the error path
 * released it with ReleaseWriteLock — an unbalanced lock operation.
 * Also initialize 'i' so the unwind loop is well-defined even when the
 * first allocation fails.
 */
int
SPAGCB_GetSysName(struct rx_call *a_call, afs_int32 a_uid,
		  SysNameList *a_sysnames)
{
    int i = 0;

    RX_AFS_GLOCK();

    ObtainReadLock(&afs_xpagsys);
    memset(a_sysnames, 0, sizeof(struct SysNameList));

    a_sysnames->SysNameList_len = afs_sysnamecount;
    a_sysnames->SysNameList_val =
	afs_osi_Alloc(afs_sysnamecount * sizeof(SysNameEnt));
    if (!a_sysnames->SysNameList_val)
	goto out;

    for (i = 0; i < afs_sysnamecount; i++) {
	a_sysnames->SysNameList_val[i].sysname =
	    afs_osi_Alloc(strlen(afs_sysnamelist[i]) + 1);
	if (!a_sysnames->SysNameList_val[i].sysname)
	    goto out;
	strcpy(a_sysnames->SysNameList_val[i].sysname, afs_sysnamelist[i]);
    }

    ReleaseReadLock(&afs_xpagsys);
    RX_AFS_GUNLOCK();
    return 0;

  out:
    /* Unwind: free every sysname copied so far, then the list itself. */
    if (a_sysnames->SysNameList_val) {
	while (i-- > 0) {
	    afs_osi_Free(a_sysnames->SysNameList_val[i].sysname,
			 strlen(a_sysnames->SysNameList_val[i].sysname) + 1);
	}
	afs_osi_Free(a_sysnames->SysNameList_val,
		     afs_sysnamecount * sizeof(SysNameEnt));
    }
    ReleaseReadLock(&afs_xpagsys);	/* was ReleaseWriteLock: bug fix */
    RX_AFS_GUNLOCK();
    return UAENOMEM;
}
/*
 * Set up the in-memory cache: allocate memMaxBlkNumber (= blkCount)
 * cache entries of memCacheBlkSize bytes each, then register blkCount
 * cache files.
 *
 * blkCount - number of cache blocks/files to create.
 * blkSize  - block size in bytes; 0 keeps the current memCacheBlkSize.
 * flags    - unused here.
 *
 * Returns 0 on success, or ENOMEM (after freeing every block allocated
 * so far) when a data block cannot be allocated.
 */
int
afs_InitMemCache(int blkCount, int blkSize, int flags)
{
    int index;

    AFS_STATCNT(afs_InitMemCache);
    if (blkSize)
	memCacheBlkSize = blkSize;

    memMaxBlkNumber = blkCount;
    memCache = afs_osi_Alloc(memMaxBlkNumber * sizeof(struct memCacheEntry));
    osi_Assert(memCache != NULL);

    for (index = 0; index < memMaxBlkNumber; index++) {
	char *blk;
	(memCache + index)->size = 0;
	(memCache + index)->dataSize = memCacheBlkSize;
	LOCK_INIT(&((memCache + index)->afs_memLock), "afs_memLock");
	blk = afs_osi_Alloc(memCacheBlkSize);
	if (blk == NULL)
	    goto nomem;
	(memCache + index)->data = blk;
	memset((memCache + index)->data, 0, memCacheBlkSize);
    }
#if defined(AFS_HAVE_VXFS)
    afs_InitDualFSCacheOps((struct vnode *)0);
#endif
    for (index = 0; index < blkCount; index++)
	afs_InitCacheFile(NULL, 0);
    return 0;

  nomem:
    /* Roll back: free the data blocks allocated before the failure.
     * (The memCache array itself is intentionally left in place.) */
    afs_warn("afsd: memCache allocation failure at %d KB.\n",
	     (index * memCacheBlkSize) / 1024);
    while (--index >= 0) {
	afs_osi_Free((memCache + index)->data, memCacheBlkSize);
	(memCache + index)->data = NULL;
    }
    return ENOMEM;
}
void DInit(int abuffers) { /* Initialize the venus buffer system. */ int i; struct buffer *tb; AFS_STATCNT(DInit); if (dinit_flag) return; dinit_flag = 1; /* round up to next multiple of NPB, since we allocate multiple pages per chunk */ abuffers = ((abuffers - 1) | (NPB - 1)) + 1; afs_max_buffers = abuffers << 2; /* possibly grow up to 4 times as big */ LOCK_INIT(&afs_bufferLock, "afs_bufferLock"); Buffers = afs_osi_Alloc(afs_max_buffers * sizeof(struct buffer)); osi_Assert(Buffers != NULL); timecounter = 1; afs_stats_cmperf.bufAlloced = nbuffers = abuffers; for (i = 0; i < PHSIZE; i++) phTable[i] = 0; for (i = 0; i < abuffers; i++) { if ((i & (NPB - 1)) == 0) { /* time to allocate a fresh buffer */ BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB); osi_Assert(BufferData != NULL); } /* Fill in each buffer with an empty indication. */ tb = &Buffers[i]; tb->fid = NULLIDX; afs_reset_inode(&tb->inode); tb->accesstime = 0; tb->lockers = 0; tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))]; tb->hashIndex = 0; tb->dirty = 0; AFS_RWLOCK_INIT(&tb->lock, "buffer lock"); } return; }
/*
 * calloc-style shim for the crypto code: allocate num*len bytes and
 * zero them.  Returns NULL if the size computation would overflow, if
 * num is negative, or if the allocation fails.
 *
 * Fix: the original computed num * len with no overflow check, so a
 * huge num/len pair could wrap and yield a dangerously undersized
 * (but zeroed) buffer.
 */
void *
_afscrypto_calloc(int num, size_t len)
{
    void *ptr;
    size_t total;

    /* Reject negative counts and any num*len that overflows size_t. */
    if (num < 0 || (len != 0 && (size_t)num > (size_t)-1 / len))
	return NULL;

    total = (size_t)num * len;
    ptr = afs_osi_Alloc(total);

    /* In practice, callers assume the afs_osi_Alloc() will not fail. */
    if (ptr != NULL)
	memset(ptr, 0, total);

    return ptr;
}
/*
 * Read the contents of a symlink (or mount point) out of the memory
 * cache and stash a NUL-terminated copy in avc->linkData.
 *
 * avc  - vcache of the link; avc->linkData is filled in on success.
 * areq - request credentials used to fetch the data.
 *
 * Returns 0 on success (or if linkData was already cached), EFAULT for
 * oversized links, EIO on fetch/read errors.
 */
int
afs_MemHandleLink(struct vcache *avc, struct vrequest *areq)
{
    struct dcache *tdc;
    char *tp, *rbuf;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    afs_int32 code;

    AFS_STATCNT(afs_MemHandleLink);
    /* two different formats, one for links protected 644, have a "." at
     * the end of the file name, which we turn into a null.  Others,
     * protected 755, we add a null to the end of */
    if (!avc->linkData) {
	void *addr;
	/* Not cached yet: pull chunk 0 from the memory cache. */
	tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
	if (!tdc) {
	    return EIO;
	}
	/* otherwise we have the data loaded, go for it */
	if (len > 1024) {
	    afs_PutDCache(tdc);
	    return EFAULT;
	}

	if (avc->f.m.Mode & 0111)
	    alen = len + 1;	/* regular link */
	else
	    alen = len;		/* mt point */
	rbuf = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	/* NOTE(review): rbuf is not NULL-checked before use — confirm
	 * osi_AllocLargeSpace cannot return NULL here. */
	ObtainReadLock(&tdc->lock);
	addr = afs_MemCacheOpen(&tdc->f.inode);
	tlen = len;
	code = afs_MemReadBlk(addr, 0, rbuf, tlen);
	afs_MemCacheClose(addr);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
	/* For 644-style links this overwrites the trailing "."; for mount
	 * points it terminates the buffer in place. */
	rbuf[alen - 1] = 0;
	alen = strlen(rbuf) + 1;
	tp = afs_osi_Alloc(alen);	/* make room for terminating null */
	osi_Assert(tp != NULL);
	memcpy(tp, rbuf, alen);
	osi_FreeLargeSpace(rbuf);
	/* Short or failed read: discard the copy and report EIO. */
	if (code != len) {
	    afs_osi_Free(tp, alen);
	    return EIO;
	}
	avc->linkData = tp;
    }
    return 0;
}
/*!
 * Add a token to a token jar
 *
 * Prepend a new, zero-initialised token of the given type to the jar
 * (creating the jar if *tokens is NULL).
 *
 * @param[in] tokens
 * 	A pointer to the address of the token jar to populate
 * @param[in] type
 * 	The type of token to create
 *
 * @return
 * 	A pointer to the tokenUnion of the newly created token, which the
 * 	caller may then populate.
 */
union tokenUnion *
afs_AddToken(struct tokenJar **tokens, rx_securityIndex type)
{
    struct tokenJar *jar;

    jar = afs_osi_Alloc(sizeof(struct tokenJar));
    osi_Assert(jar != NULL);
    memset(jar, 0, sizeof(*jar));

    jar->type = type;
    jar->next = *tokens;
    *tokens = jar;

    return &jar->content;
}
/*!
 *
 * Extend a cache file
 *
 * \param avc pointer to vcache to extend data for
 * \param alen Length to extend file to
 * \param areq
 *
 * \note avc must be write locked. May release and reobtain avc and GLOCK
 */
int
afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq)
{
    afs_size_t offset, toAdd;
    struct osi_file *tfile;
    afs_int32 code = 0;
    struct dcache *tdc;
    void *zeros;

    /* One page of zeroes, reused for every write below. */
    zeros = afs_osi_Alloc(AFS_PAGESIZE);
    if (zeros == NULL)
	return ENOMEM;
    memset(zeros, 0, AFS_PAGESIZE);

    while (avc->f.m.Length < alen) {
	/* Grab (or create) the chunk covering the current end of file. */
	tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
	if (!tdc) {
	    code = EIO;
	    break;
	}

	toAdd = alen - avc->f.m.Length;

	/* Clamp this round's extension to the space left in the chunk. */
	offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
	if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
	    toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
	}

	tfile = afs_CFileOpen(&tdc->f.inode);
	/* Zero-fill the chunk one page at a time until validPos reaches
	 * the new end of this round's extension. */
	while(tdc->validPos < avc->f.m.Length + toAdd) {
	     afs_size_t towrite;

	     towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
	     if (towrite > AFS_PAGESIZE)
		 towrite = AFS_PAGESIZE;

	     /* NOTE(review): the result of afs_CFileWrite is stored but
	      * not checked, and validPos still advances by towrite, so a
	      * failed write is skipped over and only the final write's
	      * status survives in 'code'.  Confirm this is intentional. */
	     code = afs_CFileWrite(tfile, tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
				   zeros, towrite);
	     tdc->validPos += towrite;
	}
	afs_CFileClose(tfile);

	afs_AdjustSize(tdc, offset + toAdd );
	avc->f.m.Length += toAdd;
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    afs_osi_Free(zeros, AFS_PAGESIZE);
    return code;
}
/*! * Add an rxkad token to the token jar * * @param[in] tokens * A pointer to the address of the jar to add the token to * @param[in] ticket * A data block containing the token's opaque ticket * @param[in] ticketLen * The length of the ticket data block * @param[in] clearToken * The cleartext token information */ void afs_AddRxkadToken(struct tokenJar **tokens, char *ticket, int ticketLen, struct ClearToken *clearToken) { union tokenUnion *tokenU; struct rxkadToken *rxkad; tokenU = afs_AddToken(tokens, RX_SECIDX_KAD); rxkad = &tokenU->rxkad; rxkad->ticket = afs_osi_Alloc(ticketLen); osi_Assert(rxkad->ticket != NULL); rxkad->ticketLen = ticketLen; memcpy(rxkad->ticket, ticket, ticketLen); rxkad->clearToken = *clearToken; }
/*
 * Create a temporary symlink entry in /afs.
 *
 * avc         - dynroot directory vcache (unused beyond the VOP shape).
 * acred       - caller credentials; only root (uid 0) may create links.
 * aname       - name of the new symlink.
 * atargetName - link target text.
 *
 * Returns 0 on success, EPERM for non-root callers, EEXIST if the name
 * is already a cell, alias or dynroot symlink.
 *
 * Fix: the three allocations were dereferenced without any check while
 * afs_dynSymlinkLock was held write-locked; assert on failure instead
 * of dereferencing NULL.
 */
int
afs_DynrootVOPSymlink(struct vcache *avc, afs_ucred_t *acred,
		      char *aname, char *atargetName)
{
    struct afs_dynSymlink *tps;

    if (afs_cr_uid(acred))
	return EPERM;
    if (afs_CellOrAliasExists(aname))
	return EEXIST;

    /* Check if it's already a symlink */
    ObtainWriteLock(&afs_dynSymlinkLock, 91);
    tps = afs_dynSymlinkBase;
    while (tps) {
	if (afs_strcasecmp(aname, tps->name) == 0) {
	    ReleaseWriteLock(&afs_dynSymlinkLock);
	    return EEXIST;
	}
	tps = tps->next;
    }

    /* Doesn't already exist -- go ahead and create it */
    tps = afs_osi_Alloc(sizeof(*tps));
    osi_Assert(tps != NULL);
    tps->index = afs_dynSymlinkIndex++;
    tps->next = afs_dynSymlinkBase;
    tps->name = afs_osi_Alloc(strlen(aname) + 1);
    osi_Assert(tps->name != NULL);
    strcpy(tps->name, aname);
    tps->target = afs_osi_Alloc(strlen(atargetName) + 1);
    osi_Assert(tps->target != NULL);
    strcpy(tps->target, atargetName);
    afs_dynSymlinkBase = tps;
    ReleaseWriteLock(&afs_dynSymlinkLock);

    afs_DynrootInvalidate();
    return 0;
}
/*XXX: this extends a block arbitrarily to support big directories */
/*
 * Gathering write into a memory-cache entry: copy up to 'nio' iovecs
 * ('size' bytes total) into mceP->data starting at 'offset', growing
 * the buffer and zero-filling any gap as needed.
 *
 * Returns the number of bytes written, or -ENOMEM when the buffer
 * cannot be grown.
 */
int
afs_MemWritevBlk(struct memCacheEntry *mceP, int offset,
		 struct iovec *iov, int nio, int size)
{
    int i;
    int bytesWritten;
    int bytesToWrite;
    /* NOTE(review): stat is counted under afs_MemWriteBlk rather than
     * afs_MemWritevBlk — presumably deliberate sharing; confirm. */
    AFS_STATCNT(afs_MemWriteBlk);
    ObtainWriteLock(&mceP->afs_memLock, 561);
    if (offset + size > mceP->dataSize) {
	/* Need a bigger buffer: allocate a new one, copy, free the old. */
	char *oldData = mceP->data;

	mceP->data = afs_osi_Alloc(size + offset);
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;	/* revert back change that was made */
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n",
		     size + offset);
	    return -ENOMEM;
	}

	/* may overlap, but this is OK */
	/* The global lock is dropped around the potentially large copy. */
	AFS_GUNLOCK();
	memcpy(mceP->data, oldData, mceP->size);
	AFS_GLOCK();
	afs_osi_Free(oldData, mceP->dataSize);
	mceP->dataSize = size + offset;
    }
    AFS_GUNLOCK();
    /* Zero-fill any gap between the old end of data and 'offset'. */
    if (mceP->size < offset)
	memset(mceP->data + mceP->size, 0, offset - mceP->size);
    for (bytesWritten = 0, i = 0; i < nio && size > 0; i++) {
	bytesToWrite = (size < iov[i].iov_len) ? size : iov[i].iov_len;
	memcpy(mceP->data + offset, iov[i].iov_base, bytesToWrite);
	offset += bytesToWrite;
	bytesWritten += bytesToWrite;
	size -= bytesToWrite;
    }
    mceP->size = (offset < mceP->size) ? mceP->size : offset;
    AFS_GLOCK();
    ReleaseWriteLock(&mceP->afs_memLock);

    return bytesWritten;
}
/**
 * Get an available volume list slot. If the list does not exist,
 * create one containing a single element.
 *
 * Fix: the freshly allocated element was dereferenced (newVp->next)
 * without a NULL check; assert on allocation failure instead.
 *
 * @return the volume slot taken from the head of the free list.
 */
struct volume *
afs_MemGetVolSlot(void)
{
    struct volume *tv;

    AFS_STATCNT(afs_MemGetVolSlot);
    if (!afs_freeVolList) {
	struct volume *newVp;

	newVp = (struct volume *)afs_osi_Alloc(sizeof(struct volume));
	osi_Assert(newVp != NULL);

	newVp->next = NULL;
	afs_freeVolList = newVp;
    }
    tv = afs_freeVolList;
    afs_freeVolList = tv->next;
    return tv;
}				/*afs_MemGetVolSlot */
/*
 * RPC handler: marshal the cache manager's initialization parameters
 * (version 1 format) into a freshly allocated buffer for the caller.
 *
 * Returns 0 on success, ENOMEM if the buffer cannot be allocated.
 */
int
SRXAFSCB_GetCacheConfig(struct rx_call *a_call, afs_uint32 callerVersion,
			afs_uint32 * serverVersion, afs_uint32 * configCount,
			cacheConfig * config)
{
    afs_uint32 *buffp;
    size_t bytes;
    cm_initparams_v1 params;

    RX_AFS_GLOCK();
    AFS_STATCNT(SRXAFSCB_GetCacheConfig);

    /*
     * Currently only support version 1
     */
    bytes = sizeof(cm_initparams_v1);
    buffp = afs_osi_Alloc(bytes);
    if (buffp == NULL) {
	RX_AFS_GUNLOCK();
	return ENOMEM;
    }

    /* Copy the live init parameters into the wire-format struct. */
    params.nChunkFiles = cm_initParams.cmi_nChunkFiles;
    params.nStatCaches = cm_initParams.cmi_nStatCaches;
    params.nDataCaches = cm_initParams.cmi_nDataCaches;
    params.nVolumeCaches = cm_initParams.cmi_nVolumeCaches;
    params.firstChunkSize = cm_initParams.cmi_firstChunkSize;
    params.otherChunkSize = cm_initParams.cmi_otherChunkSize;
    params.cacheSize = cm_initParams.cmi_cacheSize;
    params.setTime = cm_initParams.cmi_setTime;
    params.memCache = cm_initParams.cmi_memCache;

    afs_MarshallCacheConfig(callerVersion, &params, buffp);

    *serverVersion = AFS_CLIENT_RETRIEVAL_FIRST_EDITION;
    *configCount = bytes;
    config->cacheConfig_val = buffp;
    config->cacheConfig_len = bytes / sizeof(afs_uint32);

    RX_AFS_GUNLOCK();
    return 0;
}
static void afs_RebuildDynrootMount(void) { int i; int curChunk, curPage; char *newDir; struct DirHeader *dirHeader; newDir = afs_osi_Alloc(AFS_PAGESIZE); /* * Now actually construct the directory. */ curChunk = 13; curPage = 0; dirHeader = (struct DirHeader *)newDir; dirHeader->header.pgcount = 0; dirHeader->header.tag = htons(1234); dirHeader->header.freecount = 0; dirHeader->header.freebitmap[0] = 0xff; dirHeader->header.freebitmap[1] = 0x1f; for (i = 2; i < EPP / 8; i++) dirHeader->header.freebitmap[i] = 0; dirHeader->alloMap[0] = EPP - DHE - 1; for (i = 1; i < MAXPAGES; i++) dirHeader->alloMap[i] = EPP; for (i = 0; i < NHASHENT; i++) dirHeader->hashTable[i] = 0; /* Install "." and ".." */ afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, ".", 1); afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, "..", 1); ObtainWriteLock(&afs_dynrootDirLock, 549); if (afs_dynrootMountDir) afs_osi_Free(afs_dynrootMountDir, afs_dynrootMountDirLen); afs_dynrootMountDir = newDir; afs_dynrootMountDirLen = AFS_PAGESIZE; ReleaseWriteLock(&afs_dynrootDirLock); }
/*!
 * Create a new cell name, optional cell number.
 * \param name Name of cell.
 * \param cellnum Cellname number (0 picks the next free number).
 * \return Initialized structure.
 *
 * Fix: the allocation result was dereferenced without a NULL check;
 * assert on failure instead.
 */
static struct cell_name *
afs_cellname_new(char *name, afs_int32 cellnum)
{
    struct cell_name *cn;

    if (cellnum == 0)
	cellnum = afs_cellnum_next;

    cn = (struct cell_name *)afs_osi_Alloc(sizeof(*cn));
    osi_Assert(cn != NULL);

    /* Push the new entry onto the head of the global cellname list. */
    cn->next = afs_cellname_head;
    cn->cellnum = cellnum;
    cn->cellname = afs_strdup(name);
    cn->used = 0;
    afs_cellname_head = cn;

    if (cellnum >= afs_cellnum_next)
	afs_cellnum_next = cellnum + 1;

    return cn;
}
/*
 * Allocate and initialize a fresh vcache entry (SGI variant): zero the
 * structure, give it a unique vnode number and set up its semaphores
 * and lock.  Returns the new vcache; asserts on allocation failure.
 *
 * Fix: the allocation result was memset without a NULL check.
 */
struct vcache *
osi_NewVnode(void)
{
    struct vcache *avc;
    char name[METER_NAMSZ];

    avc = afs_osi_Alloc(sizeof(struct vcache));
    osi_Assert(avc != NULL);
    memset(avc, 0, sizeof(struct vcache));
    avc->v.v_number = ++afsvnumbers;

    avc->vc_rwlockid = OSI_NO_LOCKID;
    initnsema(&avc->vc_rwlock, 1, makesname(name, "vrw", avc->v.v_number));
#ifndef AFS_SGI53_ENV
    initnsema(&avc->v.v_sync, 0, makesname(name, "vsy", avc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
    initnlock(&avc->v.v_lock, makesname(name, "vlk", avc->v.v_number));
#endif

    return avc;
}
int SRXAFSCB_GetLocalCell(struct rx_call *a_call, char **a_name) { int plen; struct cell *tcell; char *t_name, *p_name = NULL; RX_AFS_GLOCK(); AFS_STATCNT(SRXAFSCB_GetLocalCell); /* Search the list for the primary cell. Cell number 1 is only * the primary cell is when no other cell is explicitly marked as * the primary cell. */ tcell = afs_GetPrimaryCell(READ_LOCK); if (tcell) p_name = tcell->cellName; if (p_name) plen = strlen(p_name); else plen = 0; t_name = afs_osi_Alloc(plen + 1); if (t_name == NULL) { if (tcell) afs_PutCell(tcell, READ_LOCK); RX_AFS_GUNLOCK(); return ENOMEM; } t_name[plen] = '\0'; if (p_name) memcpy(t_name, p_name, plen); RX_AFS_GUNLOCK(); *a_name = t_name; if (tcell) afs_PutCell(tcell, READ_LOCK); return 0; }
/*
 * Write the contents of a uio into a memory-cache entry, growing the
 * backing buffer (and zero-filling any gap) when the write extends
 * past its current size.
 *
 * ainode - cache id used to look up the memCacheEntry.
 * uioP   - source data; its offset/resid determine placement and length.
 *
 * Returns the AFS_UIOMOVE result code, or -ENOMEM when the buffer
 * cannot be grown.
 */
int
afs_MemWriteUIO(afs_dcache_id_t *ainode, struct uio *uioP)
{
    /* NOTE(review): afs_MemCacheOpen's result is not NULL-checked before
     * the lock on it is taken — confirm it cannot fail for a valid id. */
    struct memCacheEntry *mceP =
	(struct memCacheEntry *)afs_MemCacheOpen(ainode);
    afs_int32 code;

    AFS_STATCNT(afs_MemWriteUIO);
    ObtainWriteLock(&mceP->afs_memLock, 312);
    if (AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP) > mceP->dataSize) {
	/* Need a bigger buffer: allocate a new one, copy, free the old. */
	char *oldData = mceP->data;

	mceP->data = afs_osi_Alloc(AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP));
	if (mceP->data == NULL) {	/* no available memory */
	    mceP->data = oldData;	/* revert back change that was made */
	    ReleaseWriteLock(&mceP->afs_memLock);
	    afs_warn("afs: afs_MemWriteBlk mem alloc failure (%ld bytes)\n",
		     (long)(AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP)));
	    return -ENOMEM;
	}
	/* The global lock is dropped around the potentially large copy. */
	AFS_GUNLOCK();
	memcpy(mceP->data, oldData, mceP->size);
	AFS_GLOCK();
	afs_osi_Free(oldData, mceP->dataSize);
	mceP->dataSize = AFS_UIO_RESID(uioP) + AFS_UIO_OFFSET(uioP);
    }
    /* Zero-fill any gap between the old end of data and the offset. */
    if (mceP->size < AFS_UIO_OFFSET(uioP))
	memset(mceP->data + mceP->size, 0,
	       (int)(AFS_UIO_OFFSET(uioP) - mceP->size));
    AFS_UIOMOVE(mceP->data + AFS_UIO_OFFSET(uioP), AFS_UIO_RESID(uioP),
		UIO_WRITE, uioP, code);
    if (AFS_UIO_OFFSET(uioP) > mceP->size)
	mceP->size = AFS_UIO_OFFSET(uioP);

    ReleaseWriteLock(&mceP->afs_memLock);
    return code;
}
/*
 * RPC handler: report this client's network interfaces, UUID and
 * capability flags to the calling file server.
 *
 * Returns 0; asserts if the capability buffer cannot be allocated.
 */
int
SRXAFSCB_TellMeAboutYourself(struct rx_call *a_call, struct interfaceAddr *addr,
			     Capabilities * capabilities)
{
    int idx;
    afs_uint32 *capsp;
    afs_int32 capBytes;

    RX_AFS_GLOCK();
    /* NOTE(review): counted under the WhoAreYou stat slot — presumably
     * shared with SRXAFSCB_WhoAreYou; confirm this is intentional. */
    AFS_STATCNT(SRXAFSCB_WhoAreYou);

    ObtainReadLock(&afs_xinterface);

    /* return all network interface addresses */
    addr->numberOfInterfaces = afs_cb_interface.numberOfInterfaces;
    addr->uuid = afs_cb_interface.uuid;
    for (idx = 0; idx < afs_cb_interface.numberOfInterfaces; idx++) {
	addr->addr_in[idx] = ntohl(afs_cb_interface.addr_in[idx]);
	addr->subnetmask[idx] = ntohl(afs_cb_interface.subnetmask[idx]);
	addr->mtu[idx] = ntohl(afs_cb_interface.mtu[idx]);
    }

    ReleaseReadLock(&afs_xinterface);
    RX_AFS_GUNLOCK();

    /* Advertise a single capability word. */
    capBytes = 1 * sizeof(afs_uint32);
    capsp = afs_osi_Alloc(capBytes);
    osi_Assert(capsp != NULL);
    capsp[0] = CLIENT_CAPABILITY_ERRORTRANS;
    capabilities->Capabilities_len = capBytes / sizeof(afs_uint32);
    capabilities->Capabilities_val = capsp;

    return 0;
}
/*! * Create new cell alias entry and update dynroot vnode. * \param alias * \param cell * \return */ afs_int32 afs_NewCellAlias(char *alias, char *cell) { struct cell_alias *tc; ObtainSharedLock(&afs_xcell, 681); if (afs_CellOrAliasExists_nl(alias)) { ReleaseSharedLock(&afs_xcell); return EEXIST; } UpgradeSToWLock(&afs_xcell, 682); tc = (struct cell_alias *)afs_osi_Alloc(sizeof(struct cell_alias)); tc->alias = afs_strdup(alias); tc->cell = afs_strdup(cell); tc->next = afs_cellalias_head; tc->index = afs_cellalias_index++; afs_cellalias_head = tc; ReleaseWriteLock(&afs_xcell); afs_DynrootInvalidate(); return 0; }