/*
 * Grow pool so that an allocation of nb bytes (already aligned to the pool's
 * mask by the caller) will fit, extending the arena chain with a fresh
 * js_malloc'ed arena when no existing arena has room.  Allocation is charged
 * against *pool->quotap when a quota is configured.
 * NOTE(review): this excerpt ends at the search loop's closing brace; the
 * code that actually hands out the allocation pointer lies past the end of
 * this view.
 */
JS_ArenaAllocate(JSArenaPool *pool, size_t nb) {
    JSArena **ap, *a, *b;
    jsuword extra, hdrsz, gross;
    void *p;
    /*
     * Search pool from current forward till we find or make enough space.
     *
     * NB: subtract nb from a->limit in the loop condition, instead of adding
     * nb to a->avail, to avoid overflowing a 32-bit address space (possible
     * when running a 32-bit program on a 64-bit system where the kernel maps
     * the heap up against the top of the 32-bit address space).
     *
     * Thanks to Juergen Kreileder <*****@*****.**>, who brought this up in
     * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
     */
    JS_ASSERT((nb & pool->mask) == 0);      /* caller must pre-align nb */
    for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
         pool->current = a) {
        ap = &a->next;
        if (!*ap) {
            /* Not enough space in pool, so we must malloc. */
            /* Oversized requests carry an extra header slot to remember ap. */
            extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
            hdrsz = sizeof *a + extra + pool->mask;
            gross = hdrsz + JS_MAX(nb, pool->arenasize);
            if (gross < nb)                 /* jsuword wrap-around check */
                return NULL;
            if (pool->quotap) {
                /* Quota-limited pool: refuse before allocating, charge after. */
                if (gross > *pool->quotap)
                    return NULL;
                b = (JSArena *) js_malloc(gross);
                if (!b)
                    return NULL;
                *pool->quotap -= gross;
            } else {
                b = (JSArena *) js_malloc(gross);
                if (!b)
                    return NULL;
            }
            b->next = NULL;
            b->limit = (jsuword)b + gross;
            JS_COUNT_ARENA(pool,++);
            COUNT(pool, nmallocs);

            /* If oversized, store ap in the header, just before a->base. */
            *ap = a = b;
            JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
            if (extra) {
                a->base = a->avail =
                    ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
                SET_HEADER(pool, a, ap);
            } else {
                a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
            }
            continue;
        }
        a = *ap;                            /* move to next arena */
    }
/*
 * Transmit dataMsg over socket sd.  The wire header (starting at classTotal)
 * is byte-swapped to network order IN PLACE, sent — gathered with any class
 * and message data via an iovec copy — and then swapped back to host order
 * so the caller's dataMsg remains usable.
 * Returns the status reported by the underlying write routine.
 */
X_IPC_RETURN_STATUS_TYPE x_ipc_dataMsgSend(int sd, DATA_MSG_PTR dataMsg) {
    int32 headerAmount, classAmount, dataAmount;
    X_IPC_RETURN_STATUS_TYPE res;
    char *sendInfo;
    struct iovec *tmpVec;

    LOCK_IO_MUTEX;                    /* serialize socket I/O */
    headerAmount = HEADER_SIZE();
    classAmount = dataMsg->classTotal;
    dataAmount = dataMsg->msgTotal;
    /* The wire header begins at the classTotal field, not the struct start. */
    sendInfo = (char *)&(dataMsg->classTotal);

    /* Pack endianness and alignment tags into classId before transmission;
     * the receiver unpacks them with the GET_* counterparts. */
    dataMsg->classId = SET_DATA_ENDIAN(dataMsg->classId,dataMsg->dataByteOrder);
    dataMsg->classId = SET_CLASS_ENDIAN(dataMsg->classId, dataMsg->classByteOrder);
    dataMsg->classId = SET_ALIGNMENT(dataMsg->classId);

    /* Convert header fields to network byte order (in place). */
    INT_TO_NET_INT(dataMsg->classTotal);
    INT_TO_NET_INT(dataMsg->msgTotal);
    INT_TO_NET_INT(dataMsg->parentRef);
    INT_TO_NET_INT(dataMsg->intent);
    INT_TO_NET_INT(dataMsg->classId);
    INT_TO_NET_INT(dataMsg->dispatchRef);
    INT_TO_NET_INT(dataMsg->msgRef);

    if (classAmount > 0) {
        /* Header + class data, plus whatever message-data iovecs the copy of
         * dataMsg->vec already describes (slots 0 and 1 are overwritten). */
        tmpVec = x_ipc_copyVectorization(dataMsg->vec,2);
        tmpVec[0].iov_base = sendInfo;
        tmpVec[0].iov_len = headerAmount;
        tmpVec[1].iov_base = dataMsg->classData;
        tmpVec[1].iov_len = classAmount;
        res = x_ipc_writeNBuffers(sd, tmpVec,headerAmount+classAmount+dataAmount);
    } else if (dataAmount > 0) {
        /* No class data: header goes in slot 0, message data from the copy. */
        tmpVec = x_ipc_copyVectorization(dataMsg->vec,1);
        tmpVec[0].iov_base = sendInfo;
        tmpVec[0].iov_len = headerAmount;
        res = x_ipc_writeNBuffers(sd, tmpVec,headerAmount+classAmount+dataAmount);
    } else {
        /* Header only. */
        res = x_ipc_writeNBytes(sd, sendInfo, headerAmount);
    }
    /* NOTE(review): tmpVec from x_ipc_copyVectorization is not freed here —
     * confirm ownership (perhaps x_ipc_writeNBuffers releases it). */

    /* Restore header fields to host byte order. */
    NET_INT_TO_INT(dataMsg->classTotal);
    NET_INT_TO_INT(dataMsg->msgTotal);
    NET_INT_TO_INT(dataMsg->parentRef);
    NET_INT_TO_INT(dataMsg->intent);
    NET_INT_TO_INT(dataMsg->classId);
    NET_INT_TO_INT(dataMsg->dispatchRef);
    NET_INT_TO_INT(dataMsg->msgRef);
    /* Strip the endian/alignment tags that were packed into classId above. */
    dataMsg->classId = GET_CLASSID(dataMsg->classId);
    UNLOCK_IO_MUTEX;
    return res;
}
/* *header definition *|----------|----------|----------|----------|----------| | LOCK |qtdChaves | bitmap | OFFSET#1 | LENGTH#1 | |----------|----------|----------|----------|----------| | KEY#1 | OFFSET#2 | LENGTH#2 | KEY#2 | OFFSET#N | |----------|----------|----------|----------|----------| | LENGTH#N | KEY#N | VALUE#1 ... VALUE#N | |----------|----------|--------------------------------| * *max_keys = inteiro *bitmap = variavel *key (char(10)) conteudo da chave *offset (int) = localizacao dentro do bucket do value (deslocamento em relacao ao inicio do arquivo) *length = int tamanho de value *value (variavel) = valor do par chave-valor * *slot (key+offset+length) */ int ss_create_bucket(char *srvName,char *dirName,char *idBucket,unsigned int maxKeys) { char *header; unsigned char bitMap[maxKeys+1]; unsigned int hsize,i; char caux[BYTES_LIMIT+1]; char lock_flag; size_t len_head; lock_flag = '0'; //gravacao do header //inicializacao do bitMap,preenche o vetor com zeros e o ultimo caracter nulo memset(bitMap,'\0',maxKeys+1); memset(bitMap,'0',maxKeys); //converte maxKeys para char[4]; ntochr(caux, maxKeys); //define o tamanho do header, para a criacao do bucket hsize = HEADER_SIZE(maxKeys); header = (unsigned char*) xmalloc(sizeof(unsigned char) * hsize); //inicializacao do header //escreve a qtd e chaves e o mapa de bits no header sprintf(header,"%c%s%s",lock_flag,caux,bitMap); //preenche o restante do header com espacos em branco len_head = strlen(header); memset(header+len_head,' ',hsize - len_head); //conecta no pool/servidor state = set_server(cluster,srvName,&ioctx); if(state >= 0){ //seta o diretorio state = set_directory(dirName,&ioctx); //grava o bucket no diretorio state = write_object_full(ioctx, idBucket, header, hsize); }else fprintf(stderr,"[create_bucket/iceph.c] Servidor não localizado!\n"); free(header); //encerra o contexto de io destroy_ioctx(&ioctx); return ((state >= 0)? 0 : 1); }
/*
 * Receive one data message from socket sd into a freshly allocated message
 * returned through *dataMsg.  Reads the fixed-size wire header first, then
 * the class data and message data in as few reads as possible.  When the
 * incoming message is the reply the caller is waiting for (msgRef ==
 * replyRef) and replyBuf is exactly the right size, the payload is read
 * directly into replyBuf instead of a new allocation.
 * Returns StatOK on success, or the failing read's status / StatError.
 */
X_IPC_RETURN_STATUS_TYPE x_ipc_dataMsgRecv(int sd, DATA_MSG_PTR *dataMsg, int32 replyRef, void *replyBuf, int32 replyLen) {
  X_IPC_RETURN_STATUS_TYPE status;
  DATA_MSG_TYPE header;

  *dataMsg = NULL;
  LOCK_IO_MUTEX;                  /* serialize socket I/O */
  /* The wire header starts at classTotal (matches the send side). */
  status = x_ipc_readNBytes(sd, (char *)&(header.classTotal), HEADER_SIZE());
  if (status != StatOK) {
    *dataMsg = NULL;
    UNLOCK_IO_MUTEX;
    return status;
  }
  /* Only the two size fields are needed in host order before allocating. */
  NET_INT_TO_INT(header.classTotal);
  NET_INT_TO_INT(header.msgTotal);
  /* Allocate message struct + trailing class-data area in one block.
   * NOTE(review): result is not NULL-checked — presumably the allocator
   * aborts on OOM; confirm. */
  *dataMsg = x_ipc_dataMsgAlloc(header.classTotal + sizeof(DATA_MSG_TYPE));
  **dataMsg = header;
  /* Convert the remaining header fields in the allocated copy. */
  NET_INT_TO_INT((*dataMsg)->parentRef);
  NET_INT_TO_INT((*dataMsg)->intent);
  NET_INT_TO_INT((*dataMsg)->classId);
  NET_INT_TO_INT((*dataMsg)->dispatchRef);
  NET_INT_TO_INT((*dataMsg)->msgRef);
  if( header.msgTotal > 0) {
    /* Message data present: start its reference count at one. */
    (*dataMsg)->dataRefCountPtr = (int32 *)x_ipcMalloc(sizeof(int32));
    *((*dataMsg)->dataRefCountPtr) = 1;
  } else {
    (*dataMsg)->dataRefCountPtr = NULL;
  }
  (*dataMsg)->refCount = 0;
  (*dataMsg)->dataStruct = NULL;
  /* Unpack the endian/alignment tags the sender folded into classId. */
  (*dataMsg)->dataByteOrder = GET_DATA_ENDIAN((*dataMsg)->classId);
  (*dataMsg)->classByteOrder = GET_CLASS_ENDIAN((*dataMsg)->classId);
  (*dataMsg)->alignment = (ALIGNMENT_TYPE)GET_ALIGNMENT((*dataMsg)->classId);
  (*dataMsg)->classId = GET_CLASSID((*dataMsg)->classId);
  /* Record the sender's byte order / alignment in the module globals. */
  LOCK_M_MUTEX;
  GET_M_GLOBAL(byteOrder) = (*dataMsg)->dataByteOrder;
  GET_M_GLOBAL(alignment) = (*dataMsg)->alignment;
  UNLOCK_M_MUTEX;
  /* Class data lives in the same block, right after the struct. */
  if (header.classTotal > 0)
    (*dataMsg)->classData = ((char *)*dataMsg + sizeof(DATA_MSG_TYPE));
  else
    (*dataMsg)->classData = NULL;
  /* For now, we only handle packed data. */
  if ((*dataMsg)->alignment != ALIGN_PACKED) {
    /* NOTE(review): this early return does not free *dataMsg — confirm the
     * caller releases it, otherwise this path leaks. */
    X_IPC_MOD_ERROR("ERROR: received message with data that is not packed.");
    UNLOCK_IO_MUTEX;
    return StatError;
  }
  /* Want to be able to use the already allocated buffer, if possible. */
  if (((*dataMsg)->msgRef == replyRef) && (replyBuf != NULL) &&
      (replyLen == header.msgTotal)) {
    (*dataMsg)->msgData = (char *)replyBuf;
    (*dataMsg)->dataStruct = (char *)replyBuf;
  } else if (header.msgTotal > 0)
    (*dataMsg)->msgData = (char *)x_ipcMalloc((unsigned) header.msgTotal);
  else
    (*dataMsg)->msgData = NULL;
  /* Read class and message data; combine into one scatter read when both
   * are present.  If neither is present, status keeps the header's StatOK. */
  if ((header.msgTotal > 0) && (header.classTotal >0)) {
    status = x_ipc_read2Buffers(sd,
                                (*dataMsg)->classData, header.classTotal,
                                (*dataMsg)->msgData, header.msgTotal);
  } else if (header.classTotal > 0) {
    status = x_ipc_readNBytes(sd, (*dataMsg)->classData, header.classTotal);
  } else if (header.msgTotal > 0) {
    status = x_ipc_readNBytes(sd, (*dataMsg)->msgData, header.msgTotal);
  }
  /* Need to create the vector here. */
  (*dataMsg)->vec = (struct iovec *)x_ipcMalloc(2 * sizeof(struct iovec));
  (*dataMsg)->vec[0].iov_base = (*dataMsg)->msgData;
  (*dataMsg)->vec[0].iov_len = (*dataMsg)->msgTotal;
  (*dataMsg)->vec[1].iov_base = NULL;
  (*dataMsg)->vec[1].iov_len = 0;
  UNLOCK_IO_MUTEX;
  return status;
}
/* Tail of the CodeSpaceArena struct: the allocation bitmap, declared [1]
 * but extending over the rest of the arena page (the struct's head is
 * outside this excerpt). */
uint64_t bitmap[1]; } CodeSpaceArena;

/* 2 * 64 means header and gatekeeper */
/* Number of slots of size ALOCSIZ (bytes) that fit in one arena, counting
 * one bitmap bit per slot; arithmetic is in bits. */
#define BITMAP_SIZE(ALOCSIZ) (((CODE_SPACE_SIZE) * 8 - 2 * 64) / ((ALOCSIZ) * 8 + 1))
/* Last "+ 1" means gatekeeper */
/* Arena header size in 64-bit words: bitmap words (rounded up) + gatekeeper. */
#define HEADER_SIZE(ALOCSIZ) (((BITMAP_SIZE(ALOCSIZ) + 63) / 64) + 1)

/* Number of supported power-of-two size classes: 16 .. 8192 bytes. */
#define ALOCSIZLOG_MAX 10

/* Per-size-class header sizes, precomputed at compile time.
 * NOTE(review): the name says "allocarea" but the entries are HEADER_SIZE
 * values — confirm whether the table is meant to hold header sizes or the
 * usable area. */
static int csarena_allocarea_tab[ALOCSIZLOG_MAX] = {
  HEADER_SIZE(16),
  HEADER_SIZE(32),
  HEADER_SIZE(64),
  HEADER_SIZE(128),
  HEADER_SIZE(256),
  HEADER_SIZE(512),
  HEADER_SIZE(1024),
  HEADER_SIZE(2048),
  HEADER_SIZE(4096),
  HEADER_SIZE(8192),
};

/* Arena list head and search cursor, one per size class. */
static CodeSpaceArena *arena_tab[ALOCSIZLOG_MAX];
static CodeSpaceArena *arena_search_tab[ALOCSIZLOG_MAX];

/* System page size, presumably set at init time — confirm where. */
static size_t page_size;
/*
 * Grow pool so that an allocation of nb bytes (already aligned to the pool's
 * mask by the caller) will fit.  This variant first tries to reclaim an
 * exactly-sized arena from the shared freelist before falling back to
 * malloc.
 * NOTE(review): this excerpt ends at the search loop's closing brace; the
 * code that actually hands out the allocation pointer lies past the end of
 * this view.
 */
JS_ArenaAllocate(JSArenaPool *pool, size_t nb) {
    JSArena **ap, **bp, *a, *b;
    jsuword extra, hdrsz, gross, sz;
    void *p;
    /*
     * Search pool from current forward till we find or make enough space.
     *
     * NB: subtract nb from a->limit in the loop condition, instead of adding
     * nb to a->avail, to avoid overflowing a 32-bit address space (possible
     * when running a 32-bit program on a 64-bit system where the kernel maps
     * the heap up against the top of the 32-bit address space).
     *
     * Thanks to Juergen Kreileder <*****@*****.**>, who brought this up in
     * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
     */
    JS_ASSERT((nb & pool->mask) == 0);      /* caller must pre-align nb */
    for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
         pool->current = a) {
        ap = &a->next;
        if (!*ap) {
            /* Not enough space in pool -- try to reclaim a free arena. */
            /* Oversized requests carry an extra header slot to remember ap. */
            extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
            hdrsz = sizeof *a + extra + pool->mask;
            gross = hdrsz + JS_MAX(nb, pool->arenasize);
            if (gross < nb)                 /* jsuword wrap-around check */
                return NULL;
            bp = &arena_freelist;
            JS_ACQUIRE_LOCK(arena_freelist_lock);
            while ((b = *bp) != NULL) {
                /*
                 * Insist on exact arenasize match to avoid leaving alloc'able
                 * space after an oversized allocation as it grows.
                 */
                sz = JS_UPTRDIFF(b->limit, b);
                if (sz == gross) {
                    /* Unlink b from the freelist and reuse it as-is. */
                    *bp = b->next;
                    JS_RELEASE_LOCK(arena_freelist_lock);
                    b->next = NULL;
                    COUNT(pool, nreclaims);
                    goto claim;
                }
                bp = &b->next;
            }
            /* Nothing big enough on the freelist, so we must malloc. */
            JS_RELEASE_LOCK(arena_freelist_lock);
            b = (JSArena *) malloc(gross);
            if (!b)
                return NULL;
            b->next = NULL;
            b->limit = (jsuword)b + gross;
            JS_COUNT_ARENA(pool,++);
            COUNT(pool, nmallocs);
        claim:
            /* If oversized, store ap in the header, just before a->base. */
            *ap = a = b;
            JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
            if (extra) {
                a->base = a->avail =
                    ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
                SET_HEADER(pool, a, ap);
            } else {
                a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
            }
            continue;
        }
        a = *ap;                            /* move to next arena */
    }