void sqlDeleterAddAcc(struct sqlDeleter* sd, char* acc)
/* Add an accession to the list to delete.  Must be called before any
 * deletes have been executed. */
{
if (sd->deletesDone)
    errAbort("sqlDeleter: can't add accessions after a delete has been done");
#ifdef COPY_TO_DELETE_HACK
/* always use direct if no tmp dir */
if ((sd->accCount < sd->directMax) || (sd->tmpDir[0] == '\0'))
    {
    /* Direct mode: keep the accession on an in-memory list.
     * lmAlloc is assumed to zero memory, supplying the string terminator
     * (size is sizeof(struct slName)+strlen, relying on the name[] tail). */
    struct slName* accRec = lmAlloc(sd->lm, sizeof(struct slName)+strlen(acc));
    strcpy(accRec->name, acc);
    slAddHead(&sd->accs, accRec);
    }
else
    {
    /* Bulk mode: past directMax, spill accessions through a table loader
     * (created lazily on first use). */
    if (sd->accLoader == NULL)
        initLoader(sd);
    sqlUpdaterAddRow(sd->accLoader, "%s", acc);
    }
#else
    {
    /* Hack disabled: always use the in-memory list. */
    struct slName* accRec = lmAlloc(sd->lm, sizeof(struct slName)+strlen(acc));
    strcpy(accRec->name, acc);
    slAddHead(&sd->accs, accRec);
    }
#endif
sd->accCount++;
}
struct hashEl *hashAddN(struct hash *hash, char *name, int nameSize, void *val)
/* Add name of given size to hash (no need to be zero terminated).
 * Returns the new element; duplicates are allowed -- the new element is
 * pushed onto the front of its bucket's chain. */
{
struct hashEl *el;
/* Element storage comes from the hash's local-memory pool when it has one,
 * otherwise from the general heap. */
if (hash->lm)
    el = lmAlloc(hash->lm, sizeof(*el));
else
    AllocVar(el);
el->hashVal = hashString(name);
int hashVal = el->hashVal & hash->mask;   /* bucket index (mask is size-1) */
if (hash->lm)
    {
    /* Copy nameSize bytes; the extra byte holds the NUL terminator
     * (assumes lmAlloc returns zeroed memory -- confirm). */
    el->name = lmAlloc(hash->lm, nameSize+1);
    memcpy(el->name, name, nameSize);
    }
else
    el->name = cloneStringZ(name, nameSize);
el->val = val;
/* Push onto the front of the bucket's chain. */
el->next = hash->table[hashVal];
hash->table[hashVal] = el;
hash->elCount += 1;
/* Grow when load factor exceeds expansionFactor. */
if (hash->autoExpand && hash->elCount > (int)(hash->size * hash->expansionFactor))
    {
    /* double the size */
    hashResize(hash, digitsBaseTwo(hash->size));
    }
return el;
}
static struct hapCluster *lmHapCluster(struct cwaExtraData *helper)
/* Allocate a fresh cluster (plus its per-base count arrays, helper->len
 * entries each) out of helper's local memory pool. */
{
struct lm *lm = helper->localMem;
size_t countBytes = helper->len * sizeof(unsigned short);
struct hapCluster *cluster = lmAlloc(lm, sizeof(struct hapCluster));
cluster->refCounts = lmAlloc(lm, countBytes);
cluster->unkCounts = lmAlloc(lm, countBytes);
return cluster;
}
void getTrfUnsplit(struct sqlConnection *conn, struct hash *chromHash)
/* Return a tree of ranges for simple repeats in all chromosomes,
 * from a single query on the whole (unsplit) simpleRepeat table. */
{
/* Node stack for the red-black tree comes from query-lifetime local mem. */
struct rbTreeNode **stack = lmAlloc(qLm, 256 * sizeof(stack[0]));
struct rbTree *tree = rbTreeNewDetailed(simpleRangeCmp, qLm, stack);
struct simpleRange *range, *prevRange = NULL;
struct sqlResult *sr;
char **row;
char *prevChrom = NULL;
/* Rows arrive ordered by (chrom, chromStart), so one look-behind range
 * (prevRange) suffices to merge overlaps, and prevChrom detects
 * chromosome boundaries. */
sr = sqlGetResult(conn, "NOSQLINJ select chrom,chromStart,chromEnd from simpleRepeat" " order by chrom,chromStart");
while ((row = sqlNextRow(sr)) != NULL)
    {
    if (prevChrom == NULL)
	prevChrom = cloneString(row[0]);
    else if (! sameString(prevChrom, row[0]))
	{
	/* New chromosome: commit the pending range, hand the finished tree
	 * to chromHash, then start a fresh tree. */
	rbTreeAdd(tree, prevRange);
	setTrf(prevChrom, chromHash, tree);
	prevRange = NULL;
	freeMem(prevChrom);
	stack = lmAlloc(qLm, 256 * sizeof(stack[0]));
	tree = rbTreeNewDetailed(simpleRangeCmp, qLm, stack);
	prevChrom = cloneString(row[0]);
	}
    lmAllocVar(tree->lm, range);
    range->start = sqlUnsigned(row[1]);
    range->end = sqlUnsigned(row[2]);
    if (prevRange == NULL)
	prevRange = range;
    else if (overlap(range, prevRange))
	{
	/* merge r into prevR & discard; prevR gets passed forward. */
	if (range->end > prevRange->end)
	    prevRange->end = range->end;
	if (range->start < prevRange->start)
	    prevRange->start = range->start;
	}
    else
	{
	/* Disjoint: commit the pending range and carry this one forward. */
	rbTreeAdd(tree, prevRange);
	prevRange = range;
	}
    }
if (prevChrom != NULL)
    {
    /* Flush the final chromosome's pending range and tree. */
    rbTreeAdd(tree, prevRange);
    setTrf(prevChrom, chromHash, tree);
    freeMem(prevChrom);
    }
sqlFreeResult(&sr);
}
// Parse command-line options into module state (portName, netName,
// domainSize, codomainSize, capacity, machineType, tolerance).
// NOTE(review): this excerpt ends inside the function body -- the closing
// brace of parseCmdLine (and possibly further option handling) lies beyond
// this chunk.
void parseCmdLine( int argc, char** argv )
{
	// help requested?
	if ( YARPParseParameters::parse(argc, argv, "-help") )
	{
		cout << "USAGE: " << argv[0] << endl;
		cout << " --dom <domain size> --cod <codomain size>" << endl;
		cout << " --ex <number of examples>" << endl;
		cout << " [--name <learner name>] [--net <network name>]" << endl;
		cout << " [--f] [--u <tolerance values>]" << endl;
		cout << " [--cl] [--filter] [--load]" << endl;
		exit(YARP_OK);
	}
	// basename of the ports (optional)
	YARPParseParameters::parse(argc, argv, "-name", portName);
	// network name (optional)
	YARPParseParameters::parse(argc, argv, "-net", netName);
	// domain size (mandatory)
	if( ! YARPParseParameters::parse(argc, argv, "-dom", &domainSize) )
	{
		cout << "FATAL ERROR: must specify domain size." << endl;
		exit(YARP_FAIL);
	}
	// codomain size (mandatory)
	if( ! YARPParseParameters::parse(argc, argv, "-cod", &codomainSize) )
	{
		cout << "FATAL ERROR: must specify codomain size." << endl;
		exit(YARP_FAIL);
	}
	// number of examples (mandatory)
	if( ! YARPParseParameters::parse(argc, argv, "-ex", &capacity) )
	{
		cout << "FATAL ERROR: must specify number of examples." << endl;
		exit(YARP_FAIL);
	}
	// uniform machine required?
	if ( YARPParseParameters::parse(argc, argv, "-u") )
	{
		if ( machineType != typeOfMachine::plain )
		{
			// can't have both types of machine...
			cout << "FATAL ERROR: must specify one type only of machine." << endl;
			exit(YARP_FAIL);
		}
		machineType = typeOfMachine::uniform;
		// Locate "--u" itself among the arguments so the tolerance values
		// that follow it can be read.
		// NOTE(review): if "--u" is not literally found (parse matched
		// "-u" above), index stays uninitialized -- confirm argv always
		// carries the double-dash form.
		unsigned int index;
		{ foreach(argc,i)
			{
				YARPString argument(argv[i]);
				if ( argument == "--u" )
				{
					index = i;
					break;
				}
			}
		}
		// must be followed by domainSize reals...
		if ( argc-index < domainSize )
		{
			cout << "FATAL ERROR: must specify " << domainSize << " tolerances after --u." << endl;
			exit(YARP_FAIL);
		}
		// NOTE(review): lmAlloc here appears to be an allocation macro of
		// the form (pointer, count); tolerance is not freed in this
		// excerpt -- confirm ownership elsewhere.
		lmAlloc(tolerance, domainSize);
		// Parse the domainSize tolerance values following "--u".
		{ foreach_s(index+1,index+1+domainSize,i)
			if ( sscanf(argv[i], "%lf", &tolerance[i-index-1]) != 1 )
			{
				cout << "FATAL ERROR: invalid tolerance value." << endl;
				exit(YARP_FAIL);
			}
		}
	}
INLINE struct hacTree preClusterNodes(const struct sortWrapper *leafWraps, int i, int runLength,
				      hacDistanceFunction *distF, hacMergeFunction *mergeF,
				      void *extraData, struct lm *localMem)
/* Caller has allocated a node, and this returns what to store there:
 * a recursively constructed cluster of nodes extracted from wrapped
 * leafNodes (leafWraps) starting at i, for runLength items. */
{
struct hacTree result = {NULL, NULL, NULL, NULL, 0, NULL};
if (runLength > 2)
    {
    /* Split the run in half and cluster each half recursively, then join
     * the two halves into one node. */
    int leftLen = runLength/2;
    struct hacTree *subClusters = lmAlloc(localMem, 2 * sizeof(struct hacTree));
    subClusters[0] = preClusterNodes(leafWraps, i, leftLen,
				     distF, mergeF, extraData, localMem);
    subClusters[1] = preClusterNodes(leafWraps, i+leftLen, runLength-leftLen,
				     distF, mergeF, extraData, localMem);
    initNode(&result, &(subClusters[0]), &(subClusters[1]), distF, mergeF, extraData);
    }
else if (runLength == 2)
    initNode(&result, leafWraps[i].node, leafWraps[i+1].node, distF, mergeF, extraData);
else
    result = *(leafWraps[i].node);   /* single leaf: just copy it */
return result;
}
static struct hacTree *pairUpItems(const struct slList *itemList, int itemCount,
				   int *retPairCount, struct lm *localMem,
				   hacDistanceFunction *distF, hacMergeFunction *mergeF,
				   hacCmpFunction *cmpF, void *extraData)
/* Allocate & initialize leaf nodes and all possible pairings of leaf nodes
 * which will be our seed clusters.  If cmpF is given, pre-sort the leaf nodes
 * and pre-cluster identical leaves before generating seed clusters.
 * Returns the pair pool; pair count goes in *retPairCount. */
{
struct hacTree *leafNodes = leafNodesFromItems(itemList, itemCount, localMem);
if (cmpF != NULL)
    /* May shrink itemCount by collapsing identical leaves. */
    leafNodes = sortAndPreCluster(leafNodes, &itemCount, localMem,
				  distF, mergeF, cmpF, extraData);
/* All unordered pairs: n*(n-1)/2; the degenerate single-item case still
 * needs one node. */
int pairCount = (itemCount == 1) ? 1 : (itemCount * (itemCount-1) / 2);
struct hacTree *pairPool = lmAlloc(localMem, pairCount * sizeof(struct hacTree));
if (itemCount == 1)
    initNode(pairPool, leafNodes, NULL, distF, mergeF, extraData);
else
    {
    /* Triangular double loop enumerates each pair (i,j), i<j, exactly once. */
    int i, j, pairIx;
    for (i=0, pairIx=0; i < itemCount-1; i++)
	for (j=i+1; j < itemCount; j++, pairIx++)
	    initNode(&(pairPool[pairIx]), &(leafNodes[i]), &(leafNodes[j]),
		     distF, mergeF, extraData);
    }
*retPairCount = pairCount;
return pairPool;
}
static char * mergeAllele(char *transcript, int offset, int variantWidth,
			  char *newAlleleSeq, int alleleLength, struct lm *lm)
/* merge a variant into an allele */
{
//#*** This will be incorrect for an MNV that spans exon boundary --
//#*** so we should also clip allele to cds portion(s?!) before calling this.
if (variantWidth == alleleLength)
    {
    /* Same-size substitution: clone the transcript and overwrite in place. */
    char *merged = lmCloneString(lm, transcript);
    memcpy(&merged[offset], newAlleleSeq, alleleLength);
    return merged;
    }
/* Indel: the result grows (or shrinks) by alleleLength - variantWidth. */
int sizeDiff = alleleLength - variantWidth;
int mergedLen = strlen(transcript) + sizeDiff;
char *merged = lmAlloc(lm, mergedLen + 1);
char *tail = &transcript[offset + variantWidth];
/* part before the variant */
memcpy(merged, transcript, offset);
/* the new allele */
memcpy(&merged[offset], newAlleleSeq, alleleLength);
/* part after the variant, including its terminating NUL */
memcpy(&merged[offset + alleleLength], tail, strlen(tail) + 1);
return merged;
}
void BinReader::readStringPool()
{
    // read to one allocated memory area for speed

    // number of strings in the string pool
    int stringPoolSize = sBytes->readInt();
    stringPool.resize(stringPoolSize);

    // the complete size of the string buffer (all strings plus terminators)
    int stringBufferSize = sBytes->readInt();
    // NOTE(review): no bounds checking below -- assumes the serializer
    // guarantees stringBufferSize >= sum of (length+1) over all strings;
    // a corrupt stream would overrun this buffer.  Confirm.
    stringBuffer = (const char*)lmAlloc(NULL, stringBufferSize);

    char *p = (char *)stringBuffer;
    for (UTsize i = 0; i < (UTsize)stringPoolSize; i++)
    {
        // each string is stored as an int length followed by its raw bytes
        int length = sBytes->readInt();
        const char *pstring = p;
        while (length--)
        {
            *p = sBytes->readByte();
            p++;
        }
        *p = 0;   // NUL-terminate in place
        p++;
        // stringPool entries point into stringBuffer, which must outlive them
        stringPool[i] = pstring;
    }
}
static struct hacTree *sortAndPreCluster(struct hacTree *leafNodes, int *retItemCount,
					 struct lm *localMem, hacDistanceFunction *distF,
					 hacMergeFunction *mergeF, hacCmpFunction *cmpF,
					 void *extraData)
/* Use cmpF and extraData to sort wrapped leaf nodes so that identical leaves will be adjacent,
 * then replace leaves with clusters of identical leaves where possible.  Place new
 * (hopefully smaller) item count in retItemCount. */
{
int itemCount = *retItemCount;
struct sortWrapper *leafWraps = makeSortedWraps(leafNodes, itemCount, localMem, cmpF, extraData);
/* Worst case (no duplicates) needs one output leaf per input leaf. */
struct hacTree *newLeaves = lmAlloc(localMem, itemCount * sizeof(struct hacTree));
int i=0, newI=0;
while (i < itemCount)
    {
    /* Scan forward for the end of the run of leaves identical
     * (distance 0) to leaf i. */
    int nextRunStart;
    for (nextRunStart = i+1; nextRunStart < itemCount; nextRunStart++)
	if (distF(leafWraps[i].node->itemOrCluster, leafWraps[nextRunStart].node->itemOrCluster,
		  extraData) != 0)
	    break;
    int runLength = nextRunStart - i;
    /* Collapse the whole run into a single (possibly recursive) cluster. */
    newLeaves[newI] = preClusterNodes(leafWraps, i, runLength, distF, mergeF, extraData, localMem);
    i = nextRunStart;
    newI++;
    }
*retItemCount = newI;
return newLeaves;
}
struct dnaSeq *genePredToGenomicSequence(struct genePred *pred, char *chromSeq, struct lm *lm)
/* Return concatenated genomic sequence of exons of pred. */
{
int exonIx;
/* First pass: total exonic length. */
int totalLen = 0;
for (exonIx = 0; exonIx < pred->exonCount; exonIx++)
    totalLen += pred->exonEnds[exonIx] - pred->exonStarts[exonIx];
char *dna = lmAlloc(lm, totalLen + 1);
/* Second pass: copy each exon's bases into place. */
char *dest = dna;
for (exonIx = 0; exonIx < pred->exonCount; exonIx++)
    {
    int exonStart = pred->exonStarts[exonIx];
    int exonSize = pred->exonEnds[exonIx] - exonStart;
    memcpy(dest, chromSeq + exonStart, exonSize * sizeof(*dna));
    dest += exonSize;
    }
/* Minus strand: transcript is the reverse complement of genomic order. */
if (pred->strand[0] == '-')
    reverseComplement(dna, totalLen);
struct dnaSeq *seq = NULL;
lmAllocVar(lm, seq);
seq->name = lmCloneString(lm, pred->name);
seq->dna = dna;
seq->size = totalLen;
return seq;
}
void *lmCloneMem(struct lm *lm, void *pt, size_t size)
/* Return a local mem copy of memory block. */
{
void *copy = lmAlloc(lm, size);
memcpy(copy, pt, size);
return copy;
}
Bits *lmBitAlloc(struct lm *lm,int bitCount)
// Allocate bits. Must supply local memory.
{
assert(lm != NULL);
// Round bit count up to whole bytes.
return lmAlloc(lm, (bitCount + 7) / 8);
}
// Deserialize an image asset: decode the raw file bytes in buffer into an
// RGBA pixel buffer.  Returns the new loom_asset_image_t, or 0 on decode
// failure.  On success *dtor is set to the image cleanup callback.
void *loom_asset_imageDeserializer( void *buffer, size_t bufferLen, LoomAssetCleanupCallback *dtor )
{
    loom_asset_image_t *img;
    lmAssert(buffer != NULL, "buffer should not be null");
    img = (loom_asset_image_t*)lmAlloc(gAssetAllocator, sizeof(loom_asset_image_t));
    // parse any orientation info from exif format
    img->orientation = exifinfo_parse_orientation(buffer, (unsigned int)bufferLen);
    // Decode pixels; the final 4 forces RGBA output regardless of the
    // source image's channel count.
    img->bits = stbi_load_from_memory((const stbi_uc *)buffer, (int)bufferLen,
                                      &img->width, &img->height, &img->bpp, 4);
    // NOTE(review): *dtor is assigned even on the failure path below --
    // confirm callers ignore it when NULL is returned.
    *dtor = loom_asset_imageDtor;
    if(!img->bits)
    {
        lmLogError(gImageAssetGroup, "Image load failed due to this cryptic reason: %s", stbi_failure_reason());
        lmFree(gAssetAllocator, img);
        return 0;
    }
    lmLogDebug(gImageAssetGroup, "Allocated %d bytes for an image!", img->width * img->height * 4);
    return img;
}
char *lCloneString(char *s)
/* Clone string into local memory (uses the module-level lm pool). */
{
int size = strlen(s) + 1;           /* include terminating NUL */
char *copy = lmAlloc(lm, size);
memcpy(copy, s, size);
return copy;
}
/*
 * Like new, we want to guarantee that we NEVER
 * return NULL.  Retry the allocation until it succeeds.
 */
static char* malloc_never_null(const size_t b)
{
    char *p;
    while ((p = static_cast<char*>(lmAlloc(NULL, b))) == NULL)
    {
        // allocator failed -- spin and try again
    }
    return p;
}
// Create a mutex backed by a Win32 CRITICAL_SECTION.  file/line identify the
// call site (unused here but part of the tracing API shape).
MutexHandle loom_mutex_create_real(const char *file, int line)
{
    assert(sizeof(MutexHandle) >= sizeof(CRITICAL_SECTION *));
    CRITICAL_SECTION *section =
        (CRITICAL_SECTION *)lmAlloc(NULL, sizeof(CRITICAL_SECTION));
    InitializeCriticalSectionAndSpinCount(section, 800); // TODO: Tune wait time.
    return section;
}
struct range *rangeTreeAddValCount(struct rbTree *tree, int start, int end)
/* Add range to tree, merging with existing ranges if need be.
 * Set range val to count of elements in the range.  Counts are pointers to
 * ints allocated in tree localmem. */
{
int *count = lmAlloc(tree->lm, sizeof(*count)); /* keep the count in localmem */
*count = 1;
return rangeTreeAddVal(tree, start, end, (void *)count, sumInt);
}
struct slName *lmSlName(struct lm *lm, char *name)
/* Return slName in local memory. */
{
int nameLen = strlen(name);
/* slName carries its string in the trailing name[] field. */
struct slName *sln = lmAlloc(lm, sizeof(*sln) + nameLen + 1);
memcpy(sln->name, name, nameLen + 1);
return sln;
}
Bits *lmBitClone(struct lm *lm,Bits* orig, int bitCount)
// Clone bits. Must supply local memory.
{
assert(lm != NULL);
int nBytes = (bitCount + 7) / 8;   /* round up to whole bytes */
Bits* copy = lmAlloc(lm, nBytes);
memcpy(copy, orig, nBytes);
return copy;
}
struct tagStorm *tagStormNew(char *name)
/* Create a new, empty, tagStorm.  The storm itself lives in (and owns) its
 * own local memory pool. */
{
struct lm *lm = lmInit(0);
struct tagStorm *storm = lmAlloc(lm, sizeof(*storm));
storm->fileName = lmCloneString(lm, name);
storm->lm = lm;
return storm;
}
void utString::fromBytes(const void *bytes, int len) { // Free old value if any. lmSafeFree(NULL, p); // Copy the bytes into p. p = (char*)lmAlloc(NULL, len+1); memcpy(p, bytes, len); p[len] = 0; // Make sure we are NULL terminated. }
// Construct a call note bound to the given delegate.  Caches the delegate's
// _key alongside the pointer (presumably so a stale delegate can be detected
// later -- confirm against dispatch code) and preallocates the argument
// serialization buffer.
NativeDelegateCallNote(const NativeDelegate *target)
{
    // Note our target delegate.
    delegate = target;
    delegateKey = target->_key;

    // Start with enough buffer space we won't need to realloc in most cases.
    ndata = 512;
    data = (unsigned char*)lmAlloc(NULL, ndata);
    offset = 0;   // write cursor starts at the beginning of the buffer
}
void getSeqGapsUnsplit(struct sqlConnection *conn, struct hash *chromHash)
/* Return a tree of ranges for sequence gaps in all chromosomes,
 * assuming an unsplit gap table -- when the table is unsplit, it's
 * probably for a scaffold assembly where we *really* don't want
 * to do one query per scaffold! */
{
/* Node stack for the red-black tree comes from query-lifetime local mem. */
struct rbTreeNode **stack = lmAlloc(qLm, 256 * sizeof(stack[0]));
struct rbTree *tree = rbTreeNewDetailed(simpleRangeCmp, qLm, stack);
/* Skip the leading bin column if the gap table has one. */
int rowOffset = hOffsetPastBin(sqlGetDatabase(conn), NULL, "gap");
struct sqlResult *sr;
char **row;
char *prevChrom = NULL;
/* Rows ordered by chrom, so each chromosome's gaps arrive contiguously. */
sr = sqlGetResult(conn, "NOSQLINJ select * from gap order by chrom");
while ((row = sqlNextRow(sr)) != NULL)
    {
    struct agpGap gap;
    struct simpleRange *range;
    agpGapStaticLoad(row+rowOffset, &gap);
    if (prevChrom == NULL)
	prevChrom = cloneString(gap.chrom);
    else if (! sameString(prevChrom, gap.chrom))
	{
	/* Chromosome changed: hand the finished tree to chromHash and
	 * start a fresh one. */
	setNGap(prevChrom, chromHash, tree);
	freeMem(prevChrom);
	stack = lmAlloc(qLm, 256 * sizeof(stack[0]));
	tree = rbTreeNewDetailed(simpleRangeCmp, qLm, stack);
	prevChrom = cloneString(gap.chrom);
	}
    lmAllocVar(tree->lm, range);
    range->start = gap.chromStart;
    range->end = gap.chromEnd;
    rbTreeAdd(tree, range);
    }
if (prevChrom != NULL)
    {
    /* Flush the last chromosome's tree. */
    setNGap(prevChrom, chromHash, tree);
    freeMem(prevChrom);
    }
sqlFreeResult(&sr);
}
// "Text" file types are just loaded directly as binary safe strings. void *loom_asset_textDeserializer(void *ptr, size_t size, LoomAssetCleanupCallback *dtor) { // Blast the bits into the asset. void *data = lmAlloc(gAssetAllocator, size + 1); memcpy(data, ptr, size); // Null terminate so we don't overrun strings. *(((unsigned char *)data) + size) = 0; return data; }
struct rbmTree *rbmTreeNew(rbmTreeCompareFunction *compare, rbmTreeItemMergeFunction *itemMerge,
			   rbmTreeItemSubtractFunction *itemSubtract, rbmTreeItemFreeFunction *itemFree)
/* Allocates space for a red-black merging tree
 * and returns a pointer to it. */
{
struct lm *lm = lmInit(0);
/* Traversal stack for the tree, allocated from its own pool. */
struct rbTreeNode **nodeStack = lmAlloc(lm, 256 * sizeof(nodeStack[0]));
return rbmTreeNewDetailed(compare, itemMerge, itemSubtract, itemFree, lm, nodeStack);
}
char *lmCloneStringZ(struct lm *lm, char *string, int size)
/* Return local mem copy of first size bytes of string (NULL in, NULL out). */
{
if (string == NULL)
    return NULL;
/* Extra byte (left zeroed by the allocator) terminates the copy. */
char *copy = lmAlloc(lm, size+1);
memcpy(copy, string, size);
return copy;
}
void QuadRenderer::initializeGraphicsResources()
{
    LOOM_PROFILE_SCOPE(quadInit);

    lmLogInfo(gGFXQuadRendererLogGroup, "Initializing Graphics Resources");

    GL_Context* ctx = Graphics::context();

    // create the single initial vertex buffer, sized for a full batch
    ctx->glGenBuffers(1, &vertexBufferId);
    ctx->glBindBuffer(GL_ARRAY_BUFFER, vertexBufferId);
    ctx->glBufferData(GL_ARRAY_BUFFER, MAXBATCHQUADS * 4 * sizeof(VertexPosColorTex), 0, GL_STREAM_DRAW);
    ctx->glBindBuffer(GL_ARRAY_BUFFER, 0);

    // create the single, reused index buffer
    ctx->glGenBuffers(1, &indexBufferId);
    ctx->glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferId);

    // Build the index pattern on the CPU: each quad (4 vertices) becomes two
    // triangles, (j, j+2, j+1) and (j+1, j+2, j+3).
    uint16_t *pIndex = (uint16_t*)lmAlloc(gQuadMemoryAllocator, sizeof(unsigned short) * 6 * MAXBATCHQUADS);
    uint16_t *pStart = pIndex;
    int j = 0;
    for (int i = 0; i < 6 * MAXBATCHQUADS; i += 6, j += 4, pIndex += 6)
    {
        pIndex[0] = j;
        pIndex[1] = j + 2;
        pIndex[2] = j + 1;
        pIndex[3] = j + 1;
        pIndex[4] = j + 2;
        pIndex[5] = j + 3;
    }
    // Upload the indices, then release the CPU-side staging copy.
    // NOTE(review): the index data is written once and never respecified,
    // yet the usage hint is GL_STREAM_DRAW -- GL_STATIC_DRAW may be the
    // intended hint; confirm before changing.
    ctx->glBufferData(GL_ELEMENT_ARRAY_BUFFER, MAXBATCHQUADS * 6 * sizeof(uint16_t), pStart, GL_STREAM_DRAW);
    ctx->glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    lmFree(gQuadMemoryAllocator, pStart);

    // Create the system memory buffer for quads.
    batchedVertices = static_cast<VertexPosColorTex*>(lmAlloc(gQuadMemoryAllocator, MAXBATCHQUADS * 4 * sizeof(VertexPosColorTex)));
}
// Allocate a new string-table entry holding a private copy of str.
static stringTableEntry_t *allocEntry(const char *str)
{
    // Duplicate the key first; MSVC deprecates POSIX strdup as _strdup.
#if LOOM_COMPILER != LOOM_COMPILER_MSVC
    const char *copy = (const char *)strdup(str);
#else
    const char *copy = (const char *)_strdup(str);
#endif

    stringTableEntry_t *node = (stringTableEntry_t *)lmAlloc(NULL, sizeof(stringTableEntry_t));
    node->string = copy;
    node->next = NULL;
    return node;
}
char *lmCloneString(struct lm *lm, char *string)
/* Return local mem copy of string (NULL in, NULL out). */
{
if (string == NULL)
    return NULL;
int bytes = strlen(string) + 1;     /* include terminating NUL */
char *copy = lmAlloc(lm, bytes);
memcpy(copy, string, bytes);
return copy;
}