const uint8_t *system_get_jump_tag(uint16_t tag, int *version, int *size) { const struct jump_tag *t; int used = 0; if (!jdata) return NULL; /* Search through tag data for a match */ while (used < jdata->jump_tag_total) { /* Check the next tag */ t = (const struct jump_tag *)(system_usable_ram_end() + used); used += sizeof(struct jump_tag) + ROUNDUP4(t->data_size); if (t->tag != tag) continue; /* Found a match */ if (size) *size = t->data_size; if (version) *version = t->data_version; return (const uint8_t *)(t + 1); } /* If we're still here, no match */ return NULL; }
bool Persistent::put(uint8_t tag, uint8_t* data, uint8_t length) { // find address if (pageSpaceLeft(_pageActive) < (ROUNDUP4(length) + 4)) { pageCopy(pageNext(_pageActive), _pageActive); _pageActive = pageNext(_pageActive); } return pagePutTag(_pageActive, tag, data, length); }
/**
 * Append one tagged record at the page's free area.
 *
 * Layout: a 4-byte Header (free marker 0xFF, padded size, raw length,
 * tag) followed by the payload padded up to a 4-byte flash word.
 *
 * Fixes over the previous version:
 *  - FlashProgram failures inside the payload loop are no longer
 *    ignored (only the header write was checked before).
 *  - The final partial word is padded with 0xFF (the erased-flash
 *    value) instead of programming indeterminate union bytes.
 *
 * @param page    page whose free area receives the record
 * @param tag     record identifier
 * @param data    payload bytes (length bytes are read)
 * @param length  payload size in bytes
 * @return true if the header and every payload word programmed OK
 */
bool Persistent::pagePutTag(uint8_t* page, uint8_t tag, uint8_t* data, uint8_t length) {
    uint8_t* address = pageFreeBegin(page);
    Header hdr = { 0xFF, ROUNDUP4(length), length, tag };

    if (FlashProgram((uint32_t*) &hdr, (uint32_t) address, 4))
        return false;

    union { uint32_t i32; uint8_t b[4]; } v;

    for (int i = 0; i < ROUNDUP4(length); i += 4) {
        // Start from the erased-flash pattern so padding bytes of a
        // partial final word are 0xFF rather than stack garbage.
        v.i32 = 0xFFFFFFFF;

        // Copy up to 4 payload bytes into this flash word.
        int chunk = (length - i > 4) ? 4 : length - i;
        memcpy(v.b, data + i, chunk);

        // Propagate programming failures instead of silently ignoring them.
        if (FlashProgram(&v.i32, (uint32_t) (address + 4 + i), 4))
            return false;
    }
    return true;
}
/*
 * Initialize the balloc memory subsystem.
 *
 * buf     - caller-supplied arena, or NULL to have bopen allocate one.
 * bufsize - arena size in bytes; 0 selects B_DEFAULT_MEM (only used
 *           when buf is NULL).
 * flags   - B_* option bits stored into bFlags for later allocations.
 *
 * Returns 0 on success, -1 if the arena could not be malloc'ed.
 * Reference-counted: calls after the first only bump bopenCount.
 */
int bopen(void *buf, int bufsize, int flags)
{
    bFlags = flags;

#ifdef BASTARD_TESTING
    /* Seed rand() so balloc's random failure injection varies per run */
    srand(time(0L));
#endif /* BASTARD_TESTING */

    /*
     * If bopen already called by a shared process, just increment the count
     * and return;
     */
    if (++bopenCount > 1) {
        return 0;
    }

    if (buf == NULL) {
        if (bufsize == 0) {
            bufsize = B_DEFAULT_MEM;
        }
#ifdef IRIX
        /* IRIX malloc prefers 4-byte-rounded request sizes */
        bufsize = ROUNDUP4(bufsize);
#endif
        if ((buf = malloc(bufsize)) == NULL) {
            /* resetting bopenCount lets client code decide to attempt to call
             * bopen() again with a smaller memory request, should it desire to.
             * Fix suggested by Simon Byholm.
             */
            --bopenCount;
            return -1;
        }
#ifdef B_STATS
        bStatsMemMalloc += bufsize;
#endif
    } else {
        /* Caller owns the buffer; remember not to free it ourselves */
        bFlags |= B_USER_BUF;
    }

    /* The whole arena starts out as one contiguous primary free region */
    bFreeSize = bFreeLeft = bufsize;
    bFreeBuf = bFreeNext = buf;
    memset(bQhead, 0, sizeof(bQhead));

#if (defined (B_FILL) || defined (B_VERIFY_CAUSES_SEVERE_OVERHEAD))
    bFillBlock(buf, bufsize);
#endif
#ifdef B_STATS
    bStackStart = &buf;
#endif
#ifdef B_VERIFY_CAUSES_SEVERE_OVERHEAD
    verifyFreeBlock(buf, bufsize);
#endif
    return 0;
}
int bopen(void *buf, int bufsize, int flags) { bFlags = flags; #ifdef BASTARD_TESTING srand(time(0L)); #endif /* BASTARD_TESTING */ /* * If bopen already called by a shared process, just increment the count * and return; */ if (++bopenCount > 1) { return 0; } if (buf == NULL) { if (bufsize == 0) { bufsize = B_DEFAULT_MEM; } #ifdef IRIX bufsize = ROUNDUP4(bufsize); #endif #if FIXMEM buf = (void *)MEM_START; #else if ((buf = malloc(bufsize)) == NULL) { return -1; } #endif #ifdef B_STATS bStatsMemMalloc += bufsize; #endif } else { bFlags |= B_USER_BUF; } bFreeSize = bFreeLeft = bufsize; bFreeBuf = bFreeNext = buf; memset(bQhead, 0, sizeof(bQhead)); #if (defined (B_FILL) || defined (B_VERIFY_CAUSES_SEVERE_OVERHEAD)) bFillBlock(buf, bufsize); #endif #ifdef B_STATS bStackStart = &buf; #endif #ifdef B_VERIFY_CAUSES_SEVERE_OVERHEAD verifyFreeBlock(buf, bufsize); #endif return 0; }
/* Console command: dump every jump tag recorded during the last sysjump. */
static int command_jumptags(int argc, char **argv)
{
	int offset;

	/* Jump tags valid only after a sysjump */
	if (!jdata)
		return EC_SUCCESS;

	for (offset = 0; offset < jdata->jump_tag_total; ) {
		const struct jump_tag *entry =
			(const struct jump_tag *)
			(system_usable_ram_end() + offset);

		/* Advance past this record: header plus padded payload */
		offset += sizeof(struct jump_tag) + ROUNDUP4(entry->data_size);

		/* Address, raw tag, tag as two chars, version, size */
		ccprintf("%08x: 0x%04x %c%c.%d %3d\n",
			 (uintptr_t)entry, entry->tag,
			 entry->tag >> 8, (uint8_t)entry->tag,
			 entry->data_version, entry->data_size);
	}

	return EC_SUCCESS;
}
/*
 * Preserve a tagged blob of data across a system jump.
 *
 * tag     - 16-bit tag identifying the data.
 * version - caller-defined data format version.
 * size    - payload size in bytes; must be <= 255.
 * data    - payload to copy (ignored when size is 0).
 *
 * Returns EC_SUCCESS, EC_ERROR_UNKNOWN if not inside a sysjump,
 * or EC_ERROR_INVAL if the payload is too large.
 */
int system_add_jump_tag(uint16_t tag, int version, int size, const void *data)
{
	struct jump_tag *t;

	/* Only allowed during a sysjump */
	if (!jdata || jdata->magic != JUMP_DATA_MAGIC)
		return EC_ERROR_UNKNOWN;

	/* Make room for the new tag */
	if (size > 255)
		return EC_ERROR_INVAL;
	/*
	 * NOTE(review): the total is bumped BEFORE computing
	 * system_usable_ram_end(); presumably that function subtracts
	 * jump_tag_total so the result points at the newly reserved
	 * slot — confirm against its definition before reordering.
	 */
	jdata->jump_tag_total += ROUNDUP4(size) + sizeof(struct jump_tag);

	/* Fill in the header, then the payload immediately after it */
	t = (struct jump_tag *)system_usable_ram_end();
	t->tag = tag;
	t->data_size = size;
	t->data_version = version;
	if (size)
		memcpy(t + 1, data, size);

	return EC_SUCCESS;
}
/*
 * Allocate a block of at least 'size' bytes from the balloc arena.
 *
 * The request is rounded up to a size class 'q'; freed blocks are
 * recycled from the per-class queue bQhead[q], otherwise carved from
 * the primary free region, otherwise (with B_USE_MALLOC) taken from
 * the system heap.  Returns NULL on failure or negative size.
 * The returned pointer points just past the bType bookkeeping header.
 */
void *balloc(B_ARGS_DEC, int size)
{
    bType *bp;
    int q, memSize;

    /*
     * Call bopen with default values if the application has not yet done so
     */
    if (bFreeBuf == NULL) {
        if (bopen(NULL, B_DEFAULT_MEM, 0) < 0) {
            return NULL;
        }
    }
#ifdef B_VERIFY_CAUSES_SEVERE_OVERHEAD
    verifyBallocSpace();
#endif
    if (size < 0) {
        return NULL;
    }

#ifdef BASTARD_TESTING
    /* Randomly inject allocation failures to exercise error paths */
    if (rand() == 0x7fff) {
        return NULL;
    }
#endif /* BASTARD_TESTING */

    /* Map the request to a size class q and the real block size */
    memSize = ballocGetSize(size, &q);

    if (q >= B_MAX_CLASS) {
        /*
         * Size if bigger than the maximum class. Malloc if use has been okayed
         */
        if (bFlags & B_USE_MALLOC) {
#ifdef B_STATS
            bstats(0, NULL);
#endif
#ifdef IRIX
            memSize = ROUNDUP4(memSize);
#endif
            bp = (bType*) malloc(memSize);
            if (bp == NULL) {
                traceRaw(T("B: malloc failed\n"));
                return NULL;
            }
#ifdef B_STATS
            bStatsMemMalloc += memSize;
#endif
#if (defined (B_FILL) || defined (B_VERIFY_CAUSES_SEVERE_OVERHEAD))
            bFillBlock(bp, memSize);
#endif
        } else {
            traceRaw(T("B: malloc failed\n"));
            return NULL;
        }
        /*
         * the u.size is the actual size allocated for data
         */
        bp->u.size = memSize - sizeof(bType);
        bp->flags = B_MALLOCED;

    } else if ((bp = bQhead[q]) != NULL) {
        /*
         * Take first block off the relevant q if non-empty
         */
        bQhead[q] = bp->u.next;
#ifdef B_VERIFY_CAUSES_SEVERE_OVERHEAD
        verifyFreeBlock(bp, q);
#endif
#if (defined (B_FILL) || defined (B_VERIFY_CAUSES_SEVERE_OVERHEAD))
        bFillBlock(bp, memSize);
#endif
        bp->u.size = memSize - sizeof(bType);
        bp->flags = 0;

    } else {
        if (bFreeLeft > memSize) {
            /*
             * The q was empty, and the free list has spare memory so
             * create a new block out of the primary free block
             */
            bp = (bType*) bFreeNext;
#ifdef B_VERIFY_CAUSES_SEVERE_OVERHEAD
            verifyFreeBlock(bp, q);
#endif
            bFreeNext += memSize;
            bFreeLeft -= memSize;
#if (defined (B_FILL) || defined (B_VERIFY_CAUSES_SEVERE_OVERHEAD))
            bFillBlock(bp, memSize);
#endif
            bp->u.size = memSize - sizeof(bType);
            bp->flags = 0;

        } else if (bFlags & B_USE_MALLOC) {
#ifdef B_STATS
            static int once = 0;
            if (once++ == 0) {
                bstats(0, NULL);
            }
#endif
            /*
             * Nothing left on the primary free list, so malloc a new block
             */
#ifdef IRIX
            memSize = ROUNDUP4(memSize);
#endif
            if ((bp = (bType*) malloc(memSize)) == NULL) {
                traceRaw(T("B: malloc failed\n"));
                return NULL;
            }
#ifdef B_STATS
            bStatsMemMalloc += memSize;
#endif
#if (defined (B_FILL) || defined (B_VERIFY_CAUSES_SEVERE_OVERHEAD))
            bFillBlock(bp, memSize);
#endif
            bp->u.size = memSize - sizeof(bType);
            bp->flags = B_MALLOCED;

        } else {
            traceRaw(T("B: malloc failed\n"));
            return NULL;
        }
    }

#ifdef B_STATS
    bStatsAlloc(B_ARGS, bp, q, memSize);
#endif
    /* Tag the header so bfree can sanity-check it later */
    bp->flags |= B_INTEGRITY;

    /*
     * The following is a good place to put a breakpoint when trying to reduce
     * determine and reduce maximum memory use.
     */
#if 0
#ifdef B_STATS
    if (bStatsBallocInUse == bStatsBallocMax) {
        bstats(0, NULL);
    }
#endif
#endif

    /* User data begins just past the bType header */
    return (void*) ((char*) bp + sizeof(bType));
}
/**
 * Free previously allocated memory.
 *
 * The user pointer sits CMS_MEM_HEADER_LENGTH bytes past the real
 * allocation; the header words hold the alloc flags, the user size,
 * and an XOR'ed copy of the size used to detect underflow.  In debug
 * builds the footer pattern is also verified before releasing.
 *
 * @param buf Previously allocated buffer (NULL is a no-op).
 */
void cmsMem_free(void *buf)
{
    UINT32 size;

    if (buf != NULL)
    {
        /*
         * NOTE(review): casting the pointer through UINT32 assumes a
         * 32-bit address space — confirm before building for 64-bit.
         */
        UINT32 *intBuf = (UINT32 *) (((UINT32) buf) - CMS_MEM_HEADER_LENGTH);

#ifdef CMS_MEM_LEAK_TRACING
        {
            AllocRecord *allocRec;
            /* Find this buffer's tracking record, if any */
            dlist_for_each_entry(allocRec, &glbAllocRec, dlist)
                if (allocRec->bufAddr == buf)
                    break;
            if ((DlistNode *) allocRec != &glbAllocRec)
            {
                dlist_unlink((struct dlist_node *) allocRec);
                free(allocRec);
            }
            else
            {
                /*
                 * Buffers allocated from shared mem could have been freed by
                 * another app, so if we have an alloc record but cannot find
                 * it in shared mem, ignore it.  But if the alloc record is in
                 * private heap, that is an error.
                 */
                if (!IS_IN_SHARED_MEM(buf))
                {
                    cmsLog_error("possible double free, could not find allocRec for buf %p", buf);
                }
            }
        }
#endif

        /* Header word 1 is the user size; word 2 is its XOR'ed copy */
        size = intBuf[1];
        if (intBuf[1] != (intBuf[2] ^ 0xffffffff))
        {
            cmsLog_error("memory underflow detected, %d %d", intBuf[1], intBuf[2]);
            cmsAst_assert(0);
            return;
        }

#ifdef CMS_MEM_DEBUG
        {
            UINT32 allocSize, intSize, roundup4Size, i;
            UINT8 *charBuf = (UINT8 *) buf;

            allocSize = REAL_ALLOC_SIZE(intBuf[1]);
            intSize = allocSize / sizeof(UINT32);
            roundup4Size = ROUNDUP4(intBuf[1]);

            /* Check the per-byte footer pad between size and its round-up */
            for (i=intBuf[1]; i < roundup4Size; i++)
            {
                if (charBuf[i] != (UINT8) (CMS_MEM_FOOTER_PATTERN & 0xff))
                {
                    cmsLog_error("memory overflow detected at idx=%d 0x%x 0x%x 0x%x", i, charBuf[i], intBuf[intSize-1], intBuf[intSize-2]);
                    cmsAst_assert(0);
                    return;
                }
            }

            /* Check the two full footer words at the end of the block */
            if ((intBuf[intSize - 1] != CMS_MEM_FOOTER_PATTERN) || (intBuf[intSize - 2] != CMS_MEM_FOOTER_PATTERN))
            {
                cmsLog_error("memory overflow detected, 0x%x 0x%x", intBuf[intSize - 1], intBuf[intSize - 2]);
                cmsAst_assert(0);
                return;
            }

#ifdef CMS_MEM_POISON_ALLOC_FREE
            /*
             * write garbage into buffer which is about to be freed to detect
             * users of freed buffers.
             */
            memset(intBuf, CMS_MEM_FREE_PATTERN, allocSize);
#endif
        }
#endif /* CMS_MEM_DEBUG */

        buf = intBuf; /* buf points to real start of buffer */

#ifdef MDM_SHARED_MEM
        if (IS_IN_SHARED_MEM(buf))
        {
            brel(buf);
        }
        else
#endif
        {
            oal_free(buf);
            mStats.bytesAllocd -= size;
            mStats.numFrees++;
        }
    }
}
/**
 * Allocate a buffer of 'size' user bytes with a hidden bookkeeping
 * header (alloc flags, user size, XOR'ed size copy) and, in debug
 * builds, a footer pattern used by cmsMem_free to detect overflow.
 *
 * @param size       user-visible buffer size in bytes.
 * @param allocFlags ALLOC_* bits (shared memory, zeroize, ...).
 * @return pointer to the user area, or NULL on allocation failure.
 */
void *cmsMem_alloc(UINT32 size, UINT32 allocFlags)
{
    void *buf;
    UINT32 allocSize;

#ifdef CMS_MEM_LEAK_TRACING
    initAllocSeq();
#endif

    /* Real allocation = user size + header/footer overhead */
    allocSize = REAL_ALLOC_SIZE(size);

#ifdef MDM_SHARED_MEM
    if (allocFlags & ALLOC_SHARED_MEM)
    {
#ifdef CMS_MEM_LEAK_TRACING
        buf = bget(allocSize, allocSeq);
#else
        buf = bget(allocSize);
#endif
    }
    else
#endif
    {
        buf = oal_malloc(allocSize);
        if (buf)
        {
            mStats.bytesAllocd += size;
            mStats.numAllocs++;
        }
    }

    if (buf != NULL)
    {
        UINT32 *intBuf = (UINT32 *) buf;
        UINT32 intSize = allocSize / sizeof(UINT32);

        if (allocFlags & ALLOC_ZEROIZE)
        {
            memset(buf, 0, allocSize);
        }
#ifdef CMS_MEM_POISON_ALLOC_FREE
        else
        {
            /*
             * Set alloc'ed buffer to garbage to catch use-before-init.
             * But we also allocate huge buffers for storing image downloads.
             * Don't bother writing garbage to those huge buffers.
             */
            if (allocSize < 64 * 1024)
            {
                memset(buf, CMS_MEM_ALLOC_PATTERN, allocSize);
            }
        }
#endif

        /*
         * Record the allocFlags in the first word, and the
         * size of user buffer in the next 2 words of the buffer.
         * Make 2 copies of the size in case one of the copies gets corrupted by
         * an underflow.  Make one copy the XOR of the other so that there are
         * not so many 0's in size fields.
         */
        intBuf[0] = allocFlags;
        intBuf[1] = size;
        intBuf[2] = intBuf[1] ^ 0xffffffff;
        buf = &(intBuf[3]); /* this gets returned to user */

#ifdef CMS_MEM_DEBUG
        {
            UINT8 *charBuf = (UINT8 *) buf;
            UINT32 i, roundup4Size = ROUNDUP4(size);

            /* Pad bytes between size and its 4-byte round-up get the
             * footer byte pattern, checked again at free time */
            for (i=size; i < roundup4Size; i++)
            {
                charBuf[i] = CMS_MEM_FOOTER_PATTERN & 0xff;
            }

            intBuf[intSize - 1] = CMS_MEM_FOOTER_PATTERN;
            intBuf[intSize - 2] = CMS_MEM_FOOTER_PATTERN;
        }
#endif

#ifdef CMS_MEM_LEAK_TRACING
        {
            AllocRecord *allocRec;
            if (!(allocRec = calloc(1, sizeof(AllocRecord))))
            {
                cmsLog_error("could not malloc a record to track alloc");
            }
            else
            {
                allocRec->bufAddr = buf;
                allocRec->userSize = size;
                allocRec->seq = allocSeq++;
                backtrace(allocRec->stackAddr, NUM_STACK_ENTRIES);
                /*
                 * new allocs are placed at the beginning of the list, right after
                 * the head.
                 */
                dlist_append((struct dlist_node *)allocRec, &glbAllocRec);
            }

            /*
             * do periodic garbage collection on the allocRecs which point
             * to shmBuf's that has been freed by another app.
             */
            if ((allocSeq % 2000) == 0)
            {
                cmsLog_debug("Starting allocRec garbage collection");
                garbageCollectAllocRec();
                cmsLog_debug("garbage collection done");
            }
        }
#endif
    }

    return buf;
}
int write_cmos_layout_bin(FILE *f) { const cmos_entry_t *cmos_entry; const cmos_enum_t *cmos_enum; cmos_checksum_layout_t layout; struct cmos_option_table table; struct cmos_entries entry; struct cmos_enums cenum; struct cmos_checksum csum; size_t sum = 0; int len; for (cmos_entry = first_cmos_entry(); cmos_entry != NULL; cmos_entry = next_cmos_entry(cmos_entry)) { if (cmos_entry == first_cmos_entry()) { sum += sizeof(table); table.header_length = sizeof(table); table.tag = LB_TAG_CMOS_OPTION_TABLE; table.size = 0; if (fwrite((char *)&table, sizeof(table), 1, f) != 1) { perror("Error writing image file"); goto err; } } memset(&entry, 0, sizeof(entry)); entry.tag = LB_TAG_OPTION; entry.config = cmos_entry->config; entry.config_id = (uint32_t)cmos_entry->config_id; entry.bit = cmos_entry->bit; entry.length = cmos_entry->length; if (!is_ident((char *)cmos_entry->name)) { fprintf(stderr, "Error - Name %s is an invalid identifier\n", cmos_entry->name); goto err; } memcpy(entry.name, cmos_entry->name, strlen(cmos_entry->name)); entry.name[strlen(cmos_entry->name)] = '\0'; len = strlen(cmos_entry->name) + 1; if (len % 4) ROUNDUP4(len); entry.size = sizeof(entry) - CMOS_MAX_NAME_LENGTH + len; sum += entry.size; if (fwrite((char *)&entry, entry.size, 1, f) != 1) { perror("Error writing image file"); goto err; } } for (cmos_enum = first_cmos_enum(); cmos_enum != NULL; cmos_enum = next_cmos_enum(cmos_enum)) { memset(&cenum, 0, sizeof(cenum)); cenum.tag = LB_TAG_OPTION_ENUM; memcpy(cenum.text, cmos_enum->text, strlen(cmos_enum->text)); cenum.text[strlen(cmos_enum->text)] = '\0'; len = strlen((char *)cenum.text) + 1; if (len % 4) ROUNDUP4(len); cenum.config_id = cmos_enum->config_id; cenum.value = cmos_enum->value; cenum.size = sizeof(cenum) - CMOS_MAX_TEXT_LENGTH + len; sum += cenum.size; if (fwrite((char *)&cenum, cenum.size, 1, f) != 1) { perror("Error writing image file"); goto err; } } layout.summed_area_start = cmos_checksum_start; layout.summed_area_end = 
cmos_checksum_end; layout.checksum_at = cmos_checksum_index; checksum_layout_to_bits(&layout); csum.tag = LB_TAG_OPTION_CHECKSUM; csum.size = sizeof(csum); csum.range_start = layout.summed_area_start; csum.range_end = layout.summed_area_end; csum.location = layout.checksum_at; csum.type = CHECKSUM_PCBIOS; sum += csum.size; if (fwrite((char *)&csum, csum.size, 1, f) != 1) { perror("Error writing image file"); goto err; } if (fseek(f, 0, SEEK_SET) != 0) { perror("Error while seeking"); goto err; } table.size = sum; if (fwrite((char *)&table, sizeof(table), 1, f) != 1) { perror("Error writing image file"); goto err; } return sum; err: fclose(f); exit(1); }