static struct pipe *pipe_alloc(void) {
	struct pipe *pipe;
	struct ring_buff *pipe_buff;
	void *storage;

	storage = sysmalloc(DEFAULT_PIPE_BUFFER_SIZE);
	if (!storage) {
		return NULL;
	}

	pipe = sysmalloc(sizeof(struct pipe));
	if (!pipe) {
		sysfree(storage);
		return NULL;
	}

	pipe_buff = sysmalloc(sizeof(struct ring_buff));
	if (!pipe_buff) {
		sysfree(storage);
		sysfree(pipe);
		return NULL;
	}

	pipe->buff = pipe_buff;
	pipe->buf_size = DEFAULT_PIPE_BUFFER_SIZE - 1;
	ring_buff_init(pipe_buff, 1, DEFAULT_PIPE_BUFFER_SIZE, storage);
	mutex_init(&pipe->mutex);

	return pipe;
}
int Mem::printMem ( ) {
	// has anyone breeched their buffer?
	printBreeches_unlocked();

	// print table entries sorted by most mem first
	int32_t *p = (int32_t *)sysmalloc ( m_memtablesize * 4 );
	if ( ! p ) return 0;

	// stock up "p" and compute total bytes allocated
	int64_t total = 0;
	int32_t np = 0;
	for ( int32_t i = 0 ; i < (int32_t)m_memtablesize ; i++ ) {
		// skip empty buckets
		if ( ! s_mptrs[i] ) continue;
		total += s_sizes[i];
		p[np++] = i;
	}

	// print out table sorted by sizes
	for ( int32_t i = 0 ; i < np ; i++ ) {
		int32_t a = p[i];
		log(LOG_INFO,"mem: %05" PRId32") %zu 0x%" PTRFMT" %s",
		    i, s_sizes[a], (PTRTYPE)s_mptrs[a], &s_labels[a*16] );
	}

	sysfree ( p );

	log(LOG_INFO,"mem: # current objects allocated now = %" PRId32, np );
	log(LOG_INFO,"mem: totalMem allocated now = %" PRId64, total );
	//log("mem: max allocated at one time = %" PRId32, (int32_t)(m_maxAllocated));
	log(LOG_INFO,"mem: Memory allocated now: %" PRId64".\n", m_used );
	log(LOG_INFO,"mem: Num allocs %" PRId32".\n", m_numAllocated );

	return 1;
}
/*---------------------------------------------------------------------------*/
extern Boolean sys_create_lan(Lan **lan)
{
	if (!sysmalloc(sizeof(Lan), lan))
		return(False);

	init_node((*lan), (*lan));
	(*lan)->provider = (*lan);

	/* allocation and initialization succeeded */
	return(True);
}
static int flash_emu_erase_block(struct flash_dev *dev, uint32_t block_base)
{
	block_dev_t *bdev;
	int len;
	char *data;
	int rc;

	bdev = dev->privdata;
	if (NULL == bdev) {
		return -ENODEV;
	}

	len = bdev->driver->ioctl(bdev, IOCTL_GETBLKSIZE, NULL, 0);
	if (NULL == (data = sysmalloc(len))) {
		return -ENOMEM;
	}

	memset((void *) data, 0xFF, (size_t) len);

	rc = block_dev_write_buffered(bdev, (const char *) data, (size_t) len, block_base);
	sysfree(data);

	if (len == rc) {
		return 0;
	}
	return rc;
}
void * operator new [] (size_t size) throw (std::bad_alloc) {
	logTrace( g_conf.m_logTraceMem, "size=%zu", size );

	// don't let electric fence zap us
	if ( size == 0 ) return (void *)0x7fffffff;

	size_t max = g_conf.m_maxMem;

	// don't go over max
	if ( g_mem.getUsedMem() + size >= max && g_conf.m_maxMem > 1000000 ) {
		log(LOG_ERROR, "mem: new(%zu): Out of memory.", size );
		throw std::bad_alloc();
		//throw 1;
	}

	void *mem = sysmalloc ( size );
	if ( ! mem && size > 0 ) {
		g_errno = errno;
		g_mem.incrementOOMCount();
		log( LOG_WARN, "mem: new(%zu): %s", size, mstrerror(g_errno));
		throw std::bad_alloc();
	}

	g_mem.addMem ( (char*)mem , size, "TMPMEM" , 1 );

	return mem;
}
/*---------------------------------------------------------------------------*/
extern Boolean lac_create_port(Lac_system *system, Port_no port_no, Lac_port **port)
{
	Lac_port *p;

	*port = NULL;
	p = &system->ports;
	while ((p = p->next) != &system->ports)
		if (p->port_no == port_no)
			return(False);
	/* else */
	if (!sysmalloc(sizeof(Lac_port), &p))
		return(False);

	*port = p;
	p->port_no = port_no;

	init_node(&p->mac, (void *)p);
	p->mac.rx_fn        = &mac_rx;
	p->mac.rx_status_fn = &mac_status;
	p->mac.tx_status_fn = &mac_status;
	init_node(&p->mux, (void *)p);

	add_port(system, p);

	p->actor_admin.port_priority   = Default_port_priority;
	p->actor_admin.port_no         = port_no;
	p->actor_admin.system_priority = Default_system_priority;
	p->actor_admin.system_id       = p->system->id;
	p->actor_admin.key             = Default_key;

	p->actor_admin.state.lacp_activity   = Default_lacp_activity;
	p->actor_admin.state.lacp_timeout    = Default_lacp_timeout;
	p->actor_admin.state.aggregation     = Default_aggregation;
	p->actor_admin.state.synchronization = False;
	p->actor_admin.state.defaulted       = True;
	p->actor_admin.state.expired         = False;

	p->partner_admin.port_priority   = Default_port_priority;
	p->partner_admin.port_no         = port_no;
	p->partner_admin.system_priority = Default_system_priority;
	p->partner_admin.system_id       = Null_system;
	p->partner_admin.key             = p->port_no;

	p->partner_admin.state.lacp_activity   = False; /* Passive */
	p->partner_admin.state.lacp_timeout    = False; /* Long timeout */
	p->partner_admin.state.aggregation     = False; /* Individual */
	p->partner_admin.state.synchronization = True;
	p->partner_admin.state.collecting      = True;
	p->partner_admin.state.distributing    = True;
	p->partner_admin.state.defaulted       = True;
	p->partner_admin.state.expired         = False;

	lac_init_port(system, port_no, Lacp_enabled);

	return(True);
}
static struct sk_buff_data * skb_data_alloc_dynamic(size_t size) {
	ipl_t sp;
	struct sk_buff_data *skb_data;

	sp = ipl_save();
	{
		skb_data = (struct sk_buff_data *) sysmalloc(SKB_DATA_SIZE(size));
	}
	ipl_restore(sp);

	if (skb_data == NULL) {
		log_error("skb_data_alloc: error: no memory\n");
		return NULL; /* error: no memory */
	}

	skb_data->links = 1;

	return skb_data;
}
/*---------------------------------------------------------------------------*/
extern Boolean lac_create_system(System_id system_id, Lac_system **system)
{
	Lac_port *p;

	if (!sysmalloc(sizeof(Lac_system), system))
		return(False);
	/* else */
	(*system)->priority = Default_system_priority;
	(*system)->id       = system_id;

	p = &((*system)->ports);
	p->port_no = Zero;
	p->next    = p;
	p->system  = (*system);

	sys_null_timer(&p->tick_timer);
	init_node(&p->mux, (void *)p);
	init_node(&p->mac, (void *)p);

	lac_init_system(*system);

	return(True);
}
struct memhead *memory_head(void)
{
	static struct memhead *head = NULL;

	if (head == NULL) {
		t_sysmalloc sysmalloc = malloc;
		size_t s = heap_size + sizeof(*head);

		memset(&border0, 0x42, sizeof(border0));
		// Because size_t size is quite volatile...
		memset(&border1, 0x84, sizeof(border1));

		head = (struct memhead*)sysmalloc(s);
		head->alloc_count = 0;
		head->total_count = 0;
		head->alloc = 0;
		head->total = 0;
		head->first = NULL;
		head->limit = &(((char*)head)[s]);
		head->last_position = &(((char*)head)[sizeof(*head)]);
		atexit(check_memory_state);
	}
	return (head);
}
// . global override of new and delete operators
// . seems like constructor and destructor are still called
// . just use to check if enough memory
// . before this just called mmalloc which sometimes returned NULL which
//   would cause us to throw an unhandled signal. So for now I don't
//   call mmalloc since it is limited in the mem it can use and would often
//   return NULL and set g_errno to ENOMEM
void * operator new (size_t size) throw (std::bad_alloc) {
	logTrace( g_conf.m_logTraceMem, "size=%zu", size );

	// don't let electric fence zap us
	if ( size == 0 ) return (void *)0x7fffffff;

	if ( allocationShouldFailRandomly() ) {
		g_errno = ENOMEM;
		log(LOG_ERROR, "mem: new-fake(%zu): %s", size, mstrerror(g_errno));
		throw std::bad_alloc();
	}

	// hack so hostid #0 can use more mem
	size_t max = g_conf.m_maxMem;
	//if ( g_hostdb.m_hostId == 0 ) max += 2000000000;

	// don't go over max
	if ( g_mem.getUsedMem() + size >= max && g_conf.m_maxMem > 1000000 ) {
		log(LOG_ERROR, "mem: new(%zu): Out of memory.", size );
		throw std::bad_alloc();
	}

	void *mem = sysmalloc ( size );
	if ( ! mem && size > 0 ) {
		g_mem.incrementOOMCount();
		g_errno = errno;
		log( LOG_WARN, "mem: new(%zu): %s", size, mstrerror(g_errno));
		throw std::bad_alloc();
		//return NULL;
	}

	g_mem.addMem ( mem , size , "TMPMEM" , 1 );

	return mem;
}
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
	return sysmalloc(sizeof(struct jffs2_full_dirent) + namesize);
}
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
	return sysmalloc(sizeof(struct jffs2_tmp_dnode_info));
}
static int ext3fs_mount(void *dev, void *dir) {
	struct fs_driver *drv;
	struct ext2fs_dinode *dip = sysmalloc(sizeof(struct ext2fs_dinode));
	char buf[SECTOR_SIZE * 2];
	struct ext2_fs_info *fsi;
	int inode_sector, ret, rsize;
	struct node *dev_node = dev;
	struct nas *dir_nas = ((struct node *)dir)->nas;
	journal_t *jp = NULL;
	ext3_journal_specific_t *ext3_spec;
	journal_fs_specific_t spec = {
		.bmap = ext3_journal_bmap,
		.commit = ext3_journal_commit,
		.update = ext3_journal_update,
		.trans_freespace = ext3_journal_trans_freespace
	};

	/* check the on-disk inode copy was actually allocated */
	if (NULL == dip) {
		return -ENOMEM;
	}

	if (NULL == (drv = fs_driver_find_drv(EXT2_NAME))) {
		return -1;
	}

	if ((ret = drv->fsop->mount(dev, dir)) < 0) {
		return ret;
	}

	if (NULL == (ext3_spec = objalloc(&ext3_journal_cache))) {
		return -1;
	}

	spec.data = ext3_spec;

	if (NULL == (jp = journal_create(&spec))) {
		objfree(&ext3_journal_cache, ext3_spec);
		return -1;
	}

	/* Getting first block for inode number EXT3_JOURNAL_SUPERBLOCK_INODE */
	dir_nas = ((struct node *)dir)->nas;
	fsi = dir_nas->fs->fsi;
	inode_sector = ino_to_fsba(fsi, EXT3_JOURNAL_SUPERBLOCK_INODE);

	rsize = ext2_read_sector(dir_nas, buf, 1, inode_sector);
	if (rsize * fsi->s_block_size != fsi->s_block_size) {
		return -EIO;
	}

	/* set pointer to inode struct in read buffer */
	memcpy(dip,
	       (buf + EXT2_DINODE_SIZE(fsi) * ino_to_fsbo(fsi, EXT3_JOURNAL_SUPERBLOCK_INODE)),
	       sizeof(struct ext2fs_dinode));

	/* XXX Hack to use ext2 functions */
	dir_nas->fs->drv = &ext3fs_driver;

	ext3_spec->ext3_journal_inode = dip;

	if (0 > ext3_journal_load(jp, (struct block_dev *) dev_node->nas->fi->privdata,
			fsbtodb(fsi, dip->i_block[0]))) {
		return -EIO;
	}

	/*
	 * FIXME Now journal supports block size only equal to filesystem block size.
	 * It is not critical but not flexible enough.
	 */
	assert(jp->j_blocksize == fsi->s_block_size);
	fsi->journal = jp;

	return 0;
}
struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
	return sysmalloc(sizeof(struct jffs2_full_dnode));
}
void *Mem::gbmalloc ( int size , const char *note ) {
	logTrace( g_conf.m_logTraceMem, "size=%d note='%s'", size, note );

	// don't let electric fence zap us
	if ( size == 0 ) return (void *)0x7fffffff;

	// random oom testing
	//static int32_t s_mcount = 0;
	//s_mcount++;
	if ( g_conf.m_testMem && (rand() % 100) < 2 ) {
	//if ( s_mcount > 1055 && (rand() % 1000) < 2 ) {
		g_errno = ENOMEM;
		log( LOG_WARN, "mem: malloc-fake(%i,%s): %s", size, note,
		     mstrerror(g_errno));
		return NULL;
	}

retry:
	int64_t max = g_conf.m_maxMem;

	// don't go over max
	if ( m_used + size + UNDERPAD + OVERPAD >= max ) {
		// try to free temp mem. returns true if it freed some.
		if ( freeCacheMem() ) goto retry;
		g_errno = ENOMEM;
		log( LOG_WARN, "mem: malloc(%i): Out of memory", size );
		return NULL;
	}

	if ( size < 0 ) {
		g_errno = EBADENGINEER;
		log( LOG_ERROR, "mem: malloc(%i): Bad value.", size );
		char *xx = NULL; *xx = 0;
		return NULL;
	}

	void *mem;
	g_inMemFunction = true;
	mem = (void *)sysmalloc ( size + UNDERPAD + OVERPAD );
	g_inMemFunction = false;

	int32_t memLoop = 0;
mallocmemloop:
	if ( ! mem && size > 0 ) {
		g_mem.m_outOfMems++;
		// try to free temp mem. returns true if it freed some.
		if ( freeCacheMem() ) goto retry;
		g_errno = errno;
		static int64_t s_lastTime;
		static int32_t s_missed = 0;
		int64_t now = gettimeofdayInMillisecondsLocal();
		int64_t avail = (int64_t)g_conf.m_maxMem - (int64_t)m_used;
		if ( now - s_lastTime >= 1000LL ) {
			log(LOG_WARN,
			    "mem: system malloc(%i,%s) availShouldBe=%" PRId64": "
			    "%s (%s) (ooms suppressed since last log msg = %" PRId32")",
			    size+UNDERPAD+OVERPAD, note, avail,
			    mstrerror(g_errno), note, s_missed);
			s_lastTime = now;
			s_missed = 0;
		} else {
			s_missed++;
		}
		// to debug oom issues:
		//char *xx=NULL;*xx=0;
		// send an email alert if this happens! it is a sign of "memory fragmentation"
		//static bool s_sentEmail = false;
		// stop sending these now... seems to be problematic. says
		// 160MB is avail and can't alloc 20MB...
		static bool s_sentEmail = true;
		// assume only 90% is really available because of
		// inefficient mallocing
		avail = (int64_t)((float)avail * 0.80);
		// but if it is within about 15MB of what is theoretically
		// available, don't send an email, because there is always some
		// minor fragmentation
		if ( ! s_sentEmail && avail > size ) {
			s_sentEmail = true;
			char msgbuf[1024];
			Host *h = g_hostdb.m_myHost;
			snprintf(msgbuf, 1024, "Possible memory fragmentation "
			         "on host #%" PRId32" %s", h->m_hostId, h->m_note);
			log(LOG_WARN, "query: %s", msgbuf);
			g_pingServer.sendEmail(NULL, msgbuf, true, true);
		}
		return NULL;
	}

	if ( (PTRTYPE)mem < 0x00010000 ) {
		void *remem = sysmalloc(size);
		log ( LOG_WARN, "mem: Caught low memory allocation "
		      "at %08" PTRFMT", "
		      "reallocated to %08" PTRFMT"",
		      (PTRTYPE)mem, (PTRTYPE)remem );
		sysfree(mem);
		mem = remem;
		memLoop++;
		if ( memLoop > 100 ) {
			log ( LOG_WARN, "mem: Attempted to reallocate low "
			      "memory allocation 100 times, "
			      "aborting and returning NOMEM." );
			g_errno = ENOMEM;
			return NULL;
		}
		goto mallocmemloop;
	}

	logTrace( g_conf.m_logTraceMem, "mem=%p size=%d note='%s'", mem, size, note );

	addMem ( (char *)mem + UNDERPAD , size , note , 0 );

	return (char *)mem + UNDERPAD;
}
// . global override of new and delete operators
// . seems like constructor and destructor are still called
// . just use to check if enough memory
// . before this just called mmalloc which sometimes returned NULL which
//   would cause us to throw an unhandled signal. So for now I don't
//   call mmalloc since it is limited in the mem it can use and would often
//   return NULL and set g_errno to ENOMEM
void * operator new (size_t size) throw (std::bad_alloc) {
	logTrace( g_conf.m_logTraceMem, "size=%zu", size );

	// don't let electric fence zap us
	if ( size == 0 ) return (void *)0x7fffffff;

	// . fail randomly
	// . good for testing if we can handle out of memory gracefully
	//static int32_t s_mcount = 0;
	//s_mcount++;
	//if ( s_mcount > 57 && (rand() % 1000) < 2 ) {
	if ( g_conf.m_testMem && (rand() % 100) < 2 ) {
		g_errno = ENOMEM;
		log(LOG_ERROR, "mem: new-fake(%" PRIu32"): %s", (uint32_t)size,
		    mstrerror(g_errno));
		throw std::bad_alloc();
		// return NULL;
	}

	// hack so hostid #0 can use more mem
	int64_t max = g_conf.m_maxMem;
	//if ( g_hostdb.m_hostId == 0 ) max += 2000000000;

	// don't go over max
	if ( g_mem.m_used + (int32_t)size >= max && g_conf.m_maxMem > 1000000 ) {
		log("mem: new(%" PRIu32"): Out of memory.", (uint32_t)size );
		throw std::bad_alloc();
		//throw 1;
	}

	g_inMemFunction = true;
	void *mem = sysmalloc ( size );
	g_inMemFunction = false;

	int32_t memLoop = 0;
newmemloop:
	if ( ! mem && size > 0 ) {
		g_mem.m_outOfMems++;
		g_errno = errno;
		log( LOG_WARN, "mem: new(%" PRId32"): %s", (int32_t)size,
		     mstrerror(g_errno));
		throw std::bad_alloc();
		//return NULL;
	}
	if ( (PTRTYPE)mem < 0x00010000 ) {
		void *remem = sysmalloc(size);
		log ( LOG_WARN, "mem: Caught low memory allocation "
		      "at %08" PTRFMT", "
		      "reallocated to %08" PTRFMT,
		      (PTRTYPE)mem, (PTRTYPE)remem );
		sysfree(mem);
		mem = remem;
		// count retries so the loop below can actually terminate
		memLoop++;
		if ( memLoop > 100 ) {
			log ( LOG_WARN, "mem: Attempted to reallocate low "
			      "memory allocation 100 times, "
			      "aborting and returning ENOMEM." );
			g_errno = ENOMEM;
			throw std::bad_alloc();
		}
		goto newmemloop;
	}

	g_mem.addMem ( mem , size , "TMPMEM" , 1 );

	return mem;
}
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ret = sysmalloc(sizeof(struct jffs2_inode_cache));
	D1(printk(KERN_DEBUG "Allocated inocache at %p\n", ret));
	return ret;
}
struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
	return sysmalloc(sizeof(struct jffs2_raw_dirent));
}
struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
	return sysmalloc(sizeof(struct jffs2_raw_inode));
}
// this is called after a memory block has been allocated and needs to be registered
void Mem::addMem ( void *mem , size_t size , const char *note , char isnew ) {
	if ( ! s_lock.working ) return;
	ScopedLock sl(s_lock);

	logTrace( g_conf.m_logTraceMem, "mem=%p size=%zu note='%s' is_new=%d",
	          mem, size, note, isnew );

	//validate();

	// 4G/x = 600*1024 -> x = 4000000000.0/(600*1024) = 6510
	// crap, g_hostdb.init() is called in main.cpp before
	// g_conf.init() which is needed to set g_conf.m_maxMem...
	if ( ! s_initialized ) {
		//m_memtablesize = m_maxMem / 6510;
		// support 1.2M ptrs for now. good for about 8GB
		// raise from 3000 to 8194 to fix host #1
		m_memtablesize = 8194*1024; //m_maxMem / 6510;
		//if ( m_maxMem < 8000000000 ) gbshutdownLogicError();
	}

	if ( (int32_t)m_numAllocated + 100 >= (int32_t)m_memtablesize ) {
		static bool s_printed = false;
		if ( ! s_printed ) {
			log(LOG_WARN, "mem: using too many slots");
			printMem();
			s_printed = true;
		}
	}

	logDebug( g_conf.m_logDebugMem,
	          "mem: add %08" PTRFMT" %zu bytes (%" PRId64") (%s)",
	          (PTRTYPE)mem, size, m_used, note );

	// check for breech after every call to alloc or free in order to
	// more easily isolate breeching code.. this slows things down a lot
	// though.
	if ( g_conf.m_logDebugMem ) printBreeches_unlocked();

	// copy the magic character, iff not a new() call
	if ( size == 0 ) {
		sl.unlock();
		gbshutdownLogicError();
	}

	// sanity check -- for machines with > 4GB ram?
	if ( (PTRTYPE)mem + (PTRTYPE)size < (PTRTYPE)mem ) {
		log(LOG_LOGIC,"mem: Kernel returned mem at "
		    "%08" PTRFMT" of size %" PRId32" "
		    "which would wrap. Bad kernel.",
		    (PTRTYPE)mem, (int32_t)size);
		sl.unlock();
		gbshutdownLogicError();
	}

	// umsg00
	// bool useElectricFence = false;
	// if ( ! isnew && ! useElectricFence ) {
	if ( ! isnew ) {
		for ( int32_t i = 0 ; i < UNDERPAD ; i++ )
			((char *)mem)[0-i-1] = MAGICCHAR;
		for ( int32_t i = 0 ; i < OVERPAD ; i++ )
			((char *)mem)[0+size+i] = MAGICCHAR;
	}

	// if no label!
	if ( ! note[0] ) log(LOG_LOGIC,"mem: addmem: NO note.");

	// clear mem ptrs if this is our first call
	if ( ! s_initialized ) {
		s_mptrs  = (void **)sysmalloc ( m_memtablesize*sizeof(void *));
		s_sizes  = (size_t *)sysmalloc ( m_memtablesize*sizeof(size_t));
		s_labels = (char *)sysmalloc ( m_memtablesize*16 );
		s_isnew  = (char *)sysmalloc ( m_memtablesize );
		if ( ! s_mptrs || ! s_sizes || ! s_labels || ! s_isnew ) {
			if ( s_mptrs  ) sysfree ( s_mptrs  );
			if ( s_sizes  ) sysfree ( s_sizes  );
			if ( s_labels ) sysfree ( s_labels );
			if ( s_isnew  ) sysfree ( s_isnew  );
			log(LOG_WARN, "mem: addMem: Init failed. Disabling checks.");
			g_conf.m_detectMemLeaks = false;
			return;
		}
		s_initialized = true;
		memset ( s_mptrs , 0 , sizeof(char *) * m_memtablesize );
	}

	// try to add ptr/size/note to leak-detecting table
	if ( (int32_t)s_n > (int32_t)m_memtablesize ) {
		log( LOG_WARN, "mem: addMem: No room in table for %s size=%zu.",
		     note, size);
		return;
	}

	// hash into table
	uint32_t u = (PTRTYPE)mem * (PTRTYPE)0x4bf60ade;
	uint32_t h = u % (uint32_t)m_memtablesize;

	// chain to an empty bucket
	int32_t count = (int32_t)m_memtablesize;
	while ( s_mptrs[h] ) {
		// if an occupied bucket has our same ptr then chances are
		// we freed without calling rmMem() and a new addMem() got it
		if ( s_mptrs[h] == mem ) {
			// if we are being called from addnew(), the
			// overloaded "operator new" function above should
			// have stored a temp ptr in here... allow that, it
			// is used in case an engineer forgets to call
			// mnew() after calling new() so gigablast would never
			// realize that the memory was allocated.
			if ( s_sizes[h] == size &&
			     s_labels[h*16+0] == 'T' &&
			     s_labels[h*16+1] == 'M' &&
			     s_labels[h*16+2] == 'P' &&
			     s_labels[h*16+3] == 'M' &&
			     s_labels[h*16+4] == 'E' &&
			     s_labels[h*16+5] == 'M' ) {
				goto skipMe;
			}
			log( LOG_ERROR,
			     "mem: addMem: Mem already added. rmMem not called? "
			     "label=%c%c%c%c%c%c",
			     s_labels[h*16+0], s_labels[h*16+1], s_labels[h*16+2],
			     s_labels[h*16+3], s_labels[h*16+4], s_labels[h*16+5] );
			sl.unlock();
			gbshutdownAbort(true);
		}
		h++;
		if ( h == m_memtablesize ) h = 0;
		if ( --count == 0 ) {
			log( LOG_ERROR, "mem: addMem: Mem table is full.");
			printMem();
			sl.unlock();
			gbshutdownResourceError();
		}
	}

	// add to debug table
	s_mptrs [ h ] = mem;
	s_sizes [ h ] = size;
	s_isnew [ h ] = isnew;
	//log("adding %" PRId32" size=%" PRId32" to [%" PRId32"] #%" PRId32" (%s)",
	//    (int32_t)mem,size,h,s_n,note);
	s_n++;

	// debug
	if ( (size > MINMEM && g_conf.m_logDebugMemUsage) || size >= 100000000 )
		log(LOG_INFO,"mem: addMem(%zu): %s. ptr=0x%" PTRFMT" "
		    "used=%" PRId64,
		    size, note, (PTRTYPE)mem, m_used);

	// now update used mem
	// we do this here now since we always call addMem() now
	m_used += size;
	m_numAllocated++;
	m_numTotalAllocated++;
	if ( size > m_maxAlloc ) { m_maxAlloc = size; m_maxAllocBy = note; }
	if ( m_used > m_maxAllocated ) m_maxAllocated = m_used;

skipMe:
	int32_t len = strlen(note);
	if ( len > 15 ) len = 15;
	char *here = &s_labels [ h * 16 ];
	memcpy ( here , note , len );
	// make sure NULL terminated
	here[len] = '\0';
	//validate();
}
struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
	return sysmalloc(sizeof(struct jffs2_node_frag));
}
void *Mem::gbmalloc ( size_t size , const char *note ) {
	logTrace( g_conf.m_logTraceMem, "size=%zu note='%s'", size, note );

	// don't let electric fence zap us
	if ( size == 0 ) return (void *)0x7fffffff;

	if ( allocationShouldFailRandomly() ) {
		g_errno = ENOMEM;
		log( LOG_WARN, "mem: malloc-fake(%zu,%s): %s", size, note,
		     mstrerror(g_errno));
		return NULL;
	}

retry:
	size_t max = g_conf.m_maxMem;

	// don't go over max
	if ( g_mem.getUsedMem() + size + UNDERPAD + OVERPAD >= max ) {
		// try to free temp mem. returns true if it freed some.
		if ( freeCacheMem() ) goto retry;
		g_errno = ENOMEM;
		log( LOG_WARN, "mem: malloc(%zu): Out of memory", size );
		return NULL;
	}

	void *mem;
	mem = (void *)sysmalloc ( size + UNDERPAD + OVERPAD );

	int32_t memLoop = 0;
mallocmemloop:
	if ( ! mem && size > 0 ) {
		g_mem.m_outOfMems++;
		// try to free temp mem. returns true if it freed some.
		if ( freeCacheMem() ) goto retry;
		g_errno = errno;
		static int64_t s_lastTime;
		static int32_t s_missed = 0;
		int64_t now = gettimeofdayInMillisecondsLocal();
		int64_t avail = (int64_t)g_conf.m_maxMem - (int64_t)m_used;
		if ( now - s_lastTime >= 1000LL ) {
			log(LOG_WARN,
			    "mem: system malloc(%zu,%s) availShouldBe=%" PRId64": "
			    "%s (%s) (ooms suppressed since last log msg = %" PRId32")",
			    size+UNDERPAD+OVERPAD, note, avail,
			    mstrerror(g_errno), note, s_missed);
			s_lastTime = now;
			s_missed = 0;
		} else {
			s_missed++;
		}
		return NULL;
	}

	if ( (PTRTYPE)mem < 0x00010000 ) {
		void *remem = sysmalloc(size);
		log( LOG_WARN, "mem: Caught low memory allocation "
		     "at %08" PTRFMT", "
		     "reallocated to %08" PTRFMT"",
		     (PTRTYPE)mem, (PTRTYPE)remem );
		sysfree(mem);
		mem = remem;
		memLoop++;
		if ( memLoop > 100 ) {
			log( LOG_WARN, "mem: Attempted to reallocate low "
			     "memory allocation 100 times, "
			     "aborting and returning NOMEM." );
			g_errno = ENOMEM;
			return NULL;
		}
		goto mallocmemloop;
	}

	logTrace( g_conf.m_logTraceMem, "mem=%p size=%zu note='%s'", mem, size, note );

	addMem ( (char *)mem + UNDERPAD , size , note , 0 );

	return (char *)mem + UNDERPAD;
}
struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void)
{
	return sysmalloc(sizeof(struct jffs2_raw_node_ref));
}