// Destructor: unlinks and destroys the child list, then frees owned strings.
CDOMElement::~CDOMElement() {
	// printf("~DOMElement(%s)\n", this->tagName);
	CDOMNode *n;
	// Pop children off the front of the sibling chain one at a time.
	while( (n = this->firstChild) ) {
		this->firstChild = n->nextSibling;
		switch(n->nodeType) {
		case CDOMNode::XML_ELEMENT_NODE:
			delete ((CDOMElement *)n);
			break;
		}
		// NOTE(review): children whose nodeType is not XML_ELEMENT_NODE are
		// unlinked but never deleted here — confirm they are owned/freed
		// elsewhere, otherwise this leaks them.
	}
	// Free the strings this element owns (guards kept; _FREE semantics
	// on NULL are not visible here).
	if(this->tagName) _FREE(this->tagName);
	if(this->valueLCND) _FREE(this->valueLCND);
	if(this->valueLC) _FREE(this->valueLC);
	if(this->value) _FREE(this->value);
};
/* Dispose of an options structure together with its owned fname buffer.
 * A NULL pointer is accepted and ignored. */
void options_free( struct options *p )
{
	if ( !_IS_NULL( p ) ) {
		_FREE( p->fname );
		_FREE( p );
	}
}
/**
 * Reset the global parser state: release any loaded data buffer and index
 * list, rewind the position/line/word counters, and restore the default
 * file type.
 * (The previous comment — "parameterized constructor, @param filename" —
 * described a constructor and did not match this function.)
 */
void Clear() {
	g_DataLen = 0;
	_FREE( g_pData );      /* assumes _FREE tolerates a NULL pointer — verify */
	g_IndexCount = 0;
	_FREE( g_pIndexList );
	g_Point = g_CurLine = g_CurWord = 0;
	g_FileType = _DEFAULT_FILE_TYPE;
}
/*
 * Allocate and initialize a QFQ scheduler instance for interface ifp.
 * how: M_WAITOK permits a blocking zone allocation, anything else uses
 * the non-blocking variant.  Returns NULL on allocation failure.
 */
struct qfq_if *
qfq_alloc(struct ifnet *ifp, int how)
{
	struct qfq_if *qif;

	qif = (how == M_WAITOK) ? zalloc(qfq_zone) : zalloc_noblock(qfq_zone);
	if (qif == NULL)
		return (NULL);

	bzero(qif, qfq_size);
	qif->qif_ifq = &ifp->if_snd;
	qif->qif_maxclasses = IFCQ_SC_MAX;
	/*
	 * TODO: [email protected]
	 *
	 * Ideally I would like to have the following
	 * but QFQ needs further modifications.
	 *
	 *	qif->qif_maxslots = IFCQ_SC_MAX;
	 */
	qif->qif_maxslots = QFQ_MAX_SLOTS;

	/* Class table: one pointer slot per class, zero-filled. */
	if ((qif->qif_class_tbl = _MALLOC(sizeof (struct qfq_class *) *
	    qif->qif_maxclasses, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
		log(LOG_ERR, "%s: %s unable to allocate class table array\n",
		    if_name(ifp), qfq_style(qif));
		goto error;
	}

	/* Group array indexed 0..QFQ_MAX_INDEX, zero-filled. */
	if ((qif->qif_groups = _MALLOC(sizeof (struct qfq_group *) *
	    (QFQ_MAX_INDEX + 1), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
		log(LOG_ERR, "%s: %s unable to allocate group array\n",
		    if_name(ifp), qfq_style(qif));
		goto error;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler allocated\n",
		    if_name(ifp), qfq_style(qif));
	}

	return (qif);

error:
	/* Unwind whichever allocations succeeded, then return the zone
	 * element itself. */
	if (qif->qif_class_tbl != NULL) {
		_FREE(qif->qif_class_tbl, M_DEVBUF);
		qif->qif_class_tbl = NULL;
	}
	if (qif->qif_groups != NULL) {
		_FREE(qif->qif_groups, M_DEVBUF);
		qif->qif_groups = NULL;
	}
	zfree(qfq_zone, qif);

	return (NULL);
}
// Destructor: tears down the DOM tree, the expat parser, and the two
// path scratch buffers owned by the document.
CDOMDocument::~CDOMDocument()
{
	if (this->documentElement != NULL)
		delete this->documentElement;

	if (this->parser != NULL)
		XML_ParserFree(this->parser);

	if (this->path != NULL)
		_FREE(this->path);
	if (this->upath != NULL)
		_FREE(this->upath);
}
bool CDOMDocument::loadXML(char *xml, unsigned long len) { bool ret = true; // void *buff; if(!this->parser) return(false); this->depth = -1; this->State = CDOMDocument::INTO_UNKNOWN; this->indexStart = 0; this->indexEnd = 0; this->tokenLen = 0; this->tokenLCLen = 0; this->tokenLCNDLen = 0; this->wordIndex = 0; this->parseText = true; if(this->path) _FREE(this->path); this->path = (char *)_MALLOC_WHY(200, "dom.cpp:loadXML:path"); if(this->path) { this->path_msize = 200; this->path[0] = '\0'; this->freepathoffset = 0; } if(this->upath) _FREE(this->upath); this->upath = (char *)_MALLOC_WHY(200, "dom.cpp:loadXML:upath"); if(this->upath) { this->upath_msize = 200; this->upath[0] = '\0'; this->freeupathoffset = 0; } if(XML_Parse(this->parser, xml, len, true) != XML_STATUS_ERROR) { } else { // handle parse error zSyslog._log(CSyslog::LOGL_ERR, CSyslog::LOGC_XMLERR, "Parse error at line %u:\n%s\n", XML_GetCurrentLineNumber(this->parser), XML_ErrorString(XML_GetErrorCode(this->parser))); ret = false; } return(ret); }
void tokenset_remove( struct tokenset *p, char *n ) { struct _token *s; HASH_FIND_STR( p->tokens, n, s ); if ( _IS_NULL( s ) ) return; _FREE( s->text ); HASH_DEL( p->tokens, s ); _FREE( s ); }
/*
 * Remove a cyclic timer.  Blocks until the underlying thread call has
 * been successfully cancelled; frees the wrapper when the thread call
 * itself can be freed, otherwise neuters it and leaves it behind.
 */
void
cyclic_remove(cyclic_id_t cyclic)
{
	wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;

	ASSERT(cyclic != CYCLIC_NONE);

	/* thread_call_cancel() returns false while the callout is in
	 * flight; park on wrapTC, shorten the interval so the handler
	 * re-fires soon (WAKEUP_REAPER) and wakes us, then retry. */
	while (!thread_call_cancel(wrapTC->TChdl)) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}

	if (thread_call_free(wrapTC->TChdl))
		_FREE(wrapTC, M_TEMP);
	else {
		/* Gut this cyclic and move on ... */
		wrapTC->hdlr.cyh_func = noop_cyh_func;
		wrapTC->when.cyt_interval = NEARLY_FOREVER;
	}
}
/* Release the pflog soft state hanging off an interface and drop the
 * interface reference taken at attach time. */
static void
pflogfree(struct ifnet *ifp)
{
	void *softc = ifp->if_softc;

	ifp->if_softc = NULL;
	_FREE(softc, M_DEVBUF);
	(void) ifnet_release(ifp);
}
/* Initialize the cache operations. Called while initializing cache files. */ void afs_InitDualFSCacheOps(struct vnode *vp) { int code; static int inited = 0; #ifdef AFS_DARWIN80_ENV char *buffer = (char*)_MALLOC(MFSNAMELEN, M_TEMP, M_WAITOK); #endif if (inited) return; inited = 1; if (vp == NULL) return; #ifdef AFS_DARWIN80_ENV vfs_name(vnode_mount(vp), buffer); if (strncmp("hfs", buffer, 3) == 0) #else if (strncmp("hfs", vp->v_mount->mnt_vfc->vfc_name, 3) == 0) #endif afs_CacheFSType = AFS_APPL_HFS_CACHE; #ifdef AFS_DARWIN80_ENV else if (strncmp("ufs", buffer, 3) == 0) #else else if (strncmp("ufs", vp->v_mount->mnt_vfc->vfc_name, 3) == 0) #endif afs_CacheFSType = AFS_APPL_UFS_CACHE; else osi_Panic("Unknown cache vnode type\n"); #ifdef AFS_DARWIN80_ENV _FREE(buffer, M_TEMP); #endif }
/*
 * Clean the kernel info buffer to avoid memory leak.
 *
 * Frees the cached __LINKEDIT copy and clears the pointer so that a
 * repeated call — or a later error path that also frees linkedit_buf —
 * cannot double free it.
 */
kern_return_t
clean_kinfo(struct kernel_info *kinfo)
{
    if (kinfo->linkedit_buf != NULL) {
        _FREE(kinfo->linkedit_buf, M_TEMP);
        kinfo->linkedit_buf = NULL;   /* defend against double free */
    }
    return KERN_SUCCESS;
}
/*
 * Kernel realloc: resize a block previously obtained from __MALLOC.
 * Allocates a fresh block, copies the smaller of the old payload size
 * and the requested size, and frees the original.  realloc(NULL, ...)
 * degenerates to a plain allocation.
 */
void *
__REALLOC(
	void		*addr,
	size_t		size,
	int		type,
	int		flags,
	vm_allocation_site_t *site)
{
	struct _mhead	*mh;
	void		*newmem;
	size_t		oldsize;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (__MALLOC(size, type, flags, site));

	/* Get the replacement block first; bail without touching addr
	 * if that fails. */
	newmem = __MALLOC(size, type, flags, site);
	if (newmem == NULL)
		return (NULL);

	/* The allocation header lives immediately before the payload;
	 * its mlen field includes the header itself. */
	mh = (struct _mhead *)addr - 1;
	oldsize = mh->mlen - sizeof (*mh);

	/* Preserve the old contents (truncated when shrinking). */
	bcopy(addr, newmem, MIN(size, oldsize));
	_FREE(addr, type);

	return (newmem);
}
struct cfs_kern_file * kern_file_open(const char * filename, int uflags, int mode, int *err) { struct cfs_kern_file *fp; vnode_t vp; int error; fp = (struct cfs_kern_file *)_MALLOC(sizeof(struct cfs_kern_file), M_TEMP, M_WAITOK); if (fp == NULL) { if (err != NULL) *err = -ENOMEM; return NULL; } fp->f_flags = FFLAGS(uflags); fp->f_ctxt = vfs_context_create(NULL); if ((error = vnode_open(filename, fp->f_flags, mode, 0, &vp, fp->f_ctxt))){ if (err != NULL) *err = -error; _FREE(fp, M_TEMP); } else { if (err != NULL) *err = 0; fp->f_vp = vp; } return fp; }
/* ARGSUSED */
/*
 * Close a BPF device minor: detach it from its interface, destroy its
 * select/MAC state, and free the descriptor.  bpf_dtab[] uses sentinel
 * values: 0 = never opened, (void *)1 = open/close in progress.
 */
int
bpfclose(dev_t dev, __unused int flags, __unused int fmt,
    __unused struct proc *p)
{
	struct bpf_d *d;

	/* Take BPF lock to ensure no other thread is using the device */
	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		/* Never opened, or another open/close is racing us. */
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}
	bpf_dtab[minor(dev)] = (void *)1;	/* Mark closing */

	/* Tear down interface attachment and waiters before freeing. */
	if (d->bd_bif)
		bpf_detachd(d);
	selthreadclear(&d->bd_sel);
#if CONFIG_MACF_NET
	mac_bpfdesc_label_destroy(d);
#endif
	bpf_freed(d);

	/* Mark free in same context as bpfopen comes to check */
	bpf_dtab[minor(dev)] = NULL;		/* Mark closed */
	lck_mtx_unlock(bpf_mlock);

	/* Descriptor memory released outside the lock. */
	_FREE(d, M_DEVBUF);

	return (0);
}
void ddi_soft_state_fini(void **state_p) { #pragma unused(state_p) int i; for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP ); }
/* Empty the token set in place: free every node and its text, unlink it
 * from the hash, and zero the element count.  The set remains usable. */
void tokenset_reset( struct tokenset *p )
{
	struct _token *cur = p->tokens;

	while ( cur != NULL ) {
		struct _token *next = cur->hh.next;

		_FREE( cur->text );
		HASH_DEL( p->tokens, cur );
		_FREE( cur );

		cur = next;
	}
	p->count = 0;
}
/* Remove an omni-present cyclic: broadcast the removal to every CPU via
 * a cross-call, then release the per-CPU id list itself. */
void
cyclic_remove_omni(cyclic_id_list_t cyc_list)
{
	ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );

	dtrace_xcall(DTRACE_CPUALL,
	    (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);

	_FREE(cyc_list, M_TEMP);
}
/* Remove a timer-based cyclic and free its backing storage. */
void
cyclic_timer_remove(cyclic_id_t cyclic)
{
	ASSERT( cyclic != CYCLIC_NONE );

	timer_call_remove_cyclic( cyclic );

	_FREE((void *)cyclic, M_TEMP);
}
/* Close a kernel file handle: close its vnode, drop the vfs context,
 * and free the handle itself.  Always returns 0. */
int
kern_file_close(struct cfs_kern_file *fp)
{
	vnode_close(fp->f_vp, fp->f_flags, fp->f_ctxt);
	vfs_context_rele(fp->f_ctxt);

	_FREE(fp, M_TEMP);

	return 0;
}
/* Destroy a token set entirely: free every token (text + node), then the
 * set structure itself.  NULL is accepted and ignored. */
void tokenset_free( struct tokenset *p )
{
	struct _token *cur;
	struct _token *next;

	if ( _IS_NULL( p ) )
		return;

	for ( cur = p->tokens; cur != NULL; cur = next ) {
		next = cur->hh.next;
		_FREE( cur->text );
		HASH_DEL( p->tokens, cur );
		_FREE( cur );
	}
	_FREE( p );
}
/**\brief
 * Load the static-object list from an ini file and initialize each entry.
 * @param filename path of the ini file to read
 * @param index    section/index key naming this object list
 * @return TRUE (unconditionally)
 */
BOOL EGStaticObjMgr::LoadIni(char* filename,char* index)
{
	EGCIniFile Ini(filename);

	m_nObjNum=Ini.ReadInt(index,"ObjNum");
	// NOTE(review): new[] result and m_nObjNum are not validated; a zero
	// or bogus "ObjNum" yields an empty/huge array — verify upstream.
	m_pObjArray=new StaticObj[m_nObjNum];

	// Initialize each model from its per-entry file/index strings.
	char* strFile;
	char* strIndex;
	for(unsigned int i=0; i<m_nObjNum; ++i)
	{
		// presumably ReadText/ReadData return heap strings owned by the
		// caller (they are _FREE'd below) — confirm in EGCIniFile.
		strFile = Ini.ReadText(index, i);
		strIndex = Ini.ReadData(index,i);
		m_pObjArray[i].LoadIni(strFile, strIndex);
		_FREE(strFile);
		_FREE(strIndex);
	}
	return TRUE;
}
/* Tear down a linereader: release its text buffer, close its input
 * stream (unless it is stdin), and free the reader itself.  A NULL
 * reader is a no-op. */
void linereader_free( struct linereader *p )
{
	if ( _IS_NULL( p ) )
		return;

	varstr_free( p->text );

	if ( !_IS_NULL( p->in ) && p->in != stdin )
		fclose( p->in );

	_FREE( p );
}
/*
 * Destroy a QFQ scheduler instance; the ifclassq lock must be held.
 * Clears all classes off the interface, frees the class table, each
 * group's slot array and the group array, then returns the qif element
 * to its zone.  Always returns 0.
 */
static int
qfq_destroy_locked(struct qfq_if *qif)
{
	int i;

	IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq);

	/* Destroys every class still attached to the interface. */
	(void) qfq_clear_interface(qif);

	VERIFY(qif->qif_class_tbl != NULL);
	_FREE(qif->qif_class_tbl, M_DEVBUF);
	qif->qif_class_tbl = NULL;

	VERIFY(qif->qif_groups != NULL);
	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		struct qfq_group *grp = qif->qif_groups[i];

		if (grp != NULL) {
			/* Each allocated group owns a slot array. */
			VERIFY(grp->qfg_slots != NULL);
			_FREE(grp->qfg_slots, M_DEVBUF);
			grp->qfg_slots = NULL;
			_FREE(grp, M_DEVBUF);
			qif->qif_groups[i] = NULL;
		}
	}
	_FREE(qif->qif_groups, M_DEVBUF);
	qif->qif_groups = NULL;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif));
	}

	zfree(qfq_zone, qif);

	return (0);
}
/*
 * WMLScript standard library "Float" dispatcher.
 *
 * Floating point appears unsupported in this build: every function pops
 * its argument(s) off the interpreter stack and leaves an "invalid"
 * element in place of a result.  findex selects the library function;
 * unknown indices yield FE_VerificationFailed.
 * (The _CHECK_TOP/_POP/_PUSH/TOP macros presumably operate on the
 * interpreter i implicitly — verify against their definitions.)
 */
static FatalError_e evalStdLib_Float(Interp_t* i, uint16_t findex)
{
    Element_t* op1;

    switch (findex) {
    case 0: /* int(number) */
        _CHECK_TOP();
        _FREE(TOP);
        _MK_INVALID(TOP);
        return FE_Ok;

    case 1: /* floor(number) */
        _CHECK_TOP();
        _FREE(TOP);
        _MK_INVALID(TOP);
        return FE_Ok;

    case 2: /* ceil(number) */
        _CHECK_TOP();
        _FREE(TOP);
        _MK_INVALID(TOP);
        return FE_Ok;

    case 3: /* pow(number1, number2) */
        /* Two arguments: discard the top one, invalidate the other. */
        _POP(op1);
        _FREE(op1);
        _CHECK_TOP();
        _FREE(TOP);
        _MK_INVALID(TOP);
        return FE_Ok;

    case 4: /* round(number) */
        _CHECK_TOP();
        _FREE(TOP);
        _MK_INVALID(TOP);
        return FE_Ok;

    case 5: /* sqrt(number) */
        _CHECK_TOP();
        _FREE(TOP);
        _MK_INVALID(TOP);
        return FE_Ok;

    case 6: /* maxFloat() */
        /* No arguments: push a fresh element and invalidate it. */
        _PUSH();
        _MK_INVALID(TOP);
        return FE_Ok;

    case 7: /* minFloat() */
        _PUSH();
        _MK_INVALID(TOP);
        return FE_Ok;

    default:
        return FE_VerificationFailed;
    }
}
/*
 * Detach a previously attached encapsulation entry.  cookie is the
 * handle handed back at attach time; returns 0 when the entry was found
 * and freed, EINVAL if it is not on the list.
 */
int
encap_detach(const struct encaptab *cookie)
{
	const struct encaptab *target = cookie;
	struct encaptab *p;
	struct encaptab *next;

	for (p = LIST_FIRST(&encaptab); p != NULL; p = next) {
		next = LIST_NEXT(p, chain);
		if (p != target)
			continue;
		LIST_REMOVE(p, chain);
		_FREE(p, M_NETADDR);	/*XXX*/
		return 0;
	}
	return EINVAL;
}
/*
 * Free a buffer obtained from the matching SSL allocator.  NULL is a
 * no-op.  With SSL_CANARIS the allocation is laid out as
 * [u32 len][payload][u32 0xdeadbeef]; the trailing canary is checked
 * before the block is released.
 */
void sslFree(void *p)
{
	if(p != NULL) {
#ifdef SSL_CANARIS
		/* Step back to the real base of the allocation, read the
		 * stored payload length, and locate the trailing canary.
		 * (void* arithmetic — relies on a GCC/Clang extension.) */
		p=p-4;
		uint32_t len=*(uint32_t *)p;
		uint32_t marker=*(uint32_t *)(p+4+len);
		printf("sslFree @%p len=0x%08x\n", p, len);
		if(marker!=0xdeadbeef)
			panic("Buffer overflow in SSL!\n");
#endif
		_FREE(p, M_TEMP);
	}
}
// Arduino-style main loop: demo of the custom allocator wrappers.
// Each pass prints an incrementing counter from a temporary buffer,
// then allocates, exercises, and destroys a Person instance while
// reporting the FreeRTOS heap watermark before/after.
void loop(void)
{
	// Scratch line buffer; _CALLOC presumably zero-fills — verify macro.
	buffer = (char*)_CALLOC(256 * sizeof(char));
	if (buffer) {
		sprintf(buffer, "count = %lu", count++);
		pc.println(buffer);
		_FREE(buffer);
	}

	pc.print("[BEFORE ALLOC] xPortGetFreeHeapSize = ");
	pc.println(xPortGetFreeHeapSize());

	// Person *p = (Person*)_MALLOC(sizeof(Person));
	// p = new (p) Person();
	// _NEW_INSTANCE presumably combines allocation with placement-new
	// construction (see the commented-out expansion above) — verify macro.
	_NEW_INSTANCE(p, Person);

	pc.print("[AFTER ALLOC] xPortGetFreeHeapSize = ");
	pc.println(xPortGetFreeHeapSize());

	// Long payload string used purely as bulk test data for set_name().
	p->set_name("Thanks. It seemed counterintuitive to use 'new' when the whole purpose of using TBB's scalable_allocator was to replace new. But I guess it's different because it's placement new that's being used. – Nav Feb 10 '11 at 11:16\r\n"
	"The parameter to allocate() is the number of objects, not the size in bytes. You then call the allocator's construct() function to construct the object."
	"scalable_allocator<SomeClass> sa;\r\n"
	"SomeClass* s = sa.allocate(1);\r\n"
	"sa.construct(s, SomeClass());\r\n"
	"// ...\r\n"
	"하늘이 푸릅니다\r\n"
	"sa.destroy(s);\r\n"
	"sa.deallocate(s);\r\n"
	"If want to use it with a standard library container or other std allocator aware type, simply give it the allocator type.");
	pc.println(p->get_name().data());

	// Destroy + free the instance (destructor + deallocation).
	_DEL_INSTANCE(p, Person);

	pc.print("[AFTER FREE] xPortGetFreeHeapSize = ");
	pc.println(xPortGetFreeHeapSize());
	pc.println();

	// Align the next iteration to the next whole second.
	delay(1000 - (millis() % 1000));
}
/*
 * Destroy a fairq class; the ifclassq lock must be held.
 * Purges queued packets, unhooks the class from the scheduler's tables
 * and caches, destroys any attached AQM state, frees the bucket array,
 * and returns the class to its zone.  Always returns 0.
 */
static int
fairq_class_destroy(struct fairq_if *fif, struct fairq_class *cl)
{
	struct ifclassq *ifq = fif->fif_ifq;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (cl->cl_head)
		fairq_purgeq(fif, cl, 0, NULL, NULL);

	fif->fif_classes[cl->cl_pri] = NULL;
	if (fif->fif_poll_cache == cl)
		fif->fif_poll_cache = NULL;
	/* If this was the highest-priority class, scan down for the new max. */
	if (fif->fif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (fif->fif_classes[pri] != NULL) {
				fif->fif_maxpri = pri;
				break;
			}
		if (pri < 0)
			fif->fif_maxpri = -1;
	}

	/* Destroy whichever queue discipline (RIO/RED/BLUE/SFB) is attached. */
	if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
		if (cl->cl_qtype == Q_RIO)
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (cl->cl_qtype == Q_RED)
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (cl->cl_qtype == Q_BLUE)
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		cl->cl_qtype = Q_DROPTAIL;
		cl->cl_qstate = QS_RUNNING;
	}

	if (fif->fif_default == cl)
		fif->fif_default = NULL;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif),
		    cl->cl_handle, cl->cl_pri);
	}

	_FREE(cl->cl_buckets, M_DEVBUF);
	cl->cl_head = NULL;	/* sanity */
	cl->cl_polled = NULL;	/* sanity */
	cl->cl_buckets = NULL;	/* sanity */

	zfree(fairq_cl_zone, cl);

	return (0);
}
/*
 * Create — or, if one already exists at this priority, reconfigure — a
 * fairq class.  Validates the requested AQM flags against compiled-in
 * support and mutual exclusivity, normalizes the bucket count to a
 * power of two, allocates the per-class bucket array, and optionally
 * attaches a RED/RIO/BLUE/SFB queue discipline.  The ifclassq lock
 * must be held.  Returns the class, or NULL on any validation or
 * allocation failure.
 */
static struct fairq_class *
fairq_class_create(struct fairq_if *fif, int pri, u_int32_t qlimit,
    u_int64_t bandwidth, u_int32_t nbuckets, int flags, u_int64_t hogs_m1,
    u_int64_t lssc_m1, u_int64_t lssc_d, u_int64_t lssc_m2, u_int32_t qid)
{
#pragma unused(lssc_d, lssc_m2)
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct fairq_class *cl;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

	/* Sanitize flags unless internally configured */
	if (fif->fif_flags & FAIRQIFF_ALTQ)
		flags &= FARF_USERFLAGS;

	/* Reject queue disciplines that were compiled out. */
#if !CLASSQ_RED
	if (flags & FARF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & FARF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & FARF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RED &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RIO &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_BLUE &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}

	if (bandwidth == 0 || (bandwidth / 8) == 0) {
		log(LOG_ERR, "%s: %s invalid data rate %llu\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif), bandwidth);
		return (NULL);
	}

	/* Clamp the bucket count, then round up to the next power of two. */
	if (nbuckets == 0)
		nbuckets = 256;
	if (nbuckets > FAIRQ_MAX_BUCKETS)
		nbuckets = FAIRQ_MAX_BUCKETS;
	/* enforce power-of-2 size */
	while ((nbuckets ^ (nbuckets - 1)) != ((nbuckets << 1) - 1))
		++nbuckets;

	ifq = fif->fif_ifq;
	ifp = FAIRQIF_IFP(fif);

	if ((cl = fif->fif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (cl->cl_head)
			fairq_purgeq(fif, cl, 0, NULL, NULL);
		/* Drop any existing queue discipline before reconfiguring. */
#if CLASSQ_RIO
		if (cl->cl_qtype == Q_RIO)
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (cl->cl_qtype == Q_RED)
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (cl->cl_qtype == Q_BLUE)
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		cl->cl_qtype = Q_DROPTAIL;
		cl->cl_qstate = QS_RUNNING;
	} else {
		cl = zalloc(fairq_cl_zone);
		if (cl == NULL)
			goto err_ret;
		bzero(cl, fairq_cl_size);
		cl->cl_nbuckets = nbuckets;
		cl->cl_nbucket_mask = nbuckets - 1;

		cl->cl_buckets = _MALLOC(sizeof (struct fairq_bucket) *
		    cl->cl_nbuckets, M_DEVBUF, M_WAITOK|M_ZERO);
		if (cl->cl_buckets == NULL)
			goto err_buckets;
		cl->cl_head = NULL;
	}

	fif->fif_classes[pri] = cl;
	if (flags & FARF_DEFAULTCLASS)
		fif->fif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;	/* use default */
	}
	cl->cl_qlimit = qlimit;
	for (i = 0; i < cl->cl_nbuckets; ++i) {
		_qinit(&cl->cl_buckets[i].queue, Q_DROPTAIL, qlimit);
	}
	cl->cl_bandwidth = bandwidth / 8;	/* cvt to bytes per second */
	cl->cl_qtype = Q_DROPTAIL;
	cl->cl_qstate = QS_RUNNING;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > fif->fif_maxpri)
		fif->fif_maxpri = pri;
	cl->cl_fif = fif;
	cl->cl_handle = qid;
	cl->cl_hogs_m1 = hogs_m1 / 8;
	cl->cl_lssc_m1 = lssc_m1 / 8;	/* NOT YET USED */
	cl->cl_bw_current = 0;

	/* Optionally attach the requested queue discipline. */
	if (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & FARF_ECN) {
			if (flags & FARF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & FARF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & FARF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & FARF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & FARF_FLOWCTL) {
			if (flags & FARF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & FARF_CLEARDSCP) {
			if (flags & FARF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 * events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RIO
		if (flags & FARF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				cl->cl_qtype = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (flags & FARF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    cl->cl_qlimit * 10/100,
			    cl->cl_qlimit * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				cl->cl_qtype = Q_RED;
		}
#endif /* CLASSQ_RED */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & FARF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				cl->cl_qtype = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & FARF_SFB) {
			/* FARF_LAZY defers SFB allocation until first use. */
			if (!(cl->cl_flags & FARF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    cl->cl_qlimit, cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & FARF_LAZY))
				cl->cl_qtype = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), fairq_style(fif),
		    cl->cl_handle, cl->cl_pri, cl->cl_qlimit, flags, FARF_BITS);
	}

	return (cl);

err_buckets:
	if (cl->cl_buckets != NULL)
		_FREE(cl->cl_buckets, M_DEVBUF);
err_ret:
	if (cl != NULL) {
		if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
			if (cl->cl_qtype == Q_RIO)
				rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
			if (cl->cl_qtype == Q_RED)
				red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
			if (cl->cl_qtype == Q_BLUE)
				blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
			if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
				sfb_destroy(cl->cl_sfb);
			cl->cl_qalg.ptr = NULL;
			cl->cl_qtype = Q_DROPTAIL;
			cl->cl_qstate = QS_RUNNING;
		}
		zfree(fairq_cl_zone, cl);
	}
	return (NULL);
}
/*
 * entrypoint function to read necessary information from running kernel and kernel at disk
 * such as kaslr slide, linkedit location
 * the reads from disk are implemented using the available KPI VFS functions
 */
kern_return_t
init_kernel_info(kernel_info *kinfo)
{
    kern_return_t error = 0;

    /* scratch buffer for the on-disk kernel's Mach-O header */
    // lookup vnode for /mach_kernel
    void *kernel_header = _MALLOC(HEADER_SIZE, M_TEMP, M_ZERO);
    if (kernel_header == NULL) {
        return KERN_FAILURE;
    }

    vnode_t kernel_vnode = NULLVP;
    vfs_context_t ctxt = NULL;
    int found_kernel = 0;

    /* Probe each candidate path until an on-disk header matches the
     * running kernel.  On the matching iteration we break with the
     * vnode iocount held and ctxt alive; both are released later. */
    for(int i = 0; i < sizeof(kernel_paths) / sizeof(*kernel_paths); i++) {
        kernel_vnode = NULLVP;
        ctxt = vfs_context_create(NULL);

        error = vnode_lookup(kernel_paths[i], 0, &kernel_vnode, ctxt);
        if(!error) {
            error = get_mach_header(kernel_header, kernel_vnode, ctxt);
            if(!error) {
                if(!is_current_kernel(kernel_header)) {
                    vnode_put(kernel_vnode);
                } else {
                    found_kernel = 1;
                    break;
                }
            }
            /* NOTE(review): if get_mach_header() fails here, vnode_put()
             * is never called for this vnode — looks like an iocount
             * leak; confirm. */
        }

        vfs_context_rele(ctxt);
    }

    if(!found_kernel) {
        _FREE(kernel_header, M_TEMP);
        return KERN_FAILURE;
    }

    error = process_kernel_mach_header(kernel_header, kinfo);
    if (error) goto failure;

    // compute kaslr slide
    get_running_text_address(kinfo, 0);

    // we know the location of linkedit and offsets into symbols and their strings
    // now we need to read linkedit into a buffer so we can process it later
    // __LINKEDIT total size is around 1MB
    // we should free this buffer later when we don't need anymore to solve symbols
    kinfo->linkedit_buf = _MALLOC(kinfo->linkedit_size, M_TEMP, M_ZERO);
    if (kinfo->linkedit_buf == NULL) {
        _FREE(kernel_header, M_TEMP);
        /* NOTE(review): this early return skips releasing ctxt and the
         * vnode iocount taken above — verify. */
        return KERN_FAILURE;
    }

    // read linkedit from filesystem
    error = get_kernel_linkedit(kernel_vnode, ctxt, kinfo);
    if (error) goto failure;

/* NOTE(review): no goto targets this label (execution simply falls
 * through) — the unused label will draw a compiler warning. */
success:
    _FREE(kernel_header, M_TEMP);
    vfs_context_rele(ctxt);
    // drop the iocount due to vnode_lookup()
    // we must do this else machine will block on shutdown/reboot
    vnode_put(kernel_vnode);
    return KERN_SUCCESS;

failure:
    if (kinfo->linkedit_buf != NULL)
        _FREE(kinfo->linkedit_buf, M_TEMP);
    _FREE(kernel_header, M_TEMP);
    vfs_context_rele(ctxt);
    vnode_put(kernel_vnode);
    return KERN_FAILURE;
}