status_t init_driver(void) { int i = 0, j = 0, is_detected; pci_info dev_info; //debug_fd = open("/tmp/broadcom_traffic_log",O_RDWR | B_CREATE_FILE); if (get_module(B_PCI_MODULE_NAME, (module_info **)&pci) != B_OK) return B_ERROR; while (pci->get_nth_pci_info(i++, &dev_info) == 0) { is_detected = 0; if ((dev_info.class_base == PCI_network) && (dev_info.class_sub == PCI_ethernet)) { for (j = 0; bcm5700_pci_tbl[j].vendor != 0; j++) { if ((dev_info.vendor_id == bcm5700_pci_tbl[j].vendor) && (dev_info.device_id == bcm5700_pci_tbl[j].device)) { is_detected = 1; break; } } } if (!is_detected) continue; if (cards_found >= 10) break; dev_list[cards_found] = (char *)malloc(16 /* net/bcm570x/xx */); sprintf(dev_list[cards_found],"net/bcm570x/%d",cards_found); be_b57_dev_cards[cards_found].pci_data = dev_info; be_b57_dev_cards[cards_found].packet_release_sem = create_sem(0,dev_list[cards_found]); be_b57_dev_cards[cards_found].mem_list_num = 0; be_b57_dev_cards[cards_found].lockmem_list_num = 0; be_b57_dev_cards[cards_found].opened = 0; be_b57_dev_cards[cards_found].block = 1; be_b57_dev_cards[cards_found].lock = 0; #ifdef HAIKU_TARGET_PLATFORM_HAIKU be_b57_dev_cards[cards_found].linkChangeSem = -1; #endif if (LM_GetAdapterInfo(&be_b57_dev_cards[cards_found].lm_dev) != LM_STATUS_SUCCESS) { for (j = 0; j < cards_found; j++) { free(dev_list[j]); delete_sem(be_b57_dev_cards[j].packet_release_sem); } put_module(B_PCI_MODULE_NAME); return ENODEV; } QQ_InitQueue(&be_b57_dev_cards[cards_found].RxPacketReadQ.Container,MAX_RX_PACKET_DESC_COUNT); cards_found++; } mempool_init((MAX_RX_PACKET_DESC_COUNT+MAX_TX_PACKET_DESC_COUNT) * cards_found); dev_list[cards_found] = NULL; return B_OK; }
/* Set up the Stream5 ICMP session cache and session mempool.
 * No-ops when gconfig is NULL, ICMP tracking is off, or the cache
 * already exists; both failure paths are fatal. */
void Stream5InitIcmp(Stream5GlobalConfig *gconfig)
{
    if (gconfig == NULL)
        return;

    /* Finally ICMP */
    if (!gconfig->track_icmp_sessions || icmp_lws_cache != NULL)
        return;

    icmp_lws_cache = InitLWSessionCache(gconfig->max_icmp_sessions,
                                        30, 5, 0, NULL);
    if (icmp_lws_cache == NULL)
    {
        FatalError("Unable to init stream5 ICMP session cache, no ICMP "
                   "stream inspection!\n");
    }

    if (mempool_init(&icmp_session_mempool,
                     gconfig->max_icmp_sessions, sizeof(IcmpSession)) != 0)
    {
        FatalError("%s(%d) Could not initialize icmp session memory pool.\n",
                   __FILE__, __LINE__);
    }
}
/* Check configs & set up mempool. Mempool stuff is in this function because we want to parse & check *ALL* of the configs before allocating a mempool. */ static int DNP3CheckConfig(struct _SnortConfig *sc) { int rval; unsigned int max_sessions; /* Get default configuration */ dnp3_config_t *default_config = (dnp3_config_t *)sfPolicyUserDataGetDefault(dnp3_context_id); if ( !default_config ) { _dpd.errMsg( "ERROR: preprocessor dnp3 must be configured in the default policy.\n"); return -1; } /* Check all individual configurations */ if ((rval = sfPolicyUserDataIterate(sc, dnp3_context_id, DNP3CheckPolicyConfig))) return rval; /* Set up MemPool, but only if a config exists that's not "disabled". */ if (sfPolicyUserDataIterate(sc, dnp3_context_id, DNP3IsEnabled) == 0) return 0; // FIXTHIS default_config is null when configured in target policy only max_sessions = default_config->memcap / sizeof(dnp3_session_data_t); dnp3_mempool = (MemPool *)calloc(1, sizeof(MemPool)); if (mempool_init(dnp3_mempool, max_sessions, sizeof(dnp3_session_data_t)) != 0) { DynamicPreprocessorFatalMessage("Unable to allocate DNP3 mempool.\n"); } return 0; }
/* Entry point: "<prog> client|server <address>".
 * Validates arguments, brings the runtime up, runs the selected role,
 * then tears everything down in reverse order. */
int main(int argc, char* argv[])
{
    int run_as_client;

    /* Short-circuit keeps the original check order:
     * argc, then address parse, then mode keyword. */
    if (argc != 3
        || sock_str2addr(argv[2], &sa) == NULL
        || (strcmp(argv[1], "client") != 0 && strcmp(argv[1], "server") != 0)) {
        print_usage();
        exit(0);
    }

    run_as_client = (strcmp(argv[1], "client") == 0);

    sock_init();
    fdwatch_init();
    mempool_init();
    threadpool_init(1);
    network_init(20000);

    if (run_as_client)
        client_do();
    else
        server_do();

    network_final();
    threadpool_final();
    mempool_final();
    fdwatch_final();
    sock_final();

    return 0;
}
int main(int argc, char *argv[]) { XSimple_isp_hp_wrapper ins; int32_t width = 3280; int32_t height = 2486; int32_t channel = 4; dma_buffer_t ibuf = {.ptr=NULL, .size=width*height*sizeof(uint16_t), .dim=0, .addr=0}; dma_buffer_t obuf = {.ptr=NULL, .size=channel*width*height*sizeof(uint8_t), .dim=0, .addr=0}; const uint16_t optical_black_clamp_value = 16; const float gamma_value = 1.0f/1.8f; const float saturation_value = 0.6f; uint32_t reg_data = 0; if (argc == 4) { sscanf(argv[1], "%d", &optical_black_clamp_value); sscanf(argv[2], "%f", &gamma_value); sscanf(argv[3], "%f", &saturation_value); } if (XSimple_isp_hp_wrapper_Initialize(&ins, "simple_isp_hp_wrapper") != XST_SUCCESS) { printf("Cannot initialize driver instance\n"); goto finally; } pool_t pool; if (mempool_init(&pool)) goto finally; if (mempool_alloc(&pool, &ibuf)) goto finally; if (mempool_alloc(&pool, &obuf)) goto finally; fill_bayer_pattern(&ibuf, width, height); memset(obuf.ptr, 0, obuf.size); XSimple_isp_hp_wrapper_Set_p_in_port_addr_bv_V(&ins, ibuf.addr); XSimple_isp_hp_wrapper_Set_p_out_port_addr_bv_V(&ins, obuf.addr); memcpy(®_data, &saturation_value, sizeof(float)); XSimple_isp_hp_wrapper_Set_p_saturation_value(&ins, reg_data); memcpy(®_data, &optical_black_clamp_value, sizeof(uint16_t)); XSimple_isp_hp_wrapper_Set_p_optical_black_value(&ins, reg_data); memcpy(®_data, &gamma_value, sizeof(float)); XSimple_isp_hp_wrapper_Set_p_gamma_value(&ins, reg_data); XSimple_isp_hp_wrapper_Start(&ins); while (XSimple_isp_hp_wrapper_IsDone(&ins) == 0) { usleep(10000); puts("."); fflush(stdout); } save_ppm("out.ppm", (const uint8_t*)obuf.ptr, channel, width, height); printf("test passed\n"); finally: mempool_fini(&pool); XSimple_isp_hp_wrapper_Release(&ins); return 0; }
/* Lua binding: aitvaras.init().
 * Creates the invocation critical section and pool, enlists this game
 * server with the lobby over HTTP, and starts the embedded mongoose
 * web server. Pushes no Lua results (returns 0); raises a Lua error on
 * invalid configuration. */
static int ml_aitvaras_init(lua_State* l) {
	checkargs(0, "aitvaras.init");

	/* Invocation state must exist before any HTTP/mongoose callback
	 * can fire. */
	invocation_cs = async_make_cs();
	mempool_init(&invocation_pool, sizeof(Invocation));

	/* Keep the lua state for later callbacks. */
	cb_l = l;

	lua_getglobal(l, "aitvaras");
	if(!_validate_conf(l))
		return luaL_error(l, "invalid configuration");

	const char* lobby_addr = _getstr(l, "lobby_addr");
	const char* server_addr = _getstr(l, "server_addr");

	/* Build "<lobby_addr>/enlist" and POST our own address to it.
	 * NOTE(review): enlist_req is alloca'd, valid only until return —
	 * presumably http_post copies the url before returning; confirm. */
	char* enlist_req = alloca(strlen(lobby_addr) + strlen("/enlist") + 1);
	strcpy(enlist_req, lobby_addr);
	strcat(enlist_req, "/enlist");
	http_post(enlist_req, false, server_addr, NULL, _enlist_cb);

	aatree_init(&clients);

	/* mongoose option list: key/value pairs, NULL-terminated. */
	const char* options[] = {
		"listening_ports", _getstr(l, "listening_port"),
		"document_root", _getstr(l, "document_root"),
		NULL
	};
	/* NOTE(review): mg_start result is not checked — a failed start
	 * leaves mg_ctx NULL; confirm callers tolerate that. */
	mg_ctx = mg_start(mg_callback, NULL, options);

	lua_pop(l, 1);  /* pop the "aitvaras" table */

	return 0;
}
/* Set up the Stream5 UDP session cache and session mempool.
 * No-ops when gconfig is NULL, UDP tracking is off, or the cache
 * already exists; both failure paths are fatal. */
void Stream5InitUdp(Stream5GlobalConfig *gconfig)
{
    if (gconfig == NULL)
        return;

    /* Now UDP */
    if (!gconfig->track_udp_sessions || udp_lws_cache != NULL)
        return;

    udp_lws_cache = InitLWSessionCache(gconfig->max_udp_sessions,
                                       30, (3*60), 5, 0, &UdpSessionCleanup);
    if (udp_lws_cache == NULL)
    {
        FatalError("Unable to init stream5 UDP session cache, no UDP "
                   "stream inspection!\n");
    }

    if (mempool_init(&udp_session_mempool,
                     gconfig->max_udp_sessions, sizeof(UdpSession)) != 0)
    {
        FatalError("%s(%d) Could not initialize udp session memory pool.\n",
                   __FILE__, __LINE__);
    }
}
/* Allocate the non-blocking pool of proc_info descriptors used by fork.
 * Returns 0 on success or -ENOMEM if the pool cannot be created. */
int _proc_init_fork(void)
{
    proc_pool = mempool_init(MEMPOOL_TYPE_NONBLOCKING,
                             sizeof(struct proc_info),
                             configMAXPROC);

    return proc_pool ? 0 : -ENOMEM;
}
/*
 * Function name : public_init
 * Purpose       : public module init
 * Inputs        : none
 * Outputs       : none
 * Return value  : on success, return 0 (YT_SUCCESS). on error, -1 (YT_FAILED) is returned.
 *                 NOTE(review): the code actually returns YT_SUCCESSFUL —
 *                 presumably the same constant as YT_SUCCESS; confirm.
 */
int public_init()
{
	INFO_PRINT("\n----->Public模块初始化..........\n");

	// reference counter: record one more user of the public module
	global_shared.retain_count ++;

	/* Bring up the shared subsystems this module depends on. */
	mempool_init();
	init_hash();
	log_init();

	return YT_SUCCESSFUL;
}
/*
 * Initialize message port `id`: record its capacity, carve the backing
 * store `part` into `capacity` fixed-size message slots, and reset the
 * ring indices, inbox pointer and lock.
 *
 * `part` must provide room for capacity * BBOS_MAX_MESSAGE_SIZE bytes —
 * TODO confirm against mempool_init's contract.
 */
void bbos_port_init(bbos_port_id_t id, size_t capacity, const int8_t* part,
                    struct bbos_message** inbox)
{
  ASSERT_PORT_ID(id);
  // TODO(d2rk): assert null pool
  BBOS_ASSERT(inbox != NULL);
  BBOS_ASSERT(part != NULL);
  PRINT_DEBUG("[I] Init port %d[capacity=%d, part=0x%x, inbox=0x%x]\n",
              id, capacity, part, inbox);
  bbos_ports[id].capacity = capacity;
  /* Fix: removed the stray double semicolon that followed this call. */
  bbos_ports[id].pool = mempool_init(part, capacity, BBOS_MAX_MESSAGE_SIZE);
  bbos_ports[id].head = bbos_ports[id].tail = 0;
  bbos_ports[id].inbox = inbox;
  bbos_ports[id].lock = 0;
}
/* Kernel-init entry for procfs: registers the file system, creates its
 * pseudo-fs root vnode and the permanent /proc entries.
 * Returns 0 on success or a negative errno. */
int __kinit__ procfs_init(void)
{
    SUBSYS_DEP(ramfs_init);
    SUBSYS_INIT("procfs");

    /*
     * This must be static as it's referenced and used in the file system via
     * the fs object system.
     */
    static fs_t procfs_fs = {
        .fsname = PROCFS_FSNAME,
        .mount = procfs_mount,
        .sblist_head = SLIST_HEAD_INITIALIZER(),
    };

    /* Pool of per-node info structs, one per process slot; non-blocking
     * so allocation failures surface instead of sleeping. */
    specinfo_pool = mempool_init(MEMPOOL_TYPE_NONBLOCKING,
                                 sizeof(struct procfs_info),
                                 configMAXPROC);
    if (!specinfo_pool)
        return -ENOMEM;

    FS_GIANT_INIT(&procfs_fs.fs_giant);

    /*
     * Inherit unimplemented vnops from ramfs.
     */
    fs_inherit_vnops(&procfs_vnode_ops, &ramfs_vnode_ops);

    vn_procfs = fs_create_pseudofs_root(&procfs_fs, VDEV_MJNR_PROCFS);
    if (!vn_procfs)
        return -ENOMEM;

    /* Hook procfs-specific superblock operations. */
    struct fs_superblock * sb = vn_procfs->sb;
    sb->delete_vnode = procfs_delete_vnode;
    vn_procfs->sb->umount = procfs_umount;

    fs_register(&procfs_fs);

    int err = init_permanent_files();
    if (err)
        return err;

    procfs_updatedir(vn_procfs);

    return 0;
}
/* Create an action queue backed by a mempool of queueLength
 * tSfActionNode entries. Pool-init failure is fatal; the allocation
 * itself is handled by SnortAlloc. */
tSfActionQueueId sfActionQueueInit(
        int queueLength
        )
{
    tSfActionQueue *q = SnortAlloc(sizeof(tSfActionQueue));

    if (q != NULL
        && mempool_init(&q->mempool, queueLength, sizeof(tSfActionNode)) != 0)
    {
        FatalError("%s(%d) Could not initialize action queue memory pool.\n",
                   __FILE__, __LINE__);
    }

    return q;
}
/* Set up the Stream5 ICMP session cache and session mempool from the
 * global config. No-op when tracking is off or the cache exists. */
void Stream5InitIcmp(void)
{
    /* Finally ICMP */
    if((icmp_lws_cache == NULL) && s5_global_config.track_icmp_sessions)
    {
        icmp_lws_cache = InitLWSessionCache(s5_global_config.max_icmp_sessions,
                                            30, 5, 0, NULL);

        if(!icmp_lws_cache)
        {
            FatalError("Unable to init stream5 ICMP session cache, no ICMP "
                       "stream inspection!\n");
        }

        /* Fix: the return value was ignored; the sibling TCP/UDP
         * initializers treat pool-init failure as fatal, so do the same
         * here instead of continuing with an unusable pool. */
        if (mempool_init(&icmp_session_mempool,
                         s5_global_config.max_icmp_sessions,
                         sizeof(IcmpSession)) != 0)
        {
            FatalError("%s(%d) Could not initialize icmp session memory pool.\n",
                       __FILE__, __LINE__);
        }
    }
}
/* Allocate a map_info record from the global map pool, lazily creating
 * the pool on first use. Uses a double-checked lock so the common path
 * (pool already initialized) skips the lock entirely. */
HIDDEN struct map_info *
map_alloc_info (void)
{
  if (!map_init_done)
    {
      intrmask_t saved_mask;

      lock_acquire (&map_init_lock, saved_mask);
      /* Check again under the lock. */
      if (!map_init_done)
        {
          /* third argument 0: presumably "use the pool's default
           * reservation size" — confirm against mempool_init. */
          mempool_init (&map_pool, sizeof(struct map_info), 0);
          map_init_done = 1;
        }
      lock_release (&map_init_lock, saved_mask);
    }
  return mempool_alloc (&map_pool);
}
/*
 * return memory to memory pool
 * (Callback cleanup function was intented to release nested memory in the
 *  memory area. Initially, memory had its structure which could point
 *  other memory area. But the current code (#else) expects no structure.
 *  Thus, the cleanup callback is not needed)
 * The current code (#else) uses the memory pool stored in the
 * per-thread-private data.
 *
 * Returns LDAP_SUCCESS, or LDAP_UNWILLING_TO_PERFORM when the per-thread
 * free list is already at its configured maximum.
 */
int
mempool_return(int type, void *object, mempool_cleanup_callback cleanup)
{
    PR_ASSERT(type >= 0 && type < MEMPOOL_END);

    if (!config_get_mempool_switch()) {
        return LDAP_SUCCESS;    /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return LDAP_SUCCESS;
    }
    PR_Lock(mempool[type].mempool_mutex);
    ((struct mempool_object *)object)->mempool_next = mempool[type].mempool_head;
    mempool[type].mempool_head = (struct mempool_object *)object;
    mempool[type].mempool_cleanup_fn = cleanup;
    mempool[type].mempool_count++;
    PR_Unlock(mempool[type].mempool_mutex);
    return LDAP_SUCCESS;
#else
    {
        struct mempool *my_mempool;
        int maxfreelist;

        my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
        if (NULL == my_mempool ||
            my_mempool[0].mempool_name != mempool_names[0]) {
            /* mempool is not initialized */
            mempool_init(&my_mempool);
        }

        maxfreelist = config_get_mempool_maxfreelist();
        if ((maxfreelist > 0) &&
            (my_mempool[type].mempool_count > maxfreelist)) {
            return LDAP_UNWILLING_TO_PERFORM;
        } else {
            /* Fix: the original linked the object into the SHARED_MEMPOOL
             * global `mempool` array here instead of the thread-private
             * `my_mempool` (and had already set mempool_next redundantly
             * before the maxfreelist check). Link into the per-thread
             * free list, once. */
            ((struct mempool_object *)object)->mempool_next =
                my_mempool[type].mempool_head;
            my_mempool[type].mempool_head = (struct mempool_object *)object;
            my_mempool[type].mempool_cleanup_fn = cleanup;
            my_mempool[type].mempool_count++;
            PR_SetThreadPrivate (mempool_index, (void *)my_mempool);
            return LDAP_SUCCESS;
        }
    }
#endif
}
/* One-time setup of the DAL message subsystem: creates the message
 * pool and marks the control block initialized.
 * Returns 0 on success, -1 if already initialized or pool creation
 * fails. */
int DAL_MessageInit(void)
{
    dal_msg_control_t* ctrl = &g_dal_msg_contaol;

    if (ctrl->init) {
        DAL_ERROR(("already init\n"));
        return -1;
    }

    if (mempool_init(&(ctrl->msg_pool), "dal_mempool",
                     DAL_MESSAGE_POOL_SIZE, sizeof(DAL_Message_t)) != 0) {
        DAL_ERROR(("mempool_init failed\n"));
        return -1;
    }

    ctrl->init = TRUE;
    return 0;
}
// Bring up the BoxLib runtime: MPI (with optional sidecar ranks),
// profiling, RNG seeding, ParmParse, and the Fortran-side memory pool.
// Sidecar ranks run SidecarProcess() and never return to the caller's
// main flow.
void BoxLib::Initialize (int& argc, char**& argv, bool build_parm_parse, MPI_Comm mpi_comm)
{
#ifndef WIN32
    //
    // Make sure to catch new failures.
    //
    std::set_new_handler(BoxLib::OutOfMemory);
#endif

#ifdef BL_BACKTRACING
    signal(SIGSEGV, BLBackTrace::handler); // catch seg fault
    feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);  // trap floating point exceptions
    signal(SIGFPE, BLBackTrace::handler);
#endif

    ParallelDescriptor::StartParallel(&argc, &argv, mpi_comm);

    // Sidecar ranks split off here and do not run the rest of Initialize.
    if(ParallelDescriptor::NProcsSidecar() > 0) {
      if(ParallelDescriptor::InSidecarGroup()) {
        if (ParallelDescriptor::IOProcessor())
          std::cout << "===== SIDECARS INITIALIZED =====" << std::endl;
        ParallelDescriptor::SidecarProcess();
        BoxLib::Finalize();
        return;
      }
    }

    BL_PROFILE_INITIALIZE();

    //
    // Initialize random seed after we're running in parallel.
    //
    BoxLib::InitRandom(ParallelDescriptor::MyProc()+1, ParallelDescriptor::NProcs());

#ifdef BL_USE_MPI
    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "MPI initialized with " << ParallelDescriptor::NProcs() << " MPI processes\n";
    }
#endif

#ifdef _OPENMP
    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "OMP initialized with " << omp_get_max_threads() << " OMP threads\n";
    }
#endif

#ifndef BL_AMRPROF
    // argv[1] containing '=' means all args are key=value pairs;
    // otherwise argv[1] is an input file followed by overrides.
    if (build_parm_parse)
    {
        if (argc == 1)
        {
            ParmParse::Initialize(0,0,0);
        }
        else
        {
            if (strchr(argv[1],'='))
            {
                ParmParse::Initialize(argc-1,argv+1,0);
            }
            else
            {
                ParmParse::Initialize(argc-2,argv+2,argv[1]);
            }
        }
    }
#endif

    mempool_init();

    std::cout << std::setprecision(10);

    // Warn when `long` cannot hold ~9e18, which big runs may need.
    if (double(std::numeric_limits<long>::max()) < 9.e18) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "!\n! WARNING: Maximum of long int, "
                      << std::numeric_limits<long>::max()
                      << ", might be too small for big runs.\n!\n";
        }
    }
}
/*======================================
 * CORE : MAINROUTINE
 * Strips the path from argv[0] into SERVER_NAME, then (full build)
 * brings up threading/mempool/db/signals/timer/socket subsystems,
 * runs the timer+socket event loop until runflag leaves CORE_ST_STOP,
 * and tears everything down in reverse order.
 *--------------------------------------*/
int main (int argc, char **argv)
{
	{// initialize program arguments
		// SERVER_NAME ends up pointing at the basename of argv[0]
		// (last '/' or '\\' separated component).
		char *p1 = SERVER_NAME = argv[0];
		char *p2 = p1;
		while ((p1 = strchr(p2, '/')) != NULL || (p1 = strchr(p2, '\\')) != NULL)
		{
			SERVER_NAME = ++p1;
			p2 = p1;
		}
		arg_c = argc;
		arg_v = argv;
	}
	malloc_init();// needed for Show* in display_title() [FlavioJS]

#ifdef MINICORE // minimalist Core
	display_title();
	usercheck();
	do_init(argc,argv);
	do_final();
#else// not MINICORE
	set_server_type();
	display_title();
	usercheck();

	/* Subsystem bring-up; teardown below mirrors this order in reverse. */
	rathread_init();
	mempool_init();
	db_init();
	signals_init();

#ifdef _WIN32
	cevents_init();
#endif

	timer_init();
	socket_init();

	do_init(argc,argv);

	{// Main runtime cycle
		int next;
		// do_timer returns the delay until the next timer expiry,
		// which bounds the socket poll.
		while (runflag != CORE_ST_STOP) {
			next = do_timer(gettick_nocache());
			do_sockets(next);
		}
	}

	do_final();
	timer_final();
	socket_final();
	db_final();
	mempool_final();
	rathread_final();
#endif

	malloc_final();

	return 0;
}
/*======================================
 * CORE : MAINROUTINE
 * Variant that also changes the working directory to the executable's
 * directory and initializes SQL before the other subsystems.
 *--------------------------------------*/
int main (int argc, char **argv)
{
	{// initialize program arguments
		char *p1;
		if((p1 = strrchr(argv[0], '/')) != NULL || (p1 = strrchr(argv[0], '\\')) != NULL ){
			char *pwd = NULL; //path working directory
			int n=0;
			SERVER_NAME = ++p1;
			n = p1-argv[0]; //calc dir name len
			// NOTE(review): malloc result is fed to safestrncpy unchecked —
			// on OOM this passes NULL; confirm safestrncpy tolerates it.
			pwd = safestrncpy(malloc(n + 1), argv[0], n);
			if(chdir(pwd) != 0)
				ShowError("Couldn't change working directory to %s for %s, runtime will probably fail",pwd,SERVER_NAME);
			free(pwd);
		}
	}
	malloc_init();// needed for Show* in display_title() [FlavioJS]

#ifdef MINICORE // minimalist Core
	display_title();
	usercheck();
	do_init(argc,argv);
	do_final();
#else// not MINICORE
	set_server_type();
	display_title();
	usercheck();

	/* Subsystem bring-up; teardown below mirrors this order in reverse. */
	Sql_Init();
	rathread_init();
	mempool_init();
	db_init();
	signals_init();

#ifdef _WIN32
	cevents_init();
#endif

	timer_init();
	socket_init();

	do_init(argc,argv);

	// Main runtime cycle: do_timer returns the delay until the next
	// timer expiry, which bounds the socket poll.
	while (runflag != CORE_ST_STOP) {
		int next = do_timer(gettick_nocache());
		do_sockets(next);
	}

	do_final();
	timer_final();
	socket_final();
	db_final();
	mempool_final();
	rathread_final();
	ers_final();
#endif

	malloc_final();

	return 0;
}
/* Probe the PCI bus for Intel Pro/1000 adapters, publish a
 * /dev/net/ipro1000/N entry per card, then start the driver's timer
 * and packet mempool. Returns B_OK, B_NO_MEMORY or B_ERROR; on any
 * failure all per-card resources are released via the goto ladder. */
status_t init_driver(void)
{
	struct pci_info *item;
	int index;
	int cards;

#ifdef DEBUG
	set_dprintf_enabled(true);
	load_driver_symbols("ipro1000");
#endif

	dprintf("ipro1000: " INFO "\n");

	item = (pci_info *)malloc(sizeof(pci_info));
	if (!item)
		return B_NO_MEMORY;

	if (get_module(B_PCI_MODULE_NAME, (module_info **)&gPci) < B_OK) {
		free(item);
		return B_ERROR;
	}

	/* Ownership of `item` transfers into gDevList on a match; a fresh
	 * buffer is then allocated for the next probe iteration. */
	for (cards = 0, index = 0; gPci->get_nth_pci_info(index++, item) == B_OK; ) {
		const char *info = identify_device(item);
		if (info) {
			char name[64];
			sprintf(name, "net/ipro1000/%d", cards);
			dprintf("ipro1000: /dev/%s is a %s\n", name, info);
			gDevList[cards] = item;
			/* NOTE(review): strdup result is not checked — a NULL entry
			 * in gDevNameList would terminate the list early; confirm. */
			gDevNameList[cards] = strdup(name);
			gDevNameList[cards + 1] = NULL;
			cards++;
			item = (pci_info *)malloc(sizeof(pci_info));
			if (!item)
				goto err_outofmem;
			if (cards == MAX_CARDS)
				break;
		}
	}

	/* Free the spare probe buffer that found no device. */
	free(item);

	if (!cards)
		goto err_cards;

	if (initialize_timer() != B_OK) {
		ERROROUT("timer init failed");
		goto err_timer;
	}

	/* 768 packet buffers per card — TODO confirm rationale. */
	if (mempool_init(cards * 768) != B_OK) {
		ERROROUT("mempool init failed");
		goto err_mempool;
	}

	return B_OK;

err_mempool:
	terminate_timer();
err_timer:
err_cards:
err_outofmem:
	for (index = 0; index < cards; index++) {
		free(gDevList[index]);
		free(gDevNameList[index]);
	}
	put_module(B_PCI_MODULE_NAME);
	return B_ERROR;
}
int main(void) { MemPool test; MemBucket *bucks[SIZE]; MemBucket *bucket = NULL; int i; //char *stuffs[4] = { "eenie", "meenie", "minie", "moe" }; char *stuffs2[36] = { "1eenie", "2meenie", "3minie", " 4moe", "1xxxxx", "2yyyyyy", "3zzzzz", " 4qqqq", "1eenie", "2meenie", "3minie", " 4moe", "1eenie", "2meenie", "3minie", " 4moe", "1eenie", "2meenie", "3minie", " 4moe", "1eenie", "2meenie", "3minie", " 4moe", "1eenie", "2meenie", "3minie", " 4moe", "1eenie", "2meenie", "3minie", " 4moe", "1eenie", "2meenie", "3minie", " 4moe" }; if(mempool_init(&test, 36, 256)) { printf("error in mempool initialization\n"); } for(i = 0; i < 36; i++) { if((bucks[i] = mempool_alloc(&test)) == NULL) { printf("error in mempool_alloc: i=%d\n", i); continue; } bucket = bucks[i]; bucket->data = strncpy(bucket->data, stuffs2[i], 256); printf("bucket->key: %p\n", bucket->key); printf("bucket->data: %s\n", (char *) bucket->data); } for(i = 0; i < 2; i++) { mempool_free(&test, bucks[i]); bucks[i] = NULL; } for(i = 0; i < 14; i++) { if((bucks[i] = mempool_alloc(&test)) == NULL) { printf("error in mempool_alloc: i=%d\n", i); continue; } bucket = bucks[i]; bucket->data = strncpy(bucket->data, stuffs2[i], 256); printf("bucket->key: %p\n", bucket->key); printf("bucket->data: %s\n", (char *) bucket->data); } printf("free: %u, used: %u\n", test.free_list.size, test.used_list.size); return 0; }
/* One-time libunwind/ia64 target setup, guarded by unw.lock with all
 * signals blocked: initializes the register/labeled-state mempools and
 * builds both little- and big-endian images of the read-only f1, NaT
 * and integer-NaT register values. */
HIDDEN void
tdep_init (void)
{
  /* 16-byte big-endian images of ia64 fp register constants; mirrored
   * into _le/_be fields below. */
  uint8_t f1_bytes[16] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
    0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
  };
  uint8_t nat_val_bytes[16] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xfe,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
  };
  uint8_t int_val_bytes[16] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x3e,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
  };
  intrmask_t saved_mask;
  uint8_t *lep, *bep;
  long i;

  sigfillset (&unwi_full_mask);

  /* Block everything while holding the init lock. */
  sigprocmask (SIG_SETMASK, &unwi_full_mask, &saved_mask);
  mutex_lock (&unw.lock);
  {
    if (!tdep_needs_initialization)
      /* another thread else beat us to it... */
      goto out;

    mi_init ();

    mempool_init (&unw.reg_state_pool, sizeof (struct ia64_reg_state), 0);
    mempool_init (&unw.labeled_state_pool, sizeof (struct ia64_labeled_state), 0);

    unw.read_only.r0 = 0;
    unw.read_only.f0.raw.bits[0] = 0;
    unw.read_only.f0.raw.bits[1] = 0;

    /* Write each table backwards into the _le image and forwards into
     * the _be image, producing both byte orders from one source. */
    lep = (uint8_t *) &unw.read_only.f1_le + 16;
    bep = (uint8_t *) &unw.read_only.f1_be;
    for (i = 0; i < 16; ++i)
      {
        *--lep = f1_bytes[i];
        *bep++ = f1_bytes[i];
      }

    lep = (uint8_t *) &unw.nat_val_le + 16;
    bep = (uint8_t *) &unw.nat_val_be;
    for (i = 0; i < 16; ++i)
      {
        *--lep = nat_val_bytes[i];
        *bep++ = nat_val_bytes[i];
      }

    lep = (uint8_t *) &unw.int_val_le + 16;
    bep = (uint8_t *) &unw.int_val_be;
    for (i = 0; i < 16; ++i)
      {
        *--lep = int_val_bytes[i];
        *bep++ = int_val_bytes[i];
      }

    assert (8*sizeof(unw_hash_index_t) >= IA64_LOG_UNW_HASH_SIZE);

#ifndef UNW_REMOTE_ONLY
    ia64_local_addr_space_init ();
#endif
    tdep_needs_initialization = 0;  /* signal that we're initialized... */
  }
 out:
  mutex_unlock (&unw.lock);
  sigprocmask (SIG_SETMASK, &saved_mask, NULL);
}
/* Prepare an active list: empty the dlist, create a pool of
 * `calls_number` iomplx_item_call slots, and set the free-slot count. */
void iomplx_active_list_init(iomplx_active_list *active_list, unsigned int calls_number)
{
	DLIST_INIT(active_list);
	mempool_init(&active_list->item_calls_pool, sizeof(iomplx_item_call), calls_number);
	active_list->available_item_calls = calls_number;
}
// Bring up the BoxLib runtime: MPI (plus optional UPC++/MPI-3 windows
// and sidecar ranks), the registered initialize-function stack,
// profiling, RNG seeding, ParmParse, FPE trapping, communicator teams
// and the Fortran-side memory pool. Sidecar ranks run SidecarProcess()
// and never return to the caller's main flow.
void
BoxLib::Initialize (int& argc, char**& argv, bool build_parm_parse, MPI_Comm mpi_comm)
{
    ParallelDescriptor::StartParallel(&argc, &argv, mpi_comm);

#ifndef WIN32
    //
    // Make sure to catch new failures.
    //
    std::set_new_handler(BoxLib::OutOfMemory);

    // Record the absolute path of the executable for later reporting.
    if (argv[0][0] != '/') {
        char temp[1024];
        getcwd(temp,1024);
        exename = temp;
        exename += "/";
    }
    exename += argv[0];
#endif

#ifdef BL_USE_UPCXX
    upcxx::init(&argc, &argv);
    if (upcxx::myrank() != ParallelDescriptor::MyProc())
        BoxLib::Abort("UPC++ rank != MPI rank");
#endif

#ifdef BL_USE_MPI3
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &ParallelDescriptor::cp_win);
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &ParallelDescriptor::fb_win);
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &ParallelDescriptor::fpb_win);
#endif

    while (!The_Initialize_Function_Stack.empty())
    {
        //
        // Call the registered function.
        //
        (*The_Initialize_Function_Stack.top())();
        //
        // And then remove it from the stack.
        //
        The_Initialize_Function_Stack.pop();
    }

    // Sidecar ranks split off here and do not run the rest of Initialize.
    if(ParallelDescriptor::NProcsSidecar() > 0) {
      if(ParallelDescriptor::InSidecarGroup()) {
        if (ParallelDescriptor::IOProcessor())
          std::cout << "===== SIDECARS INITIALIZED =====" << std::endl;
        ParallelDescriptor::SidecarProcess();
        BoxLib::Finalize();
        return;
      }
    }

    BL_PROFILE_INITIALIZE();

    //
    // Initialize random seed after we're running in parallel.
    //
    BoxLib::InitRandom(ParallelDescriptor::MyProc()+1, ParallelDescriptor::NProcs());

#ifdef BL_USE_MPI
    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "MPI initialized with " << ParallelDescriptor::NProcs() << " MPI processes\n";
    }
#endif

#ifdef _OPENMP
    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "OMP initialized with " << omp_get_max_threads() << " OMP threads\n";
    }
#endif

    signal(SIGSEGV, BLBackTrace::handler); // catch seg fault
    signal(SIGINT, BLBackTrace::handler);

#ifndef BL_AMRPROF
    // argv[1] containing '=' means all args are key=value pairs;
    // otherwise argv[1] is an input file followed by overrides.
    if (build_parm_parse)
    {
        if (argc == 1)
        {
            ParmParse::Initialize(0,0,0);
        }
        else
        {
            if (strchr(argv[1],'='))
            {
                ParmParse::Initialize(argc-1,argv+1,0);
            }
            else
            {
                ParmParse::Initialize(argc-2,argv+2,argv[1]);
            }
        }
    }

    // Optional floating-point exception trapping, driven by ParmParse.
    {
        ParmParse pp("boxlib");
        pp.query("v", verbose);
        pp.query("verbose", verbose);

        int invalid = 0, divbyzero=0, overflow=0;
        pp.query("fpe_trap_invalid", invalid);
        pp.query("fpe_trap_zero", divbyzero);
        pp.query("fpe_trap_overflow", overflow);
        int flags = 0;
        if (invalid) flags |= FE_INVALID;
        if (divbyzero) flags |= FE_DIVBYZERO;
        if (overflow) flags |= FE_OVERFLOW;
#if defined(__linux__)
#if !defined(__PGI) || (__PGIC__ >= 16)
        if (flags != 0) {
            feenableexcept(flags);  // trap floating point exceptions
            signal(SIGFPE, BLBackTrace::handler);
        }
#endif
#endif
    }

    ParallelDescriptor::StartTeams();

    ParallelDescriptor::StartSubCommunicator();

    mempool_init();
#endif

    std::cout << std::setprecision(10);

    // Warn when `long` cannot hold ~9e18, which big runs may need.
    if (double(std::numeric_limits<long>::max()) < 9.e18) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "!\n! WARNING: Maximum of long int, "
                      << std::numeric_limits<long>::max()
                      << ", might be too small for big runs.\n!\n";
        }
    }

#if defined(BL_USE_FORTRAN_MPI) || defined(BL_USE_F_INTERFACES)
    int fcomm = MPI_Comm_c2f(ParallelDescriptor::Communicator());
    bl_fortran_mpi_comm_init (fcomm);
#endif

#if defined(BL_MEM_PROFILING) && defined(BL_USE_F_BASELIB)
    MemProfiler_f::initialize();
#endif
}
/* Initialize a SAX JSON parser: wires the caller's PJSAX callbacks into
 * yajl (falling back to no_callbacks per slot), sets up schema
 * validation state from schemaInfo, and allocates the yajl handle out
 * of the parser's own memory pool. Always returns true. */
bool jsaxparser_init(jsaxparser_ref parser, JSchemaInfoRef schemaInfo, PJSAXCallbacks *callback, void *callback_ctxt)
{
    /* Zero everything except the trailing mem_pool_t, which
     * mempool_init() sets up below. */
    memset(parser, 0, sizeof(struct jsaxparser) - sizeof(mem_pool_t));

    parser->validator = NOTHING_VALIDATOR;
    parser->uri_resolver = NULL;
    parser->schemaInfo = schemaInfo;
    if (schemaInfo && schemaInfo->m_schema)
    {
        parser->validator = schemaInfo->m_schema->validator;
        parser->uri_resolver = schemaInfo->m_schema->uri_resolver;
    }

    /* Each unset user callback falls back to the matching no-op. */
    if (callback == NULL)
    {
        parser->yajl_cb = no_callbacks;
    }
    else
    {
        parser->yajl_cb.yajl_null = callback->m_null ? (pj_yajl_null)callback->m_null : no_callbacks.yajl_null;
        parser->yajl_cb.yajl_boolean = callback->m_boolean ? (pj_yajl_boolean)callback->m_boolean : no_callbacks.yajl_boolean;
        /* Numbers are delivered unparsed via yajl_number only. */
        parser->yajl_cb.yajl_integer = NULL;
        parser->yajl_cb.yajl_double = NULL;
        parser->yajl_cb.yajl_number = callback->m_number ? (pj_yajl_number)callback->m_number : no_callbacks.yajl_number;
        parser->yajl_cb.yajl_string = callback->m_string ? (pj_yajl_string)callback->m_string : no_callbacks.yajl_string;
        parser->yajl_cb.yajl_start_map = callback->m_objStart ? (pj_yajl_start_map)callback->m_objStart : no_callbacks.yajl_start_map;
        parser->yajl_cb.yajl_map_key = callback->m_objKey ? (pj_yajl_map_key)callback->m_objKey : no_callbacks.yajl_map_key;
        parser->yajl_cb.yajl_end_map = callback->m_objEnd ? (pj_yajl_end_map)callback->m_objEnd : no_callbacks.yajl_end_map;
        parser->yajl_cb.yajl_start_array = callback->m_arrStart ? (pj_yajl_start_array)callback->m_arrStart : no_callbacks.yajl_start_array;
        parser->yajl_cb.yajl_end_array = callback->m_arrEnd ? (pj_yajl_end_array)callback->m_arrEnd : no_callbacks.yajl_end_array;
    }

    parser->errorHandler.m_parser = err_parser;
    parser->errorHandler.m_schema = err_schema;
    parser->errorHandler.m_unknown = err_unknown;
    parser->errorHandler.m_ctxt = parser;

    validation_state_init(&(parser->validation_state), parser->validator, parser->uri_resolver, &jparse_notification);

    PJSAXContext __internalCtxt =
    {
        .ctxt = (callback_ctxt != NULL ? callback_ctxt : NULL),
        .m_handlers = &parser->yajl_cb,
        .m_errors = &parser->errorHandler,
        .m_error_code = 0,
        .errorDescription = NULL,
        .validation_state = &parser->validation_state,
    };
    parser->internalCtxt = __internalCtxt;

    /* yajl allocates out of the parser's own pool. */
    mempool_init(&parser->memory_pool);
    yajl_alloc_funcs allocFuncs = {
        mempool_malloc,
        mempool_realloc,
        mempool_free,
        &parser->memory_pool
    };

    const bool allow_comments = true;

#if YAJL_VERSION < 20000
    yajl_parser_config yajl_opts =
    {
        allow_comments,
        0, // currently only UTF-8 will be supported for input.
    };

    parser->handle = yajl_alloc(&my_bounce, &yajl_opts, &allocFuncs, &parser->internalCtxt);
#else
    parser->handle = yajl_alloc(&my_bounce, &allocFuncs, &parser->internalCtxt);

    yajl_config(parser->handle, yajl_allow_comments, allow_comments ? 1 : 0);
    // currently only UTF-8 will be supported for input.
    yajl_config(parser->handle, yajl_dont_validate_strings, 1);
#endif // YAJL_VERSION

    return true;
}

/* Convert the current yajl status into a stored error message.
 * Returns true when the status is benign (or only "insufficient data"
 * before the final stage on yajl 1.x); false after capturing the yajl
 * error text into parser->yajlError. */
static bool jsaxparser_process_error(jsaxparser_ref parser, const char *buf, int buf_len, bool final_stage)
{
    if (
#if YAJL_VERSION < 20000
        (final_stage || yajl_status_insufficient_data != parser->status) &&
#endif
        !handle_yajl_error(parser->status, parser->handle, buf, buf_len, parser->schemaInfo, &parser->internalCtxt)
       )
    {
        /* Drop any previously captured message before replacing it. */
        if (parser->yajlError)
        {
            yajl_free_error(parser->handle, (unsigned char*)parser->yajlError);
            parser->yajlError = NULL;
        }
        parser->yajlError = (char*)yajl_get_error(parser->handle, 1, (unsigned char*)buf, buf_len);
        return false;
    }

    return true;
}

/* Return the current error text — schema errors take precedence over
 * yajl parse errors — or NULL when no error has been recorded. */
const char *jsaxparser_get_error(jsaxparser_ref parser)
{
    SANITY_CHECK_POINTER(parser);
    if (parser->schemaError)
        return parser->schemaError;

    if (parser->yajlError)
        return parser->yajlError;

    return NULL;
}
/* Filtering proxy entry point: parses the config file, initializes
 * logging/signals/timers/connection state, the ip/cookie filters and
 * pools, white/black lists and the shared buffer mempool, binds the
 * listen socket, forks the configured number of workers, then acts as
 * a supervisor that re-forks any worker that exits. Never returns in
 * normal operation. */
int main(int argc, char *argv[])
{
    int ret, i, cpu = 0, level;
    pid_t pid;
    char buf1[128], buf2[128], buf[8192];

    // argument parse
    if(argc != 2){
        USAGE();
        exit(-1);
    }
    if(!strcmp(argv[1], "-h")){
        USAGE();
        exit(0);
    }

    // config file parse
    // NOTE(review): log() is used here before g_log is initialized below —
    // presumably log() tolerates a NULL/default handle; confirm.
    if( (ret = conf_init(argv[1])) < 0 ){
        log(g_log, "conf[%s] init error\n", argv[1]);
        exit(-1);
    } else {
        log(g_log, "conf[%s] init success\n", argv[1]);
    }

    // log init: map the configured level string onto the log constants
    if(!strncmp(g_global_conf.log_level, "log", 3)){
        level = LOG_LEVEL_LOG;
    } else if(!strncmp(g_global_conf.log_level, "debug", 5)){
        level = LOG_LEVEL_DEBUG;
    } else if(!strncmp(g_global_conf.log_level, "info", 4)){
        level = LOG_LEVEL_INFO;
    } else if(!strncmp(g_global_conf.log_level, "none", 4)){
        level = LOG_NONE;
    } else {
        log(g_log, "log_level[%s] unknown\n", g_global_conf.log_level);
        exit(-1);
    }
    if( (g_log = log_init(g_global_conf.log_path, level)) == NULL ){
        log(g_log, "log[%s] init error\n", g_global_conf.log_path);
        exit(-1);
    }

    // signal init
    if( (signal_init()) < 0 ){
        log(g_log, "signal init error\n");
        exit(-1);
    } else {
        log(g_log, "signal init success\n");
    }

    // timer init
    if( (timer_init()) < 0 ){
        log(g_log, "timer init error\n");
        exit(-1);
    } else {
        log(g_log, "timer init success\n");
    }

    // conneciont init
    if( (ret = connection_init(g_global_conf.max_connections)) < 0 ){
        log(g_log, "connection init error\n");
        exit(-1);
    } else {
        log(g_log, "connection init success\n");
    }

    // ipfilter init — failures here are logged but non-fatal
    ret = ipfilter_conf_init(g_filter_conf.ipfilter_cycle1, g_filter_conf.ipfilter_cycle2, \
            g_filter_conf.ipfilter_threshold1, g_filter_conf.ipfilter_threshold2, \
            g_filter_conf.ipfilter_time1, g_filter_conf.ipfilter_time2);
    if(ret < 0){
        log(g_log, "ipfilter init error\n");
    } else {
        log(g_log, "ipfilter init success\n");
    }

    // cookiefilter init — failures here are logged but non-fatal
    ret = cookiefilter_conf_init(g_filter_conf.cookiefilter_cycle1, g_filter_conf.cookiefilter_cycle2, \
            g_filter_conf.cookiefilter_threshold1, g_filter_conf.cookiefilter_threshold2, \
            g_filter_conf.cookiefilter_time1, g_filter_conf.cookiefilter_time2);
    if(ret < 0){
        log(g_log, "cookiefilter init error\n");
    } else {
        log(g_log, "cookiefilter init success\n");
    }

    // ippool & ipentry init
    if( (ret = ip_pool_init(g_global_conf.max_connections)) < 0 ){
        log(g_log, "ip pool init error\n");
        exit(-1);
    } else {
        log(g_log, "ip pool init success\n");
    }

    // cookie pool init
    if( (ret = cookie_pool_init(1000000)) < 0 ){
        log(g_log, "cookie pool init error\n");
        exit(-1);
    } else {
        log(g_log, "cookie pool init success\n");
    }

    // whitelist init
    if( (g_whitelist = iprange_init(g_filter_conf.whitelist, 1024)) == NULL ){
        log(g_log, "whitelist[%s] init error\n", g_filter_conf.whitelist);
        exit(-1);
    } else {
        log(g_log, "whitelist[%s] init success\n", g_filter_conf.whitelist);
    }

    // blacklist init
    if( (g_blacklist = iprange_init(g_filter_conf.blacklist, 1024)) == NULL ){
        log(g_log, "blacklist[%s] init error\n", g_filter_conf.blacklist);
        exit(-1);
    } else {
        log(g_log, "blacklist[%s] init success\n", g_filter_conf.blacklist);
    }

    // mempool init
    if( (ret = mempool_init(g_global_conf.buffer_size, g_global_conf.max_buffer)) < 0 ){
        log(g_log, "mempool init error\n");
        exit(-1);
    } else {
        log(g_log, "mempool init success\n");
    }

    log(g_log, "all init success\n");

    // make listen: retry every 5s until the listen socket binds
    while(1){
        g_listenfd = make_listen_nonblock(g_global_conf.listen_addr, g_global_conf.listen_port);
        if(g_listenfd < 0){
            log(g_log, "make listen socket error\n");
        } else {
            log(g_log, "make listen socket success %s:%s\n", \
                    g_global_conf.listen_addr, g_global_conf.listen_port);
            break;
        }
        sleep(5);
    }

    if(g_global_conf.daemon){
        daemon(1, 0);
    }

    // fork children: each worker runs work() and never returns;
    // the parent optionally pins each child to a cpu
    for(i = 0; i < g_global_conf.workers ; i++){
        if( (pid = fork()) < 0 ){
            log(g_log, "fork error: %s\n", strerror(errno));
            exit(-1);
        } else if(pid > 0) {
            if(g_global_conf.cpu_attach == 1){
                if(cpu_attach(pid, cpu++) == 0){
                    log(g_log, "cpu attach success\n");
                }
            }
            continue;
        } else {
            work();
            exit(-1);
        }
    }

    // supervisor loop: reopen the log (in case it was rotated/deleted)
    // and re-fork any worker that has exited
    while(1){
        sleep(5);

        // reopen to release log file when deleted
        log_deinit(g_log);
        if( (g_log = log_init(g_global_conf.log_path, level)) == NULL ){
            log(g_log, "log init error\n");
        }

        pid = waitpid(-1, NULL, WNOHANG);
        if(pid > 0){
            log(g_log, "process[%d] exit, restart again\n", pid);
            while( (pid = fork()) == -1 ){
                log(g_log, "fork error: %s\n", strerror(errno));
                sleep(5);
            }
            if(pid > 0){
                log(g_log, "fork success\n");
                continue;
            } else {
                log(g_log, "goto work\n");
                work();
                exit(-1);
            }
        } else if(pid < 0) {
            log(g_log, "wait error: %s\n", strerror(errno));
        } else {
        }
    }

    return 0;
}