/*
 * Read and process the bluegene.conf configuration file so to interpret what
 * blocks are static/dynamic, torus/mesh, etc.
 *
 * Fills the global bg_conf structure: default boot images per architecture
 * (BGL/BGP/BGQ), midplane/nodecard/ionode geometry, layout mode, passthrough
 * denial masks, and — for STATIC/OVERLAP layouts — the blocks listed in the
 * file.  Also pre-builds the bitmaps of valid small-block ionode ranges.
 *
 * Returns SLURM_SUCCESS, or does not return at all: any unusable
 * configuration calls fatal().  On a re-read (second and later calls) only
 * the bridge log is reopened; other changes require a slurmctld restart.
 */
extern int read_bg_conf(void)
{
	int i;
	bool tmp_bool = 0;
	int count = 0;
	s_p_hashtbl_t *tbl = NULL;
	char *tmp_char = NULL;
	select_ba_request_t **blockreq_array = NULL;
	image_t **image_array = NULL;
	image_t *image = NULL;
	/* mtime at the last successful parse; non-zero means we already
	 * did the full parse once in this process. */
	static time_t last_config_update = (time_t) 0;
	struct stat config_stat;
	ListIterator itr = NULL;
	char* bg_conf_file = NULL;
	static int *dims = NULL;

	/* cache the machine dimensions once */
	if (!dims)
		dims = select_g_ba_get_dims();

	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
		info("Reading the bluegene.conf file");

	/* check if config file has changed */
	bg_conf_file = get_extra_conf_path("bluegene.conf");

	if (stat(bg_conf_file, &config_stat) < 0)
		fatal("can't stat bluegene.conf file %s: %m", bg_conf_file);
	if (last_config_update) {
		/* Re-read path: we never re-parse; only the bridge log is
		 * reopened and the admin is told to restart for anything
		 * else. */
		_reopen_bridge_log();
		if (last_config_update == config_stat.st_mtime) {
			if (bg_conf->slurm_debug_flags
			    & DEBUG_FLAG_SELECT_TYPE)
				info("%s unchanged", bg_conf_file);
		} else {
			info("Restart slurmctld for %s changes "
			     "to take effect", bg_conf_file);
		}
		last_config_update = config_stat.st_mtime;
		xfree(bg_conf_file);
		return SLURM_SUCCESS;
	}
	last_config_update = config_stat.st_mtime;

	/* initialization */
	/* bg_conf defined in bg_node_alloc.h */
	if (!(tbl = config_make_tbl(bg_conf_file)))
		fatal("something wrong with opening/reading bluegene "
		      "conf file");
	xfree(bg_conf_file);

#ifdef HAVE_BGL
	/* BGL: Blrts/Linux/RamDisk image triplet.  For each image kind:
	 * append any "Alt*Image" entries, then either take the configured
	 * default, or fall back to the first alternate (marking it def). */
	if (s_p_get_array((void ***)&image_array,
			  &count, "AltBlrtsImage", tbl)) {
		for (i = 0; i < count; i++) {
			list_append(bg_conf->blrts_list, image_array[i]);
			image_array[i] = NULL;	/* list owns it now */
		}
	}
	if (!s_p_get_string(&bg_conf->default_blrtsimage,
			    "BlrtsImage", tbl)) {
		if (!list_count(bg_conf->blrts_list))
			fatal("BlrtsImage not configured "
			      "in bluegene.conf");
		itr = list_iterator_create(bg_conf->blrts_list);
		image = list_next(itr);
		image->def = true;
		list_iterator_destroy(itr);
		bg_conf->default_blrtsimage = xstrdup(image->name);
		info("Warning: using %s as the default BlrtsImage. "
		     "If this isn't correct please set BlrtsImage",
		     bg_conf->default_blrtsimage);
	} else {
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("default BlrtsImage %s",
			     bg_conf->default_blrtsimage);
		image = xmalloc(sizeof(image_t));
		image->name = xstrdup(bg_conf->default_blrtsimage);
		image->def = true;
		image->groups = NULL;
		/* we want it to be first */
		list_push(bg_conf->blrts_list, image);
	}

	if (s_p_get_array((void ***)&image_array,
			  &count, "AltLinuxImage", tbl)) {
		for (i = 0; i < count; i++) {
			list_append(bg_conf->linux_list, image_array[i]);
			image_array[i] = NULL;
		}
	}
	if (!s_p_get_string(&bg_conf->default_linuximage,
			    "LinuxImage", tbl)) {
		if (!list_count(bg_conf->linux_list))
			fatal("LinuxImage not configured "
			      "in bluegene.conf");
		itr = list_iterator_create(bg_conf->linux_list);
		image = list_next(itr);
		image->def = true;
		list_iterator_destroy(itr);
		bg_conf->default_linuximage = xstrdup(image->name);
		info("Warning: using %s as the default LinuxImage. "
		     "If this isn't correct please set LinuxImage",
		     bg_conf->default_linuximage);
	} else {
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("default LinuxImage %s",
			     bg_conf->default_linuximage);
		image = xmalloc(sizeof(image_t));
		image->name = xstrdup(bg_conf->default_linuximage);
		image->def = true;
		image->groups = NULL;
		/* we want it to be first */
		list_push(bg_conf->linux_list, image);
	}

	if (s_p_get_array((void ***)&image_array,
			  &count, "AltRamDiskImage", tbl)) {
		for (i = 0; i < count; i++) {
			list_append(bg_conf->ramdisk_list, image_array[i]);
			image_array[i] = NULL;
		}
	}
	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
			    "RamDiskImage", tbl)) {
		if (!list_count(bg_conf->ramdisk_list))
			fatal("RamDiskImage not configured "
			      "in bluegene.conf");
		itr = list_iterator_create(bg_conf->ramdisk_list);
		image = list_next(itr);
		image->def = true;
		list_iterator_destroy(itr);
		bg_conf->default_ramdiskimage = xstrdup(image->name);
		info("Warning: using %s as the default RamDiskImage. "
		     "If this isn't correct please set RamDiskImage",
		     bg_conf->default_ramdiskimage);
	} else {
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("default RamDiskImage %s",
			     bg_conf->default_ramdiskimage);
		image = xmalloc(sizeof(image_t));
		image->name = xstrdup(bg_conf->default_ramdiskimage);
		image->def = true;
		image->groups = NULL;
		/* we want it to be first */
		list_push(bg_conf->ramdisk_list, image);
	}
#elif defined HAVE_BGP
	/* BGP: Cnload/Ioload images map onto the linux/ramdisk lists. */
	if (s_p_get_array((void ***)&image_array,
			  &count, "AltCnloadImage", tbl)) {
		for (i = 0; i < count; i++) {
			list_append(bg_conf->linux_list, image_array[i]);
			image_array[i] = NULL;
		}
	}
	if (!s_p_get_string(&bg_conf->default_linuximage,
			    "CnloadImage", tbl)) {
		if (!list_count(bg_conf->linux_list))
			fatal("CnloadImage not configured "
			      "in bluegene.conf");
		itr = list_iterator_create(bg_conf->linux_list);
		image = list_next(itr);
		image->def = true;
		list_iterator_destroy(itr);
		bg_conf->default_linuximage = xstrdup(image->name);
		info("Warning: using %s as the default CnloadImage. "
		     "If this isn't correct please set CnloadImage",
		     bg_conf->default_linuximage);
	} else {
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("default CnloadImage %s",
			     bg_conf->default_linuximage);
		image = xmalloc(sizeof(image_t));
		image->name = xstrdup(bg_conf->default_linuximage);
		image->def = true;
		image->groups = NULL;
		/* we want it to be first */
		list_push(bg_conf->linux_list, image);
	}

	if (s_p_get_array((void ***)&image_array,
			  &count, "AltIoloadImage", tbl)) {
		for (i = 0; i < count; i++) {
			list_append(bg_conf->ramdisk_list, image_array[i]);
			image_array[i] = NULL;
		}
	}
	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
			    "IoloadImage", tbl)) {
		if (!list_count(bg_conf->ramdisk_list))
			fatal("IoloadImage not configured "
			      "in bluegene.conf");
		itr = list_iterator_create(bg_conf->ramdisk_list);
		image = list_next(itr);
		image->def = true;
		list_iterator_destroy(itr);
		bg_conf->default_ramdiskimage = xstrdup(image->name);
		info("Warning: using %s as the default IoloadImage. "
		     "If this isn't correct please set IoloadImage",
		     bg_conf->default_ramdiskimage);
	} else {
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("default IoloadImage %s",
			     bg_conf->default_ramdiskimage);
		image = xmalloc(sizeof(image_t));
		image->name = xstrdup(bg_conf->default_ramdiskimage);
		image->def = true;
		image->groups = NULL;
		/* we want it to be first */
		list_push(bg_conf->ramdisk_list, image);
	}
#endif
	/* MloaderImage exists on every architecture. */
	if (s_p_get_array((void ***)&image_array,
			  &count, "AltMloaderImage", tbl)) {
		for (i = 0; i < count; i++) {
			list_append(bg_conf->mloader_list, image_array[i]);
			image_array[i] = NULL;
		}
	}
	if (!s_p_get_string(&bg_conf->default_mloaderimage,
			    "MloaderImage", tbl)) {
		if (!list_count(bg_conf->mloader_list))
			fatal("MloaderImage not configured "
			      "in bluegene.conf");
		itr = list_iterator_create(bg_conf->mloader_list);
		image = list_next(itr);
		image->def = true;
		list_iterator_destroy(itr);
		bg_conf->default_mloaderimage = xstrdup(image->name);
		info("Warning: using %s as the default MloaderImage. "
		     "If this isn't correct please set MloaderImage",
		     bg_conf->default_mloaderimage);
	} else {
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("default MloaderImage %s",
			     bg_conf->default_mloaderimage);
		image = xmalloc(sizeof(image_t));
		image->name = xstrdup(bg_conf->default_mloaderimage);
		image->def = true;
		image->groups = NULL;
		/* we want it to be first */
		list_push(bg_conf->mloader_list, image);
	}

	/* Geometry: cnodes per midplane ("BasePartitionNodeCnt" is the
	 * legacy keyword), defaulting to 512. */
	if (!s_p_get_uint16(&bg_conf->mp_cnode_cnt,
			    "MidplaneNodeCnt", tbl)) {
		if (!s_p_get_uint16(&bg_conf->mp_cnode_cnt,
				    "BasePartitionNodeCnt", tbl)) {
			error("MidplaneNodeCnt not configured in bluegene.conf "
			      "defaulting to 512 as MidplaneNodeCnt");
			bg_conf->mp_cnode_cnt = 512;
		}
	}

	if (bg_conf->mp_cnode_cnt <= 0)
		fatal("You should have more than 0 nodes "
		      "per base partition");
	bg_conf->actual_cnodes_per_mp = bg_conf->mp_cnode_cnt;
	bg_conf->quarter_cnode_cnt = bg_conf->mp_cnode_cnt/4;

	/* bg_conf->cpus_per_mp should had already been set from the
	 * node_init */
	if (bg_conf->cpus_per_mp < bg_conf->mp_cnode_cnt) {
		fatal("For some reason we have only %u cpus per mp, but "
		      "have %u cnodes per mp. You need at least the same "
		      "number of cpus as you have cnodes per mp. "
		      "Check the NodeName CPUs= "
		      "definition in the slurm.conf.",
		      bg_conf->cpus_per_mp, bg_conf->mp_cnode_cnt);
	}

	/* integer division: cpus per cnode */
	bg_conf->cpu_ratio = bg_conf->cpus_per_mp/bg_conf->mp_cnode_cnt;
	if (!bg_conf->cpu_ratio)
		fatal("We appear to have less than 1 cpu on a cnode. "
		      "You specified %u for BasePartitionNodeCnt "
		      "in the blugene.conf and %u cpus "
		      "for each node in the slurm.conf",
		      bg_conf->mp_cnode_cnt, bg_conf->cpus_per_mp);

	/* total cpus in the machine = product of dims * cpus per mp */
	num_unused_cpus = 1;
	for (i = 0; i<SYSTEM_DIMENSIONS; i++)
		num_unused_cpus *= dims[i];
	num_unused_cpus *= bg_conf->cpus_per_mp;
	num_possible_unused_cpus = num_unused_cpus;

	/* cnodes per nodecard ("NodeCardNodeCnt" is the legacy keyword),
	 * defaulting to 32 */
	if (!s_p_get_uint16(&bg_conf->nodecard_cnode_cnt,
			    "NodeBoardNodeCnt", tbl)) {
		if (!s_p_get_uint16(&bg_conf->nodecard_cnode_cnt,
				    "NodeCardNodeCnt", tbl)) {
			error("NodeCardNodeCnt not configured in bluegene.conf "
			      "defaulting to 32 as NodeCardNodeCnt");
			bg_conf->nodecard_cnode_cnt = 32;
		}
	}

	if (bg_conf->nodecard_cnode_cnt <= 0)
		fatal("You should have more than 0 nodes per nodecard");

	bg_conf->mp_nodecard_cnt =
		bg_conf->mp_cnode_cnt / bg_conf->nodecard_cnode_cnt;

	/* "Numpsets" is the legacy keyword for IONodesPerMP */
	if (!s_p_get_uint16(&bg_conf->ionodes_per_mp, "IONodesPerMP", tbl))
		if (!s_p_get_uint16(&bg_conf->ionodes_per_mp,
				    "Numpsets", tbl))
			fatal("Warning: IONodesPerMP not configured "
			      "in bluegene.conf");

	s_p_get_uint16(&bg_conf->max_block_err, "MaxBlockInError", tbl);

	tmp_bool = 0;
	s_p_get_boolean(&tmp_bool, "SubMidplaneSystem", tbl);
	bg_conf->sub_mp_sys = tmp_bool;

#ifdef HAVE_BGQ
	tmp_bool = 0;
	s_p_get_boolean(&tmp_bool, "AllowSubBlockAllocations", tbl);
	bg_conf->sub_blocks = tmp_bool;

	/* You can only have 16 ionodes per midplane */
	if (bg_conf->ionodes_per_mp > bg_conf->mp_nodecard_cnt)
		bg_conf->ionodes_per_mp = bg_conf->mp_nodecard_cnt;
#endif

	/* Connection type: every dimension starts unset; dimension 0 must
	 * end up Mesh or Torus (Torus if nothing was configured). */
	for (i=0; i<SYSTEM_DIMENSIONS; i++)
		bg_conf->default_conn_type[i] = (uint16_t)NO_VAL;
	s_p_get_string(&tmp_char, "DefaultConnType", tbl);
	if (tmp_char) {
		verify_conn_type(tmp_char, bg_conf->default_conn_type);
		if ((bg_conf->default_conn_type[0] != SELECT_MESH)
		    && (bg_conf->default_conn_type[0] != SELECT_TORUS))
			fatal("Can't have a DefaultConnType of %s "
			      "(only Mesh or Torus values are valid).",
			      tmp_char);
		xfree(tmp_char);
	} else
		bg_conf->default_conn_type[0] = SELECT_TORUS;

#ifndef HAVE_BG_L_P
	/* propagate dimension 0's type to any unset dimension */
	int first_conn_type = bg_conf->default_conn_type[0];
	for (i=1; i<SYSTEM_DIMENSIONS; i++) {
		if (bg_conf->default_conn_type[i] == (uint16_t)NO_VAL)
			bg_conf->default_conn_type[i] = first_conn_type;
		else if (bg_conf->default_conn_type[i] >= SELECT_SMALL)
			/* NOTE(review): tmp_char was xfree'd (or NULL)
			 * above, so this fatal would print "(null)" —
			 * TODO confirm intent. */
			fatal("Can't have a DefaultConnType of %s "
			      "(only Mesh or Torus values are valid).",
			      tmp_char);
	}
#endif

	if (bg_conf->ionodes_per_mp) {
		bitstr_t *tmp_bitmap = NULL;
		int small_size = 1;

		/* THIS IS A HACK TO MAKE A 1 NODECARD SYSTEM WORK.
		 * Sometimes on a Q system the nodecard isn't in the 0
		 * spot so only do this if you know it is in that
		 * spot. Otherwise say the whole midplane is there
		 * and just make blocks over the whole thing. Then
		 * you can error out the blocks that aren't usable. */
		if (bg_conf->sub_mp_sys &&
		    bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt) {
#ifdef HAVE_BGQ
			bg_conf->quarter_ionode_cnt = 1;
			bg_conf->nodecard_ionode_cnt = 1;
#else
			bg_conf->quarter_ionode_cnt = 2;
			bg_conf->nodecard_ionode_cnt = 2;
#endif
		} else {
			bg_conf->quarter_ionode_cnt =
				bg_conf->ionodes_per_mp/4;
			bg_conf->nodecard_ionode_cnt =
				bg_conf->quarter_ionode_cnt/4;
		}

		/* How many nodecards per ionode */
		bg_conf->nc_ratio =
			((double)bg_conf->mp_cnode_cnt
			 / (double)bg_conf->nodecard_cnode_cnt)
			/ (double)bg_conf->ionodes_per_mp;
		/* How many ionodes per nodecard */
		bg_conf->io_ratio =
			(double)bg_conf->ionodes_per_mp /
			((double)bg_conf->mp_cnode_cnt
			 / (double)bg_conf->nodecard_cnode_cnt);
		/* How many cnodes per ionode */
		bg_conf->ionode_cnode_cnt =
			bg_conf->nodecard_cnode_cnt * bg_conf->nc_ratio;

		//info("got %f %f", bg_conf->nc_ratio, bg_conf->io_ratio);
		/* figure out the smallest block we can have on the system */
#ifdef HAVE_BGL
		if (bg_conf->io_ratio >= 1)
			bg_conf->smallest_block=32;
		else
			bg_conf->smallest_block=128;
#else
		if (bg_conf->io_ratio >= 2)
			bg_conf->smallest_block=16;
		else if (bg_conf->io_ratio == 1)
			bg_conf->smallest_block=32;
		else if (bg_conf->io_ratio == .5)
			bg_conf->smallest_block=64;
		else if (bg_conf->io_ratio == .25)
			bg_conf->smallest_block=128;
		else if (bg_conf->io_ratio == .125)
			bg_conf->smallest_block=256;
		else {
			error("unknown ioratio %f. Can't figure out "
			      "smallest block size, setting it to midplane",
			      bg_conf->io_ratio);
			bg_conf->smallest_block = 512;
		}
#endif
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
			info("Smallest block possible on this system is %u",
			     bg_conf->smallest_block);
		/* below we are creating all the possible bitmaps for
		 * each size of small block
		 */
		if ((int)bg_conf->nodecard_ionode_cnt < 1) {
			bg_conf->nodecard_ionode_cnt = 0;
		} else {
			/* 32-cnode blocks: one bitmap per nodecard-sized
			 * ionode range */
			bg_lists->valid_small32 =
				list_create(_destroy_bitmap);
			/* This is suppose to be = and not ==, we only
			   want to decrement when small_size equals
			   something.
			*/
			if ((small_size = bg_conf->nodecard_ionode_cnt))
				small_size--;
			i = 0;
			while (i<bg_conf->ionodes_per_mp) {
				tmp_bitmap =
					bit_alloc(bg_conf->ionodes_per_mp);
				bit_nset(tmp_bitmap, i, i+small_size);
				i += small_size+1;
				list_append(bg_lists->valid_small32,
					    tmp_bitmap);
			}
		}
		/* If we only have 1 nodecard just jump to the end
		   since this will never need to happen below.
		   Pretty much a hack to avoid seg fault;).
		*/
		if (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)
			goto no_calc;

		/* 128-cnode (quarter) blocks */
		bg_lists->valid_small128 = list_create(_destroy_bitmap);
		if ((small_size = bg_conf->quarter_ionode_cnt))
			small_size--;
		i = 0;
		while (i<bg_conf->ionodes_per_mp) {
			tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
			bit_nset(tmp_bitmap, i, i+small_size);
			i += small_size+1;
			list_append(bg_lists->valid_small128, tmp_bitmap);
		}

#ifndef HAVE_BGL
		/* 64-cnode blocks (two nodecards) */
		bg_lists->valid_small64 = list_create(_destroy_bitmap);
		if ((small_size = bg_conf->nodecard_ionode_cnt * 2))
			small_size--;
		i = 0;
		while (i<bg_conf->ionodes_per_mp) {
			tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
			bit_nset(tmp_bitmap, i, i+small_size);
			i += small_size+1;
			list_append(bg_lists->valid_small64, tmp_bitmap);
		}

		/* 256-cnode blocks (two quarters) */
		bg_lists->valid_small256 = list_create(_destroy_bitmap);
		if ((small_size = bg_conf->quarter_ionode_cnt * 2))
			small_size--;
		i = 0;
		while (i<bg_conf->ionodes_per_mp) {
			tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
			bit_nset(tmp_bitmap, i, i+small_size);
			i += small_size+1;
			list_append(bg_lists->valid_small256, tmp_bitmap);
		}
#endif
	} else {
		fatal("your ionodes_per_mp is 0");
	}

no_calc:

	if (!s_p_get_uint16(&bg_conf->bridge_api_verb,
			    "BridgeAPIVerbose", tbl))
		info("Warning: BridgeAPIVerbose not configured "
		     "in bluegene.conf");
	if (!s_p_get_string(&bg_conf->bridge_api_file,
			    "BridgeAPILogFile", tbl))
		info("BridgeAPILogFile not configured in bluegene.conf");
	else
		_reopen_bridge_log();

	/* DenyPassthrough: accumulate per-dimension deny bits into the
	 * file-scope ba_deny_pass and mirror it into bg_conf */
	if (s_p_get_string(&tmp_char, "DenyPassthrough", tbl)) {
		if (strstr(tmp_char, "A"))
			ba_deny_pass |= PASS_DENY_A;
		if (strstr(tmp_char, "X"))
			ba_deny_pass |= PASS_DENY_X;
		if (strstr(tmp_char, "Y"))
			ba_deny_pass |= PASS_DENY_Y;
		if (strstr(tmp_char, "Z"))
			ba_deny_pass |= PASS_DENY_Z;
		if (!strcasecmp(tmp_char, "ALL"))
			ba_deny_pass |= PASS_DENY_ALL;
		bg_conf->deny_pass = ba_deny_pass;
		xfree(tmp_char);
	}

	if (!s_p_get_string(&tmp_char, "LayoutMode", tbl)) {
		info("Warning: LayoutMode was not specified in bluegene.conf "
		     "defaulting to STATIC partitioning");
		bg_conf->layout_mode = LAYOUT_STATIC;
	} else {
		if (!strcasecmp(tmp_char,"STATIC"))
			bg_conf->layout_mode = LAYOUT_STATIC;
		else if (!strcasecmp(tmp_char,"OVERLAP"))
			bg_conf->layout_mode = LAYOUT_OVERLAP;
		else if (!strcasecmp(tmp_char,"DYNAMIC"))
			bg_conf->layout_mode = LAYOUT_DYNAMIC;
		else {
			fatal("I don't understand this LayoutMode = %s",
			      tmp_char);
		}
		xfree(tmp_char);
	}

	/* add blocks defined in file */
	if (bg_conf->layout_mode != LAYOUT_DYNAMIC) {
		/* "BPs" is the legacy keyword for "MPs" */
		if (!s_p_get_array((void ***)&blockreq_array,
				   &count, "MPs", tbl)) {
			if (!s_p_get_array((void ***)&blockreq_array,
					   &count, "BPs", tbl)) {
				info("WARNING: no blocks defined in "
				     "bluegene.conf, "
				     "only making full system block");
				/* create_full_system_block(NULL); */
				if (bg_conf->sub_mp_sys ||
				    (bg_conf->mp_cnode_cnt ==
				     bg_conf->nodecard_cnode_cnt))
					fatal("On a sub-midplane system you "
					      "need to define the blocks you "
					      "want on your system.");
			}
		}
		for (i = 0; i < count; i++) {
			add_bg_record(bg_lists->main, NULL,
				      blockreq_array[i], 0, 0);
		}
	} else if (bg_conf->sub_mp_sys ||
		   (bg_conf->mp_cnode_cnt ==
		    bg_conf->nodecard_cnode_cnt))
		/* we can't do dynamic here on a sub-midplane system */
		fatal("On a sub-midplane system we can only do OVERLAP or "
		      "STATIC LayoutMode. Please update your bluegene.conf.");

#ifdef HAVE_BGQ
	/* RebootQOSList: translate the comma-separated QOS names into a
	 * bitmap of QOS ids allowed to trigger a block reboot */
	if ((bg_recover != NOT_FROM_CONTROLLER) &&
	    s_p_get_string(&tmp_char, "RebootQOSList", tbl)) {
		bool valid;
		char *token, *last = NULL;
		slurmdb_qos_rec_t *qos = NULL;

		bg_conf->reboot_qos_bitmap = bit_alloc(g_qos_count);
		itr = list_iterator_create(assoc_mgr_qos_list);
		token = strtok_r(tmp_char, ",", &last);
		while (token) {
			valid = false;
			while((qos = list_next(itr))) {
				if (!strcasecmp(token, qos->name)) {
					bit_set(bg_conf->reboot_qos_bitmap,
						qos->id);
					valid = true;
					break;
				}
			}
			if (!valid)
				error("Invalid RebootQOSList value: %s",
				      token);
			list_iterator_reset(itr);
			token = strtok_r(NULL, ",", &last);
		}
		list_iterator_destroy(itr);
		xfree(tmp_char);
	}
#endif

	s_p_hashtbl_destroy(tbl);

	return SLURM_SUCCESS;
}
// add new transaction to the transaction list // success: SUCCESS, // failure: ERRNO_MP_LIST_PUSH int transaction_add_to_list(transaction_t * tr, list_t * tr_list) { node_t * nd = node_create((void *)tr); if(SUCCESS != list_push(tr_list, nd, (void *)&(tr->transaction_id))) {return ERRNO_MP_LIST_PUSH;} return SUCCESS; }
/*
 * Create a new task: allocate a free thread number, create the kernel
 * thread via L4_ThreadControl, give it a fresh address space with the
 * supplied pager, and record it in the global thread list.
 *
 * Returns the new AddrSpace_t, or NULL on any failure (all partial work
 * is rolled back).  The whole operation runs under thrlock.
 */
AddrSpace_t *task_new(L4_ThreadId_t pager)
{
	L4_Word_t tno;
	L4_ThreadId_t tid;
	L4_ThreadId_t space_spec;
	L4_Word_t utcb_location;
	AddrSpace_t *space = NULL;
	slab_t *sb;
	list_t *li;
	thread_t *this;

	mutex_lock(&thrlock);
	/* pick an unused thread number; 0 means the bitmap is full */
	tno = threadno_find_free(bitmap, MAX_TASKS);
	if (!tno) {
		mutex_unlock(&thrlock);
		return NULL;
	}

	tid = L4_GlobalId(tno, 1);
	utcb_location = UTCB_AREA_LOCATION;
	/* space specifier == tid: the new thread starts its own space */
	space_spec = tid;

	/* thread_t bookkeeping record comes from the slab pool */
	sb = slab_alloc(&thrpool);
	if (!sb) {
		mutex_unlock(&thrlock);
		return NULL;
	}

	/* create the kernel thread; on failure return the slab */
	if (FALSE == (L4_ThreadControl(tid, space_spec, L4_Myself(),
				       L4_nilthread,
				       (void *) utcb_location))) {
		slab_free(&thrpool, sb);
		mutex_unlock(&thrlock);
		return NULL;
	}

	space = address_space_new(tid, pager);
	if (!space) {
		/* roll back: deleting the thread (space_spec ==
		 * nilthread) undoes the ThreadControl above */
		L4_ThreadControl(tid, L4_nilthread, L4_nilthread,
				 L4_nilthread, (void *) -1);
		slab_free(&thrpool, sb);
		mutex_unlock(&thrlock);
		return NULL;
	} else {
		/* set self space, and the specified pager
		 * FIXME - using myself as the scheduler */
		L4_ThreadControl(tid, tid, L4_Myself(), pager, (void *) -1);
	}

	/* link the thread record into the global thread list */
	li = LIST_TYPE(sb->data);
	this = (thread_t *) li->data;
	list_push(&thread_list, li);

	this->tid = tid;
	this->space = space;
	this->index = 0;		/* first thread of the new space */
	this->creation = L4_SystemClock();

	/* mark the thread number used globally and slot 0 used within
	 * the new space */
	threadno_alloc(bitmap, L4_ThreadNo(tid));
	threadno_alloc(space->threads, 0);
	mutex_unlock(&thrlock);

	return space;
}
/*
 * Perform job initiation work: make sure the job's block exists and is
 * free of overlapping work, apply any requested image/conn-type changes
 * (rebooting the block if needed), boot the block, and finally clear the
 * job's CONFIGURING flag once the block is INITED.
 *
 * bg_action_ptr describes the job and its target block.  On any failure
 * the job is requeued (bg_requeue_job / requeue_and_error / slurm_fail_job).
 * Locking: acquires and releases block_state_mutex several times; the
 * slurmctld job-write lock is taken only after block_state_mutex has been
 * dropped, to avoid deadlock.
 *
 * Fix: the "block not found" path used to execute
 * "bg_record->modifying = 0;" on the NULL pointer it had just tested —
 * a guaranteed crash.  A non-existent record has no flag to clear, so the
 * statement is simply removed.
 */
static void _start_agent(bg_action_t *bg_action_ptr)
{
	int rc, set_user_rc = SLURM_SUCCESS;
	bg_record_t *bg_record = NULL;
	bg_record_t *found_record = NULL;
	ListIterator itr;
	List delete_list = NULL;
	int requeue_job = 0;
	uint32_t req_job_id = bg_action_ptr->job_ptr->job_id;
	bool block_inited = 0;
	bool delete_it = 0;

	slurm_mutex_lock(&block_state_mutex);
	bg_record = find_bg_record_in_list(bg_lists->main,
					   bg_action_ptr->bg_block_id);

	if (!bg_record) {
		/* NOTE: do not touch bg_record here — it is NULL.  The
		 * previous code dereferenced it to clear 'modifying'. */
		slurm_mutex_unlock(&block_state_mutex);
		error("block %s not found in bg_lists->main",
		      bg_action_ptr->bg_block_id);
		bg_requeue_job(req_job_id, 1, 0, JOB_BOOT_FAIL, false);
		return;
	}

	if ((bg_record->job_running <= NO_JOB_RUNNING)
	    && !find_job_in_bg_record(bg_record, req_job_id)) {
		bg_record->modifying = 0;
		// bg_reset_block(bg_record); should already happened
		slurm_mutex_unlock(&block_state_mutex);
		debug("job %u finished during the queueing job "
		      "(everything is ok)",
		      req_job_id);
		return;
	}

	if ((bg_record->state == BG_BLOCK_TERM) || bg_record->free_cnt) {
		/* It doesn't appear state of a small block
		   (conn_type) is held on a BGP system so if we to
		   reset it so, just set the reboot flag and handle it
		   later in that code. */
		bg_action_ptr->reboot = 1;
	}

	/* Collect every overlapping block; any of them still running a
	 * job means we must requeue ours instead. */
	delete_list = list_create(NULL);
	itr = list_iterator_create(bg_lists->main);
	while ((found_record = list_next(itr))) {
		if (bg_record == found_record)
			continue;

		if (!blocks_overlap(bg_record, found_record)) {
			debug2("block %s isn't part of %s",
			       found_record->bg_block_id,
			       bg_record->bg_block_id);
			continue;
		}

		if (found_record->job_ptr
		    || (found_record->job_list
			&& list_count(found_record->job_list))) {
			struct job_record *job_ptr = found_record->job_ptr;
			if (!found_record->job_ptr)
				job_ptr = find_job_in_bg_record(
					found_record, NO_VAL);
			error("Trying to start job %u on block %s, "
			      "but there is a job %u running on an overlapping "
			      "block %s it will not end until %ld. "
			      "This should never happen.",
			      req_job_id,
			      bg_record->bg_block_id,
			      job_ptr->job_id,
			      found_record->bg_block_id,
			      job_ptr->end_time);
			requeue_job = 1;
			break;
		}

		debug2("need to make sure %s is free, it's part of %s",
		       found_record->bg_block_id,
		       bg_record->bg_block_id);
		list_push(delete_list, found_record);
	}
	list_iterator_destroy(itr);

	if (requeue_job) {
		FREE_NULL_LIST(delete_list);

		bg_reset_block(bg_record, bg_action_ptr->job_ptr);

		bg_record->modifying = 0;
		slurm_mutex_unlock(&block_state_mutex);
		bg_requeue_job(req_job_id, 0, 0, JOB_BOOT_FAIL, false);
		return;
	}

	slurm_mutex_unlock(&block_state_mutex);

	/* free (and on dynamic layouts destroy) the overlapping blocks */
	if (bg_conf->layout_mode == LAYOUT_DYNAMIC)
		delete_it = 1;
	free_block_list(req_job_id, delete_list, delete_it, 1);
	FREE_NULL_LIST(delete_list);

	while (1) {
		slurm_mutex_lock(&block_state_mutex);
		/* Failure will unlock block_state_mutex so no need to
		   unlock before return.  No need to reset modifying
		   here if the block doesn't exist.
		*/
		if (!_make_sure_block_still_exists(bg_action_ptr,
						   bg_record)) {
			error("Problem with deallocating blocks to run job %u "
			      "on block %s", req_job_id,
			      bg_action_ptr->bg_block_id);
			return;
		}
		/* If another thread is freeing this block we need to
		   wait until it is done or we will get into a state
		   where this job will be killed.
		*/
		if (!bg_record->free_cnt)
			break;
		debug("Waiting for block %s to free for job %u. "
		      "%d thread(s) trying to free it",
		      bg_record->bg_block_id, req_job_id,
		      bg_record->free_cnt);
		slurm_mutex_unlock(&block_state_mutex);
		sleep(1);
	}
	/* This was set in the start_job function to close the above
	   window where a job could be mistakenly requeued if another
	   thread is trying to free this block as we are trying to run
	   on it, which is fine since we will reboot it later.
	*/
	bg_record->modifying = 0;

	if ((bg_record->job_running <= NO_JOB_RUNNING)
	    && !find_job_in_bg_record(bg_record, req_job_id)) {
		// bg_reset_block(bg_record); should already happened
		slurm_mutex_unlock(&block_state_mutex);
		debug("job %u already finished before boot",
		      req_job_id);
		return;
	}

	if (bg_record->job_list
	    && (bg_action_ptr->job_ptr->total_cpus != bg_record->cpu_cnt)
	    && (list_count(bg_record->job_list) != 1)) {
		/* We don't allow modification of a block or reboot of
		   a block if we are running multiple jobs on the
		   block.
		*/
		debug2("no reboot");
		goto no_reboot;
	}

	/* rc becomes 1 if any image/conn-type change means the block
	 * must be freed and rebuilt before boot */
	rc = 0;
#ifdef HAVE_BGL
	if (bg_action_ptr->blrtsimage
	    && xstrcasecmp(bg_action_ptr->blrtsimage,
			   bg_record->blrtsimage)) {
		debug3("changing BlrtsImage from %s to %s",
		       bg_record->blrtsimage, bg_action_ptr->blrtsimage);
		xfree(bg_record->blrtsimage);
		bg_record->blrtsimage = xstrdup(bg_action_ptr->blrtsimage);
		rc = 1;
	}
#elif defined HAVE_BGP
	if ((bg_action_ptr->conn_type[0] >= SELECT_SMALL)
	    && (bg_action_ptr->conn_type[0] != bg_record->conn_type[0])) {
		if (bg_conf->slurm_debug_level >= LOG_LEVEL_DEBUG3) {
			char *req_conn_type =
				conn_type_string_full(
					bg_action_ptr->conn_type);
			char *conn_type =
				conn_type_string_full(bg_record->conn_type);
			debug3("changing small block mode from %s to %s",
			       conn_type, req_conn_type);
			xfree(req_conn_type);
			xfree(conn_type);
		}
		rc = 1;
# ifndef HAVE_BG_FILES
		/* since we don't check state on an emulated system we
		 * have to change it here */
		bg_record->conn_type[0] = bg_action_ptr->conn_type[0];
# endif
	}
#endif

#ifdef HAVE_BG_L_P
	if (bg_action_ptr->linuximage
	    && xstrcasecmp(bg_action_ptr->linuximage,
			   bg_record->linuximage)) {
# ifdef HAVE_BGL
		debug3("changing LinuxImage from %s to %s",
		       bg_record->linuximage, bg_action_ptr->linuximage);
# else
		debug3("changing CnloadImage from %s to %s",
		       bg_record->linuximage, bg_action_ptr->linuximage);
# endif
		xfree(bg_record->linuximage);
		bg_record->linuximage = xstrdup(bg_action_ptr->linuximage);
		rc = 1;
	}
	if (bg_action_ptr->ramdiskimage
	    && xstrcasecmp(bg_action_ptr->ramdiskimage,
			   bg_record->ramdiskimage)) {
# ifdef HAVE_BGL
		debug3("changing RamDiskImage from %s to %s",
		       bg_record->ramdiskimage,
		       bg_action_ptr->ramdiskimage);
# else
		debug3("changing IoloadImage from %s to %s",
		       bg_record->ramdiskimage,
		       bg_action_ptr->ramdiskimage);
# endif
		xfree(bg_record->ramdiskimage);
		bg_record->ramdiskimage =
			xstrdup(bg_action_ptr->ramdiskimage);
		rc = 1;
	}
#endif
	if (bg_action_ptr->mloaderimage
	    && xstrcasecmp(bg_action_ptr->mloaderimage,
			   bg_record->mloaderimage)) {
		debug3("changing MloaderImage from %s to %s",
		       bg_record->mloaderimage,
		       bg_action_ptr->mloaderimage);
		xfree(bg_record->mloaderimage);
		bg_record->mloaderimage =
			xstrdup(bg_action_ptr->mloaderimage);
		rc = 1;
	}

	if (rc || bg_action_ptr->reboot) {
		bg_record->modifying = 1;

		/* Increment free_cnt to make sure we don't loose this
		 * block since bg_free_block will unlock
		 * block_state_mutex. */
		bg_record->free_cnt++;
		bg_free_block(bg_record, 1, 1);
		bg_record->free_cnt--;

#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
		/* push the (possibly updated) images down to the bridge */
#ifdef HAVE_BGL
		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_BlrtsImg,
					      bg_record->blrtsimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_BlrtsImg): %s",
			      bg_err_str(rc));

		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_LinuxImg,
					      bg_record->linuximage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_LinuxImg): %s",
			      bg_err_str(rc));

		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_RamdiskImg,
					      bg_record->ramdiskimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_RamdiskImg): %s",
			      bg_err_str(rc));
#elif defined HAVE_BGP
		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_CnloadImg,
					      bg_record->linuximage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_CnloadImg): %s",
			      bg_err_str(rc));

		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_IoloadImg,
					      bg_record->ramdiskimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_IoloadImg): %s",
			      bg_err_str(rc));

		if (bg_action_ptr->conn_type[0] > SELECT_SMALL) {
			char *conn_type = NULL;
			switch(bg_action_ptr->conn_type[0]) {
			case SELECT_HTC_S:
				conn_type = "s";
				break;
			case SELECT_HTC_D:
				conn_type = "d";
				break;
			case SELECT_HTC_V:
				conn_type = "v";
				break;
			case SELECT_HTC_L:
				conn_type = "l";
				break;
			default:
				break;
			}
			/* the option has to be set before the pool can
			   be set */
			if ((rc = bridge_block_modify(
				     bg_record->bg_block_id,
				     RM_MODIFY_Options,
				     conn_type)) != SLURM_SUCCESS)
				error("bridge_set_data(RM_MODIFY_Options): %s",
				      bg_err_str(rc));
		}
#endif
		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_MloaderImg,
					      bg_record->mloaderimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_MloaderImg): %s",
			      bg_err_str(rc));
#endif
		bg_record->modifying = 0;
	}

no_reboot:
	if (bg_record->state == BG_BLOCK_FREE) {
		if ((rc = bridge_block_boot(bg_record)) != SLURM_SUCCESS) {
			char reason[200];

			bg_record->boot_state = 0;
			bg_record->boot_count = 0;

			if (rc == BG_ERROR_INVALID_STATE)
				snprintf(reason, sizeof(reason),
					 "Block %s is in an incompatible "
					 "state. This usually means "
					 "hardware is allocated "
					 "by another block (maybe outside "
					 "of SLURM).",
					 bg_record->bg_block_id);
			else
				snprintf(reason, sizeof(reason),
					 "Couldn't boot block %s: %s",
					 bg_record->bg_block_id,
					 bg_err_str(rc));
			slurm_mutex_unlock(&block_state_mutex);
			requeue_and_error(bg_record, reason);
			return;
		}
	} else if (bg_record->state == BG_BLOCK_BOOTING) {
#ifdef HAVE_BG_FILES
		bg_record->boot_state = 1;
#else
		/* emulated system: pretend the boot finished instantly */
		if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
			list_push(bg_lists->booted, bg_record);
		bg_record->state = BG_BLOCK_INITED;
		last_bg_update = time(NULL);
#endif
	}

	if ((bg_record->job_running <= NO_JOB_RUNNING)
	    && !find_job_in_bg_record(bg_record, req_job_id)) {
		slurm_mutex_unlock(&block_state_mutex);
		debug("job %u finished during the start of the boot "
		      "(everything is ok)",
		      req_job_id);
		return;
	}

	/* Don't reset boot_count, it will be reset when state changes,
	   and needs to outlast a job allocation. */
	/* bg_record->boot_count = 0; */
	if (bg_record->state == BG_BLOCK_INITED) {
		debug("block %s is already ready.", bg_record->bg_block_id);
		/* Just in case reset the boot flags */
		bg_record->boot_state = 0;
		bg_record->boot_count = 0;
		set_user_rc = bridge_block_sync_users(bg_record);
		block_inited = 1;
	}
	slurm_mutex_unlock(&block_state_mutex);

	/* This lock needs to happen after the block_state_mutex to
	   avoid deadlock. */
	if (block_inited && bg_action_ptr->job_ptr) {
		slurmctld_lock_t job_write_lock = {
			NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
		lock_slurmctld(job_write_lock);
		bg_action_ptr->job_ptr->job_state &= (~JOB_CONFIGURING);
		last_job_update = time(NULL);
		unlock_slurmctld(job_write_lock);
	}

	if (set_user_rc == SLURM_ERROR) {
		sleep(2);
		/* wait for the slurmd to begin the batch script,
		   slurm_fail_job() is a no-op if issued prior
		   to the script initiation do clean up just
		   incase the fail job isn't ran */
		(void) slurm_fail_job(req_job_id, JOB_BOOT_FAIL);
	}
}
/* Parses gdb's stdout fd after requesting a backtrace. The result is
   stored in a linked list of FuncInfo structures.

   One character at a time is pulled from the pipe and fed through a
   state machine.  Before the first stack frame appears, lines that
   start with '0' are parsed as "info sharedlibrary"-style entries
   (FROMADDR -> TOADDR -> SYMS -> SONAME) and collected in 'libs' so a
   frame address can later be mapped to a library name.  Each '#'
   starts a frame: ADDR -> FUNC -> ARGS and then optionally FILE/LINE
   ("at file:line") or LIB ("from libfoo.so").  Parsing stops at the
   "(gdb)" prompt or a "No stack" reply.

   NOTE(review): short read()s are not handled — presumably fine for a
   local pipe feeding small amounts of data, but unverified here. */
static List *
parse_stack_trace (int gdb)
{
	int i;
	/* nesting depth inside the frame's argument list */
	int parentheses_are_off = 0;
	char c;
	char buf[4096];
	List *stack = NULL;	/* result: FuncInfo per frame */
	FuncInfo *f;
	List *libs = NULL;	/* shared libraries seen before frames */
	SharedLib *lib;
	enum { NONE, ADDR, FUNC, ARGS, FILE, LINE, LIB, QUOTE,
	       FROMADDR, TOADDR, SYMS, SONAME } state = NONE,
		prev_state = NONE;

	while (read(gdb, &c, 1)) {
		switch (state) {
		case NONE:
			if (c == '#') {
				/* start of a frame: skip the frame
				 * number, then the following blanks */
				f = calloc(sizeof (FuncInfo), 1);
				while (read(gdb, &c, 1))
					if (!isdigit(c))
						break;
				while (read(gdb, &c, 1))
					if (!isspace(c)) {
						buf[0] = c;
						i = 1;
						/* a digit means an
						 * address comes first */
						state = isdigit(c)
							? ADDR : FUNC;
						break;
					}
			} else if (!stack && c == '0') {
				/* shared-library table row (only before
				 * any frame was seen) */
				lib = calloc(sizeof (SharedLib), 1);
				state = FROMADDR;
				buf[0] = c;
				i = 1;
			} else if (stack && c == '(') {
				/* "(gdb" prompt: backtrace is done */
				read(gdb, buf, 3);
				if (!strncmp(buf, "gdb", 3))
					goto done;
			} else if (!stack && c == 'N') {
				/* "No stack" reply */
				read(gdb, buf, 7);
				if (!strncmp(buf, "o stack", 7))
					goto done;
			}
			break;
		case FROMADDR:
			/* library load start address */
			if (!isspace(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				sscanf(buf, "%p", &lib->from);
				read(gdb, buf, 1);
				state = TOADDR;
			}
			break;
		case TOADDR:
			/* library load end address */
			if (!isspace(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				sscanf(buf, "%p", &lib->to);
				read(gdb, buf, 1);
				state = SYMS;
			}
			break;
		case SYMS:
			/* "Syms Read" column: Yes/No, then optional
			 * "(*)" marker meaning no debug info */
			if (!isspace(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				if (eq(buf, "Yes"))
					lib->readsyms = 1;
				read(gdb, buf, 3);
				if (strncmp(buf, "(*)", 3))
					lib->dbinfo = 1;
				while (read(gdb, &c, 1))
					if (!isspace(c)) {
						buf[0] = c;
						i = 1;
						break;
					}
				state = SONAME;
			}
			break;
		case SONAME:
			/* rest of the line is the library path */
			if (c != '\n') {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				lib->name = strdup(buf);
				libs = list_push(libs, lib);
				state = NONE;
			}
			break;
		case ADDR:
			/* frame address; also resolve its library now */
			if (!isspace(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				sscanf(buf, "%p", &f->addr);
				f->lib = get_lib_name(libs, f->addr);
				/* skip "in " */
				read(gdb, buf, 3);
				state = FUNC;
			}
			break;
		case FUNC:
			/* function name up to the first blank */
			if (!isspace(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				f->func = strdup(buf);
				state = ARGS;
			}
			break;
		case ARGS:
			/* argument list: copy until the matching ')',
			 * honoring nested parens and quoted strings */
			buf[i++] = c;
			if (c == '(')
				parentheses_are_off++;
			else if (c == ')') {
				parentheses_are_off--;
				if (parentheses_are_off)
					continue;
				buf[i] = 0;
				i = 0;
				f->args = strdup(buf);
				read(gdb, &c, 1);
				if (c == '\n') {
					/* no location info on this frame */
					stack = list_push(stack, f);
					state = NONE;
					break;
				}
				/* "at " => file:line, "from " => lib */
				read(gdb, buf, 3);
				if (!strncmp(buf, "at ", 3)) {
					state = FILE;
				} else if (!strncmp(buf, "fro", 3)) {
					state = LIB;
					read(gdb, buf, 2);
				} else {
					free(f);
					state = NONE;
				}
			} else if (c == '"') {
				prev_state = ARGS;
				state = QUOTE;
			}
			break;
		case QUOTE:
			/* inside a string literal; honor backslash
			 * escapes, return to the saved state at the
			 * closing quote */
			buf[i++] = c;
			if (c == '\\') {
				read(gdb, &c, 1);
				buf[i++] = c;
			} else if (c == '"')
				state = prev_state;
			break;
		case LIB:
			/* "from <lib>": only used if the address did
			 * not already resolve to a library */
			if (!isspace(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				if (!f->lib)
					f->lib = strdup(buf);
				stack = list_push(stack, f);
				state = NONE;
			}
			break;
		case FILE:
			/* "at <file>:<line>": file part up to ':' */
			if (c != ':') {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				f->file = strdup(buf);
				state = LINE;
			}
			break;
		case LINE:
			/* line number; frame is complete after it */
			if (isdigit(c)) {
				buf[i++] = c;
			} else {
				buf[i] = 0;
				i = 0;
				f->line = atoi(buf);
				stack = list_push(stack, f);
				state = NONE;
			}
			break;
		}
	}
done:
	list_free(libs, shared_lib_free);
	return stack;
}
/* find and store all urls in the provided string */ void find_all_url(const char *source_string, const int len) { char search_for[][10] = {"http://", "https://", "ftp://", "www."}; int next_start = 0; while (next_start < len) { int first_found = len-next_start; /* set to max */ int i; /* find the first of the url start strings */ for(i = 0; i < sizeof(search_for)/10; i++) { int found_at = get_string_occurance(search_for[i], source_string+next_start, len-next_start, 1); if ((found_at >= 0) && (found_at < first_found)) first_found = found_at; } /* if url found, store (if new) it then continue the search straight after the end */ if (first_found < len-next_start) { char *new_url = NULL; char *add_start = ""; size_t url_len; int url_start = next_start + first_found; int have_already = 0; /* find the url end */ for (next_start = url_start; next_start < len; next_start++) { char cur_char = source_string[next_start]; if(!cur_char || cur_char == ' ' || cur_char == '\n' || cur_char == '<' || cur_char == '>' || cur_char == '|' || cur_char == '"' || cur_char == '\'' || cur_char == '`' || cur_char == ']' || cur_char == ';' || cur_char == '\\' || (cur_char&0x80) != 0) break; } /* prefix www. 
with http:// */ if (strncmp(&source_string[url_start], "www.", 4) == 0) add_start = "http://"; /* extract the string */ url_len = strlen(add_start) + (next_start-url_start) + 1; new_url = (char *)malloc(sizeof(char)*url_len); /* could use safe_xxx() functions but I think its simpler not to here */ strcpy(new_url, add_start); strncat(new_url, &source_string[url_start], next_start-url_start ); new_url[url_len-1] = 0; /* check the new URL is not already in the list */ if (have_url_count) { list_node_t *local_head = newest_url; while (local_head != NULL) { /* if its already stored, just make existing version active */ if (strcmp(((URLDATA *)local_head->data)->text, new_url) == 0) { active_url = local_head; ((URLDATA *)local_head->data)->seen_count++; have_already = 1; free(new_url); break; } local_head = local_head->next; } } /* if its a new url, create a new node in the url list */ if (!have_already) { URLDATA *new_node = (URLDATA *)malloc(sizeof(URLDATA)); /* if there's a max number of url and we've reached it, remove the oldest */ /* we don't need to worry if its the active_url as thats going to change */ if (max_url_count && (max_url_count==have_url_count)) { list_node_t *local_head = newest_url; /* go to the oldest in the list */ while (local_head->next != NULL) local_head = local_head->next; free(((URLDATA *)local_head->data)->text); free(local_head->data); if (local_head==newest_url) { /* the special case is when max_url_count=1... */ free(local_head); newest_url = NULL; } else { local_head = local_head->prev; free(local_head->next); local_head->next = NULL; } have_url_count--; } new_node->seen_count = 1; new_node->visited = 0; new_node->text = new_url; list_push(&newest_url, new_node); active_url = newest_url; have_url_count++; } } /* end if url found */ /* no more urls found so stop looking */ else break; } } /* end find_all_url() */
/* Populate or refresh the "specific info" popup window for front-end
 * nodes.  Builds the tree view on first use, handles the no-change and
 * error results from get_new_info_front_end(), and filters the
 * front-end list according to the popup's page type and search
 * criteria before pushing it to the display. */
extern void specific_info_front_end(popup_info_t *popup_win)
{
    int resv_error_code = SLURM_SUCCESS;
    /* static: cached across calls so NO_CHANGE refreshes can reuse the
     * previously fetched message */
    static front_end_info_msg_t *front_end_info_ptr = NULL;
    static front_end_info_t *front_end_ptr = NULL;
    specific_info_t *spec_info = popup_win->spec_info;
    sview_search_info_t *search_info = spec_info->search_info;
    char error_char[100];
    GtkWidget *label = NULL;
    GtkTreeView *tree_view = NULL;
    List resv_list = NULL;          /* sview_front_end_info_t list */
    List send_resv_list = NULL;     /* filtered subset shown in popup */
    int changed = 1;
    sview_front_end_info_t *sview_front_end_info_ptr = NULL;
    int i = -1;
    ListIterator itr = NULL;

    if (!spec_info->display_widget) {
        setup_popup_info(popup_win, display_data_front_end, SORTID_CNT);
    }

    if (spec_info->display_widget && popup_win->toggled) {
        /* view was toggled: rebuild the widget from scratch */
        gtk_widget_destroy(spec_info->display_widget);
        spec_info->display_widget = NULL;
        goto display_it;
    }

    resv_error_code = get_new_info_front_end(&front_end_info_ptr,
                                             popup_win->force_refresh);
    if (resv_error_code == SLURM_NO_CHANGE_IN_DATA) {
        if (!spec_info->display_widget || spec_info->view == ERROR_VIEW)
            goto display_it;
        changed = 0;    /* data unchanged; list rebuild can shortcut */
    } else if (resv_error_code != SLURM_SUCCESS) {
        if (spec_info->view == ERROR_VIEW)
            goto end_it;
        /* replace the current display with an error label */
        spec_info->view = ERROR_VIEW;
        if (spec_info->display_widget)
            gtk_widget_destroy(spec_info->display_widget);
        sprintf(error_char, "get_new_info_front_end: %s",
                slurm_strerror(slurm_get_errno()));
        label = gtk_label_new(error_char);
        gtk_table_attach_defaults(popup_win->table, label, 0, 1, 0, 1);
        gtk_widget_show(label);
        spec_info->display_widget = gtk_widget_ref(label);
        goto end_it;
    }

display_it:

    resv_list = _create_front_end_info_list(front_end_info_ptr, changed);

    if (!resv_list)
        return;

    /* coming out of an error state: drop the error label */
    if (spec_info->view == ERROR_VIEW && spec_info->display_widget) {
        gtk_widget_destroy(spec_info->display_widget);
        spec_info->display_widget = NULL;
    }
    if (spec_info->type != INFO_PAGE && !spec_info->display_widget) {
        tree_view = create_treeview(local_display_data,
                                    &popup_win->grid_button_list);
        gtk_tree_selection_set_mode(
            gtk_tree_view_get_selection(tree_view),
            GTK_SELECTION_MULTIPLE);
        spec_info->display_widget =
            gtk_widget_ref(GTK_WIDGET(tree_view));
        gtk_table_attach_defaults(popup_win->table,
                                  GTK_WIDGET(tree_view),
                                  0, 1, 0, 1);
        /* since this function sets the model of the tree_view to the
           treestore we don't really care about the return value */
        create_treestore(tree_view, popup_win->display_data,
                         SORTID_CNT, SORTID_NAME, SORTID_COLOR);
    }

    setup_popup_grid_list(popup_win);
    spec_info->view = INFO_VIEW;
    if (spec_info->type == INFO_PAGE) {
        _display_info_front_end(resv_list, popup_win);
        goto end_it;
    }

    /* just linking to another list, don't free the inside, just the
       list */
    send_resv_list = list_create(NULL);
    itr = list_iterator_create(resv_list);
    i = -1;
    while ((sview_front_end_info_ptr = list_next(itr))) {
        i++;
        front_end_ptr = sview_front_end_info_ptr->front_end_ptr;
        /* decide, per page type, whether this front-end node matches
         * the popup's search criteria */
        switch (spec_info->type) {
        case PART_PAGE:
        case BLOCK_PAGE:
        case NODE_PAGE:
            break;      /* no filtering for these pages */
        case JOB_PAGE:
            if (strcmp(front_end_ptr->name, search_info->gchar_data))
                continue;
            break;
        case RESV_PAGE:
            switch (search_info->search_type) {
            case SEARCH_RESERVATION_NAME:
                if (!search_info->gchar_data)
                    continue;
                if (strcmp(front_end_ptr->name,
                           search_info->gchar_data))
                    continue;
                break;
            default:
                continue;
            }
            break;
        default:
            g_print("Unknown type %d\n", spec_info->type);
            continue;
        }
        list_push(send_resv_list, sview_front_end_info_ptr);
    }
    list_iterator_destroy(itr);
    post_setup_popup_grid_list(popup_win);

    _update_info_front_end(send_resv_list,
                           GTK_TREE_VIEW(spec_info->display_widget));
    list_destroy(send_resv_list);
end_it:
    popup_win->toggled = 0;
    popup_win->force_refresh = 0;

    return;
}
/** * Parse a URL file into a requests */ requests parse_urls(const char* url_filename) { unsigned long i; requests reqs; request* req; list* req_list; node* n; FILE* urls; char done = 0; yaml_parser_t parser; yaml_event_t event; reqs.count = 0; reqs.reqs = NULL; req_list = list_new(); urls = fopen(url_filename, "r"); yaml_parser_initialize(&parser); yaml_parser_set_input_file(&parser, urls); while (!done) { if (!yaml_parser_parse(&parser, &event)) { break; } else { switch (event.type) { case YAML_MAPPING_START_EVENT: // printf("event.type: MAPPING_START\n"); // printf(" calling parse_request()\n"); if ((req = parse_request(&parser)) == NULL) { done = 2; } else { // printf(" got a good request!\n"); n = list_node_new(req); list_push(req_list, n); } break; case YAML_STREAM_START_EVENT: case YAML_DOCUMENT_START_EVENT: case YAML_SEQUENCE_START_EVENT: case YAML_SEQUENCE_END_EVENT: break; case YAML_STREAM_END_EVENT: case YAML_DOCUMENT_END_EVENT: done = 1; break; default: // printf("got event type %d\n", event.type); fprintf(stderr, "Malformed URLS_FILE at pos: %lu\n", (unsigned long)parser.offset); done = 2; } } yaml_event_delete(&event); } yaml_parser_delete(&parser); if (done == 2) { fprintf(stderr, "Error parsing URLs file\n"); exit(1); } if (!(reqs.reqs = malloc(sizeof(request) * req_list->length))) { list_free(req_list, 1); exit(2); } n = req_list->head; for (i=0; i<req_list->length; i++) { req = (request *)(n->data); reqs.reqs[i].method = req->method; reqs.reqs[i].url = req->url; reqs.reqs[i].payload_length = req->payload_length; reqs.reqs[i].payload = req->payload; reqs.reqs[i].headers = req->headers; reqs.reqs[i].num_headers = req->num_headers; reqs.reqs[i].curl_headers = req->curl_headers; // printf("set request %lu with url %s\n", i, reqs.reqs[i].url); n = n->next; } reqs.count = req_list->length; list_free(req_list, 0); return reqs; }
ft_instance_t ft_create(ft_config_t *config) { ft_instance_t ft; int bytes; int idx; if (config->max_entries <= 0) { LOG_ERROR("Hash flow table only supports fixed number of buckets"); return NULL; } /* Allocate the flow table itself */ ft = INDIGO_MEM_ALLOC(sizeof(*ft)); if (ft == NULL) { LOG_ERROR("ERROR: Flow table (hash) creation failed"); return NULL; } INDIGO_MEM_SET(ft, 0, sizeof(*ft)); INDIGO_MEM_COPY(&ft->config, config, sizeof(ft_config_t)); list_init(&ft->free_list); list_init(&ft->all_list); /* Allocate and init the flow entries */ bytes = sizeof(ft_entry_t) * config->max_entries; ft->flow_entries = INDIGO_MEM_ALLOC(bytes); if (ft->flow_entries == NULL) { LOG_ERROR("ERROR: Flow table (hash) creation failed"); INDIGO_MEM_FREE(ft); return NULL; } INDIGO_MEM_SET(ft->flow_entries, 0, bytes); /* Put the flow entries on the free list */ for (idx = 0; idx < config->max_entries; idx++) { list_push(&ft->free_list, &ft->flow_entries[idx].table_links); } /* Allocate and init buckets for each search type */ bytes = sizeof(list_head_t) * config->prio_bucket_count; ft->prio_buckets = INDIGO_MEM_ALLOC(bytes); if (ft->prio_buckets == NULL) { LOG_ERROR("ERROR: Flow table, prio bucket alloc failed"); ft_destroy(ft); return NULL; } INDIGO_MEM_SET(ft->prio_buckets, 0, bytes); for (idx = 0; idx < config->prio_bucket_count; idx++) { list_init(&ft->prio_buckets[idx]); } bytes = sizeof(list_head_t) * config->match_bucket_count; ft->match_buckets = INDIGO_MEM_ALLOC(bytes); if (ft->match_buckets == NULL) { LOG_ERROR("ERROR: Flow table, match bucket alloc failed"); ft_destroy(ft); return NULL; } INDIGO_MEM_SET(ft->match_buckets, 0, bytes); for (idx = 0; idx < config->match_bucket_count; idx++) { list_init(&ft->match_buckets[idx]); } bytes = sizeof(list_head_t) * config->flow_id_bucket_count; ft->flow_id_buckets = INDIGO_MEM_ALLOC(bytes); if (ft->flow_id_buckets == NULL) { LOG_ERROR("ERROR: Flow table, flow id bucket alloc failed"); ft_destroy(ft); return NULL; } 
INDIGO_MEM_SET(ft->flow_id_buckets, 0, bytes); for (idx = 0; idx < config->flow_id_bucket_count; idx++) { list_init(&ft->flow_id_buckets[idx]); } return ft; }
/*!
 * @brief Add a data item onto the end of the list.
 * @param pList Pointer to the \c LIST to add the item to.
 * @param data The data that is to be added to the list.
 * @returns Indication of success or failure.
 * @remark Thin convenience alias: appending and pushing are the same
 *         operation on this list type, so this simply forwards to
 *         \c list_push and reports its result.
 * @sa list_push
 */
BOOL list_add(PLIST pList, LPVOID data)
{
	BOOL pushed = list_push(pList, data);
	return pushed;
}
/**
 * Parse a sequence of headers, and set them on the `req`.
 *
 * It is assumed that the `parser` has just encountered a
 * YAML_SCALAR_EVENT whose value was "headers", and has been
 * advanced beyond the YAML_SEQUENCE_START_EVENT; after this
 * are expected a series of YAML_SCALAR_EVENT pairs, the
 * header names and values.
 *
 * After return, the `parser` will have consumed the matching
 * YAML_SEQUENCE_END_EVENT.
 *
 * Return 1 on error or 0 on success.
 *
 * Fixes over the original: yaml events are deleted on every exit path
 * (they were leaked on break/error), a partially-built header no
 * longer leaks when a later malloc fails, and full_header is freed
 * when curl_slist_append() fails.
 */
int parse_headers(yaml_parser_t* parser, request* req)
{
    unsigned long i;
    char* full_header;
    yaml_event_t name_event, value_event;
    list* headers;
    node* n;
    header* hdr;

    headers = list_new();
    while (1) {
        if (!yaml_parser_parse(parser, &name_event))
            goto parse_headers_error;
        /* skip mapping delimiters between name/value pairs */
        while (name_event.type == YAML_MAPPING_START_EVENT ||
               name_event.type == YAML_MAPPING_END_EVENT) {
            yaml_event_delete(&name_event);
            if (!yaml_parser_parse(parser, &name_event))
                goto parse_headers_error;
        }
        if (name_event.type == YAML_SEQUENCE_END_EVENT) {
            yaml_event_delete(&name_event);
            break;  /* end of the headers sequence */
        }
        if (name_event.type != YAML_SCALAR_EVENT) {
            yaml_event_delete(&name_event);
            goto parse_headers_error;
        }
        if (!yaml_parser_parse(parser, &value_event)) {
            yaml_event_delete(&name_event);
            goto parse_headers_error;
        }
        if (value_event.type != YAML_SCALAR_EVENT) {
            fprintf(stderr, "invalid header value for '%s'\n",
                    name_event.data.scalar.value);
            yaml_event_delete(&name_event);
            yaml_event_delete(&value_event);
            goto parse_headers_error;
        }
        /* copy the name and value; on partial allocation failure free
         * whatever was already allocated so nothing leaks */
        hdr = malloc(sizeof(header));
        if (hdr) {
            hdr->name = malloc(sizeof(char) *
                (strlen((const char*)name_event.data.scalar.value) + 1));
            hdr->value = malloc(sizeof(char) *
                (strlen((const char*)value_event.data.scalar.value) + 1));
        }
        if (!hdr || !hdr->name || !hdr->value) {
            if (hdr) {
                free(hdr->name);
                free(hdr->value);
                free(hdr);
            }
            yaml_event_delete(&name_event);
            yaml_event_delete(&value_event);
            goto parse_headers_error;
        }
        strcpy(hdr->name, (const char*)name_event.data.scalar.value);
        strcpy(hdr->value, (const char*)value_event.data.scalar.value);
        n = list_node_new(hdr);
        list_push(headers, n);
        yaml_event_delete(&name_event);
        yaml_event_delete(&value_event);
    }

    if (!(req->headers = malloc(sizeof(header) * headers->length)))
        goto parse_headers_error;
    req->curl_headers = NULL;
    n = headers->head;
    for (i = 0; i < headers->length; i++) {
        hdr = (header *)n->data;
        req->headers[i].name = hdr->name;
        req->headers[i].value = hdr->value;
        /* also build the "Name: value" string libcurl wants; the slist
         * copies it, so the scratch buffer is freed afterwards */
        full_header = malloc(sizeof(char) *
            (strlen(hdr->name) + strlen(hdr->value) + 3));
        if (!full_header)
            goto parse_headers_error;
        sprintf(full_header, "%s: %s", hdr->name, hdr->value);
        req->curl_headers = curl_slist_append(req->curl_headers,
                                              full_header);
        free(full_header);
        if (req->curl_headers == NULL)
            goto parse_headers_error;
        n = n->next;
    }
    req->num_headers = headers->length;
    list_free(headers, 0);  /* keep strings: req->headers owns them now */
    return 0;

parse_headers_error:
    // printf("in parse_headers_error\n");
    list_free(headers, 1);  /* free nodes AND the header data */
    return 1;
}
/* Use a binary search to find the handler entry for `tag` in the
 * Protocol[] table (assumed sorted by message id).  Returns the table
 * index, or -1 if the tag is unknown.  The loop condition also bails
 * out early if a signal was caught (global.sigCaught). */
static int
find_handler(unsigned int tag)
{
    int min = 0, max = Protocol_Size - 1, try;

    while (!global.sigCaught) {
        try = (max + min) / 2;
        if(tag == Protocol[try].message)
            return try;
        else if(min == max)
            return -1;      /* not found */
        else if(tag < Protocol[try].message) {
            if(try == min)
                return -1;
            max = try - 1;
        } else {
            if(try == max)
                return -1;
            min = try + 1;
        }
        ASSERT(min <= max);
    }
    return -1;
}

/* this is not a real handler, but takes the same arguments as one.
 * Central dispatcher: looks up `tag` in the Protocol table, keeps
 * per-user / per-server-link tag statistics, applies flood control for
 * user connections, then calls the handler with a NUL-terminated view
 * of the packet body. */
HANDLER(dispatch_command)
{
    int l;
    tag_count_t *tagcount = 0;
    int tagDelta;
    u_char byte;

    ASSERT(validate_connection(con));
    ASSERT(pkt != 0);

    /* HACK ALERT the handler routines all assume that the `pkt'
       argument is nul (\0) terminated, so we have to replace the byte
       after the last byte in this packet with a \0 to make sure we dont
       read overflow in the handlers.  the handle_connection() function
       should always allocate 1 byte more than necessary for this
       purpose */
    ASSERT(VALID_LEN(con->recvbuf->data, con->recvbuf->consumed + 4 + len + 1));

    stats.tags++;

    byte = *(pkt + len);    /* saved here, restored before returning */
    *(pkt + len) = 0;
    l = find_handler(tag);
    if(l != -1) {
        ASSERT(Protocol[l].handler != 0);
        if(ISUSER(con)) {
            /* per-user tag accounting: every 1000 invocations of the
             * same command, log users sustaining >= 100 commands/sec */
            tagcount = hash_lookup(con->user->tagCountHash, (void *) tag);
            if(!tagcount) {
                tagcount = CALLOC(1, sizeof(tag_count_t));
                tagcount->count = 0;
                tagcount->lastInterval = global.current_time;
                hash_add(con->user->tagCountHash, (void *) tag, tagcount );
            }
            tagcount->count++;
            if(tagcount->count % 1000 == 0) {
                tagDelta = global.current_time - tagcount->lastInterval;
                if(tagDelta == 0)
                    tagDelta = 1;   /* avoid division by zero */
                if((1000 / tagDelta) >= 100)
                    log_message_level(LOG_LEVEL_ERROR, "dispatch_command: %s has done \"%s\"(%hu) %lu times (%d/sec)", con->user->nick, tag2hrf(tag), tag, tagcount->count, 1000/tagDelta);
                tagcount->lastInterval = global.current_time;
            }
        } else if(ISSERVER(con)) {
            /* same accounting for server links (threshold 200/sec);
             * tagcount->flag marks the link as noisy */
            tagcount = hash_lookup(con->sopt->tagCountHash, (void *) tag);
            if(!tagcount) {
                tagcount = CALLOC(1, sizeof(tag_count_t));
                tagcount->count = 0;
                tagcount->lastInterval = global.current_time;
                hash_add(con->sopt->tagCountHash, (void *) tag, tagcount );
            }
            tagcount->count++;
            if(tagcount->count % 1000 == 0) {
                tagcount->flag = 0;
                tagDelta = global.current_time - tagcount->lastInterval;
                if(tagDelta == 0)
                    tagDelta = 1;
                if((1000 / tagDelta) >= 200) {
                    log_message_level(LOG_LEVEL_ERROR, "dispatch_command: %s has done \"%s\"(%hu) %lu times (%d/sec)", con->host, tag2hrf(tag), tag, tagcount->count, 1000/tagDelta);
                    tagcount->flag = 1;
                }
                tagcount->lastInterval = global.current_time;
            }
        }
/*
        if(tag == 10018 || (tag != 2 && (tagcount && tagcount->flag))) {
            int i;
            char message[4096];
            i=0;
            while (i<=len-1) {
                message[i] = isprint(pkt[i]) ? pkt[i] : '.';
                i++;
            }
            message[i]=0;
            log_message_level(LOG_LEVEL_ERROR, "dispatch_command: tag: %d, pkt: %s", tag, message);
        }
*/
        /* do flood control if enabled */
        if(global.floodTime > 0 && !(Protocol[l].flags & F_EXEMPT) && ISUSER(con)) {
            /* this command is subject to flood control. */
            if(con->flood_start + global.floodTime < global.current_time) {
                /* flood expired, reset counters */
                con->flood_start = global.current_time;
                con->flood_commands = 0;
            } else if(++con->flood_commands >= global.floodCommands) {
                LIST *list;

                log_message_level( LOG_LEVEL_CLIENT, "dispatch_command: flooding from %s %s(%hu)", get_user(con, 2), tag2hrf(tag), tag);
                notify_mods(FLOODLOG_MODE, "Flooding from %s!%s %s(%hu)", con->user->nick, con->host, tag2hrf(tag), tag );
                /* stop reading from the descriptor until the flood
                 * counter expires. */
                clear_read(con->fd);
                /* add to the list of flooders that is check in the main
                 * loop. Since we don't traverse the entire client list
                 * we have to keep track of which ones to check for
                 * expiration */
                list = CALLOC(1, sizeof(LIST));
                list->data = con;
                global.flooderList = list_push(global.flooderList, list);
            }
        }
        /* This is to get some info where e.g. pop_user is called
           from... */
        global.current_tag = tag;
/*
        i=0;
        while (i<=len-1) {
            message[i] = isprint(pkt[i]) ? pkt[i] : '.';
            i++;
        }
        message[i]=0;
        log_message("%hu:R:%hu(%s)\t:%hu:\t%s", con->fd, tag, tag2hrf(tag), len+4, message);
*/
        /* note that we pass only the data part of the packet */
        Protocol[l].handler(con, tag, len, pkt);
        Protocol[l].count++;
        Protocol[l].bytes += len+4;
    } else {
        /* unknown tag: record it in unknown_numeric and tell the peer */
        log_message_level(LOG_LEVEL_ERROR | LOG_LEVEL_SERVER, "dispatch_command: unknown message: tag=%hu, length=%hu, data=%s", tag, len, pkt);
        unknown_numeric.message = tag;
        unknown_numeric.count++;
        unknown_numeric.bytes += len+4;
        send_cmd(con, MSG_SERVER_NOSUCH, "Unknown command code %hu", tag);
#if ONAP_DEBUG
        /* if this is a server connection, shut it down to avoid
           flooding the other server with these messages */
        if(ISSERVER(con)) {
            u_char ch;
            int bytes;

            /* dump some bytes from the input buffer to see if it helps
               aid debugging */
            bytes = con->recvbuf->datasize - con->recvbuf->consumed;
            /* print at most 128 bytes */
            if(bytes > 128)
                bytes = 128;
            fprintf(stdout, "Dump(%d): ", con->recvbuf->datasize - con->recvbuf->consumed);
            for (l = con->recvbuf->consumed; bytes > 0; bytes--, l++) {
                ch = *(con->recvbuf->data + l);
                fputc(isprint(ch) ?
ch : '.', stdout);
            }
            fputc('\n', stdout);
        }
#endif /* ONAP_DEBUG */
    }
    /* restore the byte we overwrite at the beginning of this function */
    *(pkt + len) = byte;
}

/* Read whatever data is available on `con`, reassemble complete
 * protocol packets (2-byte length + 2-byte tag header, both
 * byte-swapped via BSWAP16) in con->recvbuf, and feed each complete
 * packet to dispatch_command().  Server links (and, with CSC, clients
 * using compression) send a compressed stream that is inflated into
 * the receive buffer first. */
void handle_connection(CONNECTION * con)
{
    int n;
    u_short tag, len;
    /* char* msg[4096]; */

    ASSERT(validate_connection(con));

#ifdef CSC
    if(ISUSER(con)) {
        if(con->uopt->csc) {
            /* compressed client stream: drain the socket and inflate
             * into the receive buffer */
            do {
                n = READ(con->fd, Buf, sizeof(Buf));
                if(n <= 0) {
                    if(n == -1) {
                        if(N_ERRNO == EWOULDBLOCK)
                            break;  /* queue drained, not an error */
                        log_message_level(LOG_LEVEL_ERROR, "handle_connection_z: read: %s (errno %d) for host %s (fd %d)", strerror(N_ERRNO), N_ERRNO, con->host, con->fd);
                    } else {
                        log_message_level(LOG_LEVEL_ERROR, "handle_connection_z: EOF from %s", con->user->nick);
                    }
                    destroy_connection(con);
                    return;
                }
                global.bytes_in += n;
                if(global.min_read > 0 && n < global.min_read) {
                    log_message_level(LOG_LEVEL_ERROR, "handle_connection_z: %d bytes from %s", n, con->host);
                }
                if(buffer_decompress(con->recvbuf, con->uopt->zin, Buf, n)) {
                    destroy_connection(con);
                    return;
                }
            } while (n == sizeof(Buf));
            goto dcomp_ok;
        }
    }
#endif
    if(ISSERVER(con)) {
        /* server data is compressed.  read as much as we can and pass
           it to the decompressor.  we attempt to read all data from the
           socket in this loop, which will prevent unnecessary passes
           through the main loop (since select would return
           immediately) */
        do {
            n = READ(con->fd, Buf, sizeof(Buf));
            if(n <= 0) {
                if(n == -1) {
                    /* try to empty the socket each time, so we read
                     * until we hit this error (queue empty).  this
                     * should only happen in the rare event that the
                     * data in the queue is a multiple of sizeof(Buf) */
                    if(N_ERRNO == EWOULDBLOCK)
                        break;  /* not an error */
                    log_message_level(LOG_LEVEL_ERROR, "handle_connection: read: %s (errno %d) for host %s (fd %d)", strerror(N_ERRNO), N_ERRNO, con->host, con->fd);
                } else
                    log_message_level(LOG_LEVEL_SERVER | LOG_LEVEL_ERROR , "handle_connection: EOF from %s", con->host);
                destroy_connection(con);
                return;
            }
            global.bytes_in += n;
            if(global.min_read > 0 && n < global.min_read) {
                log_message_level(LOG_LEVEL_ERROR | LOG_LEVEL_SERVER, "handle_connection: %d bytes from %s", n, con->host);
            }
            /* this can safely be called multiple times in this loop.
             * the decompressor will realloc the output buffer if there
             * is not enough room to store everything */
            if(buffer_decompress(con->recvbuf, con->sopt->zin, Buf, n)) {
                destroy_connection(con);
                return;
            }
            /* if what we read was equal to sizeof(Buf) it's very
             * likely that more data exists in the queue */
        } while (n == sizeof(Buf));
    } else {
        /* create the input buffer if it doesn't yet exist */
        if(!con->recvbuf) {
            con->recvbuf = CALLOC(1, sizeof(BUFFER));
            if(!con->recvbuf) {
                OUTOFMEMORY("handle_connection");
                destroy_connection(con);
                return;
            }
#if ONAP_DEBUG
            con->recvbuf->magic = MAGIC_BUFFER;
#endif
            /* +1 for the NUL byte dispatch_command() writes */
            con->recvbuf->data = MALLOC(RECVBUF_INITAL_SIZE + 1);
            if(!con->recvbuf->data) {
                OUTOFMEMORY("handle_connection");
                destroy_connection(con);
                return;
            }
            con->recvbuf->datamax = RECVBUF_INITAL_SIZE;
        }
        /* read the packet header if we haven't seen it already */
        while (con->recvbuf->datasize < 4) {
            n = READ(con->fd, con->recvbuf->data + con->recvbuf->datasize, 4 - con->recvbuf->datasize);
            if(n == -1) {
                if(N_ERRNO != EWOULDBLOCK) {
                    log_message_level(LOG_LEVEL_ERROR | LOG_LEVEL_SERVER, "handle_connection: read: %s (errno %d) for host %s", strerror(N_ERRNO), N_ERRNO, con->host);
                    destroy_connection(con);
                }
                return;
            } else if(n == 0) {
                destroy_connection(con);
                return;
            }
            global.bytes_in += n;
            con->recvbuf->datasize += n;
        }
        /* read the packet body */
        memcpy(&len, con->recvbuf->data, 2);
        len = BSWAP16(len);
        if(len > 0) {
            if(global.maxCommandLen && len > global.maxCommandLen) {
                /* oversized message: drop the connection */
                log_message_level(LOG_LEVEL_ERROR | LOG_LEVEL_SERVER, "handle_connection: %hu byte message from %s", len, con->host);
                destroy_connection(con);
                return;
            }
            /* if there isn't enough space to read the entire body,
               resize the input buffer */
            if(con->recvbuf->datamax < 4 + len) {
                /* allocate 1 extra byte for the \0 that
                   dispatch_command() requires */
                if(safe_realloc((void **) &con->recvbuf->data, 4 + len + 1)) {
                    OUTOFMEMORY("handle_connection");
                    destroy_connection(con);
                    return;
                }
                con->recvbuf->datamax = 4 + len;
            }
            n = READ(con->fd, con->recvbuf->data + con->recvbuf->datasize, len + 4 - con->recvbuf->datasize);
            if(n == -1) {
                /* since the header and body could arrive in separate
                   packets, we have to check for this here so we don't
                   close the connection on this nonfatal error.  we just
                   wait for the next packet to arrive */
                if(N_ERRNO != EWOULDBLOCK) {
                    log_message_level(LOG_LEVEL_ERROR | LOG_LEVEL_SERVER, "handle_connection: read: %s (errno %d) for host %s", strerror(N_ERRNO), N_ERRNO, con->host);
                    destroy_connection(con);
                }
                return;
            } else if(n == 0) {
                log_message_level(LOG_LEVEL_ERROR, "handle_connection: EOF from %s", con->host);
                destroy_connection(con);
                return;
            }
            con->recvbuf->datasize += n;
            global.bytes_in += n;
        }
    }
    /* process as many complete commands as possible.  for a client
       this will be exactly one, but a server link may have sent
       multiple commands in one compressed packet */
#ifdef CSC
dcomp_ok:
#endif
    while (con->recvbuf->consumed < con->recvbuf->datasize) {
        /* if we don't have the complete packet header, wait until we
           read more data */
        if(con->recvbuf->datasize - con->recvbuf->consumed < 4)
            break;
        /* read the packet header */
        memcpy(&len, con->recvbuf->data + con->recvbuf->consumed, 2);
        memcpy(&tag, con->recvbuf->data + con->recvbuf->consumed + 2, 2);
        len = BSWAP16(len);
        tag = BSWAP16(tag);
        /* check if the entire packet body has arrived */
        if(con->recvbuf->consumed + 4 + len > con->recvbuf->datasize)
            break;
/*
        bzero( msg, 4096 );
        memcpy(&msg, con->recvbuf->data + con->recvbuf->consumed + 4, len);
        log_message_level( LOG_LEVEL_DEBUG, "recv: [%u] %s (%u)", tag, msg, len);
*/
        /* require that the client register before doing anything else */
        if(con->class == CLASS_UNKNOWN &&
           (tag != MSG_CLIENT_LOGIN && tag != MSG_CLIENT_LOGIN_REGISTER &&
            tag != MSG_CLIENT_REGISTER && tag != MSG_SERVER_LOGIN &&
            tag != MSG_SERVER_LOGIN_ACK && tag != MSG_SERVER_ERROR &&
            tag != 4 &&     /* unknown: v2.0 beta 5a sends this? */
            tag != 300 && tag != 11 && tag != 920)) {
            log_message_level(LOG_LEVEL_ERROR, "handle_connection: %s is not registered", con->host);
            *(con->recvbuf->data + con->recvbuf->consumed + 4 + len) = 0;
            log_message_level(LOG_LEVEL_ERROR, "handle_connection: tag=%hu, len=%hu, data=%s", tag, len, con->recvbuf->data + con->recvbuf->consumed + 4);
            send_cmd(con, MSG_SERVER_ERROR, "invalid command");
            destroy_connection(con);
            return;
        }

        if(ISUSER(con)) {
            /* check for end of share/unshare sequence.  in order to
               avoid having to send a single message for each shared
               file, the add_file and remove_file commands set a flag
               noting the start of a possible series of commands.  this
               routine checks to see if the end of the sequence has been
               reached (a command other than share/unshare has been
               issued) and then relays the final result to the peer
               servers.  NOTE: the only issue with this is that if the
               user doesn't issue any commands after sharing files, the
               information will never get passed to the peer servers.
               This is probably ok since this case will seldom happen */
            if(con->user->sharing) {
                if(tag != MSG_CLIENT_ADD_FILE && tag != MSG_CLIENT_SHARE_FILE && tag != MSG_CLIENT_ADD_DIRECTORY) {
                    pass_message_args(con, MSG_SERVER_USER_SHARING, "%s %hu %u", con->user->nick, con->user->shared, con->user->libsize);
                    con->user->sharing = 0;
                }
            } else if(con->user->unsharing) {
                if(tag != MSG_CLIENT_REMOVE_FILE) {
                    pass_message_args(con, MSG_SERVER_USER_SHARING, "%s %hu %u", con->user->nick, con->user->shared, con->user->libsize);
                    con->user->unsharing = 0;
                }
            }
        }

        /* call the protocol handler */
        dispatch_command(con, tag, len, con->recvbuf->data + con->recvbuf->consumed + 4);

        /* mark data as processed */
        con->recvbuf->consumed += 4 + len;
    }
    if(con->recvbuf->consumed) {
        n = con->recvbuf->datasize - con->recvbuf->consumed;
        if(n > 0) {
            /* shift down unprocessed data */
            memmove(con->recvbuf->data, con->recvbuf->data + con->recvbuf->consumed, n);
        }
        con->recvbuf->datasize = n;
        con->recvbuf->consumed = 0; /* reset */
    }
}

/* Map a protocol message id to a short human-readable name, used for
 * logging and statistics output. */
char* tag2hrf(int tag)
{
    switch (tag) {
    case MSG_SERVER_ERROR: return "server error"; /* 0 */
    case MSG_CLIENT_LOGIN: return "login"; /* 2 */
    case MSG_SERVER_EMAIL: return "login ack"; /* 3 */
    case MSG_CLIENT_VERSION_CHECK: return "version_check"; /* 4 */
    case MSG_CLIENT_LOGIN_REGISTER: return "register login"; /* 6 */
    case MSG_CLIENT_REGISTER: return "register nick"; /* 7 */
    case MSG_SERVER_REGISTER_OK: return "register ok"; /* 8 */
    case MSG_SERVER_REGISTER_FAIL: return "register fail"; /* 9 */
    case MSG_SERVER_BAD_NICK: return "bad nick"; /* 10 */
    case MSG_CLIENT_CHECK_PASS: return "check_password"; /* 11 */
    case MSG_SERVER_PASS_OK: return "password ok"; /* 12 */
    case MSG_SERVER_ECHO: return "server echo"; /* 13 */
    case MSG_CLIENT_REGISTRATION_INFO: return "ignore_command"; /* 14 */
#ifndef ROUTING_ONLY
    case
MSG_CLIENT_ADD_FILE: return "add_file"; /* 100 */ case MSG_CLIENT_REMOVE_FILE: return "remove_file"; /* 102 */ #endif case MSG_CLIENT_UNSHARE_ALL: return "unshare_all"; /* 110 */ #ifndef ROUTING_ONLY case MSG_CLIENT_SEARCH: return "search"; /* 200 */ #endif case MSG_SERVER_SEARCH_RESULT: return "search result"; /* 201 */ case MSG_SERVER_SEARCH_END: return "search end"; /* 202 */ case MSG_CLIENT_DOWNLOAD: return "download"; /* 203 */ case MSG_SERVER_FILE_READY: return "file ready"; /* 204 */ case MSG_CLIENT_PRIVMSG: return "privmsg"; /* 205 */ case MSG_SERVER_SEND_ERROR: return "send error"; /* 206 */ case MSG_CLIENT_ADD_HOTLIST: return "add_hotlist"; /* 207 */ case MSG_CLIENT_ADD_HOTLIST_SEQ: return "add_hotlist"; /* 208 */ case MSG_SERVER_USER_SIGNON: return "user signon"; /* 209 */ case MSG_SERVER_USER_SIGNOFF: return "user signoff"; /* 210 */ case MSG_CLIENT_BROWSE: return "browse"; /* 211 */ case MSG_SERVER_BROWSE_RESPONSE: return "browse response"; /* 212 */ case MSG_SERVER_BROWSE_END: return "browse end"; /* 213 */ case MSG_SERVER_STATS: return "server stats"; /* 214 */ case MSG_CLIENT_RESUME_REQUEST: return "resume request"; /* 215 */ case MSG_SERVER_RESUME_MATCH: return "resume match"; /* 216 */ case MSG_SERVER_RESUME_MATCH_END: return "resume match end"; /* 217 */ case MSG_CLIENT_DOWNLOAD_START: return "download start"; /* 218 */ case MSG_CLIENT_DOWNLOAD_END: return "download end"; /* 219 */ case MSG_CLIENT_UPLOAD_START: return "upload start"; /* 220 */ case MSG_CLIENT_UPLOAD_END: return "upload end"; /* 221 */ case MSG_CLIENT_CHECK_PORT: return "check port (ignored)"; /* 300 */ case MSG_SERVER_HOTLIST_ACK: return "hotlist ack"; /* 301 */ case MSG_SERVER_HOTLIST_ERROR: return "hotlist error"; /* 302 */ case MSG_CLIENT_REMOVE_HOTLIST: return "remove_hotlist"; /* 303 */ case MSG_SERVER_DISCONNECTING: return "disconnecting"; /* 316 */ case MSG_CLIENT_IGNORE_LIST: return "ignore list"; /* 320 */ case MSG_SERVER_IGNORE_ENTRY: return "ignore entry"; /* 321 */ 
case MSG_CLIENT_IGNORE_USER: return "ignore user"; /* 322 */ case MSG_CLIENT_UNIGNORE_USER: return "unignore user"; /* 323 */ case MSG_SERVER_NOT_IGNORED: return "not ignored"; /* 324 */ case MSG_SERVER_ALREADY_IGNORED: return "already ignored"; /* 325 */ case MSG_CLIENT_CLEAR_IGNORE: return "clear ignore"; /* 326 */ case MSG_CLIENT_JOIN: return "join"; /* 400 */ case MSG_CLIENT_PART: return "part"; /* 401 */ case MSG_CLIENT_PUBLIC: return "public"; /* 402 */ case MSG_SERVER_PUBLIC: return "public"; /* 403 */ case MSG_SERVER_NOSUCH: return "server error"; /* 404 */ case MSG_SERVER_JOIN_ACK: return "chan join ack"; /* 405 */ case MSG_SERVER_JOIN: return "chan join"; /* 406 */ case MSG_SERVER_PART: return "chan part"; /* 407 */ case MSG_SERVER_CHANNEL_USER_LIST: return "chan list"; /* 408 */ case MSG_SERVER_CHANNEL_USER_LIST_END: return "chan list end"; /* 409 */ case MSG_SERVER_TOPIC: return "chan topic"; /* 410 */ case MSG_CLIENT_CHANNEL_BAN_LIST: return "chan banlist"; /* 420 */ case MSG_SERVER_CHANNEL_BAN_LIST: return "chan banlist"; /* 421 */ case MSG_CLIENT_CHANNEL_BAN: return "chan ban"; /* 422 */ case MSG_CLIENT_CHANNEL_UNBAN: return "chan unban"; /* 423 */ case MSG_CLIENT_CHANNEL_CLEAR_BANS: return "chan clear bans"; /* 424 */ case MSG_CLIENT_DOWNLOAD_FIREWALL: return "download firewall"; /* 500 */ case MSG_SERVER_UPLOAD_FIREWALL: return "upload firewall"; /* 501 */ case MSG_CLIENT_USERSPEED: return "user speed"; /* 600 */ case MSG_SERVER_USER_SPEED: return "user speed"; /* 601 */ case MSG_CLIENT_WHOIS: return "whois"; /* 603 */ case MSG_SERVER_WHOIS_RESPONSE: return "whois"; /* 604 */ case MSG_SERVER_WHOWAS: return "whowas"; /* 605 */ case MSG_CLIENT_SETUSERLEVEL: return "level"; /* 606 */ case MSG_SERVER_UPLOAD_REQUEST: return "upload request"; /* 607 */ case MSG_CLIENT_UPLOAD_OK: return "upload ok"; /* 608 */ case MSG_CLIENT_ACCEPT_FAILED: return "accept failed"; /* 609 */ case MSG_CLIENT_KILL: return "kill"; /* 610 */ case MSG_CLIENT_NUKE: return "nuke"; 
/* 611 */ case MSG_CLIENT_BAN: return "ban"; /* 612 */ case MSG_CLIENT_ALTER_PORT: return "alter port"; /* 613 */ case MSG_CLIENT_UNBAN: return "unban"; /* 614 */ case MSG_CLIENT_BANLIST: return "banlist"; /* 615 */ case MSG_SERVER_IP_BANLIST: return "ip banlist"; /* 616 */ case MSG_SERVER_CHANNEL_LIST_END: return "chan list end"; /* 617 */ case MSG_SERVER_CHANNEL_LIST: return "chan list"; /* 618 */ case MSG_CLIENT_LIMIT: return "queue limit"; /* 619 */ case MSG_SERVER_LIMIT: return "queue limit"; /* 620 */ case MSG_CLIENT_MOTD: return "motd"; /* 621 */ case MSG_CLIENT_MUZZLE: return "muzzle"; /* 622 */ case MSG_CLIENT_UNMUZZLE: return "unmuzzle"; /* 623 */ case MSG_CLIENT_UNNUKE: return "unnuke?"; /* 624 */ case MSG_CLIENT_ALTER_SPEED: return "alter speed"; /* 625 */ case MSG_CLIENT_DATA_PORT_ERROR: return "data port error"; /* 626 */ case MSG_CLIENT_WALLOP: return "wallop"; /* 627 */ case MSG_CLIENT_ANNOUNCE: return "announce"; /* 628 */ case MSG_SERVER_NICK_BANLIST: return "nick banlist"; /* 629 */ case MSG_CLIENT_BROWSE_DIRECT: return "browse direct"; /* 640 */ case MSG_SERVER_BROWSE_DIRECT_OK: return "browse direct ok"; /* 641 */ case MSG_SERVER_BROWSE_DIRECT_ERR: return "browse direct error"; /* 642 */ case MSG_CLIENT_CLOAK: return "cloak"; /* 652 */ case MSG_CLIENT_CHANGE_SPEED: return "change_speed"; /* 700 */ case MSG_CLIENT_CHANGE_PASS: return "change_pass"; /* 701 */ case MSG_CLIENT_CHANGE_EMAIL: return "change_email"; /* 702 */ case MSG_CLIENT_CHANGE_DATA_PORT: return "change_data_port"; /* 703 */ case MSG_SERVER_GHOST: return "ghost"; /* 748 */ case MSG_CLIENT_PING_SERVER: return "ping server"; /* 750 */ case MSG_CLIENT_PING: return "ping"; /* 751 */ case MSG_CLIENT_PONG: return "pong"; /* 752 */ case MSG_CLIENT_ALTER_PASS: return "alter pass"; /* 753 */ case MSG_CLIENT_SERVER_RECONFIG: return "server reconfig"; /* 800 */ case MSG_CLIENT_SERVER_VERSION: return "server version"; /* 801 */ case MSG_CLIENT_SERVER_CONFIG: return "server config"; /* 810 */ 
case MSG_CLIENT_CLEAR_CHANNEL: return "clear channel"; /* 820 */ case MSG_CLIENT_REDIRECT: return "redirect client"; /* 821 */ case MSG_CLIENT_CYCLE: return "cycle client"; /* 822 */ case MSG_CLIENT_SET_CHAN_LEVEL: return "channel level"; /* 823 */ case MSG_CLIENT_EMOTE: return "emote"; /* 824 */ case MSG_SERVER_NAMES_LIST: return "names list"; /* 825 */ case MSG_CLIENT_CHANNEL_LIMIT: return "channel limit"; /* 826 */ case MSG_CLIENT_FULL_CHANNEL_LIST: return "full chan list"; /* 827 */ case MSG_SERVER_FULL_CHANNEL_INFO: return "full chan info"; /* 828 */ case MSG_CLIENT_KICK: return "kick"; /* 829 */ case MSG_CLIENT_NAMES_LIST: return "list users"; /* 830 */ case MSG_CLIENT_GLOBAL_USER_LIST: return "global user list"; /* 831 */ case MSG_SERVER_GLOBAL_USER_LIST: return "global user list"; /* 832 */ #ifndef ROUTING_ONLY case MSG_CLIENT_ADD_DIRECTORY: return "add_directory"; /* 870 */ #endif case 920: return "ignore_command"; /* 920 */ case MSG_CLIENT_ADD_SERVER: return "add server"; /* 9998 */ case MSG_CLIENT_LIST_SERVER: return "list server"; /* 9999 */ case MSG_CLIENT_QUIT: return "client quit"; /* 10000 */ case MSG_SERVER_LOGIN: return "server login"; /* 10010 */ case MSG_SERVER_LOGIN_ACK: return "server login ack"; /* 10011 */ case MSG_SERVER_USER_SHARING: return "user sharing"; /* 10012 */ case MSG_SERVER_USER_IP: return "user ip"; /* 10013 */ case MSG_SERVER_REGINFO: return "reginfo"; /* 10014 */ case MSG_SERVER_REMOTE_SEARCH: return "remote search"; /* 10015 */ case MSG_SERVER_REMOTE_SEARCH_RESULT: return "remote search result"; /* 10016 */ case MSG_SERVER_REMOTE_SEARCH_END: return "remote search end"; /* 10017 */ case MSG_SERVER_ENCAPSULATED: return "encapsulated"; /* 10018 */ case MSG_SERVER_LINK_INFO: return "link info"; /* 10019 */ case MSG_SERVER_QUIT: return "server disconnect"; /* 10020 - deprecated by 10101 */ case MSG_SERVER_NOTIFY_MODS: return "remote notify_mods"; /* 10021 */ case MSG_SERVER_SERVER_PONG: return "server pong"; /* 10022 */ case 
MSG_SERVER_TIME_CHECK: return "time check"; /* 10023 */ case MSG_SERVER_WHOIS_NOTIFY: return "whois notify"; /* 10024 */ case MSG_CLIENT_USERFLAGS: return "change userflags"; /* 10050 */ case MSG_CLIENT_CONNECT: return "server connect"; /* 10100 */ case MSG_CLIENT_DISCONNECT: return "server disconnect"; /* 10101 */ case MSG_CLIENT_KILL_SERVER: return "kill server"; /* 10110 */ case MSG_CLIENT_REMOVE_SERVER: return "remove server"; /* 10111 */ case MSG_CLIENT_LINKS: return "server links"; /* 10112 */ case MSG_CLIENT_USAGE_STATS: return "server usage"; /* 10115 */ case MSG_SERVER_SEARCH_STATS: return "search cache stats"; /* 10116 */ case MSG_CLIENT_REHASH: return "rehash"; /* 10117 */ case MSG_CLIENT_VERSION_STATS: return "client version stats"; /* 10118 */ case MSG_CLIENT_WHICH_SERVER: return "which server"; /* 10119 */ case MSG_CLIENT_PING_ALL_SERVERS: return "ping all servers"; /* 10120 */ case MSG_CLIENT_WHO_WAS: return "whowas"; /* 10121 */ case MSG_CLIENT_MASS_KILL: return "mass kill"; /* 10122 */ case MSG_CLIENT_HISTOGRAM: return "histogram recv"; /* 10123 */ case MSG_SERVER_HISTOGRAM: return "histogram recv end"; /* 10124 */ case MSG_CLIENT_SHISTOGRAM: return "histogram send"; /* 10125 */ case MSG_SERVER_SHISTOGRAM: return "histogram send end"; /* 10126 */ case MSG_CLIENT_REGISTER_USER: return "register user"; /* 10200 */ case MSG_CLIENT_USER_MODE: return "user mode cmd"; /* 10203 */ case MSG_CLIENT_OP: return "chan op"; /* 10204 */ case MSG_CLIENT_DEOP: return "chan deop"; /* 10205 */ case MSG_CLIENT_CHANNEL_WALLOP: return "chan wallop"; /* 10208 */ case MSG_CLIENT_CHANNEL_MODE: return "chan mode"; /* 10209 */ case MSG_CLIENT_CHANNEL_INVITE: return "chan invite"; /* 10210 */ case MSG_CLIENT_CHANNEL_VOICE: return "chan voice"; /* 10211 */ case MSG_CLIENT_CHANNEL_UNVOICE: return "chan unvoice"; /* 10212 */ case MSG_CLIENT_CHANNEL_MUZZLE: return "chan muzzle"; /* 10213 */ case MSG_CLIENT_CHANNEL_UNMUZZLE: return "chan unmuzzle"; /* 10214 */ case 
MSG_CLIENT_CLASS_ADD: return "acl generic add"; /* 10250 */
    case MSG_CLIENT_CLASS_DEL: return "acl generic del"; /* 10251 */
    case MSG_CLIENT_CLASS_LIST: return "acl generic list"; /* 10252 */
    case MSG_CLIENT_DLINE_ADD: return "acl d-line add"; /* 10253 */
    case MSG_CLIENT_DLINE_DEL: return "acl d-line del"; /* 10254 */
    case MSG_CLIENT_DLINE_LIST: return "acl d-line list"; /* 10255 */
    case MSG_CLIENT_ILINE_ADD: return "acl i-line add"; /* 10256 */
    case MSG_CLIENT_ILINE_DEL: return "acl i-line del"; /* 10257 */
    case MSG_CLIENT_ILINE_LIST: return "acl i-line list"; /* 10258 */
    case MSG_CLIENT_ELINE_ADD: return "acl e-line add"; /* 10259 */
    case MSG_CLIENT_ELINE_DEL: return "acl e-line del"; /* 10260 */
    case MSG_CLIENT_ELINE_LIST: return "acl e-line list"; /* 10261 */
    case MSG_SERVER_SYNC_END: return "server sync end"; /* 10262 */
    case MSG_SERVER_SYNC_END_ACK: return "server sync end ack";/* 10263 */
    case MSG_CLIENT_LOG_LEVEL: return "change log level";
    case MSG_CLIENT_SHARE_FILE: return "share generic file"; /* 10300 */
    case MSG_CLIENT_BROWSE_NEW: return "browse new"; /* 10301 */
    case MSG_SERVER_BROWSE_RESULT_NEW: return "browse result new"; /* 10302 */
#ifdef USE_PROTNET
    case MSG_CLIENT_RESYNC_USER: return "resync user"; /* 10303 */
    case MSG_CLIENT_DESYNC_USER: return "desync user"; /* 10304 */
#endif
    }
    /* Any numeric tag not listed above maps to the catch-all name. */
    return "unknown";
}

/*
 * Record one outbound message of type `tag` carrying `len` payload bytes
 * in the global send histogram (global.histOutList).
 *
 * If an entry for `tag` already exists its count/len are bumped;
 * otherwise a new entry is prepended to the list.  On allocation failure
 * the sample is dropped and OUTOFMEMORY is logged.
 */
void add_shist( unsigned int tag, unsigned int len )
{
    LIST *list;
    histogram_t *h;

    /* Fast path: an entry for this tag already exists. */
    for (list = global.histOutList; list; list = list->next)
    {
        h = list->data;
        if(tag == h->tag)
        {
            h->count++;
            h->len += len;
            return;
        }
    }
    /* tag not found add one */
    /* The while(1) is a poor-man's goto: any allocation failure breaks
     * out to the shared OUTOFMEMORY cleanup below. */
    while (1)
    {
        h = CALLOC(1, sizeof(histogram_t));
        if(!h)
            break;
        h->tag = tag;
        h->count = 1;
        h->len = len;
        list = CALLOC(1, sizeof(LIST));
        if(!list)
            break;
        list->data = h;
        list->next = global.histOutList;
        global.histOutList = list;
        return;
    }
    /* Cleanup: when the first CALLOC fails, `h` is NULL and `list` is
     * NULL (the search loop above ran off the end), so both guarded
     * FREEs are safe. */
    OUTOFMEMORY("add_shist");
    if(h)
        FREE(h);
    if(list)
        FREE(list);
    return;
}
int main(int argc, char* argv[]) { char *filename; extern FILE* idlin; extern int idl_flex_debug; list defs; list toplevel_prefix; /* Current prefix for repository ID. */ int i; search_list = iluparser_new_list (); for (i = 1; i < argc; i++){ if (argv[i][0] != '-') break; switch (argv[i][1]){ case 'a': idl_subset |= IDL_STYLE_GUIDE; break; case 'I': if (argv[i][2]) list_insert (search_list, argv[i]+2); else list_insert (search_list, argv[++i]); break; case 'h': default: usage(); break; } } if (i+1 != argc) usage(); filename = argv[i]; /* Open top-level IDL file. */ init_types(); idl_flex_debug=0; if ((idlin = fopen (filename, "r")) == NULL) { perror(argv[1]); exit(1); } /* Syntactical analysis */ idlsetinitialfile(filename); if(idlparse()) return 1; defs = the_result; /* Semantical analysis */ /* join modules for re-opening */ defs = reopen_modules (defs); /* backlink, toplevel has no parent */ list_enumerate (defs, definition_backlink,0); /* resolve all names */ list_enumerate (defs, definition_resolvenames, defs); /* perform consistency checks, compute constants */ list_enumerate (defs, definition_check, defs); /* assign repository IDs */ toplevel_prefix = iluparser_new_list (); list_push (toplevel_prefix, ""); list_enumerate (defs, definition_setuid, toplevel_prefix); /* Test conformance with AB style guide */ list_enumerate (defs, ab_style, 0); /* Drop all results :-) */ return 0; }
void syscall_handler(){ switch (current_tcb->stack->r7) { case 0x1: /* fork */ if (task_count == TASK_LIMIT) { /* Cannot create a new task, return error */ current_tcb->stack->r0 = -1; } else { /* Compute how much of the stack is used */ size_t used = stacks[current_task] + STACK_SIZE - (unsigned int*)current_tcb->stack; /* New stack is END - used */ tasks[task_count].stack = (void*)(stacks[task_count] + STACK_SIZE - used); /* Copy only the used part of the stack */ memcpy(tasks[task_count].stack, current_tcb->stack, used * sizeof(unsigned int)); /* Set PID */ tasks[task_count].pid = task_count; /* Set priority, inherited from forked task */ tasks[task_count].priority = current_tcb->priority; /* Set return values in each process */ current_tcb->stack->r0 = task_count; tasks[task_count].stack->r0 = 0; list_init(&tasks[task_count].list); list_push(&ready_list[tasks[task_count].priority], &tasks[task_count].list); /* There is now one more task */ task_count++; } break; case 0x2: /* getpid */ current_tcb->stack->r0 = current_task; break; case 0x3: /* write */ { /* Check fd is valid */ int fd = current_tcb->stack->r0; if (fd < FILE_LIMIT && files[fd]) { /* Prepare file request, store reference in r0 */ requests[current_task].task = current_tcb; requests[current_task].buf = (void*)current_tcb->stack->r1; requests[current_task].size = current_tcb->stack->r2; current_tcb->stack->r0 = (int)&requests[current_task]; /* Write */ file_write(files[fd], &requests[current_task], &event_monitor); } else { current_tcb->stack->r0 = -1; } } break; case 0x4: /* read */ { /* Check fd is valid */ int fd = current_tcb->stack->r0; if (fd < FILE_LIMIT && files[fd]) { /* Prepare file request, store reference in r0 */ requests[current_task].task = current_tcb; requests[current_task].buf = (void*)current_tcb->stack->r1; requests[current_task].size = current_tcb->stack->r2; current_tcb->stack->r0 = (int)&requests[current_task]; /* Read */ file_read(files[fd], &requests[current_task], 
&event_monitor); } else { current_tcb->stack->r0 = -1; } } break; case 0x5: /* interrupt_wait */ /* Enable interrupt */ NVIC_EnableIRQ(current_tcb->stack->r0); /* Block task waiting for interrupt to happen */ event_monitor_block(&event_monitor, INTR_EVENT(current_tcb->stack->r0), current_tcb); current_tcb->status = TASK_WAIT_INTR; break; case 0x6: /* getpriority */ { int who = current_tcb->stack->r0; if (who > 0 && who < (int)task_count) current_tcb->stack->r0 = tasks[who].priority; else if (who == 0) current_tcb->stack->r0 = current_tcb->priority; else current_tcb->stack->r0 = -1; } break; case 0x7: /* setpriority */ { int who = current_tcb->stack->r0; int value = current_tcb->stack->r1; value = (value < 0) ? 0 : ((value > PRIORITY_LIMIT) ? PRIORITY_LIMIT : value); if (who > 0 && who < (int)task_count) { tasks[who].priority = value; if (tasks[who].status == TASK_READY) list_push(&ready_list[value], &tasks[who].list); } else if (who == 0) { current_tcb->priority = value; list_unshift(&ready_list[value], ¤t_tcb->list); } else { current_tcb->stack->r0 = -1; break; } current_tcb->stack->r0 = 0; } break; case 0x8: /* mknod */ current_tcb->stack->r0 = file_mknod(current_tcb->stack->r0, current_tcb->pid, files, current_tcb->stack->r2, &memory_pool, &event_monitor); break; case 0x9: /* sleep */ if (current_tcb->stack->r0 != 0) { current_tcb->stack->r0 += tick_count; event_monitor_block(&event_monitor, TIME_EVENT, current_tcb); current_tcb->status = TASK_WAIT_TIME; } break; case 0xa: /* lseek */ { /* Check fd is valid */ int fd = current_tcb->stack->r0; if (fd < FILE_LIMIT && files[fd]) { /* Prepare file request, store reference in r0 */ requests[current_task].task = current_tcb; requests[current_task].buf = NULL; requests[current_task].size = current_tcb->stack->r1; requests[current_task].whence = current_tcb->stack->r2; current_tcb->stack->r0 = (int)&requests[current_task]; /* Read */ file_lseek(files[fd], &requests[current_task], &event_monitor); } else { 
current_tcb->stack->r0 = -1; } } break; case 0xb: /* task_block */ { event_monitor_block(&event_monitor, TASK_EVENT(current_tcb->stack->r0), current_tcb); current_tcb->status = TASK_WAIT_TASK; } break; case 0xc: /* mutex_lock */ { unsigned int mutex_addr = current_tcb->stack->r0; /* search if mutex exist */ for(int i = 0; i < MUTEX_LIMIT; i++) { if(__mutex.addr[i] == mutex_addr) { event_monitor_block(&event_monitor, MUTEX_EVENT(i), current_tcb); current_tcb->status = TASK_WAIT_MUTEX; current_tcb->stack->r0 = 0; return; } } int empty_mutex = 0; for(; empty_mutex < MUTEX_LIMIT; empty_mutex++) { if(list_empty(&event_monitor.events[MUTEX_EVENT(empty_mutex)].list)) { break; } } event_monitor_block(&event_monitor, MUTEX_EVENT(empty_mutex), current_tcb); current_tcb->status = TASK_WAIT_MUTEX; __mutex.addr[empty_mutex] = mutex_addr; __mutex.count++; current_tcb->stack->r0 = 0; } break; case 0xd: /* mutex_unlock */ { unsigned int mutex_addr = current_tcb->stack->r0; /* search if mutex exist */ for(int i = 0; i < MUTEX_LIMIT; i++) { if(__mutex.addr[i] == mutex_addr) { event_monitor_release(&event_monitor, MUTEX_EVENT(i)); current_tcb->stack->r0 = 0; __mutex.count--; return; } } current_tcb->stack->r0 = -1; } break; default: break; } }
/*
 * Function for preparing a packet for forwarding. Performs
 * a buffer swap from the message pool. If there are no free
 * message in the pool, it returns the passed message and does not
 * put it on the send queue.
 *
 * m             - message to forward (ownership of a replacement buffer
 *                 from the pool is returned to the caller on success)
 * arrivalTime_p - arrival timestamp recorded in the queue entry
 *
 * Returns newMsg (a fresh pool buffer) on success, or `m` unchanged when
 * any pool/queue resource could not be obtained.
 */
message_wrapper_t* forward(message_wrapper_t* m, uint32_t arrivalTime_p)
{
  pmesg(200, "%s :: %s :: Line #%d\n", __FILE__, __func__, __LINE__);
  /* NOTE(review): statics mean this function is not reentrant -- confirm
   * it is only ever called from a single (non-preempted) context. */
  static message_wrapper_t* newMsg;
  static fe_queue_entry_t *qe;
  static bcp_data_header_t *hdr;

  // In the event of either LIFO type, if arrival finds a full LIFO
  // discard the oldest element.
  conditionalFQDiscard();

  //Make sure message pool is not full
  if(list_length(message_pool) >= MESSAGE_POOL_SIZE)
  {
    pmesg(10, "WARNING: BcpForwardingEngine.c - forward. Cannot forward, message pool is out of memory.\n");
    return m;
  }
  //Make sure queue entry pool is not full
  else if(list_length(q_entry_pool) >= Q_ENTRY_POOL_SIZE)
  {
    pmesg(10, "WARNING: BcpForwardingEngine.c - forward. Cannot forward, queue entry pool is out of memory.\n");
    return m;
  }

  /* Acquire a queue entry first; every later failure path must undo
   * exactly the allocations made so far, in order. */
  qe = memb_alloc(&q_entry_pool_mem);
  if (qe == NULL)
  {
    pmesg(200, "WARNING: BcpForwardingEngine.c - forward. q_entry_pool is full.\n");
    return m;
  }
  list_add(q_entry_pool, qe);

  newMsg = memb_alloc(&message_pool_mem);
  if(newMsg == NULL)
  {
    pmesg(200, "WARNING: BcpForwardingEngine.c - forward. message_pool is full.\n");
    /* Roll back the queue-entry allocation made above. */
    list_remove(q_entry_pool, qe);
    memb_free(&q_entry_pool_mem, qe);
    return m;
  }
  list_add(message_pool, newMsg);
  memset(newMsg, 0, sizeof(message_wrapper_t));

  // Copy the message, client may send more messages.
  memcpy(newMsg, m, sizeof(message_wrapper_t));
  hdr = &(newMsg -> bcp_data_header);

  /* Fill in the forwarding-queue bookkeeping for this packet. */
  qe -> msg = newMsg;
  qe -> source = FORWARD;
  qe -> arrivalTime = arrivalTime_p;
  qe -> txCount = 0;
  qe -> firstTxTime = 0;
  qe -> bcpArrivalDelay = hdr->bcpDelay;

  if(!(list_length(send_stack) >= SEND_STACK_SIZE))
  {
    /* LIFO builds select stack (push) vs. FIFO queue (append) order. */
#ifdef LIFO
    list_push(send_stack, qe);
#endif
#ifndef LIFO
    list_add(send_stack, qe);
#endif
    pmesg(200, "Forwarder is forwarding packet with send_stack size = %d\n", list_length(send_stack));
    forwarderActivity();

    // Successful function exit point:
    return newMsg;
  }
  else
  {
    // There was a problem enqueuing to the send queue.
    // Free the allocated MessagePool and QEntryPool
    list_remove(message_pool, newMsg);
    memb_free(&message_pool_mem, newMsg);
    list_remove(q_entry_pool, qe);
    memb_free(&q_entry_pool_mem, qe);
  }

  pmesg(10, "ERROR BcpForwardingEngine: Cannot forward, unable to allocate resources.\n");
  return m;
}
/*
 * Parse one top-level item from the current configuration file.
 *
 * Grammar handled here (one construct per call):
 *   - EOF            : pop the finished file; resume the includer if any
 *   - blank line     : ignored
 *   - NAME v1 v2 ... : simple item with inline values
 *   - <NAME v1 ...>  : block open (becomes the new current item `_cur`)
 *   - block-end NAME>: block close; must match the open `_cur`
 *   - include "f">   : open and switch to an included file
 *
 * Returns ok on success, fail on a syntax/IO error (reported via
 * error_at), 0 for an unrecognized leading token.
 */
int read_item()
{
    config_file *f;
    int tk;
    config_item *item;
    ll_failed_return(tk = get_token());
    switch (tk) {
    case token_eof:
        /* Finished this file: pop it and, if it was an include, prime
         * the next character from the parent file. */
        f = _files.pop_front();
        f->close();
        if (!_files.empty()) { getc(); }
        return ok;
    case '\n':
        /* Empty line -- nothing to do. */
        return ok;
    case token_string: {
        /* Simple item: NAME value value ... up to newline/EOF. */
        item = _new<config_item>(_pool, _cur);
        item->name = _t;
        while (1) {
            ll_failed_return(tk = get_token());
            switch (tk) {
            case EOF:
            case '\n':
                /* Drain the pushed values into a pool-backed array. */
                if (item->value_count) {
                    config_text *t;
                    while ((t = (config_text*)list_pop())) { _pool->grow(t); }
                    item->values = (config_text**)_pool->finish();
                } else {
                    item->values = nullptr;
                }
                push_item(item);
                return ok;
            case token_string:
                item->value_count++;
                list_push(_t);
                break;
            default:
                error_at(getpos(), "expected string or new line or EOF.");
                return fail;
            }
        }
        break;
    }
    case '<': {
        /* Block open: <NAME value ... >  -- item becomes the new _cur. */
        ll_failed_return(tk = get_token());
        if (tk != token_string) { error_at(getpos(), "expected string."); return fail; }
        item = _new<config_item>(_pool, _cur);
        item->name = _t;
        while (1) {
            ll_failed_return(tk = get_token());
            switch (tk) {
            case '>':
                ll_failed_return(tk = get_token());
                if (tk != '\n' && tk != EOF) { error_at(getpos(), "expected new line or EOF."); return fail; }
                if (item->value_count) {
                    /* Values were pushed in order; pop refills the array
                     * front-to-back. */
                    config_text **p, *t;
                    p = item->values = (config_text**)_pool->alloc(sizeof(config_text*) * item->value_count);
                    while ((t = (config_text*)list_pop())) { *p++ = t; }
                } else {
                    item->values = nullptr;
                }
                push_item(item);
                _cur = item;
                return ok;
            case token_string:
                item->value_count++;
                list_push(_t);
                break;
            default:
                error_at(getpos(), "expected string or '>'.");
                return fail;
            }
        }
        break;
    }
    case token_block_end: {
        /* Block close: name must match the currently open block. */
        ll_failed_return(tk = get_token());
        if (tk != token_string) { error_at(getpos(), "expected string."); return fail; }
        if (_cur == nullptr || strcmp(*static_cast<config_item*>(_cur)->name, *_t) != 0) {
            error_at(getpos(), "unmatching item block.");
            return fail;
        }
        _cur = _cur->_parent;
        ll_failed_return(tk = get_token());
        if (tk != '>') { error_at(getpos(), "expected '>'."); return fail; }
        ll_failed_return(tk = get_token());
        if (tk != '\n' && tk != EOF) { error_at(getpos(), "expected new line or EOF."); return fail; }
        return ok;
    }
    case token_include: {
        /* Include directive: open the named file and make it current. */
        ll_failed_return(tk = get_token());
        if (tk != token_string) { error_at(getpos(), "expected string."); return fail; }
        ll_failed_return(tk = get_token());
        if (tk != '>') { error_at(getpos(), "expected '>'."); return fail; }
        ll_failed_return(tk = get_token());
        if (tk != '\n' && tk != EOF) { error_at(getpos(), "expected new line or EOF."); return fail; }
        f = _new<config_file>(_pool);
        if (ll_failed(f->open(*_t))) {
            error_at(_t->loc.start, "open file '%s' failed.", _t->text());
            return fail;
        }
        /* Reject a file that is already somewhere on the include stack
         * (same device+inode) to prevent infinite include recursion. */
        for (auto& f2 : _files) {
            if ((f->stat.st_dev == f2.stat.st_dev) && (f->stat.st_ino == f2.stat.st_ino)) {
                error_at(_t->loc.start, "recursive include file '%s'.", _t->text());
                return fail;
            }
        }
        _files.push_front(f);
        getc();
        return ok;
    }
    }
    return 0;
}
/*---------------------------------------------------------------------------*/
/*
 * Accept a locally-originated packet from the client and enqueue it on
 * the forwarding send stack.
 *
 * msg - client message wrapper to send (copied into a pool buffer; the
 *       caller keeps ownership of msg)
 * len - declared payload length (currently unused here)
 *
 * Returns SUCCESS when queued (and signals ev_send_done), EOFF when the
 * engine is stopped, EBUSY when a pool is exhausted, FAIL otherwise.
 */
error_t forwardingEngine_Send(message_wrapper_t* msg, uint8_t len)
{
  pmesg(200, "%s :: %s :: Line #%d\n", __FILE__, __func__, __LINE__);

  // Send code for client send request
  /* NOTE(review): statics make this non-reentrant -- confirm single
   * caller context. */
  static bcp_data_header_t* hdr;
  static uint32_t arrivalTime;
  //uint32_t arrivalTime = call DelayPacketTimer.getNow();
  static error_t retVal;

  arrivalTime = 0;
  pmesg(200, "Forwarding Engine Sending Packet\n");

  if (!isRunningForwardingEngine) {return EOFF;}

  /* Stamp the BCP header of the outgoing (local) packet. */
  hdr = &(msg -> bcp_data_header);
  hdr -> hopCount = 0;
  rimeaddr_copy(&(hdr -> origin), &rimeaddr_node_addr);
  hdr -> originSeqNo = seqno++;
  hdr -> bcpDelay = 0;
  hdr -> txCount = 0;
  hdr -> pktType = PKT_NORMAL;

  // If needed, discard an element from the forwarding queue
  conditionalFQDiscard();

  //Make sure message pool is not full
  if(list_length(message_pool) >= MESSAGE_POOL_SIZE)
  {
    pmesg(200, "WARNING: BcpForwardingEngine.c - Send. Cannot send, message pool is out of memory.\n");
    return EBUSY;
  }
  //Make sure queue entry pool is not full
  else if(list_length(q_entry_pool) >= Q_ENTRY_POOL_SIZE)
  {
    pmesg(200, "WARNING: BcpForwardingEngine.c - Send. Cannot send, queue entry pool is out of memory.\n");
    return EBUSY;
  }
  else
  {
    static message_wrapper_t* newMsg;
    static fe_queue_entry_t *qe;

    /* Acquire a queue entry; later failures roll back in reverse order. */
    qe = memb_alloc(&q_entry_pool_mem);
    if (qe == NULL)
    {
      pmesg(10, "ERROR: BcpForwardingEngine.c - SEND. q_entry_pool is full.\n");
      return FAIL;
    }
    list_add(q_entry_pool, qe);

    newMsg = memb_alloc(&message_pool_mem);
    if (newMsg == NULL)
    {
      pmesg(10, "ERROR: BcpForwardingEngine.c - SEND. message_pool is full.\n");
      // Free the QEntryPool
      list_remove(q_entry_pool, qe);
      memb_free(&q_entry_pool_mem, qe);
      return FAIL;
    }
    list_add(message_pool, newMsg);
    memset(newMsg, 0, sizeof(message_wrapper_t));

    // Copy the message, client may send more messages.
    memcpy(newMsg, msg, sizeof(message_wrapper_t));

    /* Queue-entry bookkeeping for a locally generated packet. */
    qe -> msg = newMsg;
    qe -> source = LOCAL_SEND;
    qe -> arrivalTime = arrivalTime;
    qe -> txCount = 0;
    qe -> firstTxTime = 0;
    qe -> bcpArrivalDelay = 0;

    if(!(list_length(send_stack) >= SEND_STACK_SIZE))
    {
      /* LIFO builds a stack (push); otherwise FIFO append. */
#ifdef LIFO
      list_push(send_stack, qe);
#endif
#ifndef LIFO
      list_add(send_stack, qe);
#endif
      pmesg(100, "Forwarder is forwarding packet with send_stack size = %d\n", list_length(send_stack));
      forwarderActivity();

      /* Notify the client that its buffer may be reused. */
      if(ev_send_done != NULL)
        ev_send_done(msg, SUCCESS); // signal Send.sendDone(msg, SUCCESS);

      // Successful function exit point:
      return SUCCESS;
    }
    else
    {
      // There was a problem enqueuing to the send queue.
      // Free the allocated MessagePool and QEntryPool
      list_remove(message_pool, newMsg);
      memb_free(&message_pool_mem, newMsg);
      list_remove(q_entry_pool, qe);
      memb_free(&q_entry_pool_mem, qe);
    }
  }

  // NB: at this point, we have a resource acquistion problem.
  // Log the event, and drop the packet
  pmesg(10, "ERROR BcpForwardingEngine: Cannot SEND, unable to allocate resources.\n");
  return FAIL;
}
void watch(char *path, bool is_link) { // Add initial path to the watch list LIST_NODE *node = get_from_path(path); if (node == NULL) node = add_to_watch_list(path); // Searchs and increments the reference counter // of the link in list_link if (is_link) add_to_link_list(node); // Temporary list to perform breath-first-search LIST *list = list_init(); list_push(list, (void *) path); // Traverse directory DIR *dir_stream; struct dirent *dir; while (list->first != NULL) { // Directory to watch char *p = (char*) list_pop(list); // Traverse directory dir_stream = opendir(p); while (dir = readdir(dir_stream)) { if (dir->d_type == DT_DIR && strcmp(dir->d_name, ".") == 1 && strcmp(dir->d_name, "..") == 1) { char *path_to_watch = (char*) malloc(sizeof(char) * (strlen(p) + strlen(dir->d_name) + 2)); strcpy(path_to_watch, p); strcat(path_to_watch, dir->d_name); strcat(path_to_watch, "/"); // Add to the watch list if (get_from_path(path_to_watch) == NULL) add_to_watch_list(path_to_watch); // Continue directory traversing list_push(list, (void*) path_to_watch); } // Resolve symbolic link else if (dir->d_type == DT_LNK) { char *path_to_watch = (char*) malloc(sizeof(char) * (strlen(p) + strlen(dir->d_name) + 1)); strcpy(path_to_watch, p); strcat(path_to_watch, dir->d_name); char *real_path = resolve_real_path(path_to_watch); // Test for: // 1. is a real path // 2. is a directory if (real_path != NULL && opendir(real_path) != NULL) { // Add to the watch list if it's not present LIST_NODE *node = get_from_path(real_path); if (node == NULL) node = add_to_watch_list(real_path); // Searchs and increments the reference counter // of the link in list_link add_to_link_list(node); // Continue directory traversing list_push(list, (void*) real_path); } } } closedir(dir_stream); } // Free memory list_free(list); }
/*
 * Prepend `data` to the list referenced by `list`.
 * The new head is stored back through `list` and also returned.
 */
List list_prepend(List *list, void *data)
{
    List head = list_push(*list, data);
    *list = head;
    return head;
}
/*
 * Enumerate TLV entries in a buffer until hitting a given index (optionally for a given type as well).
 *
 * packet        - owning packet; receives a list of decompressed buffers
 *                 (freed when the packet is destroyed)
 * payload       - raw TLV buffer to walk
 * payloadLength - size of payload in bytes
 * index         - zero-based occurrence of the matching type to return
 * type          - TLV type to match, or TLV_TYPE_ANY
 * tlv           - out: header (host byte order) and pointer to the value
 *
 * Returns ERROR_SUCCESS when the requested TLV was found, else
 * ERROR_NOT_FOUND.  Compressed TLVs are transparently inflated; the
 * returned tlv->buffer then points at the decompressed copy.
 */
DWORD packet_find_tlv_buf( Packet *packet, PUCHAR payload, DWORD payloadLength, DWORD index, TlvType type, Tlv *tlv )
{
	DWORD currentIndex = 0;
	DWORD offset = 0, length = 0;
	BOOL found = FALSE;
	PUCHAR current;

	memset(tlv, 0, sizeof(Tlv));

	do
	{
		// Enumerate the TLV's
		/* NOTE(review): a TLV with a zero length field makes this loop
		 * advance by 0 bytes -- verify the caller guarantees non-zero
		 * lengths or add a guard. */
		for( current = payload, length = 0 ; !found && current ; offset += length, current += length )
		{
			TlvHeader *header = (TlvHeader *)current;
			TlvType current_type = 0;

			/* Stop before reading a header that would run past the end
			 * of the payload (or if the pointer wrapped). */
			if ((current + sizeof(TlvHeader) > payload + payloadLength) || (current < payload))
				break;

			// TLV's length
			length = ntohl(header->length);

			// Matching type?
			current_type = ntohl( header->type );

			// if the type has been compressed, temporarily remove the compression flag as compression is to be transparent.
			if( ( current_type & TLV_META_TYPE_COMPRESSED ) == TLV_META_TYPE_COMPRESSED )
				current_type = current_type ^ TLV_META_TYPE_COMPRESSED;

			// check if the types match?
			if( (current_type != type) && (type != TLV_TYPE_ANY) )
				continue;

			// Matching index?
			if (currentIndex != index)
			{
				currentIndex++;
				continue;
			}

			/* Candidate found: re-validate that the full value fits. */
			if ((current + length > payload + payloadLength) || (current < payload))
				break;

			tlv->header.type   = ntohl(header->type);
			tlv->header.length = ntohl(header->length) - sizeof(TlvHeader);
			tlv->buffer        = payload + offset + sizeof(TlvHeader);

			if( ( tlv->header.type & TLV_META_TYPE_COMPRESSED ) == TLV_META_TYPE_COMPRESSED )
			{
				/* Inflate the value; the first DWORD of a compressed
				 * value is the original (decompressed) length. */
				DECOMPRESSED_BUFFER * decompressed_buf = NULL;

				do
				{
					decompressed_buf = (DECOMPRESSED_BUFFER *)malloc( sizeof(DECOMPRESSED_BUFFER) );
					if( !decompressed_buf )
						break;

					// the first DWORD in a compressed buffer is the decompressed buffer length.
					decompressed_buf->length = ntohl( *(DWORD *)tlv->buffer );
					if( !decompressed_buf->length )
						break;

					decompressed_buf->buffer = (BYTE *)malloc( decompressed_buf->length );
					if( !decompressed_buf->buffer )
						break;

					/* Skip the length prefix before inflating. */
					tlv->header.length -= sizeof( DWORD );
					tlv->buffer += sizeof( DWORD );

					if( uncompress( decompressed_buf->buffer, &decompressed_buf->length, tlv->buffer, tlv->header.length ) != Z_OK )
						break;

					/* Present the TLV as its uncompressed equivalent. */
					tlv->header.type = tlv->header.type ^ TLV_META_TYPE_COMPRESSED;
					tlv->header.length = decompressed_buf->length;
					tlv->buffer = decompressed_buf->buffer;

					if( !packet->decompressed_buffers )
						packet->decompressed_buffers = list_create();

					if( !packet->decompressed_buffers )
						break;

					// each packet has a list of decompressed buffers which is used to
					// wipe and free all decompressed buffers upon the packet being destroyed.
					list_push( packet->decompressed_buffers, decompressed_buf );

					found = TRUE;

				} while( 0 );

				/* Any failure above: release the partially built buffer. */
				if( !found && decompressed_buf )
				{
					if( decompressed_buf->buffer )
						free( decompressed_buf->buffer );
					free( decompressed_buf );
				}
			}
			else
			{
				found = TRUE;
			}
		}

	} while (0);

	return (found) ? ERROR_SUCCESS : ERROR_NOT_FOUND;
}
/*---------------------------------------------------------------------------*/
/*
 * Queue the packet currently in the packetbuf for CSMA transmission to
 * its link-layer receiver, creating a per-neighbor queue on first use.
 *
 * sent - MAC callback invoked when transmission completes/fails
 * ptr  - opaque argument passed back to `sent`
 *
 * On any allocation failure the packet is dropped and the callback is
 * invoked with MAC_TX_ERR.
 */
static void
send_packet(mac_callback_t sent, void *ptr)
{
  struct rdc_buf_list *q;
  struct neighbor_queue *n;
  static uint16_t seqno;  /* monotonically increasing MAC sequence number */
  const rimeaddr_t *addr = packetbuf_addr(PACKETBUF_ADDR_RECEIVER);

  packetbuf_set_attr(PACKETBUF_ATTR_MAC_SEQNO, seqno++);

  /* Look for the neighbor entry */
  n = neighbor_queue_from_addr(addr);
  if(n == NULL) {
    /* Allocate a new neighbor entry */
    n = memb_alloc(&neighbor_memb);
    if(n != NULL) {
      /* Init neighbor entry */
      rimeaddr_copy(&n->addr, addr);
      n->transmissions = 0;
      n->collisions = 0;
      n->deferrals = 0;
      /* Init packet list for this neighbor */
      LIST_STRUCT_INIT(n, queued_packet_list);
      /* Add neighbor to the list */
      list_add(neighbor_list, n);
    }
  }

  if(n != NULL) {
    /* Add packet to the neighbor's queue */
    /* Three nested allocations (queue slot, metadata, queuebuf); each
     * failure level frees exactly what was acquired before it. */
    q = memb_alloc(&packet_memb);
    if(q != NULL) {
      q->ptr = memb_alloc(&metadata_memb);
      if(q->ptr != NULL) {
        q->buf = queuebuf_new_from_packetbuf();
        if(q->buf != NULL) {
          struct qbuf_metadata *metadata = (struct qbuf_metadata *)q->ptr;
          /* Neighbor and packet successfully allocated */
          if(packetbuf_attr(PACKETBUF_ATTR_MAX_MAC_TRANSMISSIONS) == 0) {
            /* Use default configuration for max transmissions */
            metadata->max_transmissions = CSMA_MAX_MAC_TRANSMISSIONS;
          } else {
            metadata->max_transmissions =
              packetbuf_attr(PACKETBUF_ATTR_MAX_MAC_TRANSMISSIONS);
          }
          metadata->sent = sent;
          metadata->cptr = ptr;

          /* ACK frames jump the queue; everything else is appended. */
          if(packetbuf_attr(PACKETBUF_ATTR_PACKET_TYPE) ==
             PACKETBUF_ATTR_PACKET_TYPE_ACK) {
            list_push(n->queued_packet_list, q);
          } else {
            list_add(n->queued_packet_list, q);
          }

          /* If q is the first packet in the neighbor's queue, send asap */
          if(list_head(n->queued_packet_list) == q) {
            ctimer_set(&n->transmit_timer, 0, transmit_packet_list, n);
          }
          return;
        }
        memb_free(&metadata_memb, q->ptr);
        PRINTF("csma: could not allocate queuebuf, dropping packet\n");
      }
      memb_free(&packet_memb, q);
      PRINTF("csma: could not allocate queuebuf, dropping packet\n");
    }
    /* The packet allocation failed.
       Remove and free neighbor entry if empty. */
    if(list_length(n->queued_packet_list) == 0) {
      list_remove(neighbor_list, n);
      memb_free(&neighbor_memb, n);
    }
    PRINTF("csma: could not allocate packet, dropping packet\n");
  } else {
    PRINTF("csma: could not allocate neighbor, dropping packet\n");
  }
  /* Report the drop to the upper layer. */
  mac_call_sent_callback(sent, ptr, MAC_TX_ERR, 1);
}
/*
 * Execute (or, when todo==0, skip over) one source line of the BASIC-like
 * interpreter.
 *
 * sf   - source file whose next line is fetched via l_get_line()
 * todo - non-zero to actually execute; zero to parse-only (used while
 *        skipping inactive branches)
 *
 * Returns one of the EXE_* codes telling the caller how control flow
 * continues (EXE_DO, EXE_END, EXE_BREAK, EXE_UNTIL, ...).
 */
int i_execute_line (SOURCE_FILE * sf,const int todo)
{
	l_get_line(sf);pline = line;
	l_get_token();
	if (token_type==TT_LINE_END) {
		return EXE_DO;// skip empty line
	}
	else if (IS_KEYWORD(token_type)) {
		/* Block/control keywords are handled whether or not todo is set,
		 * since the caller needs their structural result codes. */
		if (token_type==KEY_END) { return EXE_END; }
		else if (token_type==KEY_IF) { return i_execute_if (sf,todo); }
		else if (token_type==KEY_WHILE) { return i_execute_while(sf,todo); }
		else if (token_type==KEY_REPEAT) { return i_execute_repeat(sf,todo); }
		if (token_type==KEY_UNTIL) { return EXE_UNTIL; }
		else if (token_type==KEY_FOR) { return i_execute_for(sf,todo); }
		else if (token_type==KEY_CASE) { return i_execute_case(sf,todo); }
		else if (token_type==KEY_ELSEIF) { return EXE_ELSEIF; }
		else if (token_type==KEY_WHEN) { return EXE_WHEN; }
		else if (token_type==KEY_ELSE) { return EXE_ELSE; }
		/* Everything below has side effects; bail out when skipping. */
		if (!todo) return EXE_DO;
		if (token_type==KEY_BREAK) { return EXE_BREAK; }
		else if (token_type==KEY_RETURN) { return EXE_RETURN; }
		else if (token_type==KEY_GOSUB) {
			extern int i_call_sub (SOURCE_FILE *,const char *);
			l_get_token();
			return i_call_sub(sf,token);
		}
		else if (token_type==KEY_DIM) {
			/* DIM name(size)[, name(size)...] -- allocate arrays. */
			ARRAY * array;
			long size;
			while(1) {
				l_get_token(); // new array
				array = (ARRAY*)calloc(1,sizeof(ARRAY));
				array->name = s_strdup(token);
				list_push(&list_array,array);
				// l_get_token();// skip '('
				size = (long)calc_check(FALSE,pline);
				if (size<=0) {
					/* FIX: size is long, so use %ld (was %d). */
					merror_msg("illegal array size %ld!",size);
				}
				array->size = size;
				array->array = calloc(sizeof(real),size);
				{
					int i;
					for (i=0;i<size;++i) array->array[i] = 0.0;
				}
				l_get_token();// skip ')'
				l_get_token();
				if (token_type==TT_COM) continue;
				else if (token_type==TT_LINE_END) break;
			}
		}
		else if (token_type==KEY_EXIT) { d_exit(); }
	}
	else if(token_type==TT_ID) {
		if (str_eq(token,"print")) {
			/* print expr|string[, ...] until end of line */
			while(1) {
				l_get_token();
				if (token_type==TT_STRING) {
					printf("%s",token);
				}
				else {
					real result; char buf[128];
					l_put_back();
					result = calc_check(FALSE,pline);
					d_ftoa(result,buf);
					/* SECURITY FIX: buf is data, never a format string
					 * (was printf(buf)). */
					printf("%s", buf);
				}
				l_get_token();
				if (token_type==TT_LINE_END)break;
			}//while
		}//print
		else if (str_eq(token,"input")) {
			/* input var[, var...] -- read one numeric value per variable.
			 * NOTE(review): gets(64,buf) is a project wrapper, not the
			 * (removed) stdio gets() -- confirm it bounds to 64 bytes. */
			VAR * v;char buf[64];
			while(1) {
				l_get_token();
				v = get_var(token);
				gets(64,buf);puts(buf);
				v->value = atof(buf);
				l_get_token();
				if (token_type==TT_LINE_END)break;
			}//while
		}//input
		else {
			/* Assignment: var = expr, array(idx) = expr, or implicit
			 * variable creation on first assignment. */
			ARRAY * a;
			VAR * var;
			var = find_var(token);
			if (var!=NULL) {
				l_get_token(); //skip '='
				var->value = calc_check(FALSE,pline);
				return EXE_DO;
			}
			a = find_array(token);
			if(a!=NULL) {
				int index;
				l_get_token();//skip (
				index = (int)calc_check(FALSE,pline);
				// delete this line,calc_check call skip ( automatically
				//l_get_token();//skip )
				l_get_token();// skip =
				assign_element(a,index,calc_check(FALSE,pline));
				return EXE_DO;
			}
			var = create_var(token);
			l_get_token(); //skip '='
			var->value = calc_check(FALSE,pline);
			return EXE_DO;
		}//assign
	}// TT_ID
	else {
		merror_illegal_token();
	}
	return EXE_DO;
}
/*
 * Perform any setup required to initiate a job
 * job_ptr IN - pointer to the job being initiated
 * RET - SLURM_SUCCESS or an error code
 *
 * NOTE: This happens in parallel with srun and slurmd spawning
 * the job. A prolog script is expected to defer initiation of
 * the job script until the BG block is available for use.
 */
extern int start_job(struct job_record *job_ptr)
{
	int rc = SLURM_SUCCESS;
	bg_record_t *bg_record = NULL;
	bg_action_t *bg_action_ptr = NULL;
	select_jobinfo_t *jobinfo = job_ptr->select_jobinfo->data;

	/* All block bookkeeping below happens under block_state_mutex. */
	slurm_mutex_lock(&block_state_mutex);
	bg_record = jobinfo->bg_record;

	if (!bg_record || !block_ptr_exist_in_list(bg_lists->main, bg_record)) {
		slurm_mutex_unlock(&block_state_mutex);
		error("bg_record %s doesn't exist, requested for job (%d)",
		      jobinfo->bg_block_id, job_ptr->job_id);
		return SLURM_ERROR;
	}

	/* Inherit the block's connection type when the job asked for
	 * navigation/default rather than an explicit type. */
	if ((jobinfo->conn_type[0] != SELECT_NAV)
	    && (jobinfo->conn_type[0] < SELECT_SMALL)) {
		int dim;
		for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
			jobinfo->conn_type[dim] = bg_record->conn_type[dim];
	}

	/* If it isn't 0 then it was setup previous (sub-block) */
	if (jobinfo->geometry[SYSTEM_DIMENSIONS] == 0)
		memcpy(jobinfo->geometry, bg_record->geo,
		       sizeof(bg_record->geo));

	if (bg_record->job_list) {
		/* Shared block: mark the job's cnodes used and track the
		 * job in the block's job list. */
		/* Mark the ba_mp cnodes as used now. */
		ba_mp_t *ba_mp = list_peek(bg_record->ba_mp_list);
		xassert(ba_mp);
		xassert(ba_mp->cnode_bitmap);
		bit_or(ba_mp->cnode_bitmap, jobinfo->units_avail);
		if (!find_job_in_bg_record(bg_record, job_ptr->job_id))
			list_append(bg_record->job_list, job_ptr);
	} else {
		/* Dedicated block: it runs exactly this job. */
		bg_record->job_running = job_ptr->job_id;
		bg_record->job_ptr = job_ptr;
	}
	job_ptr->job_state |= JOB_CONFIGURING;

	bg_action_ptr = xmalloc(sizeof(bg_action_t));
	bg_action_ptr->op = START_OP;
	bg_action_ptr->job_ptr = job_ptr;

	/* FIXME: The below get_select_jobinfo calls could be avoided
	 * by just using the jobinfo as we do above.
	 */
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_BLOCK_ID,
			   &(bg_action_ptr->bg_block_id));
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_REBOOT,
			   &(bg_action_ptr->reboot));
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_CONN_TYPE,
			   &(bg_action_ptr->conn_type));
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_MLOADER_IMAGE,
			   &(bg_action_ptr->mloaderimage));
#ifdef HAVE_BG_L_P
# ifdef HAVE_BGL
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_BLRTS_IMAGE,
			   &(bg_action_ptr->blrtsimage));
	if (!bg_action_ptr->blrtsimage) {
		/* Fall back to the configured default image. */
		bg_action_ptr->blrtsimage =
			xstrdup(bg_conf->default_blrtsimage);
		set_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_BLRTS_IMAGE,
				   bg_action_ptr->blrtsimage);
	}
# elif defined HAVE_BGP
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_CONN_TYPE,
			   &(bg_action_ptr->conn_type));
# endif
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_LINUX_IMAGE,
			   &(bg_action_ptr->linuximage));
	if (!bg_action_ptr->linuximage) {
		bg_action_ptr->linuximage =
			xstrdup(bg_conf->default_linuximage);
		set_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_LINUX_IMAGE,
				   bg_action_ptr->linuximage);
	}
	get_select_jobinfo(job_ptr->select_jobinfo->data,
			   SELECT_JOBDATA_RAMDISK_IMAGE,
			   &(bg_action_ptr->ramdiskimage));
	if (!bg_action_ptr->ramdiskimage) {
		bg_action_ptr->ramdiskimage =
			xstrdup(bg_conf->default_ramdiskimage);
		set_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_RAMDISK_IMAGE,
				   bg_action_ptr->ramdiskimage);
	}
#endif
	if (!bg_action_ptr->mloaderimage) {
		bg_action_ptr->mloaderimage =
			xstrdup(bg_conf->default_mloaderimage);
		set_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_MLOADER_IMAGE,
				   bg_action_ptr->mloaderimage);
	}

	num_unused_cpus -= job_ptr->total_cpus;

	/* Track the block as running/booted exactly once in each list. */
	if (!block_ptr_exist_in_list(bg_lists->job_running, bg_record))
		list_push(bg_lists->job_running, bg_record);
	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
		list_push(bg_lists->booted, bg_record);
	/* Just incase something happens to free this block before we
	   start the job we will make it so this job doesn't get blown
	   away.
	*/
	bg_record->modifying = 1;
	last_bg_update = time(NULL);

	slurm_mutex_unlock(&block_state_mutex);

	info("Queue start of job %u in BG block %s",
	     job_ptr->job_id,
	     bg_action_ptr->bg_block_id);
	_block_op(bg_action_ptr);
	return rc;
}
/*
 * First-pass scan of a BASIC-like source file: registers every `sub`
 * (name + file position) in list_sub, validates per-statement syntax via
 * the match_* helpers, and checks block (if/while/for/case/repeat/sub)
 * nesting with a block stack.  Any violation aborts through merror_*.
 *
 * sf: open source file; scanning reads it line by line via l_get_line().
 * Globals used: pline/line/token/token_type (lexer state), list_sub,
 * list_var, list_array.  push_block/block_top/pop_block are presumably
 * macros over block_stack/block_size — TODO confirm against their
 * definitions.
 */
void l_scan (SOURCE_FILE * sf) {
    int r = 1,in_sub = FALSE;
    int block_stack[STACK_SIZE],block_size = 0;
    list_init(&list_sub);
    list_init(&list_var);
    list_init(&list_array);
    while(r) {
        r = l_get_line(sf);
        pline = line;
        l_get_token ();
        // skip empty line
        if (token_type==TT_LINE_END) continue;
        // check command
        if (IS_KEYWORD(token_type)) {
            if (token_type==KEY_SUB) {
                USER_SUB * sub;
                if(in_sub) {
                    merror_msg("sub procedure can not be nested");
                }
                in_sub = TRUE;
                match_type(TT_ID);
                /* Record the sub's name and its position in the file so the
                 * interpreter can seek to it later. */
                sub = (USER_SUB*)calloc(sizeof(USER_SUB),1);
                sub->name = s_strdup(token);
                sub->pos = sf->pos;
                list_push(&list_sub,sub);
                match_type(TT_LINE_END);
                push_block(B_SUB);
                continue;
            } else if (token_type==KEY_END) {
                /* 'end' closes any open block except 'repeat' (which must be
                 * closed by 'until'). */
                if (block_size<=0 || block_top==B_REPEAT)
                    merror_illegal_token();
                match_type(TT_LINE_END);
                if(pop_block==B_SUB) {
                    in_sub = FALSE;
                }
                continue;
            } else if(token_type==KEY_VAR) {
                /* 'var' ID {',' ID} — comma-separated declaration list. */
                while(1) {
                    match_type(TT_ID);
                    l_get_token();
                    /* NOTE(review): terminator checked is TT_END, not
                     * TT_LINE_END — confirm this is intentional. */
                    if (token_type==TT_END) break;
                    else if (token_type==TT_COM) continue;
                    else merror_illegal_token();
                }
            }
            /* NOTE(review): a 'var' line falls through to this check, so
             * 'var' outside a sub is rejected here — confirm intended. */
            if (!in_sub) merror_illegal_token();
            if(token_type==KEY_IF) {
                match_exp(pline);
                match_type(TT_LINE_END);
                push_block(B_IF);
            } else if (token_type==KEY_ELSEIF) {
                /* 'elseif' only valid directly inside an open 'if'. */
                if (block_size<=0 || block_top!=B_IF)
                    merror_illegal_token();
                match_exp(pline);
                match_type(TT_LINE_END);
            } else if (token_type==KEY_ELSE) {
                /* 'else' valid inside 'if' or 'case'. */
                if (block_size<=0 || !(block_top==B_IF || block_top==B_CASE))
                    merror_illegal_token();
                match_type(TT_LINE_END);
            } else if (token_type==KEY_WHILE) {
                match_exp(pline);
                match_type(TT_LINE_END);
                push_block(B_WHILE);
            } else if (token_type==KEY_FOR) {
                // 'for' ID '=' EXP 'to' EXP ['step' EXP]
                match_type(TT_ID);
                match_type(OPR_EQ);
                match_exp(pline);
                match_str("to");
                match_exp(pline);
                l_get_token();
                if (token_type!=TT_LINE_END) {
                    if (!str_eq(token,"step")) merror_expect("step");
                    match_exp(pline);
                    match_type(TT_LINE_END);
                }
                push_block(B_FOR);
            } else if (token_type==KEY_CASE) {
                match_type(TT_ID);
                match_type(TT_LINE_END);
                push_block(B_CASE);
            } else if (token_type==KEY_REPEAT) {
                match_type(TT_LINE_END);
                push_block(B_REPEAT);
            } else if (token_type==KEY_UNTIL) {
                /* 'until' EXP closes the innermost 'repeat'. */
                if (block_size<=0 || block_top!=B_REPEAT)
                    merror_illegal_token();
                match_exp(pline);
                match_type(TT_LINE_END);
                pop_block;
            } else if (token_type==KEY_WHEN) {
                /* 'when' EXP — a branch label inside 'case'. */
                if (block_size<=0 || block_top!=B_CASE)
                    merror_illegal_token();
                match_exp(pline);
                match_type(TT_LINE_END);
            } else if (token_type==KEY_GOSUB) {
                match_type(TT_ID);
                match_type(TT_LINE_END);
            } else if (token_type==KEY_EXIT) {
                match_type(TT_LINE_END);
            } else if (token_type==KEY_BREAK) {
                match_type(TT_LINE_END);
            } else if (token_type==KEY_RETURN) {
                match_type(TT_LINE_END);
            } else if (token_type==KEY_DIM) {
                /* 'dim' ID '[' EXP ']' {',' ID '[' EXP ']'} */
                while(1) {
                    match_type (TT_ID);
                    match_type (TT_LBK);
                    match_exp (pline);
                    match_type (TT_RBK);
                    l_get_token();
                    if (token_type==TT_COM) continue;
                    else if (token_type==TT_LINE_END) break;
                    else merror_illegal_token();
                }
            }
        } else if (token_type==TT_ID) {
            /* Statements starting with an identifier: print, input, or
             * assignment to a scalar / array element. */
            if (!in_sub) merror_illegal_token();
            if (str_eq(token,"print")) {
                /* print ARG {',' ARG} — ARG is a string literal or an
                 * expression (put the token back so match_exp re-reads it). */
                while(1) {
                    l_get_token();
                    if(token_type!=TT_STRING) {
                        l_put_back();
                        match_exp(pline);
                    }
                    l_get_token();
                    if(token_type==TT_LINE_END) break;
                    else if (token_type==TT_COM) continue;
                    else merror_expect(",");
                }
            } else if (str_eq(token,"input")) {
                /* input ID {',' ID} */
                while(1) {
                    match_type(TT_ID);
                    l_get_token();
                    if(token_type==TT_LINE_END) break;
                    else if (token_type==TT_COM) continue;
                    else merror_expect(",");
                }
            } else {
                // match: [var][=][exp]
                l_get_token();
                if (token_type==OPR_EQ) {
                    /* scalar assignment: ID '=' EXP */
                    match_exp(pline);
                    match_type(TT_LINE_END);
                } else {
                    /* array element assignment: ID '(' EXP ')' '=' EXP */
                    if (token_type!=TT_LBK)merror_expect("(");
                    match_exp(pline);
                    match_type(TT_RBK);
                    match_type(OPR_EQ);
                    match_exp(pline);
                    match_type(TT_LINE_END);
                }
            }
        } else merror_illegal_token ();
    }
    /* Any block still open at EOF is an error. */
    if (block_size>0)
        merror_msg("incompleted '%s' block!",BLOCK_NAME[block_top]);
}
/*---------------------------------------------------------------------------*/
/*
 * Queue the packet currently in the packetbuf for CSMA transmission to its
 * link-layer receiver.
 *
 * sent/ptr: MAC callback (and opaque argument) invoked when transmission
 * completes.  On any allocation failure (neighbor entry, packet entry,
 * metadata, queuebuf) or a full per-neighbor queue, the callback is invoked
 * immediately with MAC_TX_ERR.
 *
 * Fix vs. previous revision: the metadata-allocation-failure path printed a
 * copy-pasted "could not allocate queuebuf" message, and the queuebuf
 * failure path printed the message twice (plus a stray "could not allocate
 * packet").  The debug output now names the resource that actually failed;
 * all allocations, rollbacks and the error callback are unchanged.
 */
static void
send_packet(mac_callback_t sent, void *ptr)
{
  struct rdc_buf_list *q;
  struct neighbor_queue *n;
  static uint8_t initialized = 0;
  static uint16_t seqno;
  const linkaddr_t *addr = packetbuf_addr(PACKETBUF_ADDR_RECEIVER);

  if(!initialized) {
    initialized = 1;
    /* Initialize the sequence number to a random value as per 802.15.4. */
    seqno = random_rand();
  }

  if(seqno == 0) {
    /* PACKETBUF_ATTR_MAC_SEQNO cannot be zero, due to a pecuilarity
       in framer-802154.c. */
    seqno++;
  }
  packetbuf_set_attr(PACKETBUF_ATTR_MAC_SEQNO, seqno++);

  /* Look for the neighbor entry */
  n = neighbor_queue_from_addr(addr);
  if(n == NULL) {
    /* Allocate a new neighbor entry */
    n = memb_alloc(&neighbor_memb);
    if(n != NULL) {
      /* Init neighbor entry */
      linkaddr_copy(&n->addr, addr);
      n->transmissions = 0;
      n->collisions = 0;
      n->deferrals = 0;
      /* Init packet list for this neighbor */
      LIST_STRUCT_INIT(n, queued_packet_list);
      /* Add neighbor to the list */
      list_add(neighbor_list, n);
    }
  }

  if(n != NULL) {
    /* Add packet to the neighbor's queue */
    if(list_length(n->queued_packet_list) < CSMA_MAX_PACKET_PER_NEIGHBOR) {
      q = memb_alloc(&packet_memb);
      if(q != NULL) {
        q->ptr = memb_alloc(&metadata_memb);
        if(q->ptr != NULL) {
          q->buf = queuebuf_new_from_packetbuf();
          if(q->buf != NULL) {
            struct qbuf_metadata *metadata = (struct qbuf_metadata *)q->ptr;
            /* Neighbor and packet successfully allocated */
            if(packetbuf_attr(PACKETBUF_ATTR_MAX_MAC_TRANSMISSIONS) == 0) {
              /* Use default configuration for max transmissions */
              metadata->max_transmissions = CSMA_MAX_MAC_TRANSMISSIONS;
            } else {
              metadata->max_transmissions =
                packetbuf_attr(PACKETBUF_ATTR_MAX_MAC_TRANSMISSIONS);
            }
            metadata->sent = sent;
            metadata->cptr = ptr;
#if PACKETBUF_WITH_PACKET_TYPE
            if(packetbuf_attr(PACKETBUF_ATTR_PACKET_TYPE) ==
               PACKETBUF_ATTR_PACKET_TYPE_ACK) {
              /* ACKs jump to the head of the queue. */
              list_push(n->queued_packet_list, q);
            } else
#endif
            {
              list_add(n->queued_packet_list, q);
            }
            PRINTF("csma: send_packet, queue length %d, free packets %d\n",
                   list_length(n->queued_packet_list),
                   memb_numfree(&packet_memb));
            /* If q is the first packet in the neighbor's queue, send asap */
            if(list_head(n->queued_packet_list) == q) {
              ctimer_set(&n->transmit_timer, 0, transmit_packet_list, n);
            }
            return;
          }
          /* queuebuf pool exhausted: roll back metadata and packet entry. */
          memb_free(&metadata_memb, q->ptr);
          memb_free(&packet_memb, q);
          PRINTF("csma: could not allocate queuebuf, dropping packet\n");
        } else {
          /* metadata pool exhausted: roll back the packet entry. */
          memb_free(&packet_memb, q);
          PRINTF("csma: could not allocate metadata, dropping packet\n");
        }
      } else {
        PRINTF("csma: could not allocate packet, dropping packet\n");
      }
      /* The packet allocation failed. Remove and free neighbor entry if
         empty. */
      if(list_length(n->queued_packet_list) == 0) {
        list_remove(neighbor_list, n);
        memb_free(&neighbor_memb, n);
      }
    } else {
      PRINTF("csma: Neighbor queue full\n");
    }
  } else {
    PRINTF("csma: could not allocate neighbor, dropping packet\n");
  }
  mac_call_sent_callback(sent, ptr, MAC_TX_ERR, 1);
}
/*
 * slurm_job_step_get_pids - get the complete list of pids for a given
 *	job step
 *
 * IN job_id
 * IN step_id
 * IN node_list, optional, if NULL then all nodes in step are returned.
 * OUT resp - if *resp is NULL a response message is allocated here (and
 *	freed again on RPC failure); otherwise the caller's message is
 *	filled in.  Caller frees with slurm_job_step_pids_response_msg_free.
 * RET SLURM_SUCCESS on success SLURM_ERROR else
 */
extern int
slurm_job_step_get_pids(uint32_t job_id, uint32_t step_id, char *node_list,
			job_step_pids_response_msg_t **resp)
{
	int rc = SLURM_SUCCESS;
	slurm_msg_t req_msg;
	job_step_id_msg_t req;
	ListIterator itr;
	List ret_list = NULL;
	ret_data_info_t *ret_data_info = NULL;
	slurm_step_layout_t *step_layout = NULL;
	job_step_pids_response_msg_t *resp_out;
	bool created = 0;

	xassert(resp);

	/* No node list given: derive it from the step layout. step_layout
	 * owns node_list from here on, so it is only destroyed at cleanup. */
	if (!node_list) {
		if (!(step_layout = slurm_job_step_layout_get(job_id,
							      step_id))) {
			rc = errno;
			error("slurm_job_step_get_pids: "
			      "problem getting step_layout for %u.%u: %s",
			      job_id, step_id, slurm_strerror(rc));
			return rc;
		}
		node_list = step_layout->node_list;
	}

	/* Allocate the response message unless the caller supplied one;
	 * remember if we did, so we can free it on failure. */
	if (!*resp) {
		resp_out = xmalloc(sizeof(job_step_pids_response_msg_t));
		*resp = resp_out;
		created = 1;
	} else
		resp_out = *resp;

	debug("slurm_job_step_get_pids: "
	      "getting pid information of job %u.%u on nodes %s",
	      job_id, step_id, node_list);

	slurm_msg_t_init(&req_msg);

	memset(&req, 0, sizeof(job_step_id_msg_t));
	resp_out->job_id = req.job_id = job_id;
	resp_out->step_id = req.step_id = step_id;

	req_msg.msg_type = REQUEST_JOB_STEP_PIDS;
	req_msg.data = &req;

	/* Fan the request out to every node in the list and collect one
	 * ret_data_info per node. */
	if (!(ret_list = slurm_send_recv_msgs(node_list,
					      &req_msg, 0, false))) {
		error("slurm_job_step_get_pids: got an error no list returned");
		rc = SLURM_ERROR;
		if (created) {
			slurm_job_step_pids_response_msg_free(resp_out);
			*resp = NULL;
		}
		goto cleanup;
	}

	itr = list_iterator_create(ret_list);
	while((ret_data_info = list_next(itr))) {
		switch (ret_data_info->type) {
		case RESPONSE_JOB_STEP_PIDS:
			if (!resp_out->pid_list)
				resp_out->pid_list = list_create(
					slurm_free_job_step_pids);
			/* Steal the payload: NULL it so destroying ret_list
			 * does not free what pid_list now owns. */
			list_push(resp_out->pid_list, ret_data_info->data);
			ret_data_info->data = NULL;
			break;
		case RESPONSE_SLURM_RC:
			rc = slurm_get_return_code(ret_data_info->type,
						   ret_data_info->data);
			error("slurm_job_step_get_pids: "
			      "there was an error with the "
			      "list pid request rc = %s",
			      slurm_strerror(rc));
			break;
		default:
			rc = slurm_get_return_code(ret_data_info->type,
						   ret_data_info->data);
			error("slurm_job_step_get_pids: "
			      "unknown return given %d rc = %s",
			      ret_data_info->type, slurm_strerror(rc));
			break;
		}
	}
	list_iterator_destroy(itr);
	list_destroy(ret_list);

	if (resp_out->pid_list)
		list_sort(resp_out->pid_list, (ListCmpF)_sort_pids_by_name);
cleanup:
	/* Safe when step_layout is NULL (node_list was caller-supplied). */
	slurm_step_layout_destroy(step_layout);
	return rc;
}
// add the card to the card_list // success: SUCCESS // failure: ERRNO_MP_LIST_PUSH int card_add_to_list(card_t * card, list_t * card_list) { node_t * nd = node_create((void *)card); if(SUCCESS != list_push(card_list, nd, (void *)&(card->card_id))) {return ERRNO_MP_LIST_PUSH;} return SUCCESS; }
//void sendDataTask() {   /* TinyOS original entry point, kept for reference */
/*
 * BCP forwarding-engine send task (Contiki protothread).  Each posted event
 * dequeues (or synthesizes) one queue entry and attempts a unicast to the
 * backpressure-chosen next hop.  Locals are `static` because protothreads
 * do not preserve the C stack across PROCESS_WAIT_EVENT().
 */
PROCESS_THREAD(sendDataTask, ev, data)
{
  PROCESS_BEGIN();
  while(1) {
    PROCESS_WAIT_EVENT();
    pmesg(200, "%s :: %s :: Line #%d\n", __FILE__, __func__, __LINE__);
    static fe_queue_entry_t* qe;
    static fe_queue_entry_t* nullQe;
    static message_wrapper_t* nullMsg;
    static bcp_data_header_t* nullHdr;
    static int subsendResult;
    static error_t retVal;
    static uint8_t payloadLen;
    static rimeaddr_t dest;
    static message_wrapper_t* hdr;
    static uint32_t sendTime;
    static uint32_t checksum;
    checksum = 0;
    // Specialty handling of loopback or sudden sink designation
    if(rootControl_isRoot()) {
      /* We are (or just became) the sink: consume the pending entry locally
       * instead of forwarding it. */
      sending = false;
      // If we are sending we'll abort
      if(sendQeOccupied == true) {
        qe = sendQe;
        sendQeOccupied = false;
        // Guaranteed succcessful service
      } else {
        if(list_length(send_stack) == 0 && virtualQueueSize == 0) {
          //This shouldn't be possible
          pmesg(10, "FAILURE IN BCP_FORWARDING_ENGINE.c SENDDATATASK()");
          continue;
        }
        qe = sendQe = list_pop(send_stack);
      }
      /* Deliver via loopback buffer, then return the entry's storage to
       * its pools. */
      memcpy(loopbackMsgPtr, qe -> msg, sizeof(message_wrapper_t));
      //Deallocate the message in qe
      list_remove(message_pool, qe -> msg);
      memb_free(&message_pool_mem, qe -> msg);
      //Deallocate the qe object
      list_remove(q_entry_pool, qe);
      memb_free(&q_entry_pool_mem, qe);
      //Signal the event (receiver may hand back a replacement buffer)
      if(ev_msg_receive != NULL)
        loopbackMsgPtr = ev_msg_receive(loopbackMsgPtr);
      //Maybe do it again, if we are sink and there are data packets
      forwarderActivity();
      continue;
    }
    if(sendQeOccupied == true) {
      /* A previous attempt is still in flight/retrying: reuse its entry. */
      qe = sendQe;
    } else {
      if(list_length(send_stack) == 0 && virtualQueueSize == 0) {
        pmesg(10, "ERROR: BcpForwardingEngine sendDataTask()\n");
        continue;
      }
      //Check to see whether there exists a neighbor to route to with positive weight.
      retVal = routerForwarder_updateRouting(list_length(send_stack) +
                                             sendQeOccupied + virtualQueueSize);
      //NO_SNOOP: add another retVal response type,
      //if there is no entry in our routing table
      //request a RR beacon
      if(retVal == ESIZE) {
        sending = false;
        pmesg(200, "DEBUG: RR Beacon Send\n");
        beaconType = RR_BEACON;
        process_post(&sendBeaconTask, NULL, NULL);
        //Stop the timer, reset it. We have two, one for keeping time,
        // one for the function call back
        ctimer_stop(&txRetryTimer);
        ctimer_set(&txRetryTimer, REROUTE_TIME, tx_retry_timer_fired, NULL);
        timer_reset(&txRetryTimerTime);
        continue;
      }
      if(retVal == FAIL) {
        //No neighbor is a good option right now, wait on a recompute-time
        sending = false;
        ctimer_stop(&txRetryTimer);
        ctimer_set(&txRetryTimer, REROUTE_TIME, tx_retry_timer_fired, NULL);
        timer_reset(&txRetryTimerTime);
        continue;
      }
      if(list_length(send_stack) == 0) {
        // Create a null packet, place it on the stack (must be here by
        // virtue of a virtual backlog)
        nullQe = memb_alloc(&q_entry_pool_mem);
        if(nullQe == NULL) {
          pmesg(10, "ERROR: BcpForwardingEngine - sendDataTask. Cannot enqueue nullQe\n");
          continue;
        }
        list_add(q_entry_pool, nullQe);
        nullMsg = memb_alloc(&message_pool_mem);
        if(nullMsg == NULL) {
          pmesg(10, "ERROR: BcpForwardingEngine - sendDataTask. Cannot enqueue nullMsg\n");
          //Deallocate
          list_remove(q_entry_pool, nullQe);
          memb_free(&q_entry_pool_mem, nullQe);
          continue;
        }
        list_add(message_pool, nullMsg);
        /* Build the NULL (placeholder) packet header in-place. */
        nullHdr = &(nullMsg -> bcp_data_header);
        nullHdr -> hopCount = 0;
        rimeaddr_copy(&(nullHdr -> origin), &rimeaddr_node_addr);
        nullHdr -> originSeqNo = nullSeqNo++;
        nullHdr -> bcpDelay = 0;
        nullHdr -> txCount = 0;
        nullHdr -> pktType = PKT_NULL;
        nullQe -> arrivalTime = 0; //call DelayPacketTimer.getNow();
        nullQe -> firstTxTime = 0;
        nullQe -> bcpArrivalDelay = 0;
        nullQe -> msg = nullMsg;
        nullQe -> source = LOCAL_SEND;
        nullQe -> txCount = 0;
        list_push(send_stack, nullQe);
        /* The null packet materializes one unit of virtual backlog. */
        virtualQueueSize--;
      }
      qe = sendQe = list_pop(send_stack);
      pmesg(10, "SENDING MESSAGE ORIGINATING FROM = %d.%d\n",
            qe -> msg -> bcp_data_header.origin.u8[0],
            qe -> msg -> bcp_data_header.origin.u8[1]);
      qe -> firstTxTime = timer_remaining(&txRetryTimerTime); //call txRetryTimer.getNow();
      sendQeOccupied = true;
    } //End else
    // payloadLen = sizeof(qe -> msg); //call SubPacket.payloadLength(qe->msg);
    // Give up on a link after MAX_RETX_ATTEMPTS retransmit attempts, link is lousy!
    // Furthermore, penalize by double MAX_RETX_ATTEMPTS, due to cutoff.
    if(qe -> txCount >= MAX_RETX_ATTEMPTS) {
      static bool isBroadcast = 0;
      isBroadcast = rimeaddr_cmp(&(qe -> msg -> from), &rimeaddr_null);
      routerForwarder_updateLinkSuccess(&(qe -> msg -> from), isBroadcast,
                                        2*MAX_RETX_ATTEMPTS);
      //call RouterForwarderIF.updateLinkSuccess(call AMDataPacket.destination(qe->msg), 2*MAX_RETX_ATTEMPTS);
      // call BcpDebugIF.reportValues( 0,0,0,0,0,MAX_RETX_ATTEMPTS, call AMDataPacket.destination(qe->msg),0x77 );
      qe -> txCount = 0;
      // Place back on the Stack, discard element if necesary
      conditionalFQDiscard();
      list_push(send_stack, qe); // retVal = call SendStack.pushTop( qe );
      sendQeOccupied = false;
      // Try again after a REROUTE_TIME, this choice was bad.
      sending = false;
      ctimer_stop(&txRetryTimer);
      ctimer_set(&txRetryTimer, REROUTE_TIME, tx_retry_timer_fired, NULL);
      timer_reset(&txRetryTimerTime);
      continue;
    }
    qe -> txCount++;
    localTXCount++;
    rimeaddr_copy(&dest, &nextHopAddress_m);
    //Request an ack, not going to support DL without ack (for now)
    //Store the local backpressure level to the backpressure field
    hdr = qe -> msg; //getHeader(qe->msg);
    hdr -> bcp_data_header.bcpBackpressure = list_length(send_stack) +
      sendQeOccupied + virtualQueueSize;
    //Fill in the next hop Backpressure value
    hdr -> bcp_data_header.nhBackpressure = nextHopBackpressure_m;
    //Fill in the node tx count field (burst success detection by neighbors)
#ifndef BEACON_ONLY
    hdr -> bcp_data_header.nodeTxCount = localTXCount;
    // Fill in the burstNotifyAddr, then reset to TOS_NODE_ID immediately
    rimeaddr_copy(&(hdr->bcp_data_header.burstNotifyAddr),
                  &notifyBurstyLinkNeighbor_m);
    rimeaddr_copy(&notifyBurstyLinkNeighbor_m, &rimeaddr_node_addr);
#endif
    //Update the txCount field
    hdr -> bcp_data_header.txCount = hdr -> bcp_data_header.txCount + 1;
    sendTime = 0; //This timer is never implemented in TinyOS: timer_remaining(&delayPacketTimer);
    //regardless of transmission history, lastTxTime and BcpDelay are re-computed.
    hdr -> bcp_data_header.bcpDelay = qe -> bcpArrivalDelay +
      (sendTime - qe -> arrivalTime) + PER_HOP_MAC_DLY;
    //Calculate the checksum!
    checksum = calcHdrChecksum(qe -> msg);
    hdr -> bcp_data_header.hdrChecksum = checksum;
    // #ifdef LOW_POWER_LISTENING
    //   call LowPowerListening.setRxSleepInterval(qe->msg, LPL_SLEEP_INTERVAL_MS);
    //   call LowPowerListening.setRemoteWakeupInterval(qe->msg, LPL_SLEEP_INTERVAL_MS);
    // #endif
    //Send the packet!!
    rimeaddr_copy(&(qe -> msg -> to), &dest);
    rimeaddr_copy(&(qe -> msg -> from), &rimeaddr_node_addr);
    payloadLen = sizeof(message_wrapper_t); //call SubPacket.payloadLength(qe->msg);
    /* Stage the wrapper into the packetbuf for the Rime unicast layer. */
    packetbuf_clear();
    packetbuf_set_datalen(payloadLen);
    packetbuf_copyfrom(qe -> msg, payloadLen);
    pmesg(10, "Checksum from packet about to send: %u\n",
          ((message_wrapper_t*)packetbuf_dataptr()) -> bcp_data_header.hdrChecksum);
    //Non-zero if the packet could be sent, zero otherwise
    subsendResult = unicast_send(&unicast, &dest);
    //Success
    if(subsendResult != 0) {
      // Successfully submitted to the data-link layer.
      pmesg(100, "BcpForwardingEngine: Successfully Sent Unicast Message\n");
      //Print out end-to-end message only if packet is originating from here
      if(rimeaddr_cmp(&(qe -> msg -> from), &(qe -> msg -> bcp_data_header.origin)) != 0)
        printf("Sent Packet from: %d.%d with SequenceNum = %lu\n",
               qe -> msg -> bcp_data_header.origin.u8[0],
               qe -> msg -> bcp_data_header.origin.u8[1],
               qe -> msg -> bcp_data_header.packetSeqNum);
      continue;
    } else {
      /* Submission failed: leave sendQeOccupied set so this entry is
       * retried, and re-post ourselves. */
      pmesg(100, "BcpForwardingEngine: Failed to Send Unicast Message. Trying again\n");
      // radioOn = false;
      // NO_SNOOP: set beacon type
      beaconType = NORMAL_BEACON;
      process_post(&sendDataTask, NULL, NULL);
    }
  } //End while(1)
  PROCESS_END();
}
/*
 * _forward_thread - worker that delivers a packed SLURM message to the
 * first reachable host in its hostlist, embedding the remaining hosts as a
 * forward list for that host to relay, then collects all responses into
 * fwd_msg->ret_list.  On a per-host failure the host is marked failed and
 * the next host is tried.  fwd_msg->forward_mutex guards ret_list; the
 * caller is woken through fwd_msg->notify at cleanup.
 * arg: forward_msg_t*.  Always returns NULL.
 */
void *_forward_thread(void *arg)
{
	forward_msg_t *fwd_msg = (forward_msg_t *)arg;
	Buf buffer = init_buf(fwd_msg->buf_len);
	List ret_list = NULL;
	slurm_fd_t fd = -1;
	ret_data_info_t *ret_data_info = NULL;
	char *name = NULL;
	hostlist_t hl = hostlist_create(fwd_msg->header.forward.nodelist);
	slurm_addr_t addr;
	char *buf = NULL;
	int steps = 0;
	/* Original per-hop timeout; kept so the total can be rescaled below. */
	int start_timeout = fwd_msg->timeout;

	/* repeat until we are sure the message was sent */
	while ((name = hostlist_shift(hl))) {
		if (slurm_conf_get_addr(name, &addr) == SLURM_ERROR) {
			error("forward_thread: can't find address for host "
			      "%s, check slurm.conf", name);
			slurm_mutex_lock(fwd_msg->forward_mutex);
			mark_as_failed_forward(&fwd_msg->ret_list, name,
					       SLURM_UNKNOWN_FORWARD_ADDR);
			free(name);
			/* More hosts remain: release the lock and try the
			 * next one; otherwise fall out holding the mutex
			 * (cleanup unlocks it). */
			if (hostlist_count(hl) > 0) {
				slurm_mutex_unlock(fwd_msg->forward_mutex);
				continue;
			}
			goto cleanup;
		}
		if ((fd = slurm_open_msg_conn(&addr)) < 0) {
			error("forward_thread to %s: %m", name);
			slurm_mutex_lock(fwd_msg->forward_mutex);
			mark_as_failed_forward(
				&fwd_msg->ret_list, name,
				SLURM_COMMUNICATIONS_CONNECTION_ERROR);
			free(name);
			if (hostlist_count(hl) > 0) {
				slurm_mutex_unlock(fwd_msg->forward_mutex);
				continue;
			}
			goto cleanup;
		}
		/* The remaining hosts become this host's forward list. */
		buf = hostlist_ranged_string_xmalloc(hl);
		xfree(fwd_msg->header.forward.nodelist);
		fwd_msg->header.forward.nodelist = buf;
		fwd_msg->header.forward.cnt = hostlist_count(hl);
		/* info("sending %d forwards (%s) to %s", */
		/*      fwd_msg->header.forward.cnt, */
		/*      fwd_msg->header.forward.nodelist, name); */
		if (fwd_msg->header.forward.nodelist[0]) {
			debug3("forward: send to %s along with %s",
			       name, fwd_msg->header.forward.nodelist);
		} else
			debug3("forward: send to %s ", name);

		pack_header(&fwd_msg->header, buffer);

		/* add forward data to buffer */
		if (remaining_buf(buffer) < fwd_msg->buf_len) {
			buffer->size += (fwd_msg->buf_len + BUF_SIZE);
			xrealloc(buffer->head, buffer->size);
		}
		if (fwd_msg->buf_len) {
			memcpy(&buffer->head[buffer->processed],
			       fwd_msg->buf, fwd_msg->buf_len);
			buffer->processed += fwd_msg->buf_len;
		}

		/* forward message */
		if (_slurm_msg_sendto(fd,
				      get_buf_data(buffer),
				      get_buf_offset(buffer),
				      SLURM_PROTOCOL_NO_SEND_RECV_FLAGS ) < 0) {
			error("forward_thread: slurm_msg_sendto: %m");
			slurm_mutex_lock(fwd_msg->forward_mutex);
			mark_as_failed_forward(&fwd_msg->ret_list, name,
					       errno);
			free(name);
			if (hostlist_count(hl) > 0) {
				/* Rebuild a fresh buffer for the retry. */
				free_buf(buffer);
				buffer = init_buf(fwd_msg->buf_len);
				slurm_mutex_unlock(fwd_msg->forward_mutex);
				slurm_close_accepted_conn(fd);
				fd = -1;
				continue;
			}
			goto cleanup;
		}

		/* These message types expect no responses: just record one
		 * entry per target host and finish. */
		if ((fwd_msg->header.msg_type == REQUEST_SHUTDOWN) ||
		    (fwd_msg->header.msg_type == REQUEST_RECONFIGURE) ||
		    (fwd_msg->header.msg_type == REQUEST_REBOOT_NODES)) {
			slurm_mutex_lock(fwd_msg->forward_mutex);
			ret_data_info = xmalloc(sizeof(ret_data_info_t));
			list_push(fwd_msg->ret_list, ret_data_info);
			ret_data_info->node_name = xstrdup(name);
			free(name);
			while ((name = hostlist_shift(hl))) {
				ret_data_info =
					xmalloc(sizeof(ret_data_info_t));
				list_push(fwd_msg->ret_list, ret_data_info);
				ret_data_info->node_name = xstrdup(name);
				free(name);
			}
			goto cleanup;
		}

		/* Scale the receive timeout by the expected forwarding
		 * tree depth. */
		if (fwd_msg->header.forward.cnt > 0) {
			static int message_timeout = -1;
			if (message_timeout < 0)
				message_timeout =
					slurm_get_msg_timeout() * 1000;
			steps = (fwd_msg->header.forward.cnt+1) /
				slurm_get_tree_width();
			fwd_msg->timeout = (message_timeout*steps);
			/* info("got %d * %d = %d", message_timeout, steps, fwd_msg->timeout); */
			steps++;
			fwd_msg->timeout += (start_timeout*steps);
			/* info("now + %d*%d = %d", start_timeout, steps, fwd_msg->timeout); */
		}

		ret_list = slurm_receive_msgs(fd, steps, fwd_msg->timeout);
		/* info("sent %d forwards got %d back", */
		/*      fwd_msg->header.forward.cnt, list_count(ret_list)); */

		/* Nothing (or only our own reply) came back although we
		 * forwarded: treat this hop as failed and retry further
		 * down the list. */
		if (!ret_list || (fwd_msg->header.forward.cnt != 0
				  && list_count(ret_list) <= 1)) {
			slurm_mutex_lock(fwd_msg->forward_mutex);
			mark_as_failed_forward(&fwd_msg->ret_list, name,
					       errno);
			free(name);
			if (ret_list)
				list_destroy(ret_list);
			if (hostlist_count(hl) > 0) {
				free_buf(buffer);
				buffer = init_buf(fwd_msg->buf_len);
				slurm_mutex_unlock(fwd_msg->forward_mutex);
				slurm_close_accepted_conn(fd);
				fd = -1;
				continue;
			}
			goto cleanup;
		} else if ((fwd_msg->header.forward.cnt+1)
			   != list_count(ret_list)) {
			/* this should never be called since the above
			   should catch the failed forwards and pipe
			   them back down, but this is here so we
			   never have to worry about a locked mutex */
			ListIterator itr = NULL;
			char *tmp = NULL;
			int first_node_found = 0;
			hostlist_iterator_t host_itr
				= hostlist_iterator_create(hl);
			error("We shouldn't be here. We forwarded to %d "
			      "but only got %d back",
			      (fwd_msg->header.forward.cnt+1),
			      list_count(ret_list));
			/* Mark every forwarded host with no matching reply
			 * as failed. */
			while ((tmp = hostlist_next(host_itr))) {
				int node_found = 0;
				itr = list_iterator_create(ret_list);
				while ((ret_data_info = list_next(itr))) {
					if (!ret_data_info->node_name) {
						/* Unnamed reply is the one
						 * from our direct peer. */
						first_node_found = 1;
						ret_data_info->node_name =
							xstrdup(name);
					}
					if (!strcmp(tmp,
						    ret_data_info->node_name)) {
						node_found = 1;
						break;
					}
				}
				list_iterator_destroy(itr);
				if (!node_found) {
					mark_as_failed_forward(
						&fwd_msg->ret_list,
						tmp,
						SLURM_COMMUNICATIONS_CONNECTION_ERROR);
				}
				free(tmp);
			}
			hostlist_iterator_destroy(host_itr);
			if (!first_node_found) {
				mark_as_failed_forward(&fwd_msg->ret_list,
						       name,
						       SLURM_COMMUNICATIONS_CONNECTION_ERROR);
			}
		}
		break;
	}
	/* Move every collected response into the shared ret_list. */
	slurm_mutex_lock(fwd_msg->forward_mutex);
	if (ret_list) {
		while ((ret_data_info = list_pop(ret_list)) != NULL) {
			if (!ret_data_info->node_name) {
				ret_data_info->node_name = xstrdup(name);
			}
			list_push(fwd_msg->ret_list, ret_data_info);
			debug3("got response from %s",
			       ret_data_info->node_name);
		}
		list_destroy(ret_list);
	}
	free(name);
cleanup:
	/* NOTE: the forward_mutex is held on every path that reaches here. */
	if ((fd >= 0) && slurm_close_accepted_conn(fd) < 0)
		error ("close(%d): %m", fd);
	hostlist_destroy(hl);
	destroy_forward(&fwd_msg->header.forward);
	free_buf(buffer);
	pthread_cond_signal(fwd_msg->notify);
	slurm_mutex_unlock(fwd_msg->forward_mutex);
	return (NULL);
}
/*
 * Recursively parse the s-expression text at *exp into a NODE list.
 * *exp is advanced past whatever was consumed; the scan temporarily
 * NUL-patches the buffer to isolate atoms, so the input must be writable.
 * Returns a freshly built (reversed-into-order) list; the nodes are
 * reference counted via incRef/decRef.
 */
NODE* parse(char **exp) {
	/* Lazy one-time interning/symbol-table setup. */
	if (!sym_map) parser_init();
	debug("Parse List: %s\n",*exp);
	NODE *head = NIL;
	while (**exp) {
		switch (*((*exp)++)) {
			case '\'': {
				/* Quote: wrap the next datum in (quote <datum>) and
				 * splice any trailing forms back onto the list. */
				debug("to quote: %s\n",*exp);
				NODE *quoted = parse(exp);
				debugVal(quoted->data,"quoted: ");
				list_push(newNODE(newPRIMFUNC(SPEC_QUOTE,l_quote),newNODE(quoted->data,NIL)),&head);
				if (quoted->addr) head = list_join(list_reverse((NODE*)quoted->addr),head);
				head = list_reverse(head);
				debugVal(head,"expression: ");
				return head;
			}
			case '(':
				/* Sub-list: recurse and push the result as one element. */
				list_push(parse(exp),&head);
				break;
			case ')':
				/* End of this list: restore order and return. */
				head = list_reverse(head);
				debugVal(head,"expression: ");
				return head;
			case ';':
				/* Comment: skip to end of line. */
				while (**exp != '\0' && **exp != '\r' && **exp != '\n') (*exp)++;
				break;
			case '\n':
			case '\r':
			case '\t':
			case ' ':
				/* Whitespace between tokens. */
				break;
			default: {
				/* Atom: scan to a delimiter, NUL-patch the buffer to
				 * get a C string, classify, then restore the byte. */
				char *sym = *exp-1;
				debug("origin: %s\n",sym);
				while (**exp && **exp != ' ' && **exp != ')' && **exp != '\n' && **exp != '\r' && **exp != '\t') (*exp)++;
				char old = **exp;
				**exp = 0;
				debug("literal: %s\n",sym);
				switch (sym[0]) {
					case '+':
					case '-':
						/* A lone sign (no digit after) is a symbol... */
						if (!isdigit(sym[1])) {
							list_push(newSYMBOL(intern(sym)),&head);
							break;
						}
						/* ...otherwise a signed number: fallthrough */
					case '0':
					case '1':
					case '2':
					case '3':
					case '4':
					case '5':
					case '6':
					case '7':
					case '8':
					case '9': {
						/* Numeric literal: '.' makes it a real. */
						bool real = false;
						char *scn = sym+1;
						while (*scn) {
							if (*scn == '.') {
								real = true;
							} else if (!isdigit(*scn)) {
								error("Malformed number character %c",*scn);
							}
							scn++;
						}
						if (real) {
							list_push(newREAL(atof(sym)),&head);
						} else {
							list_push(newINTEGER(atoi(sym)),&head);
						}
					} break;
					default:
						list_push(newSYMBOL(intern(sym)),&head);
						break;
				}
				**exp = old;
				/* If the atom is a reader literal (e.g. a reader macro
				 * alias), replace it with its mapped value. */
				if (head->data->type == ID_SYMBOL) {
					NODE *literal;
					if ((literal = binmap_find(head->data,literal_map))) {
						NODE *last = (NODE*)head->addr;
						incRef(last);
						decRef(head);
						incRef(literal->addr);
						head = last;
						list_push(literal->addr,&head);
						decRef(literal);
					}
				}
				debugVal(head,"parsed: ");
			}
		}
	}
	/* Input exhausted without a closing ')': return what we have. */
	head = list_reverse(head);
	debugVal(head,"dangling: ");
	return head;
}