/*
 * batch_bind - Set the batch request message so as to bind the shell to the
 * proper resources.
 *
 * Builds a CPU affinity mask for a batch job's shell from the job
 * credential's core bitmap and stores it (as a hex mask string) in
 * req->cpu_bind, setting req->cpu_bind_type accordingly.  On any
 * credential/topology error the request is left unmodified.
 */
void batch_bind(batch_job_launch_msg_t *req)
{
	bitstr_t *req_map, *hw_map;
	slurm_cred_arg_t arg;
	uint16_t sockets=0, cores=0, num_cpus;
	int start, task_cnt=0;

	/* Pull socket/core layout out of the job credential; without a
	 * credential we cannot know which cores the job was allocated. */
	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) {
		error("task/affinity: job lacks a credential");
		return;
	}
	/* Node index 0 is this node's entry in the credential;
	 * a nonzero return means it is missing. */
	start = _get_local_node_info(&arg, 0, &sockets, &cores);
	if (start != 0) {
		error("task/affinity: missing node 0 in job credential");
		slurm_cred_free_args(&arg);
		return;
	}
	if ((sockets * cores) == 0) {
		error("task/affinity: socket and core count both zero");
		slurm_cred_free_args(&arg);
		return;
	}

	/* Clamp to what this slurmd actually has configured, in case the
	 * controller's view of the node is larger than reality. */
	num_cpus = MIN((sockets * cores), (conf->sockets * conf->cores));
	req_map = (bitstr_t *) bit_alloc(num_cpus);
	hw_map = (bitstr_t *) bit_alloc(conf->block_map_size);

#ifdef HAVE_FRONT_END
{
	/* Since the front-end nodes are a shared resource, we limit each job
	 * to one CPU based upon monotonically increasing sequence number */
	static int last_id = 0;
	bit_set(hw_map, ((last_id++) % conf->block_map_size));
	task_cnt = 1;
}
#else
{
	char *str;
	int t, p;

	/* Transfer core_bitmap data to local req_map.
	 * The MOD function handles the case where fewer processes
	 * physically exist than are configured (slurmd is out of
	 * sync with the slurmctld daemon). */
	for (p = 0; p < (sockets * cores); p++) {
		if (bit_test(arg.job_core_bitmap, p))
			bit_set(req_map, (p % num_cpus));
	}

	str = (char *)bit_fmt_hexmask(req_map);
	debug3("task/affinity: job %u core mask from slurmctld: %s",
		req->job_id, str);
	xfree(str);

	/* Expand the per-core request into per-hardware-thread bits. */
	for (p = 0; p < num_cpus; p++) {
		if (bit_test(req_map, p) == 0)
			continue;
		/* core_bitmap does not include threads, so we
		 * add them here but limit them to what the job
		 * requested */
		for (t = 0; t < conf->threads; t++) {
			uint16_t pos = p * conf->threads + t;
			if (pos >= conf->block_map_size) {
				info("more resources configured than exist");
				/* Force the outer loop to terminate too. */
				p = num_cpus;
				break;
			}
			bit_set(hw_map, pos);
			task_cnt++;
		}
	}
}
#endif
	if (task_cnt) {
		req->cpu_bind_type = CPU_BIND_MASK;
		if (conf->task_plugin_param & CPU_BIND_VERBOSE)
			req->cpu_bind_type |= CPU_BIND_VERBOSE;
		xfree(req->cpu_bind);
		req->cpu_bind = (char *)bit_fmt_hexmask(hw_map);
		info("task/affinity: job %u CPU input mask for node: %s",
		     req->job_id, req->cpu_bind);
		/* translate abstract masks to actual hardware layout */
		_lllp_map_abstract_masks(1, &hw_map);
#ifdef HAVE_NUMA
		if (req->cpu_bind_type & CPU_BIND_TO_LDOMS) {
			_match_masks_to_ldom(1, &hw_map);
		}
#endif
		/* Re-format after the abstract->hardware translation;
		 * the first string was informational only. */
		xfree(req->cpu_bind);
		req->cpu_bind = (char *)bit_fmt_hexmask(hw_map);
		info("task/affinity: job %u CPU final HW mask for node: %s",
		     req->job_id, req->cpu_bind);
	} else {
		error("task/affinity: job %u allocated no CPUs",
		      req->job_id);
	}
	FREE_NULL_BITMAP(hw_map);
	FREE_NULL_BITMAP(req_map);
	slurm_cred_free_args(&arg);
}
/*
 * huffman_compress - Huffman-encode `size` bytes of `original`.
 *
 * On success, *compressed points to a malloc'd buffer (caller frees)
 * laid out as: [int original size][256 scaled frequency bytes][bit
 * stream of codes], and the function returns the compressed byte count.
 * Returns -1 on allocation or tree-building failure, leaving
 * *compressed NULL or untouched past the initial reset.
 */
int huffman_compress(const unsigned char *original, unsigned char **compressed, int size)
{
	BiTree *tree;
	HuffCode table[UCHAR_MAX + 1];
	int freqs[UCHAR_MAX + 1], max, scale, hsize, ipos, opos, cpos, c, i;
	unsigned char *comp, *temp;

	/* Initially there is no buffer of compressed data. */
	*compressed = NULL;

	/* Get the frequency of each symbol in the original data. */
	for (c = 0; c <= UCHAR_MAX; c++)
		freqs[c] = 0;
	ipos = 0;
	if (size > 0) {
		while (ipos < size) {
			freqs[original[ipos]]++;
			ipos++;
		}
	}

	/* Scale the frequencies to fit into one byte so they can be stored
	 * in the header.  Starting max at UCHAR_MAX means no scaling occurs
	 * unless some frequency actually exceeds one byte. */
	max = UCHAR_MAX;
	for (c = 0; c <= UCHAR_MAX; c++) {
		if (freqs[c] > max)
			max = freqs[c];
	}
	for (c = 0; c <= UCHAR_MAX; c++) {
		scale = (int)(freqs[c] / ((double)max / (double)UCHAR_MAX));
		/* Never scale a nonzero frequency down to zero, or the
		 * symbol would vanish from the decoder's tree. */
		if (scale == 0 && freqs[c] != 0)
			freqs[c] = 1;
		else
			freqs[c] = scale;
	}

	/* Build the Huffman tree and table of codes for the data. */
	if (build_tree(freqs, &tree) != 0)
		return -1;
	for (c = 0; c <= UCHAR_MAX; c++)
		memset(&table[c], 0, sizeof(HuffCode));
	build_table(bitree_root(tree), 0x0000, 0, table);
	/* bitree_destroy releases the nodes; free releases the tree
	 * structure itself, which build_tree allocated — presumably;
	 * verify against build_tree's ownership contract. */
	bitree_destroy(tree);
	free(tree);

	/* Write the header information: original size followed by the 256
	 * scaled frequencies the decoder needs to rebuild the tree. */
	hsize = sizeof(int) + (UCHAR_MAX + 1);
	if ((comp = (unsigned char *)malloc(hsize)) == NULL)
		return -1;
	memcpy(comp, &size, sizeof(int));
	for (c = 0; c <= UCHAR_MAX; c++)
		comp[sizeof(int) + c] = (unsigned char)freqs[c];

	/* Compress the data.  opos counts output BITS, starting just past
	 * the header. */
	ipos = 0;
	opos = hsize * 8;
	while (ipos < size) {
		/* Get the next symbol in the original data. */
		c = original[ipos];
		/* Write the code for the symbol to the buffer of
		 * compressed data, one bit at a time. */
		for (i = 0; i < table[c].size; i++) {
			if (opos % 8 == 0) {
				/* Crossing a byte boundary: grow the output
				 * buffer by one byte. */
				if ((temp = (unsigned char *)realloc(comp, (opos / 8) + 1)) == NULL) {
					free(comp);
					return -1;
				}
				comp = temp;
			}
			/* Codes are right-justified in a short; cpos indexes
			 * the i-th significant bit of the code. */
			cpos = (sizeof(short) * 8) - table[c].size + i;
			bit_set(comp, opos, bit_get((unsigned char *)&table[c].code, cpos));
			opos++;
		}
		ipos++;
	}

	/* Point to the buffer of compressed data. */
	*compressed = comp;

	/* Return the number of bytes (rounded up from bits) in the
	 * compressed data. */
	return ((opos - 1) / 8) + 1;
}
/*
 * lz77_uncompress - Decode an LZ77 stream produced by the matching
 * compressor.
 *
 * The stream begins with an int header holding the original size,
 * followed by tokens: a 1 bit introduces a phrase token (window offset,
 * phrase length, next symbol), a 0 bit introduces a literal symbol
 * token.  On success *original points to a malloc'd buffer (caller
 * frees) and the original byte count is returned; -1 on allocation
 * failure.
 */
int lz77_uncompress(const unsigned char *compressed, unsigned char **original)
{
	unsigned char window[LZ77_WINDOW_SIZE], buffer[LZ77_BUFFER_SIZE], *orig, *temp, next;
	int offset, length, remaining, hsize, size, ipos, opos, tpos, state, i;

	/* Make the pointer to the original data not valid until later. */
	*original = orig = NULL;

	/* Get the header information (the size of the original data). */
	hsize = sizeof(int);
	memcpy(&size, compressed, sizeof(int));

	/* Initialize the sliding window and the look-ahead buffer.  Both
	 * start zeroed, matching the compressor's initial state. */
	memset(window, 0, LZ77_WINDOW_SIZE);
	memset(buffer, 0, LZ77_BUFFER_SIZE);

	/* Uncompress the data.  ipos counts input BITS, starting just past
	 * the header; opos counts output bytes. */
	ipos = hsize * 8;
	opos = 0;
	remaining = size;

	while (remaining > 0) {
		/* Get the next bit in the compressed data: the token type. */
		state = bit_get(compressed, ipos);
		ipos++;

		if (state == 1) {
			/* Handle processing a phrase token. */

			/* Read the window offset, MSB-justified into the low
			 * LZ77_WINOFF_BITS bits of a network-order int. */
			memset(&offset, 0, sizeof(int));
			for (i = 0; i < LZ77_WINOFF_BITS; i++) {
				tpos = (sizeof(int) * 8) - LZ77_WINOFF_BITS + i;
				bit_set((unsigned char *)&offset, tpos, bit_get(compressed, ipos));
				ipos++;
			}
			/* Read the phrase length the same way. */
			memset(&length, 0, sizeof(int));
			for (i = 0; i < LZ77_BUFLEN_BITS; i++) {
				tpos = (sizeof(int) * 8) - LZ77_BUFLEN_BITS + i;
				bit_set((unsigned char *)&length, tpos, bit_get(compressed, ipos));
				ipos++;
			}
			/* Read the symbol that follows the phrase. */
			next = 0x00;
			for (i = 0; i < LZ77_NEXT_BITS; i++) {
				tpos = (sizeof(unsigned char) * 8) - LZ77_NEXT_BITS + i;
				bit_set((unsigned char *)&next, tpos, bit_get(compressed, ipos));
				ipos++;
			}

			/* Ensure that the offset and length have the correct
			 * byte ordering for the system. */
			offset = ntohl(offset);
			length = ntohl(length);

			/* Grow the output buffer to hold the phrase plus the
			 * trailing unmatched symbol. */
			i = 0;
			if (opos > 0) {
				if ((temp = (unsigned char *)realloc(orig, opos+length+1)) == NULL) {
					free(orig);
					return -1;
				}
				orig = temp;
			} else {
				if ((orig = (unsigned char *)malloc(length + 1)) == NULL)
					return -1;
			}

			/* Write the phrase from the window to the buffer of
			 * original data. */
			while (i < length && remaining > 0) {
				orig[opos] = window[offset + i];
				opos++;
				/* Record each symbol in the look-ahead buffer
				 * until ready to update the sliding window. */
				buffer[i] = window[offset + i];
				i++;
				/* Account for each symbol consumed. */
				remaining--;
			}

			/* Write the unmatched symbol to the buffer of
			 * original data, unless the stream ended exactly on
			 * the phrase. */
			if (remaining > 0) {
				orig[opos] = next;
				opos++;
				/* Also record this symbol in the look-ahead
				 * buffer. */
				buffer[i] = next;
				remaining--;
			}

			/* Adjust the phrase length to account for the
			 * unmatched symbol, so the window shift below covers
			 * everything emitted by this token. */
			length++;
		} else {
			/* Handle processing a symbol token: a single literal
			 * byte. */
			next = 0x00;
			for (i = 0; i < LZ77_NEXT_BITS; i++) {
				tpos = (sizeof(unsigned char) * 8) - LZ77_NEXT_BITS + i;
				bit_set((unsigned char *)&next, tpos, bit_get(compressed, ipos));
				ipos++;
			}

			/* Write the symbol to the buffer of original data,
			 * growing it by one byte. */
			if (opos > 0) {
				if ((temp = (unsigned char *)realloc(orig, opos + 1)) == NULL) {
					free(orig);
					return -1;
				}
				orig = temp;
			} else {
				if ((orig = (unsigned char *)malloc(1)) == NULL)
					return -1;
			}
			orig[opos] = next;
			opos++;

			/* Record the symbol in the look-ahead buffer until
			 * ready to update the sliding window. */
			if (remaining > 0)
				buffer[0] = next;

			/* Account for the symbol just emitted. */
			remaining--;

			/* Set the phrase length so the window shift below
			 * slides by exactly one symbol. */
			length = 1;
		}

		/* Copy the look-ahead buffer into the sliding window:
		 * shift the window left by `length` and append the newly
		 * emitted symbols at its end. */
		memmove(&window[0], &window[length], LZ77_WINDOW_SIZE - length);
		memmove(&window[LZ77_WINDOW_SIZE - length], &buffer[0], length);
	}

	/* Point to the buffer of original data. */
	*original = orig;

	/* Return the number of bytes in the original data. */
	return opos;
}
/*
 * hid_interrupt - Process one HID report received on a Bluetooth HID
 * session's Interrupt channel.
 *
 * Parses the report against the device's HID descriptor, accumulating
 * mouse state (fed to the console via CONS_MOUSECTL) and keyboard state
 * (fed to the virtual keyboard / kbd_process_keys).  Returns 0 on
 * success, -1 on a short or unexpected message.
 */
int32_t hid_interrupt(bthid_session_p s, uint8_t *data, int32_t len)
{
	hid_device_p hid_device;
	hid_data_t d;
	hid_item_t h;
	int32_t report_id, usage, page, val, mouse_x, mouse_y, mouse_z, mouse_butt, mevents, kevents, i;

	assert(s != NULL);
	assert(s->srv != NULL);
	assert(data != NULL);

	/* Need at least: transaction header, report ID, one data byte. */
	if (len < 3) {
		syslog(LOG_ERR, "Got short message (%d bytes) on Interrupt "
			"channel from %s", len, bt_ntoa(&s->bdaddr, NULL));
		return (-1);
	}

	/* 0xa1 = DATA input transaction header; anything else is not a
	 * report we can handle here. */
	if (data[0] != 0xa1) {
		syslog(LOG_ERR, "Got unexpected message 0x%x on "
			"Interrupt channel from %s",
			data[0], bt_ntoa(&s->bdaddr, NULL));
		return (-1);
	}

	report_id = data[1];
	/* Skip the transaction header only; the parser below starts at the
	 * report ID byte — presumably hid_get_data expects that framing;
	 * verify against the hid(3) usage elsewhere in this daemon. */
	data ++;
	len --;

	hid_device = get_hid_device(&s->bdaddr);
	assert(hid_device != NULL);

	mouse_x = mouse_y = mouse_z = mouse_butt = mevents = kevents = 0;

	/* Walk every input item in the descriptor and decode the matching
	 * field of this report. */
	for (d = hid_start_parse(hid_device->desc, 1 << hid_input, -1);
	     hid_get_item(d, &h) > 0; ) {
		if ((h.flags & HIO_CONST) || (h.report_ID != report_id) ||
		    (h.kind != hid_input))
			continue;

		page = HID_PAGE(h.usage);
		val = hid_get_data(data, &h);

		/*
		 * When the input field is an array and the usage is specified
		 * with a range instead of an ID, we have to derive the actual
		 * usage by using the item value as an index in the usage range
		 * list.
		 */
		if ((h.flags & HIO_VARIABLE)) {
			usage = HID_USAGE(h.usage);
		} else {
			const uint32_t usage_offset = val - h.logical_minimum;
			usage = HID_USAGE(h.usage_minimum + usage_offset);
		}

		switch (page) {
		case HUP_GENERIC_DESKTOP:
			switch (usage) {
			case HUG_X:
				mouse_x = val;
				mevents ++;
				break;
			case HUG_Y:
				mouse_y = val;
				mevents ++;
				break;
			case HUG_WHEEL:
				/* Wheel sign is inverted relative to the
				 * console's Z axis convention. */
				mouse_z = -val;
				mevents ++;
				break;
			case HUG_SYSTEM_SLEEP:
				if (val)
					syslog(LOG_NOTICE, "Sleep button pressed");
				break;
			}
			break;

		case HUP_KEYBOARD:
			kevents ++;
			if (h.flags & HIO_VARIABLE) {
				/* Bitmap-style field: usage identifies the key. */
				if (val && usage < kbd_maxkey())
					bit_set(s->keys1, usage);
			} else {
				/* Array-style field: each element's value is a
				 * key code; scan the remaining elements too. */
				if (val && val < kbd_maxkey())
					bit_set(s->keys1, val);
				for (i = 1; i < h.report_count; i++) {
					h.pos += h.report_size;
					val = hid_get_data(data, &h);
					if (val && val < kbd_maxkey())
						bit_set(s->keys1, val);
				}
			}
			break;

		case HUP_BUTTON:
			if (usage != 0) {
				/* Swap buttons 2 and 3 (HID middle/right vs.
				 * console ordering). */
				if (usage == 2)
					usage = 3;
				else if (usage == 3)
					usage = 2;
				mouse_butt |= (val << (usage - 1));
				mevents ++;
			}
			break;

		case HUP_CONSUMER:
			if (!val)
				break;
			/* Translate consumer-page usages into AT scancodes
			 * (sent below with the 0xe0 extended prefix). */
			switch (usage) {
			case HUC_AC_PAN:
				/* Horizontal scroll: reported as virtual
				 * buttons 6/7 rather than a scancode. */
				if (val < 0)
					mouse_butt |= (1 << 5);
				else
					mouse_butt |= (1 << 6);
				mevents ++;
				val = 0;
				break;
			case 0xb5:	/* Scan Next Track */
				val = 0x19;
				break;
			case 0xb6:	/* Scan Previous Track */
				val = 0x10;
				break;
			case 0xb7:	/* Stop */
				val = 0x24;
				break;
			case 0xcd:	/* Play/Pause */
				val = 0x22;
				break;
			case 0xe2:	/* Mute */
				val = 0x20;
				break;
			case 0xe9:	/* Volume Up */
				val = 0x30;
				break;
			case 0xea:	/* Volume Down */
				val = 0x2E;
				break;
			case 0x183:	/* Media Select */
				val = 0x6D;
				break;
			case 0x018a:	/* Mail */
				val = 0x6C;
				break;
			case 0x192:	/* Calculator */
				val = 0x21;
				break;
			case 0x194:	/* My Computer */
				val = 0x6B;
				break;
			case 0x221:	/* WWW Search */
				val = 0x65;
				break;
			case 0x223:	/* WWW Home */
				val = 0x32;
				break;
			case 0x224:	/* WWW Back */
				val = 0x6A;
				break;
			case 0x225:	/* WWW Forward */
				val = 0x69;
				break;
			case 0x226:	/* WWW Stop */
				val = 0x68;
				break;
			case 0x227:	/* WWW Refresh */
				val = 0x67;
				break;
			case 0x22a:	/* WWW Favorites */
				val = 0x66;
				break;
			default:
				val = 0;
				break;
			}

			/* XXX FIXME - UGLY HACK: inject the translated
			 * make/break pair straight into the virtual kbd. */
			if (val != 0) {
				if (hid_device->keyboard) {
					int32_t buf[4] = { 0xe0, val, 0xe0, val|0x80 };

					assert(s->vkbd != -1);
					write(s->vkbd, buf, sizeof(buf));
				} else
					syslog(LOG_ERR, "Keyboard events "
						"received from non-keyboard "
						"device %s. Please report",
						bt_ntoa(&s->bdaddr, NULL));
			}
			break;

		case HUP_MICROSOFT:
			switch (usage) {
			case 0xfe01:
				/* Vendor battery status report. */
				if (!hid_device->battery_power)
					break;
				switch (val) {
				case 1:
					syslog(LOG_INFO, "Battery is OK on %s",
						bt_ntoa(&s->bdaddr, NULL));
					break;
				case 2:
					syslog(LOG_NOTICE, "Low battery on %s",
						bt_ntoa(&s->bdaddr, NULL));
					break;
				case 3:
					syslog(LOG_WARNING, "Very low battery "
						"on %s",
						bt_ntoa(&s->bdaddr, NULL));
					break;
				}
				break;
			}
			break;
		}
	}
	hid_end_parse(d);

	/*
	 * XXX FIXME Feed keyboard events into kernel.
	 * The code below works, but host also needs to track
	 * and handle repeat.
	 *
	 * Key repeat currently works in X, but not in console.
	 */
	if (kevents > 0) {
		if (hid_device->keyboard) {
			assert(s->vkbd != -1);
			kbd_process_keys(s);
		} else
			syslog(LOG_ERR, "Keyboard events received from "
				"non-keyboard device %s. Please report",
				bt_ntoa(&s->bdaddr, NULL));
	}

	/*
	 * XXX FIXME Feed mouse events into kernel.
	 * The code block below works, but it is not good enough.
	 * Need to track double-clicks etc.
	 *
	 * Double click currently works in X, but not in console.
	 */
	if (mevents > 0) {
		struct mouse_info mi;

		mi.operation = MOUSE_ACTION;
		mi.u.data.x = mouse_x;
		mi.u.data.y = mouse_y;
		mi.u.data.z = mouse_z;
		mi.u.data.buttons = mouse_butt;

		if (ioctl(s->srv->cons, CONS_MOUSECTL, &mi) < 0)
			syslog(LOG_ERR, "Could not process mouse events from "
				"%s. %s (%d)", bt_ntoa(&s->bdaddr, NULL),
				strerror(errno), errno);
	}

	return (0);
}
/*
 * SIGHUP handler: mark the runtime for configuration reload.  Only sets
 * a flag bit; the main loop is expected to notice and act on it.
 * NOTE(review): bit_set here is presumably a simple read-modify-write
 * macro on runi.flags — if so it is not atomic, so a concurrent writer
 * could race with this handler; confirm bit_set's definition.
 */
static void _sig_hup_ipc_handler(int n)
{
	bit_set(&runi.flags, USCHED_RUNTIME_FLAG_RELOAD);
}
/*
 * KADE controller firmware main loop (PS pad emulation).
 *
 * Reads pin-to-function assignments from EEPROM (slots 0-19 = direct,
 * 20-39 = shifted; slot 41 = power-up delay), then loops forever
 * sampling the input pins and pushing the resulting pad state out over
 * the SPI PS-pad emulator.  Shared logic (LED disco, output setup, pin
 * sampling, shift handling) is textually included from ..\shared\*.c.
 */
int main(void) {
	// Disable AVR internal Voltage Regulator
	REGCR |= (1 << REGDIS);

	// Set clock @ 8Mhz
	//CPU_PRESCALE(1);
	//Jon 17/10/2013, 16mhz seems to fix issues, works with psx/360 converters :D
	// Set clock @ 16Mhz
	CPU_PRESCALE(0);

	//Set initial pin states (inputs with pull-ups enabled).
	//These are adjusted based on eeprom settings.
	bit_clear(DDRD, 0x80); bit_set(PORTD, 0x80); //hwb
	bit_clear(DDRC, 0x04); bit_set(PORTC, 0x04); //a1
	bit_clear(DDRD, 0x01); bit_set(PORTD, 0x01); //a2
	bit_clear(DDRD, 0x02); bit_set(PORTD, 0x02); //a3
	bit_clear(DDRD, 0x04); bit_set(PORTD, 0x04); //a4
	bit_clear(DDRD, 0x08); bit_set(PORTD, 0x08); //a5
	bit_clear(DDRD, 0x10); bit_set(PORTD, 0x10); //a6
	bit_clear(DDRD, 0x20); bit_set(PORTD, 0x20); //a7
	bit_clear(DDRD, 0x40); bit_set(PORTD, 0x40); //a8
	bit_clear(DDRB, 0x20); bit_set(PORTB, 0x20); //b4
	bit_clear(DDRB, 0x40); bit_set(PORTB, 0x40); //b5
	bit_clear(DDRB, 0x80); bit_set(PORTB, 0x80); //b6
	bit_clear(DDRC, 0x80); bit_set(PORTC, 0x80); //b7
	bit_clear(DDRC, 0x40); bit_set(PORTC, 0x40); //b8
	bit_clear(DDRC, 0x20); bit_set(PORTC, 0x20); //b9
	bit_clear(DDRC, 0x10); bit_set(PORTC, 0x10); //b10 ***added

	//dpad and pad restrictions (current and previous direction states)
	uint8_t u=0, d=0, l=0, r=0, pu=0, pd=0, pl=0, pr=0;
	uint8_t dp_pp=0, dp_lp=0, set;
	//pin assignments and button states
	uint8_t pos=0, cnt=0;
	uint8_t ass[40], state[20];
	//handle shift and shift lock
	uint8_t shift=0, shift_last=0, shift_count=0, shift_lock=0;
	//handle settings
	uint8_t invert, restrict4;
	uint8_t setting_delay;
	uint8_t autofire, auto_toggle=1;
	uint8_t active=0, delay_power=0, led_active=0;
	int up, down, left, right, sqre, cross, circle, triangle, select, start, l1, r1, l2, r2, l3, r3, lx, ly, rx, ry;

	//Flash LEDs
	//Commented out for testing of XBOX 360 adapter!!
	#include "..\shared\disco.c"

	//read first 40 eeprom into an array (pins + shifted pins)
	for(cnt=0;cnt<40;cnt++){
		ass[cnt]=read_eeprom_byte(cnt);
		//set output pins (function 27 = LED, 28 = delayed power)
		if ((ass[cnt]==27)||(ass[cnt]==28)){
			#include "..\shared\outputs.c"
		}
		if (ass[cnt]==28){delay_power=1;}
	}

	//Read other settings (40+ in Eeprom)
	setting_delay=read_eeprom_byte(41);

	//wait for specified delay time (allow 2 secs for led flash)
	if ((delay_power==1)&&(setting_delay>0)){
		for(cnt=0;cnt<=setting_delay-3;cnt++){
			_delay_ms(1000);
		}
		active=1;
		for(cnt=0;cnt<40;cnt++){
			if (ass[cnt]==28){
				led_active = shift;
				#include "..\shared\showleds.c"
			}
		}
	}

	// Init PS Pad emulator SPI hardware
	pspad_init();

	for(;;) {
		//read KADE pin states into an array
		#include "..\shared\state.c"

		//set shifted status and detect shift lock (double click)
		#include "..\shared\shift.c"

		//set default button states (released / analog centered)
		up=0; down=0; left=0; right=0;
		sqre=0; cross=0; circle=0; triangle=0;
		select=0; start=0;
		l1=0; r1=0; l2=0; r2=0; l3=0; r3=0;
		// lx=0x7F;
		// ly=0x7F;
		// rx=0x7F;
		// ry=0x7F;
		lx=127; ly=127; rx=127; ry=127;
		invert=0; restrict4=0; autofire=0;

		//pre-loop to deal with any switches/toggles; remember previous
		//dpad state for the 4-way restriction logic
		pu=u; pd=d; pl=l; pr=r;
		u=0; d=0; l=0; r=0;
		for(cnt=0;cnt<20;cnt++) {
			if (!(state[cnt])) {
				//there is input on this pin, also account for shifted input
				pos = cnt;
				if (shift==1){pos=pos+20;}
				if ((ass[cnt]==22)||(ass[pos]==22)){invert=1;} //invert Y axis
				if ((ass[cnt]==21)||(ass[pos]==21)){restrict4=1;} //restrict dPad 4-way
				if ((ass[pos]>=23)&&(ass[pos]<=26)){autofire=1;} //autofire mode
				if (ass[pos]==1){u=1;}
				if (ass[pos]==2){d=1;}
				if (ass[pos]==3){l=1;}
				if (ass[pos]==4){r=1;}
			}
			//Show the shift status LED(s) and power up externals with delay
			if (ass[cnt]==27){
				active = shift;
				#include "..\shared\showleds.c"
			}
		}

		//autofire toggle
		//NOTE(review): auto_toggle is uint8_t, so "* -1" alternates
		//between 1 and 255 via unsigned wraparound; the ==1 test below
		//relies on that. Works, but worth confirming intent.
		if(autofire==1){
			auto_toggle = auto_toggle * -1;
		}

		//Handle dpad and restricted movement
		if(restrict4==0){
			if (u==1){up=1;}
			if (d==1){down=1;}
			if (l==1){left=1;}
			if (r==1){right=1;}
		} else {
			//Restrict Dpad to 4-way operation.
			//Movement is based on weighting of directions compared
			//to current position: prefer a change of axis first so
			//diagonals resolve predictably.
			set=0;
			if ((u==pu)&&(d==pd)&&(l==pl)&&(r==pr)){
				dp_pp = dp_lp;
			}
			dp_lp=dp_pp;
			if(dp_pp==1){
				if ((d==1)&&(set==0)){down=1; dp_pp=2; set=1;}
				if ((l==1)&&(set==0)){left=1; dp_pp=3; set=1;}
				if ((r==1)&&(set==0)){right=1; dp_pp=4; set=1;}
				if ((u==1)&&(set==0)){up=1; dp_pp=1; set=1;}
			}
			if(dp_pp==2){
				if ((u==1)&&(set==0)){up=1; dp_pp=1; set=1;}
				if ((l==1)&&(set==0)){left=1; dp_pp=3; set=1;}
				if ((r==1)&&(set==0)){right=1; dp_pp=4; set=1;}
				if ((d==1)&&(set==0)){down=1; dp_pp=2; set=1;}
			}
			if(dp_pp==3){
				if ((r==1)&&(set==0)){right=1; dp_pp=4; set=1;}
				if ((u==1)&&(set==0)){up=1; dp_pp=1; set=1;}
				if ((d==1)&&(set==0)){down=1; dp_pp=2; set=1;}
				if ((l==1)&&(set==0)){left=1; dp_pp=3; set=1;}
			}
			if((dp_pp==4)||(dp_pp==0)){
				if ((l==1)&&(set==0)){left=1; dp_pp=3; set=1;}
				if ((u==1)&&(set==0)){up=1; dp_pp=1; set=1;}
				if ((d==1)&&(set==0)){down=1; dp_pp=2; set=1;}
				if ((r==1)&&(set==0)){right=1; dp_pp=4; set=1;}
			}
		}

		//loop through pins checking for inputs from those that are
		//assigned a function
		for(cnt=0;cnt<20;cnt++) {
			pos=cnt;
			if (!(state[cnt])) {
				//there is input on this pin
				if (shift==1){pos=pos+20;} //+20 if this is shifted input
				if (ass[pos]>0) {
					//there is an assignment to a function
					//A, B, X, Y
					if (ass[pos]==5){sqre=1;}
					if (ass[pos]==6){cross=1;}
					if (ass[pos]==7){circle=1;}
					if (ass[pos]==8){triangle=1;}
					//Autofire
					if (autofire==1){
						if(auto_toggle==1){
							if (ass[pos]==23){sqre=1;}
							if (ass[pos]==24){cross=1;}
							if (ass[pos]==25){circle=1;}
							if (ass[pos]==26){triangle=1;}
						}
						_delay_ms(15);
					}
					//Triggers
					if (ass[pos]==9){l1=1;} //L1
					if (ass[pos]==10){r1=1;} //R1
					if (ass[pos]==11){l2=1;} //L2
					if (ass[pos]==12){r2=1;} //R2
					if (ass[pos]==15){l3=1;} //L3
					if (ass[pos]==16){r3=1;} //R3
					//Special
					if (ass[pos]==13){select=1;} //Select
					if (ass[pos]==14){start=1;} //Start
					//Left Analog Stick (invert swaps the up/down codes)
					if (ass[pos]==17+invert){ly = 0;} //Left Analog Up
					if (ass[pos]==18-invert){ly = 255;} //Left Analog Down
					if (ass[pos]==19){lx = 0;} //Left Analog Left
					if (ass[pos]==20){lx = 255;} //Left Analog Right
					//Right Analog Stick
					if (ass[pos]==29+invert){ry = 0;} //Right Analog Up
					if (ass[pos]==30-invert){ry = 255;} //Right Analog Down
					if (ass[pos]==31){rx = 0;} //Right Analog Left
					if (ass[pos]==32){rx = 255;} //Right Analog Right
					// //Enter programming mode
					// if (ass[pos]==33){Jump_To_Bootloader();} //Program mode
				}
			}
		}

		//push the assembled state to the PS pad emulator
		pspad_set_pad_state(left, right, up, down, sqre, triangle, circle, cross, select, start, l1, l2, r1, r2, l3, r3, lx, ly, rx, ry);

		_delay_ms(2); //debounce
	}
}
int main(int argc, char **argv) { int c; int count, waittime; int set_lun; int fd, retval; struct ctlstat_context ctx; /* default values */ retval = 0; waittime = 1; count = -1; memset(&ctx, 0, sizeof(ctx)); ctx.numdevs = 3; ctx.mode = CTLSTAT_MODE_STANDARD; ctx.flags |= CTLSTAT_FLAG_CPU; ctx.flags |= CTLSTAT_FLAG_FIRST_RUN; ctx.flags |= CTLSTAT_FLAG_HEADER; while ((c = getopt(argc, argv, ctlstat_opts)) != -1) { switch (c) { case 'C': ctx.flags &= ~CTLSTAT_FLAG_CPU; break; case 'c': count = atoi(optarg); break; case 'd': ctx.flags |= CTLSTAT_FLAG_DMA_TIME; break; case 'D': ctx.mode = CTLSTAT_MODE_DUMP; waittime = 30; break; case 'h': ctx.flags &= ~CTLSTAT_FLAG_HEADER; break; case 'j': ctx.mode = CTLSTAT_MODE_JSON; waittime = 30; break; case 'l': { int cur_lun; cur_lun = atoi(optarg); if (cur_lun > CTL_STAT_LUN_BITS) errx(1, "Invalid LUN number %d", cur_lun); bit_ffs(ctx.lun_mask, CTL_STAT_LUN_BITS, &set_lun); if (set_lun == -1) ctx.numdevs = 1; else ctx.numdevs++; bit_set(ctx.lun_mask, cur_lun); break; } case 'n': ctx.numdevs = atoi(optarg); break; case 't': ctx.flags |= CTLSTAT_FLAG_TOTALS; ctx.numdevs = 3; break; case 'w': waittime = atoi(optarg); break; default: retval = 1; usage(retval); exit(retval); break; } } bit_ffs(ctx.lun_mask, CTL_STAT_LUN_BITS, &set_lun); if ((F_TOTALS(&ctx)) && (set_lun != -1)) { errx(1, "Total Mode (-t) is incompatible with individual " "LUN mode (-l)"); } else if (set_lun == -1) { /* * Note that this just selects the first N LUNs to display, * but at this point we have no knoweledge of which LUN * numbers actually exist. So we may select LUNs that * aren't there. 
*/ bit_nset(ctx.lun_mask, 0, min(ctx.numdevs - 1, CTL_STAT_LUN_BITS - 1)); } if ((fd = open(CTL_DEFAULT_DEV, O_RDWR)) == -1) err(1, "cannot open %s", CTL_DEFAULT_DEV); for (;count != 0;) { ctx.tmp_lun_stats = ctx.prev_lun_stats; ctx.prev_lun_stats = ctx.cur_lun_stats; ctx.cur_lun_stats = ctx.tmp_lun_stats; ctx.prev_time = ctx.cur_time; ctx.prev_cpu = ctx.cur_cpu; if (getstats(fd, &ctx.num_luns, &ctx.cur_lun_stats, &ctx.cur_time, &ctx.flags) != 0) errx(1, "error returned from getstats()"); switch(ctx.mode) { case CTLSTAT_MODE_STANDARD: ctlstat_standard(&ctx); break; case CTLSTAT_MODE_DUMP: ctlstat_dump(&ctx); break; case CTLSTAT_MODE_JSON: ctlstat_json(&ctx); break; default: break; } fprintf(stdout, "\n"); ctx.flags &= ~CTLSTAT_FLAG_FIRST_RUN; if (count != 1) sleep(waittime); if (count > 0) count--; } exit (retval); }
void notify_all(nb_func_t func, nb_arg_t arg0, nb_arg_t arg1) { sn this = cpuno(); assert(irq_is_disabled()); if (unlikely(notify_intvec == 0)) { func(&cpu_vm[this], arg0, arg1); return; } note_t note; un count = 0; note.nb_func = func; note.nb_arg0 = arg0; note.nb_arg1 = arg1; note.nb_completion_count = 0; /* No need to list_init(¬e.nb_list[i]) because adding an element * to a list will overwrite the element's prev and next ptrs. */ un notify_mask = 0; for_each_active_cpu(i) { if (i == this) { continue; } vm_t *v = &cpu_vm[i]; nested_spinlock(&v->v_notes_lock); list_add_tail(&v->v_notes_list, ¬e.nb_list[i]); nested_spinunlock(&v->v_notes_lock); bit_set(notify_mask, i); apic_send_IPI(i, notify_intvec); count++; } func(&cpu_vm[this], arg0, arg1); fence(); u64 timeout = rdtsc() + TSC_TIMEOUT; while (volatile_read(¬e.nb_completion_count) < count) { /* Poll for incoming notifications */ notify_irq(0); if (rdtsc() > timeout) { kprintf("notify_all>TIMEOUT %u %ld\n", volatile_read(¬e.nb_completion_count), count /* , note.nb_ack_map */); for_each_cpu(i, notify_mask) { vm_t *v = &cpu_vm[i]; nested_spinlock(&v->v_notes_lock); /* If the element was already removed, this * is a no-op. */ list_remove(¬e.nb_list[i]); nested_spinunlock(&v->v_notes_lock); } return; }
/*
 * slurm_sprint_job_info - output information about a specific Slurm
 *	job based upon message as loaded using slurm_load_jobs
 * IN job_ptr - an individual job information record pointer
 * IN one_liner - print as a single line if true
 * RET out - char * containing formatted output (must be freed after call)
 *	NULL is returned on failure.
 */
extern char *
slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
{
	int i, j;
	char time_str[32], *group_name, *user_name;
	/* tmp1..tmp5 hold individually formatted fields; tmp6_ptr only
	 * borrows strings, it is never freed */
	char tmp1[128], tmp2[128], tmp3[128], tmp4[128], tmp5[128], *tmp6_ptr;
	char tmp_line[512];
	char *ionodes = NULL;
	uint16_t exit_status = 0, term_sig = 0;
	job_resources_t *job_resrcs = job_ptr->job_resrcs;
	char *out = NULL;
	time_t run_time;
	uint32_t min_nodes, max_nodes = 0;
	char *nodelist = "NodeList";
	bitstr_t *core_bitmap;
	char *host;
	int sock_inx, sock_reps, last;
	int abs_node_inx, rel_node_inx;
	int bit_inx, bit_reps;
	uint32_t *last_mem_alloc_ptr = NULL;
	uint32_t last_mem_alloc = NO_VAL;
	char *last_hosts;
	hostlist_t hl, hl_last;
	char select_buf[122];
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();

	/* BlueGene systems report midplanes (plus I/O nodes) rather than
	 * ordinary nodes */
	if (cluster_flags & CLUSTER_FLAG_BG) {
		nodelist = "MidplaneList";
		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
					    SELECT_JOBDATA_IONODES,
					    &ionodes);
	}

	/****** Line 1 ******/
	snprintf(tmp_line, sizeof(tmp_line), "JobId=%u ", job_ptr->job_id);
	out = xstrdup(tmp_line);
	if (job_ptr->array_job_id) {
		snprintf(tmp_line, sizeof(tmp_line),
			 "ArrayJobId=%u ArrayTaskId=%u ",
			 job_ptr->array_job_id, job_ptr->array_task_id);
		xstrcat(out, tmp_line);
	}
	snprintf(tmp_line, sizeof(tmp_line), "Name=%s", job_ptr->name);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 2 ******/
	user_name = uid_to_string((uid_t) job_ptr->user_id);
	group_name = gid_to_string((gid_t) job_ptr->group_id);
	snprintf(tmp_line, sizeof(tmp_line),
		 "UserId=%s(%u) GroupId=%s(%u)",
		 user_name, job_ptr->user_id, group_name, job_ptr->group_id);
	xfree(user_name);
	xfree(group_name);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 3 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		 "Priority=%u Account=%s QOS=%s",
		 job_ptr->priority, job_ptr->account, job_ptr->qos);
	xstrcat(out, tmp_line);
	if (slurm_get_track_wckey()) {
		snprintf(tmp_line, sizeof(tmp_line),
			 " WCKey=%s", job_ptr->wckey);
		xstrcat(out, tmp_line);
	}
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 4 ******/
	if (job_ptr->state_desc) {
		/* Replace white space with underscore for easier parsing */
		for (j=0; job_ptr->state_desc[j]; j++) {
			if (isspace((int)job_ptr->state_desc[j]))
				job_ptr->state_desc[j] = '_';
		}
		tmp6_ptr = job_ptr->state_desc;
	} else
		tmp6_ptr = job_reason_string(job_ptr->state_reason);
	snprintf(tmp_line, sizeof(tmp_line),
		 "JobState=%s Reason=%s Dependency=%s",
		 job_state_string(job_ptr->job_state),
		 tmp6_ptr, job_ptr->dependency);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 5 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		 "Requeue=%u Restarts=%u BatchFlag=%u ",
		 job_ptr->requeue, job_ptr->restart_cnt, job_ptr->batch_flag);
	xstrcat(out, tmp_line);
	if (WIFSIGNALED(job_ptr->exit_code))
		term_sig = WTERMSIG(job_ptr->exit_code);
	exit_status = WEXITSTATUS(job_ptr->exit_code);
	snprintf(tmp_line, sizeof(tmp_line),
		 "ExitCode=%u:%u", exit_status, term_sig);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 5a (optional) ******/
	if (!(job_ptr->show_flags & SHOW_DETAIL))
		goto line6;
	if (WIFSIGNALED(job_ptr->derived_ec))
		term_sig = WTERMSIG(job_ptr->derived_ec);
	else
		term_sig = 0;
	exit_status = WEXITSTATUS(job_ptr->derived_ec);
	snprintf(tmp_line, sizeof(tmp_line),
		 "DerivedExitCode=%u:%u", exit_status, term_sig);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 6 ******/
line6:
	snprintf(tmp_line, sizeof(tmp_line), "RunTime=");
	xstrcat(out, tmp_line);
	if (IS_JOB_PENDING(job_ptr))
		run_time = 0;
	else if (IS_JOB_SUSPENDED(job_ptr))
		run_time = job_ptr->pre_sus_time;
	else {
		time_t end_time;
		if (IS_JOB_RUNNING(job_ptr) || (job_ptr->end_time == 0))
			end_time = time(NULL);
		else
			end_time = job_ptr->end_time;
		if (job_ptr->suspend_time) {
			run_time = (time_t)
				(difftime(end_time, job_ptr->suspend_time)
				 + job_ptr->pre_sus_time);
		} else
			run_time = (time_t)
				difftime(end_time, job_ptr->start_time);
	}
	secs2time_str(run_time, tmp1, sizeof(tmp1));
	sprintf(tmp_line, "%s ", tmp1);
	xstrcat(out, tmp_line);

	snprintf(tmp_line, sizeof(tmp_line), "TimeLimit=");
	xstrcat(out, tmp_line);
	if (job_ptr->time_limit == NO_VAL)
		sprintf(tmp_line, "Partition_Limit");
	else {
		mins2time_str(job_ptr->time_limit, tmp_line,
			      sizeof(tmp_line));
	}
	xstrcat(out, tmp_line);
	snprintf(tmp_line, sizeof(tmp_line), " TimeMin=");
	xstrcat(out, tmp_line);
	if (job_ptr->time_min == 0)
		sprintf(tmp_line, "N/A");
	else {
		mins2time_str(job_ptr->time_min, tmp_line,
			      sizeof(tmp_line));
	}
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 7 ******/
	slurm_make_time_str((time_t *)&job_ptr->submit_time, time_str,
			    sizeof(time_str));
	snprintf(tmp_line, sizeof(tmp_line), "SubmitTime=%s ", time_str);
	xstrcat(out, tmp_line);
	slurm_make_time_str((time_t *)&job_ptr->eligible_time, time_str,
			    sizeof(time_str));
	snprintf(tmp_line, sizeof(tmp_line), "EligibleTime=%s", time_str);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 8 (optional) ******/
	if (job_ptr->resize_time) {
		slurm_make_time_str((time_t *)&job_ptr->resize_time, time_str,
				    sizeof(time_str));
		snprintf(tmp_line, sizeof(tmp_line), "ResizeTime=%s",
			 time_str);
		xstrcat(out, tmp_line);
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n ");
	}

	/****** Line 9 ******/
	slurm_make_time_str((time_t *)&job_ptr->start_time, time_str,
			    sizeof(time_str));
	snprintf(tmp_line, sizeof(tmp_line), "StartTime=%s ", time_str);
	xstrcat(out, tmp_line);
	snprintf(tmp_line, sizeof(tmp_line), "EndTime=");
	xstrcat(out, tmp_line);
	if ((job_ptr->time_limit == INFINITE) &&
	    (job_ptr->end_time > time(NULL)))
		sprintf(tmp_line, "Unknown");
	else {
		slurm_make_time_str ((time_t *)&job_ptr->end_time, time_str,
				     sizeof(time_str));
		sprintf(tmp_line, "%s", time_str);
	}
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 10 ******/
	if (job_ptr->preempt_time == 0)
		sprintf(tmp_line, "PreemptTime=None ");
	else {
		slurm_make_time_str((time_t *)&job_ptr->preempt_time,
				    time_str, sizeof(time_str));
		snprintf(tmp_line, sizeof(tmp_line), "PreemptTime=%s ",
			 time_str);
	}
	xstrcat(out, tmp_line);
	if (job_ptr->suspend_time) {
		slurm_make_time_str ((time_t *)&job_ptr->suspend_time,
				     time_str, sizeof(time_str));
	} else {
		strncpy(time_str, "None", sizeof(time_str));
	}
	snprintf(tmp_line, sizeof(tmp_line),
		 "SuspendTime=%s SecsPreSuspend=%ld",
		 time_str, (long int)job_ptr->pre_sus_time);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 11 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		 "Partition=%s AllocNode:Sid=%s:%u",
		 job_ptr->partition, job_ptr->alloc_node, job_ptr->alloc_sid);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 12 ******/
	snprintf(tmp_line, sizeof(tmp_line), "Req%s=%s Exc%s=%s",
		 nodelist, job_ptr->req_nodes, nodelist, job_ptr->exc_nodes);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 13 ******/
	xstrfmtcat(out, "%s=", nodelist);
	xstrcat(out, job_ptr->nodes);
	if (job_ptr->nodes && ionodes) {
		snprintf(tmp_line, sizeof(tmp_line), "[%s]", ionodes);
		xstrcat(out, tmp_line);
		xfree(ionodes);
	}
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 14 (optional) ******/
	if (job_ptr->batch_host) {
		snprintf(tmp_line, sizeof(tmp_line), "BatchHost=%s",
			 job_ptr->batch_host);
		xstrcat(out, tmp_line);
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n ");
	}

	/****** Line 15 ******/
	if (cluster_flags & CLUSTER_FLAG_BG) {
		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
					    SELECT_JOBDATA_NODE_CNT,
					    &min_nodes);
		if ((min_nodes == 0) || (min_nodes == NO_VAL)) {
			min_nodes = job_ptr->num_nodes;
			max_nodes = job_ptr->max_nodes;
		} else if (job_ptr->max_nodes)
			max_nodes = min_nodes;
	} else {
		min_nodes = job_ptr->num_nodes;
		max_nodes = job_ptr->max_nodes;
	}

	_sprint_range(tmp1, sizeof(tmp1), job_ptr->num_cpus,
		      job_ptr->max_cpus);
	_sprint_range(tmp2, sizeof(tmp2), min_nodes, max_nodes);
	/* (uint16_t) NO_VAL means "unconstrained"; print "*" */
	if (job_ptr->sockets_per_node == (uint16_t) NO_VAL)
		strcpy(tmp3, "*");
	else
		snprintf(tmp3, sizeof(tmp3), "%u", job_ptr->sockets_per_node);
	if (job_ptr->cores_per_socket == (uint16_t) NO_VAL)
		strcpy(tmp4, "*");
	else
		snprintf(tmp4, sizeof(tmp4), "%u", job_ptr->cores_per_socket);
	if (job_ptr->threads_per_core == (uint16_t) NO_VAL)
		strcpy(tmp5, "*");
	else
		snprintf(tmp5, sizeof(tmp5), "%u", job_ptr->threads_per_core);
	snprintf(tmp_line, sizeof(tmp_line),
		 "NumNodes=%s NumCPUs=%s CPUs/Task=%u ReqS:C:T=%s:%s:%s",
		 tmp2, tmp1, job_ptr->cpus_per_task, tmp3, tmp4, tmp5);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/* Per-node CPU/memory allocation detail follows; skip entirely if
	 * the controller supplied no job_resources structure */
	if (!job_resrcs)
		goto line15;

	if (cluster_flags & CLUSTER_FLAG_BG) {
		if ((job_resrcs->cpu_array_cnt > 0) &&
		    (job_resrcs->cpu_array_value) &&
		    (job_resrcs->cpu_array_reps)) {
			int length = 0;
			xstrcat(out, "CPUs=");
			length += 10;
			for (i = 0; i < job_resrcs->cpu_array_cnt; i++) {
				if (length > 70) {
					/* skip to last CPU group entry */
					if (i < job_resrcs->cpu_array_cnt - 1) {
						continue;
					}
					/* add ellipsis before last entry */
					xstrcat(out, "...,");
					length += 4;
				}
				snprintf(tmp_line, sizeof(tmp_line), "%d",
					 job_resrcs->cpus[i]);
				xstrcat(out, tmp_line);
				length += strlen(tmp_line);
				if (job_resrcs->cpu_array_reps[i] > 1) {
					snprintf(tmp_line, sizeof(tmp_line),
						 "*%d",
						 job_resrcs->cpu_array_reps[i]);
					xstrcat(out, tmp_line);
					length += strlen(tmp_line);
				}
				if (i < job_resrcs->cpu_array_cnt - 1) {
					xstrcat(out, ",");
					length++;
				}
			}
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
		}
	} else {
		if (!job_resrcs->core_bitmap)
			goto line15;

		last = bit_fls(job_resrcs->core_bitmap);
		if (last == -1)
			goto line15;

		hl = hostlist_create(job_ptr->nodes);
		if (!hl) {
			error("slurm_sprint_job_info: hostlist_create: %s",
			      job_ptr->nodes);
			return NULL;
		}
		hl_last = hostlist_create(NULL);
		if (!hl_last) {
			error("slurm_sprint_job_info: hostlist_create: NULL");
			hostlist_destroy(hl);
			return NULL;
		}

		bit_inx = 0;
		i = sock_inx = sock_reps = 0;
		abs_node_inx = job_ptr->node_inx[i];

		/* tmp1[] stores the current cpu(s) allocated */
		tmp2[0] = '\0';	/* stores last cpu(s) allocated */
		for (rel_node_inx=0; rel_node_inx < job_resrcs->nhosts;
		     rel_node_inx++) {

			if (sock_reps >=
			    job_resrcs->sock_core_rep_count[sock_inx]) {
				sock_inx++;
				sock_reps = 0;
			}
			sock_reps++;

			bit_reps = job_resrcs->sockets_per_node[sock_inx] *
				   job_resrcs->cores_per_socket[sock_inx];

			/* Extract this node's slice of the job-wide core
			 * bitmap for formatting */
			core_bitmap = bit_alloc(bit_reps);
			for (j=0; j < bit_reps; j++) {
				if (bit_test(job_resrcs->core_bitmap, bit_inx))
					bit_set(core_bitmap, j);
				bit_inx++;
			}

			bit_fmt(tmp1, sizeof(tmp1), core_bitmap);
			FREE_NULL_BITMAP(core_bitmap);
			host = hostlist_shift(hl);

			/*
			 * If the allocation values for this host are not the same as the
			 * last host, print the report of the last group of hosts that had
			 * identical allocation values.
			 */
			if (strcmp(tmp1, tmp2) ||
			    (last_mem_alloc_ptr != job_resrcs->memory_allocated) ||
			    (job_resrcs->memory_allocated &&
			     (last_mem_alloc !=
			      job_resrcs->memory_allocated[rel_node_inx]))) {
				if (hostlist_count(hl_last)) {
					last_hosts =
						hostlist_ranged_string_xmalloc(
						hl_last);
					snprintf(tmp_line, sizeof(tmp_line),
						 " Nodes=%s CPU_IDs=%s Mem=%u",
						 last_hosts, tmp2,
						 last_mem_alloc_ptr ?
						 last_mem_alloc : 0);
					xfree(last_hosts);
					xstrcat(out, tmp_line);
					if (one_liner)
						xstrcat(out, " ");
					else
						xstrcat(out, "\n ");

					hostlist_destroy(hl_last);
					hl_last = hostlist_create(NULL);
				}
				strcpy(tmp2, tmp1);
				last_mem_alloc_ptr = job_resrcs->memory_allocated;
				if (last_mem_alloc_ptr)
					last_mem_alloc = job_resrcs->
						memory_allocated[rel_node_inx];
				else
					last_mem_alloc = NO_VAL;
			}
			hostlist_push_host(hl_last, host);
			free(host);

			if (bit_inx > last)
				break;

			if (abs_node_inx > job_ptr->node_inx[i+1]) {
				i += 2;
				abs_node_inx = job_ptr->node_inx[i];
			} else {
				abs_node_inx++;
			}
		}

		/* Flush the final group of identical hosts */
		if (hostlist_count(hl_last)) {
			last_hosts = hostlist_ranged_string_xmalloc(hl_last);
			snprintf(tmp_line, sizeof(tmp_line),
				 " Nodes=%s CPU_IDs=%s Mem=%u",
				 last_hosts, tmp2,
				 last_mem_alloc_ptr ? last_mem_alloc : 0);
			xfree(last_hosts);
			xstrcat(out, tmp_line);
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
		}
		hostlist_destroy(hl);
		hostlist_destroy(hl_last);
	}
	/****** Line 15 ******/
line15:
	if (job_ptr->pn_min_memory & MEM_PER_CPU) {
		job_ptr->pn_min_memory &= (~MEM_PER_CPU);
		tmp6_ptr = "CPU";
	} else
		tmp6_ptr = "Node";

	if (cluster_flags & CLUSTER_FLAG_BG) {
		convert_num_unit((float)job_ptr->pn_min_cpus,
				 tmp1, sizeof(tmp1), UNIT_NONE);
		snprintf(tmp_line, sizeof(tmp_line), "MinCPUsNode=%s", tmp1);
	} else {
		snprintf(tmp_line, sizeof(tmp_line), "MinCPUsNode=%u",
			 job_ptr->pn_min_cpus);
	}
	xstrcat(out, tmp_line);

	convert_num_unit((float)job_ptr->pn_min_memory, tmp1, sizeof(tmp1),
			 UNIT_MEGA);
	convert_num_unit((float)job_ptr->pn_min_tmp_disk, tmp2, sizeof(tmp2),
			 UNIT_MEGA);
	snprintf(tmp_line, sizeof(tmp_line),
		 " MinMemory%s=%s MinTmpDiskNode=%s",
		 tmp6_ptr, tmp1, tmp2);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 16 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		 "Features=%s Gres=%s Reservation=%s",
		 job_ptr->features, job_ptr->gres, job_ptr->resv_name);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 17 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		 "Shared=%s Contiguous=%d Licenses=%s Network=%s",
		 (job_ptr->shared == 0 ? "0" :
		  job_ptr->shared == 1 ? "1" : "OK"),
		 job_ptr->contiguous, job_ptr->licenses, job_ptr->network);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 18 ******/
	snprintf(tmp_line, sizeof(tmp_line), "Command=%s",
		 job_ptr->command);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n ");

	/****** Line 19 ******/
	snprintf(tmp_line, sizeof(tmp_line), "WorkDir=%s",
		 job_ptr->work_dir);
	xstrcat(out, tmp_line);

	if (cluster_flags & CLUSTER_FLAG_BG) {
		/****** Line 20 (optional) ******/
		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
					       select_buf, sizeof(select_buf),
					       SELECT_PRINT_BG_ID);
		if (select_buf[0] != '\0') {
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
			snprintf(tmp_line, sizeof(tmp_line),
				 "Block_ID=%s", select_buf);
			xstrcat(out, tmp_line);
		}

		/****** Line 21 (optional) ******/
		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
					       select_buf, sizeof(select_buf),
					       SELECT_PRINT_MIXED_SHORT);
		if (select_buf[0] != '\0') {
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
			xstrcat(out, select_buf);
		}

		if (cluster_flags & CLUSTER_FLAG_BGL) {
			/****** Line 22 (optional) ******/
			select_g_select_jobinfo_sprint(
				job_ptr->select_jobinfo,
				select_buf, sizeof(select_buf),
				SELECT_PRINT_BLRTS_IMAGE);
			if (select_buf[0] != '\0') {
				if (one_liner)
					xstrcat(out, " ");
				else
					xstrcat(out, "\n ");
				snprintf(tmp_line, sizeof(tmp_line),
					 "BlrtsImage=%s", select_buf);
				xstrcat(out, tmp_line);
			}
		}
		/****** Line 23 (optional) ******/
		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
					       select_buf, sizeof(select_buf),
					       SELECT_PRINT_LINUX_IMAGE);
		if (select_buf[0] != '\0') {
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
			if (cluster_flags & CLUSTER_FLAG_BGL)
				snprintf(tmp_line, sizeof(tmp_line),
					 "LinuxImage=%s", select_buf);
			else
				snprintf(tmp_line, sizeof(tmp_line),
					 "CnloadImage=%s", select_buf);
			xstrcat(out, tmp_line);
		}
		/****** Line 24 (optional) ******/
		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
					       select_buf, sizeof(select_buf),
					       SELECT_PRINT_MLOADER_IMAGE);
		if (select_buf[0] != '\0') {
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
			snprintf(tmp_line, sizeof(tmp_line),
				 "MloaderImage=%s", select_buf);
			xstrcat(out, tmp_line);
		}
		/****** Line 25 (optional) ******/
		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
					       select_buf, sizeof(select_buf),
					       SELECT_PRINT_RAMDISK_IMAGE);
		if (select_buf[0] != '\0') {
			if (one_liner)
				xstrcat(out, " ");
			else
				xstrcat(out, "\n ");
			if (cluster_flags & CLUSTER_FLAG_BGL)
				snprintf(tmp_line, sizeof(tmp_line),
					 "RamDiskImage=%s", select_buf);
			else
				snprintf(tmp_line, sizeof(tmp_line),
					 "IoloadImage=%s", select_buf);
			xstrcat(out, tmp_line);
		}
	}

	/****** Line 26 (optional) ******/
	if (job_ptr->comment) {
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n ");
		snprintf(tmp_line, sizeof(tmp_line), "Comment=%s ",
			 job_ptr->comment);
		xstrcat(out, tmp_line);
	}

	/****** Line 27 (optional) ******/
	if (job_ptr->batch_script) {
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n ");
		xstrcat(out, "BatchScript=\n");
		xstrcat(out, job_ptr->batch_script);
	}

	/****** Line 28 (optional) ******/
	if (job_ptr->req_switch) {
		char time_buf[32];
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n ");
		secs2time_str((time_t) job_ptr->wait4switch, time_buf,
			      sizeof(time_buf));
		snprintf(tmp_line, sizeof(tmp_line), "Switches=%u@%s\n",
			 job_ptr->req_switch, time_buf);
		xstrcat(out, tmp_line);
	}

	/****** Line 29 (optional) ******/
	if (one_liner)
		xstrcat(out, "\n");
	else
		xstrcat(out, "\n\n");

	return out;
}
/* - determine the number of windows in a window_sequence named num_windows
   - determine the number of window_groups named num_window_groups
   - determine the number of windows in each group named window_group_length[g]
   - determine the total number of scalefactor window bands named num_swb for
     the actual window type
   - determine swb_offset[swb], the offset of the first coefficient in
     scalefactor window band named swb of the window actually used
   - determine sect_sfb_offset[g][section],the offset of the first coefficient
     in section named section. This offset depends on window_sequence and
     scale_factor_grouping and is needed to decode the spectral_data().
*/
uint8_t window_grouping_info(NeAACDecHandle hDecoder, ic_stream *ics)
{
    uint8_t i, g;

    /* sample-rate index selects the proper scalefactor-band tables */
    uint8_t sf_index = hDecoder->sf_index;

    switch (ics->window_sequence) {
    case ONLY_LONG_SEQUENCE:
    case LONG_START_SEQUENCE:
    case LONG_STOP_SEQUENCE:
        /* long blocks: a single window forming a single group */
        ics->num_windows = 1;
        ics->num_window_groups = 1;
        ics->window_group_length[ics->num_window_groups-1] = 1;
#ifdef LD_DEC
        if (hDecoder->object_type == LD)
        {
            if (hDecoder->frameLength == 512)
                ics->num_swb = num_swb_512_window[sf_index];
            else /* if (hDecoder->frameLength == 480) */
                ics->num_swb = num_swb_480_window[sf_index];
        } else {
#endif
            if (hDecoder->frameLength == 1024)
                ics->num_swb = num_swb_1024_window[sf_index];
            else /* if (hDecoder->frameLength == 960) */
                ics->num_swb = num_swb_960_window[sf_index];
#ifdef LD_DEC
        }
#endif

        /* preparation of sect_sfb_offset for long blocks */
        /* also copy the last value! */
#ifdef LD_DEC
        if (hDecoder->object_type == LD)
        {
            if (hDecoder->frameLength == 512)
            {
                for (i = 0; i < ics->num_swb; i++)
                {
                    ics->sect_sfb_offset[0][i] = swb_offset_512_window[sf_index][i];
                    ics->swb_offset[i] = swb_offset_512_window[sf_index][i];
                }
            } else /* if (hDecoder->frameLength == 480) */ {
                for (i = 0; i < ics->num_swb; i++)
                {
                    ics->sect_sfb_offset[0][i] = swb_offset_480_window[sf_index][i];
                    ics->swb_offset[i] = swb_offset_480_window[sf_index][i];
                }
            }
            /* sentinel entry: the band after the last ends at frameLength */
            ics->sect_sfb_offset[0][ics->num_swb] = hDecoder->frameLength;
            ics->swb_offset[ics->num_swb] = hDecoder->frameLength;
        } else {
#endif
            for (i = 0; i < ics->num_swb; i++)
            {
                ics->sect_sfb_offset[0][i] = swb_offset_1024_window[sf_index][i];
                ics->swb_offset[i] = swb_offset_1024_window[sf_index][i];
            }
            /* sentinel entry: the band after the last ends at frameLength */
            ics->sect_sfb_offset[0][ics->num_swb] = hDecoder->frameLength;
            ics->swb_offset[ics->num_swb] = hDecoder->frameLength;
#ifdef LD_DEC
        }
#endif
        return 0;
    case EIGHT_SHORT_SEQUENCE:
        /* short blocks: 8 windows, grouped by scale_factor_grouping bits */
        ics->num_windows = 8;
        ics->num_window_groups = 1;
        ics->window_group_length[ics->num_window_groups-1] = 1;
        ics->num_swb = num_swb_128_window[sf_index];

        for (i = 0; i < ics->num_swb; i++)
            ics->swb_offset[i] = swb_offset_128_window[sf_index][i];
        ics->swb_offset[ics->num_swb] = hDecoder->frameLength/8;

        for (i = 0; i < ics->num_windows-1; i++) {
            /* bit_set() appears to act as a bit test here: a clear
             * grouping bit (6-i) starts a new window group, a set bit
             * extends the current one -- confirm against the bit_set
             * macro definition */
            if (bit_set(ics->scale_factor_grouping, 6-i) == 0)
            {
                ics->num_window_groups += 1;
                ics->window_group_length[ics->num_window_groups-1] = 1;
            } else {
                ics->window_group_length[ics->num_window_groups-1] += 1;
            }
        }

        /* preparation of sect_sfb_offset for short blocks */
        for (g = 0; g < ics->num_window_groups; g++)
        {
            uint16_t width;
            uint8_t sect_sfb = 0;
            uint16_t offset = 0;

            for (i = 0; i < ics->num_swb; i++)
            {
                if (i+1 == ics->num_swb)
                {
                    /* last band extends to the end of the short window */
                    width = (hDecoder->frameLength/8) - swb_offset_128_window[sf_index][i];
                } else {
                    width = swb_offset_128_window[sf_index][i+1] - swb_offset_128_window[sf_index][i];
                }
                /* each group spans window_group_length[g] windows */
                width *= ics->window_group_length[g];
                ics->sect_sfb_offset[g][sect_sfb++] = offset;
                offset += width;
            }
            ics->sect_sfb_offset[g][sect_sfb] = offset;
        }
        return 0;
    default:
        /* unknown window_sequence value */
        return 1;
    }
}
//toggle en static void LCDtoggleEn() { bit_set(LCD_EN_PORT, BIT(LCD_EN_BIT)); _delay_us(LCD_EN_DELAY); bit_clear(LCD_EN_PORT, BIT(LCD_EN_BIT)); }
/*
 * _task_layout_lllp_cyclic
 *
 * task_layout_lllp_cyclic creates a cyclic distribution at the
 * lowest level of logical processor which is either socket, core or
 * thread depending on the system architecture. The Cyclic algorithm
 * is the same as the Cyclic distribution performed in srun.
 *
 *  Distribution at the lllp:
 *  -m hostfile|block|cyclic:block|cyclic
 *
 * The first distribution "hostfile|block|cyclic" is computed
 * in srun. The second distribution "block|cyclic" is computed
 * locally by each slurmd.
 *
 * The input to the lllp distribution algorithms is the gids (tasks
 * ids) generated for the local node.
 *
 * The output is a mapping of the gids onto logical processors
 * (thread/core/socket) with is expressed cpu_bind masks.
 *
 * If a task asks for more than one CPU per task, put the tasks as
 * close as possible (fill core rather than going next socket for the
 * extra task)
 *
 * IN req      - launch request with task counts and binding flags
 * IN node_id  - this node's index within the job allocation
 * OUT masks_p - xmalloc'd array of max_tasks abstract CPU masks
 *               (caller owns the array and each bitmap)
 * RET SLURM_SUCCESS or SLURM_ERROR
 */
static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
				    uint32_t node_id, bitstr_t ***masks_p)
{
	int last_taskcount = -1, taskcount = 0;
	uint16_t i, s, hw_sockets = 0, hw_cores = 0, hw_threads = 0;
	uint16_t offset = 0, p = 0;
	int size, max_tasks = req->tasks_to_launch[(int)node_id];
	int max_cpus = max_tasks * req->cpus_per_task;
	bitstr_t *avail_map;
	bitstr_t **masks = NULL;
	int *socket_last_pu = NULL;
	int core_inx, pu_per_core, *core_tasks = NULL;

	info ("_task_layout_lllp_cyclic ");

	avail_map = _get_avail_map(req, &hw_sockets, &hw_cores, &hw_threads);
	if (!avail_map)
		return SLURM_ERROR;

	size = bit_set_count(avail_map);
	if (size < max_tasks) {
		error("task/affinity: only %d bits in avail_map for %d tasks!",
		      size, max_tasks);
		FREE_NULL_BITMAP(avail_map);
		return SLURM_ERROR;
	}
	if (size < max_cpus) {
		/* Possible result of overcommit */
		i = size / max_tasks;
		info("task/affinity: reset cpus_per_task from %d to %d",
		     req->cpus_per_task, i);
		req->cpus_per_task = i;
	}

	pu_per_core = hw_threads;
	core_tasks = xmalloc(sizeof(int) * hw_sockets * hw_cores);
	/* socket_last_pu[s] = next processing unit to try on socket s */
	socket_last_pu = xmalloc(hw_sockets * sizeof(int));

	*masks_p = xmalloc(max_tasks * sizeof(bitstr_t*));
	masks = *masks_p;

	size = bit_size(avail_map);

	offset = hw_cores * hw_threads;	/* PUs per socket */
	s = 0;
	while (taskcount < max_tasks) {
		/* If a full pass over the map places no new task we would
		 * loop forever; fail hard instead */
		if (taskcount == last_taskcount)
			fatal("_task_layout_lllp_cyclic failure");
		last_taskcount = taskcount;
		for (i = 0; i < size; i++) {
			bool already_switched = false;
			uint16_t bit;
			uint16_t orig_s = s;

			while (socket_last_pu[s] >= offset) {
				/* Switch to the next socket we have
				 * ran out here. */

				/* This only happens if the slurmctld
				 * gave us an allocation that made a
				 * task split sockets.  Or if the
				 * entire allocation is on one socket.
				 */
				s = (s + 1) % hw_sockets;
				if (orig_s == s) {
					/* This should rarely happen,
					 * but is here for sanity sake.
					 */
					debug("allocation is full, "
					      "oversubscribing");
					memset(core_tasks, 0,
					       (sizeof(int) *
						hw_sockets * hw_cores));
					/* BUG FIX: this previously passed
					 * sizeof(hw_sockets * sizeof(int)),
					 * i.e. sizeof(size_t), which zeroed
					 * only the first few bytes of the
					 * array; clear the whole per-socket
					 * cursor array. */
					memset(socket_last_pu, 0,
					       hw_sockets * sizeof(int));
				}
			}

			bit = socket_last_pu[s] + (s * offset);

			/* In case hardware and config differ */
			bit %= size;

			/* set up for the next one */
			socket_last_pu[s]++;
			/* skip unrequested threads */
			if (req->cpu_bind_type & CPU_BIND_ONE_THREAD_PER_CORE)
				socket_last_pu[s] += hw_threads - 1;

			if (!bit_test(avail_map, bit))
				continue;

			core_inx = bit / pu_per_core;
			if ((req->ntasks_per_core != 0) &&
			    (core_tasks[core_inx] >= req->ntasks_per_core))
				continue;

			core_tasks[core_inx]++;

			if (!masks[taskcount])
				masks[taskcount] =
					bit_alloc(conf->block_map_size);

			//info("setting %d %d", taskcount, bit);
			bit_set(masks[taskcount], bit);

			if (!already_switched &&
			    (((req->task_dist & SLURM_DIST_STATE_BASE) ==
			      SLURM_DIST_CYCLIC_CFULL) ||
			     ((req->task_dist & SLURM_DIST_STATE_BASE) ==
			      SLURM_DIST_BLOCK_CFULL))) {
				/* This means we are laying out cpus
				 * within a task cyclically as well. */
				s = (s + 1) % hw_sockets;
				already_switched = true;
			}

			if (++p < req->cpus_per_task)
				continue;

			/* Binding to cores, skip remaining of the threads */
			if (!(req->cpu_bind_type &
			      CPU_BIND_ONE_THREAD_PER_CORE) &&
			    ((req->cpu_bind_type & CPU_BIND_TO_CORES) ||
			     (req->ntasks_per_core == 1))) {
				int threads_not_used;
				if (req->cpus_per_task < hw_threads)
					threads_not_used =
						hw_threads -
						req->cpus_per_task;
				else
					threads_not_used =
						req->cpus_per_task %
						hw_threads;
				socket_last_pu[s] += threads_not_used;
			}
			p = 0;

			if (!already_switched) {
				/* Now that we have finished a task, switch to
				 * the next socket. */
				s = (s + 1) % hw_sockets;
			}

			if (++taskcount >= max_tasks)
				break;
		}
	}

	/* last step: expand the masks to bind each task
	 * to the requested resource */
	_expand_masks(req->cpu_bind_type, max_tasks, masks,
		      hw_sockets, hw_cores, hw_threads, avail_map);
	FREE_NULL_BITMAP(avail_map);
	xfree(core_tasks);
	xfree(socket_last_pu);

	return SLURM_SUCCESS;
}
/*
 * Given a job step request, return an equivalent local bitmap for this node
 * IN req - The job step launch request
 * OUT hw_sockets - number of actual sockets on this node
 * OUT hw_cores - number of actual cores per socket on this node
 * OUT hw_threads - number of actual threads per core on this node
 * RET: bitmap of processors available to this job step on this node
 *	OR NULL on error
 */
static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
				uint16_t *hw_sockets, uint16_t *hw_cores,
				uint16_t *hw_threads)
{
	bitstr_t *req_map, *hw_map;
	slurm_cred_arg_t arg;
	uint16_t p, t, new_p, num_cpus, sockets, cores;
	int job_node_id;
	int start;
	char *str;
	int spec_thread_cnt = 0;

	/* Report this node's actual hardware geometry to the caller */
	*hw_sockets = conf->sockets;
	*hw_cores   = conf->cores;
	*hw_threads = conf->threads;

	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) {
		error("task/affinity: job lacks a credential");
		return NULL;
	}

	/* we need this node's ID in relation to the whole
	 * job allocation, not just this jobstep */
	job_node_id = nodelist_find(arg.job_hostlist, conf->node_name);
	/* start = offset of this node's first core in the job-wide
	 * step_core_bitmap */
	start = _get_local_node_info(&arg, job_node_id, &sockets, &cores);
	if (start < 0) {
		error("task/affinity: missing node %d in job credential",
		      job_node_id);
		slurm_cred_free_args(&arg);
		return NULL;
	}
	debug3("task/affinity: slurmctld s %u c %u; hw s %u c %u t %u",
	       sockets, cores, *hw_sockets, *hw_cores, *hw_threads);

	num_cpus = MIN((sockets * cores), ((*hw_sockets)*(*hw_cores)));
	req_map = (bitstr_t *) bit_alloc(num_cpus);
	hw_map  = (bitstr_t *) bit_alloc(conf->block_map_size);

	/* Transfer core_bitmap data to local req_map.
	 * The MOD function handles the case where fewer processes
	 * physically exist than are configured (slurmd is out of
	 * sync with the slurmctld daemon). */
	for (p = 0; p < (sockets * cores); p++) {
		if (bit_test(arg.step_core_bitmap, start+p))
			bit_set(req_map, (p % num_cpus));
	}

	str = (char *)bit_fmt_hexmask(req_map);
	debug3("task/affinity: job %u.%u core mask from slurmctld: %s",
	       req->job_id, req->job_step_id, str);
	xfree(str);

	for (p = 0; p < num_cpus; p++) {
		if (bit_test(req_map, p) == 0)
			continue;
		/* If we are pretending we have a larger system than
		   we really have this is needed to make sure we
		   don't bust the bank. */
		new_p = p % conf->block_map_size;
		/* core_bitmap does not include threads, so we
		 * add them here but limit them to what the job
		 * requested */
		for (t = 0; t < (*hw_threads); t++) {
			uint16_t bit = new_p * (*hw_threads) + t;
			bit %= conf->block_map_size;
			bit_set(hw_map, bit);
		}
	}

	/* job_core_spec with the CORE_SPEC_THREAD flag (but not equal to
	 * the bare flag) encodes a count of specialized threads to reserve */
	if ((req->job_core_spec != (uint16_t) NO_VAL) &&
	    (req->job_core_spec & CORE_SPEC_THREAD)  &&
	    (req->job_core_spec != CORE_SPEC_THREAD)) {
		spec_thread_cnt = req->job_core_spec & (~CORE_SPEC_THREAD);
	}
	if (spec_thread_cnt) {
		/* Skip specialized threads as needed */
		/* NOTE: these int locals intentionally shadow the uint16_t
		 * loop variables above */
		int i, t, c, s;
		/* clear highest-numbered threads first, walking down
		 * through cores and sockets */
		for (t = conf->threads - 1;
		     ((t >= 0) && (spec_thread_cnt > 0)); t--) {
			for (c = conf->cores - 1;
			     ((c >= 0) && (spec_thread_cnt > 0)); c--) {
				for (s = conf->sockets - 1;
				     ((s >= 0) && (spec_thread_cnt > 0));
				     s--) {
					i = s * conf->cores + c;
					i = (i * conf->threads) + t;
					bit_clear(hw_map, i);
					spec_thread_cnt--;
				}
			}
		}
	}
	str = (char *)bit_fmt_hexmask(hw_map);
	debug3("task/affinity: job %u.%u CPU final mask for local node: %s",
	       req->job_id, req->job_step_id, str);
	xfree(str);

	FREE_NULL_BITMAP(req_map);
	slurm_cred_free_args(&arg);
	return hw_map;
}
/* Determine which CPUs a job step can use.
 * OUT whole_<entity>_count - returns count of whole <entities> in this
 *                            allocation for this node
 * OUT part__<entity>_count - returns count of partial <entities> in this
 *                            allocation for this node
 * RET - a string representation of the available mask or NULL on error
 * NOTE: Caller must xfree() the return value. */
static char *_alloc_mask(launch_tasks_request_msg_t *req,
			 int *whole_node_cnt, int *whole_socket_cnt,
			 int *whole_core_cnt, int *whole_thread_cnt,
			 int *part_socket_cnt, int *part_core_cnt)
{
	uint16_t sockets, cores, threads;
	int c, s, t, i;
	int c_miss, s_miss, t_miss, c_hit, t_hit;
	bitstr_t *alloc_bitmap;
	char *str_mask;
	bitstr_t *alloc_mask;

	*whole_node_cnt = 0;
	*whole_socket_cnt = 0;
	*whole_core_cnt = 0;
	*whole_thread_cnt = 0;
	*part_socket_cnt = 0;
	*part_core_cnt = 0;

	alloc_bitmap = _get_avail_map(req, &sockets, &cores, &threads);
	if (!alloc_bitmap)
		return NULL;

	alloc_mask = bit_alloc(bit_size(alloc_bitmap));

	/* Walk socket/core/thread space, tallying whole vs. partial
	 * entities as we copy the available bits into alloc_mask */
	i = 0;
	for (s=0, s_miss=false; s<sockets; s++) {
		for (c=0, c_hit=c_miss=false; c<cores; c++) {
			for (t=0, t_hit=t_miss=false; t<threads; t++) {
				/* If we are pretending we have a larger
				   system than we really have this is
				   needed to make sure we don't bust
				   the bank. */
				if (i >= bit_size(alloc_bitmap))
					i = 0;
				if (bit_test(alloc_bitmap, i)) {
					bit_set(alloc_mask, i);
					(*whole_thread_cnt)++;
					t_hit = true;
					c_hit = true;
				} else
					t_miss = true;
				i++;
			}
			if (!t_miss)
				(*whole_core_cnt)++;
			else {
				if (t_hit)
					(*part_core_cnt)++;
				c_miss = true;
			}
		}
		if (!c_miss)
			(*whole_socket_cnt)++;
		else {
			if (c_hit)
				(*part_socket_cnt)++;
			s_miss = true;
		}
	}
	if (!s_miss)
		(*whole_node_cnt)++;
	FREE_NULL_BITMAP(alloc_bitmap);

	/* job_core_spec with the CORE_SPEC_THREAD flag (but not equal to
	 * the bare flag) encodes a count of specialized threads to remove */
	if ((req->job_core_spec != (uint16_t) NO_VAL) &&
	    (req->job_core_spec & CORE_SPEC_THREAD)  &&
	    (req->job_core_spec != CORE_SPEC_THREAD)) {
		int spec_thread_cnt;
		spec_thread_cnt = req->job_core_spec & (~CORE_SPEC_THREAD);
		/* NOTE(review): these loops use (t > 0) and (c > 0) while
		 * the sibling _get_avail_map() uses (t >= 0) and (c >= 0),
		 * so thread 0 and core 0 are never cleared here -- confirm
		 * whether the asymmetry is intentional. */
		for (t = threads - 1;
		     ((t > 0) && (spec_thread_cnt > 0)); t--) {
			for (c = cores - 1;
			     ((c > 0) && (spec_thread_cnt > 0)); c--) {
				for (s = sockets - 1;
				     ((s >= 0) && (spec_thread_cnt > 0));
				     s--) {
					i = s * cores + c;
					i = (i * threads) + t;
					bit_clear(alloc_mask, i);
					spec_thread_cnt--;
				}
			}
		}
	}

	/* translate abstract masks to actual hardware layout */
	_lllp_map_abstract_masks(1, &alloc_mask);

#ifdef HAVE_NUMA
	if (req->cpu_bind_type & CPU_BIND_TO_LDOMS) {
		_match_masks_to_ldom(1, &alloc_mask);
	}
#endif

	str_mask = bit_fmt_hexmask(alloc_mask);
	FREE_NULL_BITMAP(alloc_mask);
	return str_mask;
}
int CS_put(struct content_obj * content) { content = content_copy(content); log_assert(g_log, content != NULL, "CS: failed to allocate content"); pthread_mutex_lock(&_cs.lock); struct CS_segment * segment = (struct CS_segment * ) hash_get(_cs.table, content_prefix(content->name)); pthread_mutex_unlock(&_cs.lock); int rv = 0; if (content_is_segmented(content->name)) { if (segment) { pthread_mutex_lock(&segment->lock); int seq_no = content_seq_no(content->name); if (seq_no >= segment->num_chunks) { segment->chunks = realloc(segment->chunks, sizeof(struct content_obj * ) * (seq_no + 1)); segment->num_chunks = seq_no + 1; segment->chunks[seq_no] = content; struct bitmap * larger = bit_create(segment->num_chunks); memcpy(larger->map, segment->valid->map, segment->valid->num_words); bit_destroy(segment->valid); segment->valid = larger; bit_set(larger, seq_no); } else { if (bit_test(segment->valid, seq_no)) { content_obj_destroy(segment->chunks[seq_no]); } segment->chunks[seq_no] = content; bit_set(segment->valid, seq_no); } pthread_mutex_unlock(&segment->lock); } else { rv = -1; } } else { if (!segment) { segment = malloc(sizeof(struct CS_segment)); /* this is a content matching a prefix */ char * key = malloc(strlen(content->name->full_name)); strcpy(key, content->name->full_name); segment->index_chunk = content; segment->chunks = NULL; segment->num_chunks = -1; segment->valid = bit_create(0); pthread_mutex_init(&segment->lock, NULL); pthread_mutex_lock(&_cs.lock); hash_put(_cs.table, key, (void * ) segment); pthread_mutex_unlock(&_cs.lock); } else { pthread_mutex_lock(&segment->lock); if (segment->index_chunk) { content_obj_destroy(segment->index_chunk); } segment->index_chunk = content; pthread_mutex_unlock(&segment->lock); } } return rv; }
static void dbg_showcon(const char *fn, u32 con) { printk(KERN_DEBUG "%s: LRI=%d, TXFEMPT=%d, RXFEMPT=%d, TXFFULL=%d, RXFFULL=%d\n", fn, bit_set(con, S3C2412_IISCON_LRINDEX), bit_set(con, S3C2412_IISCON_TXFIFO_EMPTY), bit_set(con, S3C2412_IISCON_RXFIFO_EMPTY), bit_set(con, S3C2412_IISCON_TXFIFO_FULL), bit_set(con, S3C2412_IISCON_RXFIFO_FULL)); printk(KERN_DEBUG "%s: PAUSE: TXDMA=%d, RXDMA=%d, TXCH=%d, RXCH=%d\n", fn, bit_set(con, S3C2412_IISCON_TXDMA_PAUSE), bit_set(con, S3C2412_IISCON_RXDMA_PAUSE), bit_set(con, S3C2412_IISCON_TXCH_PAUSE), bit_set(con, S3C2412_IISCON_RXCH_PAUSE)); printk(KERN_DEBUG "%s: ACTIVE: TXDMA=%d, RXDMA=%d, IIS=%d\n", fn, bit_set(con, S3C2412_IISCON_TXDMA_ACTIVE), bit_set(con, S3C2412_IISCON_RXDMA_ACTIVE), bit_set(con, S3C2412_IISCON_IIS_ACTIVE)); }
void hitachi_lcd_cgram_symbol(uint8_t __flash * symbol, uint8_t addr){ CustomSymbol[addr] = symbol; bit_set(NewCustomSymbolFlags, addr); }
int main(int argc, char *argv[]) { note("Testing static decl"); { bitstr_t bit_decl(bs, 65); /*bitstr_t *bsp = bs;*/ bit_set(bs,9); bit_set(bs,14); TEST(bit_test(bs,9), "bit 9 set"); TEST(!bit_test(bs,12), "bit 12 not set"); TEST(bit_test(bs,14), "bit 14 set" ); /*bit_free(bsp);*/ /* triggers TEST in bit_free - OK */ } note("Testing basic vixie functions"); { bitstr_t *bs = bit_alloc(16), *bs2; /*bit_set(bs, 42);*/ /* triggers TEST in bit_set - OK */ bit_set(bs,9); bit_set(bs,14); TEST(bit_test(bs,9), "bit 9 set"); TEST(!bit_test(bs,12), "bit 12 not set" ); TEST(bit_test(bs,14), "bit 14 set"); bs2 = bit_copy(bs); bit_fill_gaps(bs2); TEST(bit_ffs(bs2) == 9, "first bit set = 9 "); TEST(bit_fls(bs2) == 14, "last bit set = 14"); TEST(bit_set_count(bs2) == 6, "bitstring"); TEST(bit_test(bs2,12), "bitstring"); TEST(bit_super_set(bs,bs2) == 1, "bitstring"); TEST(bit_super_set(bs2,bs) == 0, "bitstring"); bit_clear(bs,14); TEST(!bit_test(bs,14), "bitstring"); bit_nclear(bs,9,14); TEST(!bit_test(bs,9), "bitstring"); TEST(!bit_test(bs,12), "bitstring"); TEST(!bit_test(bs,14), "bitstring"); bit_nset(bs,9,14); TEST(bit_test(bs,9), "bitstring"); TEST(bit_test(bs,12), "bitstring"); TEST(bit_test(bs,14), "bitstring"); TEST(bit_ffs(bs) == 9, "ffs"); TEST(bit_ffc(bs) == 0, "ffc"); bit_nset(bs,0,8); TEST(bit_ffc(bs) == 15, "ffc"); bit_free(bs); /*bit_set(bs,9); */ /* triggers TEST in bit_set - OK */ } note("Testing and/or/not"); { bitstr_t *bs1 = bit_alloc(128); bitstr_t *bs2 = bit_alloc(128); bit_set(bs1, 100); bit_set(bs1, 104); bit_set(bs2, 100); bit_and(bs1, bs2); TEST(bit_test(bs1, 100), "and"); TEST(!bit_test(bs1, 104), "and"); bit_set(bs2, 110); bit_set(bs2, 111); bit_set(bs2, 112); bit_or(bs1, bs2); TEST(bit_test(bs1, 100), "or"); TEST(bit_test(bs1, 110), "or"); TEST(bit_test(bs1, 111), "or"); TEST(bit_test(bs1, 112), "or"); bit_not(bs1); TEST(!bit_test(bs1, 100), "not"); TEST(bit_test(bs1, 12), "not"); bit_free(bs1); bit_free(bs2); } note("testing bit selection"); { bitstr_t 
*bs1 = bit_alloc(128), *bs2; bit_set(bs1, 21); bit_set(bs1, 100); bit_fill_gaps(bs1); bs2 = bit_pick_cnt(bs1,20); if (bs2) { TEST(bit_set_count(bs2) == 20, "pick"); TEST(bit_ffs(bs2) == 21, "pick"); TEST(bit_fls(bs2) == 40, "pick"); bit_free(bs2); } else TEST(0, "alloc fail"); bit_free(bs1); } note("Testing realloc"); { bitstr_t *bs = bit_alloc(1); TEST(bit_ffs(bs) == -1, "bitstring"); bit_set(bs,0); /*bit_set(bs, 1000);*/ /* triggers TEST in bit_set - OK */ bs = bit_realloc(bs,1048576); bit_set(bs,1000); bit_set(bs,1048575); TEST(bit_test(bs, 0), "bitstring"); TEST(bit_test(bs, 1000), "bitstring"); TEST(bit_test(bs, 1048575), "bitstring"); TEST(bit_set_count(bs) == 3, "bitstring"); bit_clear(bs,0); bit_clear(bs,1000); TEST(bit_set_count(bs) == 1, "bitstring"); TEST(bit_ffs(bs) == 1048575, "bitstring"); bit_free(bs); } note("Testing bit_fmt"); { char tmpstr[1024]; bitstr_t *bs = bit_alloc(1024); TEST(!strcmp(bit_fmt(tmpstr,sizeof(tmpstr),bs), ""), "bitstring"); bit_set(bs,42); TEST(!strcmp(bit_fmt(tmpstr,sizeof(tmpstr),bs), "42"), "bitstring"); bit_set(bs,102); TEST(!strcmp(bit_fmt(tmpstr,sizeof(tmpstr),bs), "42,102"), "bitstring"); bit_nset(bs,9,14); TEST(!strcmp(bit_fmt(tmpstr,sizeof(tmpstr), bs), "9-14,42,102"), "bitstring"); } note("Testing bit_nffc/bit_nffs"); { bitstr_t *bs = bit_alloc(1024); bit_set(bs, 2); bit_set(bs, 6); bit_set(bs, 7); bit_nset(bs,12,1018); TEST(bit_nffc(bs, 2) == 0, "bitstring"); TEST(bit_nffc(bs, 3) == 3, "bitstring"); TEST(bit_nffc(bs, 4) == 8, "bitstring"); TEST(bit_nffc(bs, 5) == 1019, "bitstring"); TEST(bit_nffc(bs, 6) == -1, "bitstring"); TEST(bit_nffs(bs, 1) == 2, "bitstring"); TEST(bit_nffs(bs, 2) == 6, "bitstring"); TEST(bit_nffs(bs, 100) == 12, "bitstring"); TEST(bit_nffs(bs, 1023) == -1, "bitstring"); bit_free(bs); } note("Testing bit_unfmt"); { bitstr_t *bs = bit_alloc(1024); bitstr_t *bs2 = bit_alloc(1024); char tmpstr[4096]; bit_set(bs,1); bit_set(bs,3); bit_set(bs,30); bit_nset(bs,42,64); bit_nset(bs,97,1000); 
bit_fmt(tmpstr, sizeof(tmpstr), bs); TEST(bit_unfmt(bs2, tmpstr) != -1, "bitstring"); TEST(bit_equal(bs, bs2), "bitstring"); } totals(); return failed; }
static void page_ok(int page) { if (ENABLE_FEATURE_MKSWAP_V0) { bit_set(signature_page, page); } }
/*
 * Common code for mount and mountroot.
 *
 * Opens the underlying GEOM provider read-only, allocates and wires up the
 * per-mount (rmp) and superblock-info (sbi) structures, reads and validates
 * the on-disk superblock (old or new format), loads the block bitmaps,
 * selects the directory hash function and records the filesystem version.
 * On any failure, jumps to "out" which tears down everything acquired so
 * far and returns the error.
 */
static int reiserfs_mountfs(struct vnode *devvp, struct mount *mp, struct thread *td)
{
	int error, old_format = 0;
	struct reiserfs_mount *rmp;
	struct reiserfs_sb_info *sbi;
	struct reiserfs_super_block *rs;
	struct cdev *dev;
	struct g_consumer *cp;
	struct bufobj *bo;
	//ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	/* Open the GEOM consumer for the device (read-only access). */
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "reiserfs", /* read-only */ 0);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error) {
		dev_rel(dev);
		return (error);
	}

	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;
	/* Clamp the mount's max I/O size to the device limit and MAXPHYS. */
	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	rmp = NULL;
	sbi = NULL;

	/* rmp contains any information about this specific mount */
	rmp = malloc(sizeof *rmp, M_REISERFSMNT, M_WAITOK | M_ZERO);
	/* NOTE(review): M_WAITOK malloc never returns NULL on FreeBSD, so
	 * these two checks are dead code (harmless). */
	if (!rmp) {
		error = (ENOMEM);
		goto out;
	}
	sbi = malloc(sizeof *sbi, M_REISERFSMNT, M_WAITOK | M_ZERO);
	if (!sbi) {
		error = (ENOMEM);
		goto out;
	}
	rmp->rm_reiserfs = sbi;
	rmp->rm_mountp = mp;
	rmp->rm_devvp = devvp;
	rmp->rm_dev = dev;
	rmp->rm_bo = &devvp->v_bufobj;
	rmp->rm_cp = cp;

	/* Set default values for options: non-aggressive tails */
	REISERFS_SB(sbi)->s_mount_opt = (1 << REISERFS_SMALLTAIL);
	REISERFS_SB(sbi)->s_rd_only = 1;
	REISERFS_SB(sbi)->s_devvp = devvp;

	/* Read the super block: try the old-format offset first, then the
	 * current one; fail if neither holds a valid superblock. */
	if ((error = read_super_block(rmp, REISERFS_OLD_DISK_OFFSET)) == 0) {
		/* The read process succeeded, it's an old format */
		old_format = 1;
	} else if ((error = read_super_block(rmp, REISERFS_DISK_OFFSET)) != 0) {
		reiserfs_log(LOG_ERR, "can not find a ReiserFS filesystem\n");
		goto out;
	}

	rs = SB_DISK_SUPER_BLOCK(sbi);

	/*
	 * Let's do basic sanity check to verify that underlying device is
	 * not smaller than the filesystem. If the check fails then abort and
	 * scream, because bad stuff will happen otherwise.
	 */
#if 0
	if (s->s_bdev && s->s_bdev->bd_inode &&
	    i_size_read(s->s_bdev->bd_inode) <
	    sb_block_count(rs) * sb_blocksize(rs)) {
		reiserfs_log(LOG_ERR,
		    "reiserfs: filesystem cannot be mounted because it is "
		    "bigger than the device.\n");
		reiserfs_log(LOG_ERR, "reiserfs: you may need to run fsck "
		    "rr may be you forgot to reboot after fdisk when it "
		    "told you to.\n");
		goto out;
	}
#endif

	/*
	 * XXX This is from the original Linux code, but why affecting 2 values
	 * to the same variable?
	 */
	sbi->s_mount_state = SB_REISERFS_STATE(sbi);
	sbi->s_mount_state = REISERFS_VALID_FS;

	/* Load the allocation bitmaps, format-dependent. */
	if ((error = (old_format ?
	    read_old_bitmaps(rmp) : read_bitmaps(rmp)))) {
		reiserfs_log(LOG_ERR, "unable to read bitmap\n");
		goto out;
	}

	/* Make data=ordered the default */
	if (!reiserfs_data_log(sbi) && !reiserfs_data_ordered(sbi) &&
	    !reiserfs_data_writeback(sbi)) {
		REISERFS_SB(sbi)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
	}

	if (reiserfs_data_log(sbi)) {
		reiserfs_log(LOG_INFO, "using journaled data mode\n");
	} else if (reiserfs_data_ordered(sbi)) {
		reiserfs_log(LOG_INFO, "using ordered data mode\n");
	} else {
		reiserfs_log(LOG_INFO, "using writeback data mode\n");
	}

	/* TODO Not yet supported */
#if 0
	if(journal_init(sbi, jdev_name, old_format, commit_max_age)) {
		reiserfs_log(LOG_ERR, "unable to initialize journal space\n");
		goto out;
	} else {
		jinit_done = 1 ; /* once this is set, journal_release must
				  * be called if we error out of the mount */
	}
	if (reread_meta_blocks(sbi)) {
		reiserfs_log(LOG_ERR,
		    "unable to reread meta blocks after journal init\n");
		goto out;
	}
#endif

	/* Define and initialize hash function */
	sbi->s_hash_function = hash_function(rmp);
	if (sbi->s_hash_function == NULL) {
		reiserfs_log(LOG_ERR, "couldn't determined hash function\n");
		error = (EINVAL);
		goto out;
	}

	/* Record the filesystem version (3.5 vs 3.6 semantics). */
	if (is_reiserfs_3_5(rs) ||
	    (is_reiserfs_jr(rs) && SB_VERSION(sbi) == REISERFS_VERSION_1))
		bit_set(&(sbi->s_properties), REISERFS_3_5);
	else
		bit_set(&(sbi->s_properties), REISERFS_3_6);

	/* Publish the mount: filesystem id, flags, device back-pointer. */
	mp->mnt_data = rmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
#if defined(si_mountpoint)
	devvp->v_rdev->si_mountpoint = mp;
#endif

	return (0);

out:
	/* Error path: release bitmaps, superblock copy, GEOM consumer and
	 * the allocated structures in reverse order of acquisition. */
	reiserfs_log(LOG_INFO, "*** error during mount ***\n");

	if (sbi) {
		if (SB_AP_BITMAP(sbi)) {
			int i;
			for (i = 0; i < SB_BMAP_NR(sbi); i++) {
				if (!SB_AP_BITMAP(sbi)[i].bp_data)
					break;
				free(SB_AP_BITMAP(sbi)[i].bp_data,
				    M_REISERFSMNT);
			}
			free(SB_AP_BITMAP(sbi), M_REISERFSMNT);
		}
		if (sbi->s_rs) {
			free(sbi->s_rs, M_REISERFSMNT);
			sbi->s_rs = NULL;
		}
	}

	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}

	if (sbi)
		free(sbi, M_REISERFSMNT);
	if (rmp)
		free(rmp, M_REISERFSMNT);

	dev_rel(dev);
	return (error);
}
/* Perform any power change work to nodes.
 * Scans every node record and decides, per node, whether to resume it
 * (powered-down but now allocated or recently idle) or suspend it (idle
 * long enough and eligible).  Rate limiting is enforced through decayed
 * per-minute counters (suspend_cnt_f/resume_cnt_f).  The actual suspend
 * and resume programs are invoked once at the end on the accumulated
 * node lists. */
static void _do_power_work(time_t now)
{
	static time_t last_log = 0, last_work_scan = 0;
	int i, wake_cnt = 0, sleep_cnt = 0, susp_total = 0;
	time_t delta_t;
	uint32_t susp_state;
	bitstr_t *wake_node_bitmap = NULL, *sleep_node_bitmap = NULL;
	struct node_record *node_ptr;
	bool run_suspend = false;

	/* Set limit on counts of nodes to have state changed:
	 * decay the running counters linearly over a 60 second window */
	delta_t = now - last_work_scan;
	if (delta_t >= 60) {
		suspend_cnt_f = 0.0;
		resume_cnt_f = 0.0;
	} else {
		float rate = (60 - delta_t) / 60.0;
		suspend_cnt_f *= rate;
		resume_cnt_f *= rate;
	}
	/* round the float counters to the integer counters */
	suspend_cnt = (suspend_cnt_f + 0.5);
	resume_cnt = (resume_cnt_f + 0.5);

	if (now > (last_suspend + suspend_timeout)) {
		/* ready to start another round of node suspends */
		run_suspend = true;
		if (last_suspend) {
			bit_nclear(suspend_node_bitmap, 0,
				   (node_record_count - 1));
			bit_nclear(resume_node_bitmap, 0,
				   (node_record_count - 1));
			last_suspend = (time_t) 0;
		}
	}

	last_work_scan = now;

	/* Build bitmaps identifying each node which should change state */
	for (i = 0, node_ptr = node_record_table_ptr;
	     i < node_record_count; i++, node_ptr++) {
		susp_state = IS_NODE_POWER_SAVE(node_ptr);

		if (susp_state)
			susp_total++;

		/* Resume nodes as appropriate: currently powered down, not
		 * already mid-suspend, under the resume rate limit, and
		 * either allocated or idle more recently than idle_time */
		if (susp_state &&
		    ((resume_rate == 0) || (resume_cnt < resume_rate)) &&
		    (bit_test(suspend_node_bitmap, i) == 0) &&
		    (IS_NODE_ALLOCATED(node_ptr) ||
		     (node_ptr->last_idle > (now - idle_time)))) {
			if (wake_node_bitmap == NULL) {
				wake_node_bitmap =
					bit_alloc(node_record_count);
			}
			wake_cnt++;
			resume_cnt++;
			resume_cnt_f++;
			/* mark powering-up and unresponsive until the node
			 * checks in (deadline = now + resume_timeout) */
			node_ptr->node_state &= (~NODE_STATE_POWER_SAVE);
			node_ptr->node_state |= NODE_STATE_POWER_UP;
			node_ptr->node_state |= NODE_STATE_NO_RESPOND;
			bit_clear(power_node_bitmap, i);
			bit_clear(avail_node_bitmap, i);
			node_ptr->last_response = now + resume_timeout;
			bit_set(wake_node_bitmap, i);
			bit_set(resume_node_bitmap, i);
		}

		/* Suspend nodes as appropriate: powered up, idle or down,
		 * no suspended jobs, not completing or powering up, idle
		 * longer than idle_time, not in the excluded set, and
		 * under the suspend rate limit */
		if (run_suspend && (susp_state == 0) &&
		    ((suspend_rate == 0) || (suspend_cnt < suspend_rate)) &&
		    (IS_NODE_IDLE(node_ptr) || IS_NODE_DOWN(node_ptr)) &&
		    (node_ptr->sus_job_cnt == 0) &&
		    (!IS_NODE_COMPLETING(node_ptr)) &&
		    (!IS_NODE_POWER_UP(node_ptr)) &&
		    (node_ptr->last_idle < (now - idle_time)) &&
		    ((exc_node_bitmap == NULL) ||
		     (bit_test(exc_node_bitmap, i) == 0))) {
			if (sleep_node_bitmap == NULL) {
				sleep_node_bitmap =
					bit_alloc(node_record_count);
			}
			sleep_cnt++;
			suspend_cnt++;
			suspend_cnt_f++;
			node_ptr->node_state |= NODE_STATE_POWER_SAVE;
			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
			/* a powered-down node stays schedulable unless it
			 * is down or drained */
			if (!IS_NODE_DOWN(node_ptr) &&
			    !IS_NODE_DRAIN(node_ptr))
				bit_set(avail_node_bitmap, i);
			bit_set(power_node_bitmap, i);
			bit_set(sleep_node_bitmap, i);
			bit_set(suspend_node_bitmap, i);
			last_suspend = now;
		}
	}

	/* log the powered-down total at most every 10 minutes */
	if (((now - last_log) > 600) && (susp_total > 0)) {
		info("Power save mode: %d nodes", susp_total);
		last_log = now;
	}

	if (sleep_node_bitmap) {
		char *nodes;
		nodes = bitmap2node_name(sleep_node_bitmap);
		if (nodes)
			_do_suspend(nodes);
		else
			error("power_save: bitmap2nodename");
		xfree(nodes);
		FREE_NULL_BITMAP(sleep_node_bitmap);
		/* last_node_update could be changed already by another thread!
		last_node_update = now; */
	}

	if (wake_node_bitmap) {
		char *nodes;
		nodes = bitmap2node_name(wake_node_bitmap);
		if (nodes)
			_do_resume(nodes);
		else
			error("power_save: bitmap2nodename");
		xfree(nodes);
		FREE_NULL_BITMAP(wake_node_bitmap);
		/* last_node_update could be changed already by another thread!
		last_node_update = now; */
	}
}
static void handle_picmg_cmd_set_fru_activation(lmc_data_t *mc, msg_t *msg, unsigned char *rdata, unsigned int *rdata_len, void *cb_data) { int op; sensor_t *hssens; if (check_msg_length(msg, 3, rdata, rdata_len)) return; if (msg->data[1] != 0) { rdata[0] = IPMI_DESTINATION_UNAVAILABLE_CC; *rdata_len = 1; return; } if (! mc->hs_sensor) { handle_invalid_cmd(mc, rdata, rdata_len); return; } op = msg->data[2]; if (op >= 2) { rdata[0] = IPMI_INVALID_DATA_FIELD_CC; *rdata_len = 1; return; } hssens = mc->hs_sensor; switch (op) { case 0: if (bit_set(hssens->event_status, 3) || bit_set(hssens->event_status, 4) || bit_set(hssens->event_status, 5)) { /* Transition to m6. */ ipmi_mc_sensor_set_bit_clr_rest(mc, hssens->lun, hssens->num, 6, 1); /* Transition to m1. */ ipmi_mc_sensor_set_bit_clr_rest(mc, hssens->lun, hssens->num, 1, 1); } break; case 1: if (bit_set(hssens->event_status, 2)) { /* Transition to m3. */ ipmi_mc_sensor_set_bit_clr_rest(mc, hssens->lun, hssens->num, 3, 1); /* Transition to m4. */ ipmi_mc_sensor_set_bit_clr_rest(mc, hssens->lun, hssens->num, 4, 1); } } rdata[0] = 0; rdata[1] = IPMI_PICMG_GRP_EXT; *rdata_len = 2; }
static void _sig_term_ipc_handler(int n) { bit_set(&runi.flags, USCHED_RUNTIME_FLAG_TERMINATE); }
/* * Handle a read request; fill in parts of the request that can * be satisfied by the cache, use the supplied strategy routine to do * device I/O and then use the I/O results to populate the cache. */ static int read_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size, char *buf, size_t *rsize) { struct bcache_devdata *dd = (struct bcache_devdata *)devdata; int p_size, result; daddr_t p_blk, i, j, nblk; caddr_t p_buf; nblk = size / bcache_blksize; result = 0; /* Satisfy any cache hits up front */ for (i = 0; i < nblk; i++) { if (bcache_lookup(buf + (bcache_blksize * i), blk + i)) { bit_set(bcache_miss, i); /* cache miss */ bcache_misses++; } else { bit_clear(bcache_miss, i); /* cache hit */ bcache_hits++; } } /* Go back and fill in any misses XXX optimise */ p_blk = -1; p_buf = NULL; p_size = 0; for (i = 0; i < nblk; i++) { if (bit_test(bcache_miss, i)) { /* miss, add to pending transfer */ if (p_blk == -1) { p_blk = blk + i; p_buf = buf + (bcache_blksize * i); p_size = 1; } else { p_size++; } } else if (p_blk != -1) { /* hit, complete pending transfer */ result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, NULL); if (result != 0) goto done; for (j = 0; j < p_size; j++) bcache_insert(p_buf + (j * bcache_blksize), p_blk + j); p_blk = -1; } } if (p_blk != -1) { /* pending transfer left */ result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, NULL); if (result != 0) goto done; for (j = 0; j < p_size; j++) bcache_insert(p_buf + (j * bcache_blksize), p_blk + j); } done: if ((result == 0) && (rsize != NULL)) *rsize = size; return(result); }
/*
 * Dispatch the FakeSMC plugin API exposed through IOKit's
 * callPlatformFunction mechanism: key handler registration/lookup/removal,
 * key value get/set, and allocation/release of GPU and fan index slots
 * tracked in the vacantGPUIndex/vacantFanIndex bitmasks (16 slots each,
 * 0..0xf, guarded by KEYSLOCK/KEYSUNLOCK).
 *
 * Returns kIOReturnSuccess on success, kIOReturnBadArgument for missing or
 * malformed parameters, kIOReturnError for operational failures, and defers
 * unknown function names to the superclass.
 *
 * FIX: kFakeSMCTakeGPUIndex previously tested (*index < 0xf), rejecting
 * index 15 even though kFakeSMCTakeVacantGPUIndex can hand out indices up
 * to 15 (loop "i <= 0xf") and kFakeSMCReleaseGPUIndex accepts (<= 0xf).
 */
IOReturn FakeSMCDevice::callPlatformFunction(const OSSymbol *functionName, bool waitForFunction, void *param1, void *param2, void *param3, void *param4 )
{
    IOReturn result = kIOReturnUnsupported;

    if (functionName->isEqualTo(kFakeSMCAddKeyHandler)) {
        /* param1=name, param2=type, param3=size, param4=handler service */
        result = kIOReturnBadArgument;
        if (param1 && param2 && param3 && param4) {
            const char *name = (const char *)param1;
            const char *type = (const char *)param2;
            UInt8 size = (UInt64)param3;
            IOService *handler = (IOService*)param4;
            if (name && type && size > 0 && handler) {
                if (addKeyWithHandler(name, type, size, handler))
                    result = kIOReturnSuccess;
                else
                    result = kIOReturnError;
            }
        }
    }
    else if (functionName->isEqualTo(kFakeSMCGetKeyHandler)) {
        /* param1=name, param2=out handler pointer */
        result = kIOReturnBadArgument;
        if (const char *name = (const char *)param1) {
            result = kIOReturnError;
            if (FakeSMCKey *key = OSDynamicCast(FakeSMCKey, getKey(name))) {
                result = kIOReturnSuccess;
                if (key->getHandler()) {
                    result = kIOReturnBadArgument;
                    if (param2) {
                        IOService **handler = (IOService**)param2;
                        *handler = key->getHandler();
                        result = kIOReturnSuccess;
                    }
                }
            }
        }
    }
    else if (functionName->isEqualTo(kFakeSMCRemoveKeyHandler)) {
        /* param1=handler service; detach it from every key it serves */
        result = kIOReturnBadArgument;
        if (param1) {
            result = kIOReturnError;
            if (OSCollectionIterator *iterator = OSCollectionIterator::withCollection(keys)) {
                IOService *handler = (IOService *)param1;
                while (FakeSMCKey *key = OSDynamicCast(FakeSMCKey, iterator->getNextObject())) {
                    if (key->getHandler() == handler)
                        key->setHandler(NULL);
                }
                result = kIOReturnSuccess;
                OSSafeRelease(iterator);
            }
        }
    }
    else if (functionName->isEqualTo(kFakeSMCAddKeyValue)) {
        /* param1=name, param2=type, param3=size, param4=value (may be NULL) */
        result = kIOReturnBadArgument;
        if (param1 && param2 && param3) {
            const char *name = (const char *)param1;
            const char *type = (const char *)param2;
            UInt8 size = (UInt64)param3;
            const void *value = (const void *)param4;
            if (name && type && size > 0) {
                if (addKeyWithValue(name, type, size, value))
                    result = kIOReturnSuccess;
                else
                    result = kIOReturnError;
            }
        }
    }
    else if (functionName->isEqualTo(kFakeSMCSetKeyValue)) {
        /* param1=name, param2=size, param3=data buffer */
        result = kIOReturnBadArgument;
        if (param1 && param2 && param3) {
            const char *name = (const char *)param1;
            UInt8 size = (UInt64)param2;
            const void *data = (const void *)param3;
            result = kIOReturnError;
            if (name && data && size > 0) {
                if (FakeSMCKey *key = OSDynamicCast(FakeSMCKey, getKey(name))) {
                    if (key->setValueFromBuffer(data, size)) {
                        result = kIOReturnSuccess;
                    }
                }
            }
        }
    }
    else if (functionName->isEqualTo(kFakeSMCGetKeyValue)) {
        /* param1=name, param2=out size, param3=out value pointer */
        result = kIOReturnBadArgument;
        if (const char *name = (const char *)param1) {
            result = kIOReturnError;
            if (FakeSMCKey *key = getKey(name)) {
                result = kIOReturnBadArgument;
                if (param2 && param3) {
                    UInt8 *size = (UInt8*)param2;
                    const void **value = (const void **)param3;
                    *size = key->getSize();
                    *value = key->getValue();
                    result = kIOReturnSuccess;
                }
            }
        }
    }
    else if (functionName->isEqualTo(kFakeSMCTakeVacantGPUIndex)) {
        /* Allocate the lowest free GPU slot (0..15) into *param1. */
        result = kIOReturnBadArgument;
        KEYSLOCK;
        if (SInt8 *index = (SInt8*)param1) {
            for (UInt8 i = 0; i <= 0xf; i++) {
                if (!bit_get(vacantGPUIndex, BIT(i))) {
                    bit_set(vacantGPUIndex, BIT(i));
                    *index = i;
                    result = kIOReturnSuccess;
                    break;
                }
            }
            if (result != kIOReturnSuccess)
                result = kIOReturnError;
        }
        KEYSUNLOCK;
    }
    else if (functionName->isEqualTo(kFakeSMCTakeGPUIndex)) {
        /* Claim a specific GPU slot if it is free. */
        result = kIOReturnBadArgument;
        KEYSLOCK;
        if (UInt8 *index = (UInt8*)param1) {
            /* FIX: was (*index < 0xf), which wrongly rejected slot 15 */
            if (*index <= 0xf && !bit_get(vacantGPUIndex, BIT(*index))) {
                bit_set(vacantGPUIndex, BIT(*index));
                result = kIOReturnSuccess;
            }
            if (result != kIOReturnSuccess)
                result = kIOReturnError;
        }
        KEYSUNLOCK;
    }
    else if (functionName->isEqualTo(kFakeSMCReleaseGPUIndex)) {
        /* Free a previously taken GPU slot. */
        result = kIOReturnBadArgument;
        KEYSLOCK;
        if (UInt8 *index = (UInt8*)param1) {
            if (*index <= 0xf) {
                bit_clear(vacantGPUIndex, BIT(*index));
                result = kIOReturnSuccess;
            }
        }
        KEYSUNLOCK;
    }
    else if (functionName->isEqualTo(kFakeSMCTakeVacantFanIndex)) {
        /* Allocate the lowest free fan slot and refresh the fan count key. */
        result = kIOReturnBadArgument;
        KEYSLOCK;
        if (SInt8 *index = (SInt8*)param1) {
            for (UInt8 i = 0; i <= 0xf; i++) {
                if (!bit_get(vacantFanIndex, BIT(i))) {
                    bit_set(vacantFanIndex, BIT(i));
                    *index = i;
                    updateFanCounterKey();
                    result = kIOReturnSuccess;
                    break;
                }
            }
            if (result != kIOReturnSuccess)
                result = kIOReturnError;
        }
        KEYSUNLOCK;
    }
    else if (functionName->isEqualTo(kFakeSMCReleaseFanIndex)) {
        /* Free a fan slot and refresh the fan count key. */
        result = kIOReturnBadArgument;
        KEYSLOCK;
        if (UInt8 *index = (UInt8*)param1) {
            if (*index <= 0xf) {
                bit_clear(vacantFanIndex, BIT(*index));
                updateFanCounterKey();
                result = kIOReturnSuccess;
            }
        }
        KEYSUNLOCK;
    }
    else {
        /* Unknown function name: defer to the superclass. */
        result = super::callPlatformFunction(functionName, waitForFunction,
                                             param1, param2, param3, param4);
    }

    return result;
}
/* * Read and process the bluegene.conf configuration file so to interpret what * blocks are static/dynamic, torus/mesh, etc. */ extern int read_bg_conf(void) { int i; bool tmp_bool = 0; int count = 0; s_p_hashtbl_t *tbl = NULL; char *tmp_char = NULL; select_ba_request_t **blockreq_array = NULL; image_t **image_array = NULL; image_t *image = NULL; static time_t last_config_update = (time_t) 0; struct stat config_stat; ListIterator itr = NULL; char* bg_conf_file = NULL; static int *dims = NULL; if (!dims) dims = select_g_ba_get_dims(); if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("Reading the bluegene.conf file"); /* check if config file has changed */ bg_conf_file = get_extra_conf_path("bluegene.conf"); if (stat(bg_conf_file, &config_stat) < 0) fatal("can't stat bluegene.conf file %s: %m", bg_conf_file); if (last_config_update) { _reopen_bridge_log(); if (last_config_update == config_stat.st_mtime) { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("%s unchanged", bg_conf_file); } else { info("Restart slurmctld for %s changes " "to take effect", bg_conf_file); } last_config_update = config_stat.st_mtime; xfree(bg_conf_file); return SLURM_SUCCESS; } last_config_update = config_stat.st_mtime; /* initialization */ /* bg_conf defined in bg_node_alloc.h */ if (!(tbl = config_make_tbl(bg_conf_file))) fatal("something wrong with opening/reading bluegene " "conf file"); xfree(bg_conf_file); #ifdef HAVE_BGL if (s_p_get_array((void ***)&image_array, &count, "AltBlrtsImage", tbl)) { for (i = 0; i < count; i++) { list_append(bg_conf->blrts_list, image_array[i]); image_array[i] = NULL; } } if (!s_p_get_string(&bg_conf->default_blrtsimage, "BlrtsImage", tbl)) { if (!list_count(bg_conf->blrts_list)) fatal("BlrtsImage not configured " "in bluegene.conf"); itr = list_iterator_create(bg_conf->blrts_list); image = list_next(itr); image->def = true; list_iterator_destroy(itr); bg_conf->default_blrtsimage = xstrdup(image->name); info("Warning: using %s as 
the default BlrtsImage. " "If this isn't correct please set BlrtsImage", bg_conf->default_blrtsimage); } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("default BlrtsImage %s", bg_conf->default_blrtsimage); image = xmalloc(sizeof(image_t)); image->name = xstrdup(bg_conf->default_blrtsimage); image->def = true; image->groups = NULL; /* we want it to be first */ list_push(bg_conf->blrts_list, image); } if (s_p_get_array((void ***)&image_array, &count, "AltLinuxImage", tbl)) { for (i = 0; i < count; i++) { list_append(bg_conf->linux_list, image_array[i]); image_array[i] = NULL; } } if (!s_p_get_string(&bg_conf->default_linuximage, "LinuxImage", tbl)) { if (!list_count(bg_conf->linux_list)) fatal("LinuxImage not configured " "in bluegene.conf"); itr = list_iterator_create(bg_conf->linux_list); image = list_next(itr); image->def = true; list_iterator_destroy(itr); bg_conf->default_linuximage = xstrdup(image->name); info("Warning: using %s as the default LinuxImage. " "If this isn't correct please set LinuxImage", bg_conf->default_linuximage); } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("default LinuxImage %s", bg_conf->default_linuximage); image = xmalloc(sizeof(image_t)); image->name = xstrdup(bg_conf->default_linuximage); image->def = true; image->groups = NULL; /* we want it to be first */ list_push(bg_conf->linux_list, image); } if (s_p_get_array((void ***)&image_array, &count, "AltRamDiskImage", tbl)) { for (i = 0; i < count; i++) { list_append(bg_conf->ramdisk_list, image_array[i]); image_array[i] = NULL; } } if (!s_p_get_string(&bg_conf->default_ramdiskimage, "RamDiskImage", tbl)) { if (!list_count(bg_conf->ramdisk_list)) fatal("RamDiskImage not configured " "in bluegene.conf"); itr = list_iterator_create(bg_conf->ramdisk_list); image = list_next(itr); image->def = true; list_iterator_destroy(itr); bg_conf->default_ramdiskimage = xstrdup(image->name); info("Warning: using %s as the default RamDiskImage. 
" "If this isn't correct please set RamDiskImage", bg_conf->default_ramdiskimage); } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("default RamDiskImage %s", bg_conf->default_ramdiskimage); image = xmalloc(sizeof(image_t)); image->name = xstrdup(bg_conf->default_ramdiskimage); image->def = true; image->groups = NULL; /* we want it to be first */ list_push(bg_conf->ramdisk_list, image); } #elif defined HAVE_BGP if (s_p_get_array((void ***)&image_array, &count, "AltCnloadImage", tbl)) { for (i = 0; i < count; i++) { list_append(bg_conf->linux_list, image_array[i]); image_array[i] = NULL; } } if (!s_p_get_string(&bg_conf->default_linuximage, "CnloadImage", tbl)) { if (!list_count(bg_conf->linux_list)) fatal("CnloadImage not configured " "in bluegene.conf"); itr = list_iterator_create(bg_conf->linux_list); image = list_next(itr); image->def = true; list_iterator_destroy(itr); bg_conf->default_linuximage = xstrdup(image->name); info("Warning: using %s as the default CnloadImage. 
" "If this isn't correct please set CnloadImage", bg_conf->default_linuximage); } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("default CnloadImage %s", bg_conf->default_linuximage); image = xmalloc(sizeof(image_t)); image->name = xstrdup(bg_conf->default_linuximage); image->def = true; image->groups = NULL; /* we want it to be first */ list_push(bg_conf->linux_list, image); } if (s_p_get_array((void ***)&image_array, &count, "AltIoloadImage", tbl)) { for (i = 0; i < count; i++) { list_append(bg_conf->ramdisk_list, image_array[i]); image_array[i] = NULL; } } if (!s_p_get_string(&bg_conf->default_ramdiskimage, "IoloadImage", tbl)) { if (!list_count(bg_conf->ramdisk_list)) fatal("IoloadImage not configured " "in bluegene.conf"); itr = list_iterator_create(bg_conf->ramdisk_list); image = list_next(itr); image->def = true; list_iterator_destroy(itr); bg_conf->default_ramdiskimage = xstrdup(image->name); info("Warning: using %s as the default IoloadImage. " "If this isn't correct please set IoloadImage", bg_conf->default_ramdiskimage); } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("default IoloadImage %s", bg_conf->default_ramdiskimage); image = xmalloc(sizeof(image_t)); image->name = xstrdup(bg_conf->default_ramdiskimage); image->def = true; image->groups = NULL; /* we want it to be first */ list_push(bg_conf->ramdisk_list, image); } #endif if (s_p_get_array((void ***)&image_array, &count, "AltMloaderImage", tbl)) { for (i = 0; i < count; i++) { list_append(bg_conf->mloader_list, image_array[i]); image_array[i] = NULL; } } if (!s_p_get_string(&bg_conf->default_mloaderimage, "MloaderImage", tbl)) { if (!list_count(bg_conf->mloader_list)) fatal("MloaderImage not configured " "in bluegene.conf"); itr = list_iterator_create(bg_conf->mloader_list); image = list_next(itr); image->def = true; list_iterator_destroy(itr); bg_conf->default_mloaderimage = xstrdup(image->name); info("Warning: using %s as the default MloaderImage. 
" "If this isn't correct please set MloaderImage", bg_conf->default_mloaderimage); } else { if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("default MloaderImage %s", bg_conf->default_mloaderimage); image = xmalloc(sizeof(image_t)); image->name = xstrdup(bg_conf->default_mloaderimage); image->def = true; image->groups = NULL; /* we want it to be first */ list_push(bg_conf->mloader_list, image); } if (!s_p_get_uint16(&bg_conf->mp_cnode_cnt, "MidplaneNodeCnt", tbl)) { if (!s_p_get_uint16(&bg_conf->mp_cnode_cnt, "BasePartitionNodeCnt", tbl)) { error("MidplaneNodeCnt not configured in bluegene.conf " "defaulting to 512 as MidplaneNodeCnt"); bg_conf->mp_cnode_cnt = 512; } } if (bg_conf->mp_cnode_cnt <= 0) fatal("You should have more than 0 nodes " "per midplane"); bg_conf->actual_cnodes_per_mp = bg_conf->mp_cnode_cnt; bg_conf->quarter_cnode_cnt = bg_conf->mp_cnode_cnt/4; /* bg_conf->cpus_per_mp should had already been set from the * node_init */ if (bg_conf->cpus_per_mp < bg_conf->mp_cnode_cnt) { fatal("For some reason we have only %u cpus per mp, but " "have %u cnodes per mp. You need at least the same " "number of cpus as you have cnodes per mp. " "Check the NodeName CPUs= " "definition in the slurm.conf.", bg_conf->cpus_per_mp, bg_conf->mp_cnode_cnt); } bg_conf->cpu_ratio = bg_conf->cpus_per_mp/bg_conf->mp_cnode_cnt; if (!bg_conf->cpu_ratio) fatal("We appear to have less than 1 cpu on a cnode. 
" "You specified %u for MidplaneNodeCnt " "in the blugene.conf and %u cpus " "for each node in the slurm.conf", bg_conf->mp_cnode_cnt, bg_conf->cpus_per_mp); num_unused_cpus = 1; for (i = 0; i<SYSTEM_DIMENSIONS; i++) num_unused_cpus *= dims[i]; num_unused_cpus *= bg_conf->cpus_per_mp; num_possible_unused_cpus = num_unused_cpus; if (!s_p_get_uint16(&bg_conf->nodecard_cnode_cnt, "NodeBoardNodeCnt", tbl)) { if (!s_p_get_uint16(&bg_conf->nodecard_cnode_cnt, "NodeCardNodeCnt", tbl)) { error("NodeCardNodeCnt not configured in bluegene.conf " "defaulting to 32 as NodeCardNodeCnt"); bg_conf->nodecard_cnode_cnt = 32; } } if (bg_conf->nodecard_cnode_cnt <= 0) fatal("You should have more than 0 nodes per nodecard"); bg_conf->mp_nodecard_cnt = bg_conf->mp_cnode_cnt / bg_conf->nodecard_cnode_cnt; if (!s_p_get_uint16(&bg_conf->ionodes_per_mp, "IONodesPerMP", tbl)) if (!s_p_get_uint16(&bg_conf->ionodes_per_mp, "Numpsets", tbl)) fatal("Warning: IONodesPerMP not configured " "in bluegene.conf"); s_p_get_uint16(&bg_conf->max_block_err, "MaxBlockInError", tbl); tmp_bool = 0; s_p_get_boolean(&tmp_bool, "SubMidplaneSystem", tbl); bg_conf->sub_mp_sys = tmp_bool; #ifdef HAVE_BGQ tmp_bool = 0; s_p_get_boolean(&tmp_bool, "AllowSubBlockAllocations", tbl); bg_conf->sub_blocks = tmp_bool; /* You can only have 16 ionodes per midplane */ if (bg_conf->ionodes_per_mp > bg_conf->mp_nodecard_cnt) bg_conf->ionodes_per_mp = bg_conf->mp_nodecard_cnt; #endif for (i=0; i<SYSTEM_DIMENSIONS; i++) bg_conf->default_conn_type[i] = (uint16_t)NO_VAL; s_p_get_string(&tmp_char, "DefaultConnType", tbl); if (tmp_char) { verify_conn_type(tmp_char, bg_conf->default_conn_type); if ((bg_conf->default_conn_type[0] != SELECT_MESH) && (bg_conf->default_conn_type[0] != SELECT_TORUS)) fatal("Can't have a DefaultConnType of %s " "(only Mesh or Torus values are valid).", tmp_char); xfree(tmp_char); } else bg_conf->default_conn_type[0] = SELECT_TORUS; #ifndef HAVE_BG_L_P int first_conn_type = bg_conf->default_conn_type[0]; 
for (i=1; i<SYSTEM_DIMENSIONS; i++) { if (bg_conf->default_conn_type[i] == (uint16_t)NO_VAL) bg_conf->default_conn_type[i] = first_conn_type; else if (bg_conf->default_conn_type[i] >= SELECT_SMALL) fatal("Can't have a DefaultConnType of %s " "(only Mesh or Torus values are valid).", tmp_char); } #endif if (bg_conf->ionodes_per_mp) { bitstr_t *tmp_bitmap = NULL; int small_size = 1; /* THIS IS A HACK TO MAKE A 1 NODECARD SYSTEM WORK, * Sometime on a Q system the nodecard isn't in the 0 * spot so only do this if you know it is in that * spot. Otherwise say the whole midplane is there * and just make blocks over the whole thing. They * you can error out the blocks that aren't usable. */ if (bg_conf->sub_mp_sys && bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt) { #ifdef HAVE_BGQ bg_conf->quarter_ionode_cnt = 1; bg_conf->nodecard_ionode_cnt = 1; #else bg_conf->quarter_ionode_cnt = 2; bg_conf->nodecard_ionode_cnt = 2; #endif } else { bg_conf->quarter_ionode_cnt = bg_conf->ionodes_per_mp/4; bg_conf->nodecard_ionode_cnt = bg_conf->quarter_ionode_cnt/4; } /* How many nodecards per ionode */ bg_conf->nc_ratio = ((double)bg_conf->mp_cnode_cnt / (double)bg_conf->nodecard_cnode_cnt) / (double)bg_conf->ionodes_per_mp; /* How many ionodes per nodecard */ bg_conf->io_ratio = (double)bg_conf->ionodes_per_mp / ((double)bg_conf->mp_cnode_cnt / (double)bg_conf->nodecard_cnode_cnt); /* How many cnodes per ionode */ bg_conf->ionode_cnode_cnt = bg_conf->nodecard_cnode_cnt * bg_conf->nc_ratio; //info("got %f %f", bg_conf->nc_ratio, bg_conf->io_ratio); /* figure out the smallest block we can have on the system */ #ifdef HAVE_BGL if (bg_conf->io_ratio >= 1) bg_conf->smallest_block=32; else bg_conf->smallest_block=128; #else if (bg_conf->io_ratio >= 2) bg_conf->smallest_block=16; else if (bg_conf->io_ratio == 1) bg_conf->smallest_block=32; else if (bg_conf->io_ratio == .5) bg_conf->smallest_block=64; else if (bg_conf->io_ratio == .25) bg_conf->smallest_block=128; else if 
(bg_conf->io_ratio == .125) bg_conf->smallest_block=256; else { error("unknown ioratio %f. Can't figure out " "smallest block size, setting it to midplane", bg_conf->io_ratio); bg_conf->smallest_block = 512; } #endif if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE) info("Smallest block possible on this system is %u", bg_conf->smallest_block); /* below we are creating all the possible bitmaps for * each size of small block */ if ((int)bg_conf->nodecard_ionode_cnt < 1) { bg_conf->nodecard_ionode_cnt = 0; } else { bg_lists->valid_small32 = list_create(_destroy_bitmap); /* This is suppose to be = and not ==, we only want to decrement when small_size equals something. */ if ((small_size = bg_conf->nodecard_ionode_cnt)) small_size--; i = 0; while (i<bg_conf->ionodes_per_mp) { tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp); bit_nset(tmp_bitmap, i, i+small_size); i += small_size+1; list_append(bg_lists->valid_small32, tmp_bitmap); } } /* If we only have 1 nodecard just jump to the end since this will never need to happen below. Pretty much a hack to avoid seg fault;). 
*/ if (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt) goto no_calc; bg_lists->valid_small128 = list_create(_destroy_bitmap); if ((small_size = bg_conf->quarter_ionode_cnt)) small_size--; i = 0; while (i<bg_conf->ionodes_per_mp) { tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp); bit_nset(tmp_bitmap, i, i+small_size); i += small_size+1; list_append(bg_lists->valid_small128, tmp_bitmap); } #ifndef HAVE_BGL bg_lists->valid_small64 = list_create(_destroy_bitmap); if ((small_size = bg_conf->nodecard_ionode_cnt * 2)) small_size--; i = 0; while (i<bg_conf->ionodes_per_mp) { tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp); bit_nset(tmp_bitmap, i, i+small_size); i += small_size+1; list_append(bg_lists->valid_small64, tmp_bitmap); } bg_lists->valid_small256 = list_create(_destroy_bitmap); if ((small_size = bg_conf->quarter_ionode_cnt * 2)) small_size--; i = 0; while (i<bg_conf->ionodes_per_mp) { tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp); bit_nset(tmp_bitmap, i, i+small_size); i += small_size+1; list_append(bg_lists->valid_small256, tmp_bitmap); } #endif } else { fatal("your ionodes_per_mp is 0"); } no_calc: if (!s_p_get_uint16(&bg_conf->bridge_api_verb, "BridgeAPIVerbose", tbl)) info("Warning: BridgeAPIVerbose not configured " "in bluegene.conf"); if (!s_p_get_string(&bg_conf->bridge_api_file, "BridgeAPILogFile", tbl)) info("BridgeAPILogFile not configured in bluegene.conf"); else _reopen_bridge_log(); if (s_p_get_string(&tmp_char, "DenyPassthrough", tbl)) { if (strstr(tmp_char, "A")) ba_deny_pass |= PASS_DENY_A; if (strstr(tmp_char, "X")) ba_deny_pass |= PASS_DENY_X; if (strstr(tmp_char, "Y")) ba_deny_pass |= PASS_DENY_Y; if (strstr(tmp_char, "Z")) ba_deny_pass |= PASS_DENY_Z; if (!xstrcasecmp(tmp_char, "ALL")) ba_deny_pass |= PASS_DENY_ALL; bg_conf->deny_pass = ba_deny_pass; xfree(tmp_char); } if (!s_p_get_string(&tmp_char, "LayoutMode", tbl)) { info("Warning: LayoutMode was not specified in bluegene.conf " "defaulting to STATIC partitioning"); 
bg_conf->layout_mode = LAYOUT_STATIC; } else { if (!xstrcasecmp(tmp_char,"STATIC")) bg_conf->layout_mode = LAYOUT_STATIC; else if (!xstrcasecmp(tmp_char,"OVERLAP")) bg_conf->layout_mode = LAYOUT_OVERLAP; else if (!xstrcasecmp(tmp_char,"DYNAMIC")) bg_conf->layout_mode = LAYOUT_DYNAMIC; else { fatal("I don't understand this LayoutMode = %s", tmp_char); } xfree(tmp_char); } /* add blocks defined in file */ if (bg_conf->layout_mode != LAYOUT_DYNAMIC) { if (!s_p_get_array((void ***)&blockreq_array, &count, "MPs", tbl)) { if (!s_p_get_array((void ***)&blockreq_array, &count, "BPs", tbl)) { info("WARNING: no blocks defined in " "bluegene.conf, " "only making full system block"); /* create_full_system_block(NULL); */ if (bg_conf->sub_mp_sys || (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)) fatal("On a sub-midplane system you " "need to define the blocks you " "want on your system."); } } for (i = 0; i < count; i++) { add_bg_record(bg_lists->main, NULL, blockreq_array[i], 0, 0); } } else if (bg_conf->sub_mp_sys || (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)) /* we can't do dynamic here on a sub-midplane system */ fatal("On a sub-midplane system we can only do OVERLAP or " "STATIC LayoutMode. 
Please update your bluegene.conf."); #ifdef HAVE_BGQ if ((bg_recover != NOT_FROM_CONTROLLER) && assoc_mgr_qos_list && s_p_get_string(&tmp_char, "RebootQOSList", tbl)) { bool valid; char *token, *last = NULL; slurmdb_qos_rec_t *qos = NULL; assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK }; /* Lock here to avoid g_qos_count changing under us */ assoc_mgr_lock(&locks); bg_conf->reboot_qos_bitmap = bit_alloc(g_qos_count); itr = list_iterator_create(assoc_mgr_qos_list); token = strtok_r(tmp_char, ",", &last); while (token) { valid = false; while((qos = list_next(itr))) { if (!xstrcasecmp(token, qos->name)) { bit_set(bg_conf->reboot_qos_bitmap, qos->id); valid = true; break; } } if (!valid) error("Invalid RebootQOSList value: %s", token); list_iterator_reset(itr); token = strtok_r(NULL, ",", &last); } list_iterator_destroy(itr); xfree(tmp_char); assoc_mgr_unlock(&locks); } #endif s_p_hashtbl_destroy(tbl); return SLURM_SUCCESS; }
int lz77_compress(const unsigned char *original, unsigned char **compressed, int size) { unsigned long token; unsigned char window[LZ77_WINDOW_SIZE], buffer[LZ77_BUFFER_SIZE], *comp, *temp, next; int offset, length, remaining, tbits, hsize, ipos, opos, tpos, i; /***************************************************************************** * * * Make the pointer to the compressed data not valid until later. * * * *****************************************************************************/ *compressed = NULL; /***************************************************************************** * * * Write the header information. * * * *****************************************************************************/ hsize = sizeof(int); if ((comp = (unsigned char *)malloc(hsize)) == NULL) return -1; memcpy(comp, &size, sizeof(int)); /***************************************************************************** * * * Initialize the sliding window and the look-ahead buffer. * * * *****************************************************************************/ memset(window, 0, LZ77_WINDOW_SIZE); memset(buffer, 0, LZ77_BUFFER_SIZE); /***************************************************************************** * * * Load the look-ahead buffer. * * * *****************************************************************************/ ipos = 0; for (i = 0; i < LZ77_BUFFER_SIZE && ipos < size; i++) { buffer[i] = original[ipos]; ipos++; } /***************************************************************************** * * * Compress the data. * * * *****************************************************************************/ opos = hsize * 8; remaining = size; while (remaining > 0) { if ((length = compare_win(window, buffer, &offset, &next)) != 0) { /*********************************************************************** * * * Encode a phrase token. 
* * * ***********************************************************************/ token = 0x00000001 << (LZ77_PHRASE_BITS - 1); /*********************************************************************** * * * Set the offset where the match was found in the sliding window. * * * ***********************************************************************/ token = token | (offset << (LZ77_PHRASE_BITS - LZ77_TYPE_BITS - LZ77_WINOFF_BITS)); /*********************************************************************** * * * Set the length of the match. * * * ***********************************************************************/ token = token | (length << (LZ77_PHRASE_BITS - LZ77_TYPE_BITS - LZ77_WINOFF_BITS - LZ77_BUFLEN_BITS)); /*********************************************************************** * * * Set the next symbol in the look-ahead buffer after the match. * * * ***********************************************************************/ token = token | next; /*********************************************************************** * * * Set the number of bits in the token. * * * ***********************************************************************/ tbits = LZ77_PHRASE_BITS; } else { /*********************************************************************** * * * Encode a symbol token. * * * ***********************************************************************/ token = 0x00000000; /*********************************************************************** * * * Set the unmatched symbol. * * * ***********************************************************************/ token = token | next; /*********************************************************************** * * * Set the number of bits in the token. * * * ***********************************************************************/ tbits = LZ77_SYMBOL_BITS; } /************************************************************************** * * * Ensure that the token is in big-endian format. 
* * * **************************************************************************/ token = htonl(token); /************************************************************************** * * * Write the token to the buffer of compressed data. * * * **************************************************************************/ for (i = 0; i < tbits; i++) { if (opos % 8 == 0) { /******************************************************************** * * * Allocate another byte for the buffer of compressed data. * * * ********************************************************************/ if ((temp = (unsigned char *)realloc(comp,(opos / 8) + 1)) == NULL) { free(comp); return -1; } comp = temp; } tpos = (sizeof(unsigned long) * 8) - tbits + i; bit_set(comp, opos, bit_get((unsigned char *)&token, tpos)); opos++; } /************************************************************************** * * * Adjust the phrase length to account for the unmatched symbol. * * * **************************************************************************/ length++; /************************************************************************** * * * Copy data from the look-ahead buffer to the sliding window. * * * **************************************************************************/ memmove(&window[0], &window[length], LZ77_WINDOW_SIZE - length); memmove(&window[LZ77_WINDOW_SIZE - length], &buffer[0], length); /************************************************************************** * * * Read more data into the look-ahead buffer. * * * **************************************************************************/ memmove(&buffer[0], &buffer[length], LZ77_BUFFER_SIZE - length); for (i = LZ77_BUFFER_SIZE - length; i<LZ77_BUFFER_SIZE && ipos<size; i++) { buffer[i] = original[ipos]; ipos++; } /************************************************************************** * * * Adjust the total symbols remaining by the phrase length. 
* * * **************************************************************************/ remaining = remaining - length; } /***************************************************************************** * * * Point to the buffer of compressed data. * * * *****************************************************************************/ *compressed = comp; /***************************************************************************** * * * Return the number of bytes in the compressed data. * * * *****************************************************************************/ return ((opos - 1) / 8) + 1; }
/**
 * basil_inventory - Periodic node-state query via ALPS XML-RPC.
 * This should be run immediately before each scheduling cycle.
 *
 * Reconciles SLURM node/reservation state with the ALPS inventory:
 * marks nodes DOWN/UP per ALPS, counts SLURM/ALPS allocation mismatches,
 * and releases orphaned ALPS reservations.
 *
 * Returns non-SLURM_SUCCESS if
 *	- INVENTORY method failed (error)
 *	- no nodes are available (no point in scheduling)
 *	- orphaned ALPS reservation exists (wait until ALPS resynchronizes)
 */
extern int basil_inventory(void)
{
	enum basil_version version = get_basil_version();
	struct basil_inventory *inv;
	struct basil_node *node;
	struct basil_rsvn *rsvn;
	int slurm_alps_mismatch = 0;	/* count of nodes ALPS still holds */
	int rc = SLURM_SUCCESS;
	time_t now = time(NULL);
	/* persist across calls: when the mismatch was first seen, and
	 * whether the sync-timeout message was already logged */
	static time_t slurm_alps_mismatch_time = (time_t) 0;
	static bool logged_sync_timeout = false;

	inv = get_full_inventory(version);
	if (inv == NULL) {
		error("BASIL %s INVENTORY failed", bv_names_long[version]);
		return SLURM_ERROR;
	}

	debug("BASIL %s INVENTORY: %d/%d batch nodes available",
	      bv_names_long[version], inv->batch_avail, inv->batch_total);

	/* Avoid checking for inv->batch_avail here since if we are
	   gang scheduling returning an error for a full system is
	   probably the wrong thing to do. (the schedule() function
	   in the slurmctld will never run ;)). */
	if (!inv->f->node_head || !inv->batch_total)
		rc = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;

	for (node = inv->f->node_head; node; node = node->next) {
		int node_inx;
		struct node_record *node_ptr;
		char *reason = NULL;

		/* This will ignore interactive nodes when iterating through
		 * the apbasil inventory.  If we don't do this, SLURM is
		 * unable to resolve the ID to a nidXXX name since it's not
		 * in the slurm.conf file.  (Chris North) */
		if (node->role == BNR_INTER)
			continue;

		node_ptr = _find_node_by_basil_id(node->node_id);
		if (node_ptr == NULL) {
			error("nid%05u (%s node in state %s) not in slurm.conf",
			      node->node_id, nam_noderole[node->role],
			      nam_nodestate[node->state]);
			continue;
		}
		node_inx = node_ptr - node_record_table_ptr;

		if (node_is_allocated(node) && !IS_NODE_ALLOCATED(node_ptr)) {
			/*
			 * ALPS still hangs on to the node while SLURM considers
			 * it already unallocated. Possible causes are partition
			 * cleanup taking too long (can be 10sec ... minutes),
			 * and orphaned ALPS reservations (caught below).
			 *
			 * The converse case (SLURM hanging on to the node while
			 * ALPS has already freed it) happens frequently during
			 * job completion: select_g_job_fini() is called before
			 * make_node_comp(). Rely on SLURM logic for this case.
			 */
			slurm_alps_mismatch++;
		}

		/* Translate the ALPS node condition into a down-reason;
		 * NULL reason means the node is usable for batch work. */
		if (node->state == BNS_DOWN) {
			reason = "ALPS marked it DOWN";
		} else if (node->state == BNS_UNAVAIL) {
			reason = "node is UNAVAILABLE";
		} else if (node->state == BNS_ROUTE) {
			reason = "node does ROUTING";
		} else if (node->state == BNS_SUSPECT) {
			reason = "entered SUSPECT mode";
		} else if (node->state == BNS_ADMINDOWN) {
			reason = "node is ADMINDOWN";
		} else if (node->state != BNS_UP) {
			reason = "state not UP";
		} else if (node->role != BNR_BATCH) {
			reason = "mode not BATCH";
		} else if (node->arch != BNA_XT) {
			reason = "arch not XT/XE";
		}

		/* Base state entirely derives from ALPS */
		if (reason) {
			/* Grace period: mark NO_RESPOND until the node has
			 * been bad for longer than slurmd_timeout, then
			 * take it DOWN (which also kills its jobs). */
			if (node_ptr->down_time == 0)
				node_ptr->down_time = now;
			if (IS_NODE_DOWN(node_ptr)) {
				/* node still down */
			} else if ((slurmctld_conf.slurmd_timeout == 0) ||
				   ((now - node_ptr->down_time) <
				    slurmctld_conf.slurmd_timeout)) {
				node_ptr->node_state |=
					NODE_STATE_NO_RESPOND;
				bit_clear(avail_node_bitmap, node_inx);
			} else {
				xfree(node_ptr->reason);
				info("MARKING %s DOWN (%s)",
				     node_ptr->name, reason);
				/* set_node_down also kills any running jobs */
				set_node_down_ptr(node_ptr, reason);
			}
		} else if (IS_NODE_DOWN(node_ptr)) {
			/* ALPS says the node is healthy again: bring it up. */
			xfree(node_ptr->reason);
			node_ptr->down_time = 0;
			info("MARKING %s UP", node_ptr->name);

			/* Reset state, make_node_idle figures out the rest */
			node_ptr->node_state &= NODE_STATE_FLAGS;
			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
			node_ptr->node_state |= NODE_STATE_UNKNOWN;
			make_node_idle(node_ptr, NULL);
			if (!IS_NODE_DRAIN(node_ptr) &&
			    !IS_NODE_FAIL(node_ptr)) {
				xfree(node_ptr->reason);
				node_ptr->reason_time = 0;
				node_ptr->reason_uid = NO_VAL;
				clusteracct_storage_g_node_up(
					acct_db_conn, node_ptr, now);
			}
		} else if (IS_NODE_NO_RESPOND(node_ptr)) {
			/* Healthy and was only in the grace period:
			 * clear the flag and restore availability. */
			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
			if (!IS_NODE_DRAIN(node_ptr) &&
			    !IS_NODE_FAIL(node_ptr)) {
				bit_set(avail_node_bitmap, node_inx);
			}
		}
	}

	if (slurm_alps_mismatch)
		debug("ALPS: %d node(s) still held", slurm_alps_mismatch);

	/*
	 * Check that each ALPS reservation corresponds to a SLURM job.
	 * Purge orphaned reservations, which may result from stale or
	 * messed up system state, or are indicative of ALPS problems
	 * (stuck in pending cancel calls).
	 */
	for (rsvn = inv->f->rsvn_head; rsvn; rsvn = rsvn->next) {
		ListIterator job_iter = list_iterator_create(job_list);
		struct job_record *job_ptr;
		uint32_t resv_id;

		if (job_iter == NULL)
			fatal("list_iterator_create: malloc failure");

		/* Find the SLURM job whose select data carries this
		 * ALPS reservation ID; job_ptr is NULL if none does. */
		while ((job_ptr = (struct job_record *)list_next(job_iter))) {
			if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
						SELECT_JOBDATA_RESV_ID,
						&resv_id) == SLURM_SUCCESS
			    && resv_id == rsvn->rsvn_id)
				break;
		}
		list_iterator_destroy(job_iter);

		/*
		 * Changed to ignore reservations for "UNKNOWN" batch
		 * ids (e.g. the interactive region) (Chris North)
		 */
		if ((job_ptr == NULL) && (strcmp(rsvn->batch_id, "UNKNOWN"))) {
			error("orphaned ALPS reservation %u, trying to remove",
			      rsvn->rsvn_id);
			basil_safe_release(rsvn->rsvn_id, inv);
			/* NOTE(review): reuses the int counter as a boolean
			 * flag here; any non-zero value triggers the
			 * sync-timeout logic below. */
			slurm_alps_mismatch = true;
		}
	}
	free_inv(inv);

	if (slurm_alps_mismatch) {
		/* If SLURM and ALPS state are not in synchronization,
		 * do not schedule any more jobs until waiting at least
		 * SyncTimeout seconds.
		 */
		if (slurm_alps_mismatch_time == 0) {
			slurm_alps_mismatch_time = now;
		} else if (cray_conf->sync_timeout == 0) {
			/* Wait indefinitely */
		} else if (difftime(now, slurm_alps_mismatch_time) <
			   cray_conf->sync_timeout) {
			return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
		} else if (!logged_sync_timeout) {
			error("Could not synchronize SLURM with ALPS for %u "
			      "seconds, proceeding with job scheduling",
			      cray_conf->sync_timeout);
			logged_sync_timeout = true;
		}
	} else {
		slurm_alps_mismatch_time = 0;
		logged_sync_timeout = false;
	}
	return rc;
}
/* Execute escape sequence. */ int input_esc_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; struct input_table_entry *entry; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: '%c', %s", __func__, ictx->ch, ictx->interm_buf); entry = bsearch(ictx, input_esc_table, nitems(input_esc_table), sizeof input_esc_table[0], input_table_compare); if (entry == NULL) { log_debug("%s: unknown '%c'", __func__, ictx->ch); return (0); } switch (entry->type) { case INPUT_ESC_RIS: memcpy(&ictx->cell, &grid_default_cell, sizeof ictx->cell); memcpy(&ictx->old_cell, &ictx->cell, sizeof ictx->old_cell); ictx->old_cx = 0; ictx->old_cy = 0; screen_write_reset(sctx); break; case INPUT_ESC_IND: screen_write_linefeed(sctx, 0); break; case INPUT_ESC_NEL: screen_write_carriagereturn(sctx); screen_write_linefeed(sctx, 0); break; case INPUT_ESC_HTS: if (s->cx < screen_size_x(s)) bit_set(s->tabs, s->cx); break; case INPUT_ESC_RI: screen_write_reverseindex(sctx); break; case INPUT_ESC_DECKPAM: screen_write_mode_set(sctx, MODE_KKEYPAD); break; case INPUT_ESC_DECKPNM: screen_write_mode_clear(sctx, MODE_KKEYPAD); break; case INPUT_ESC_DECSC: memcpy(&ictx->old_cell, &ictx->cell, sizeof ictx->old_cell); ictx->old_cx = s->cx; ictx->old_cy = s->cy; break; case INPUT_ESC_DECRC: memcpy(&ictx->cell, &ictx->old_cell, sizeof ictx->cell); screen_write_cursormove(sctx, ictx->old_cx, ictx->old_cy); break; case INPUT_ESC_DECALN: screen_write_alignmenttest(sctx); break; case INPUT_ESC_SCSON_G0: /* * Not really supported, but fake it up enough for those that * use it to switch character sets (by redefining G0 to * graphics set, rather than switching to G1). */ ictx->cell.attr &= ~GRID_ATTR_CHARSET; break; case INPUT_ESC_SCSOFF_G0: ictx->cell.attr |= GRID_ATTR_CHARSET; break; } return (0); }
/*
 * _task_layout_lllp_block
 *
 * task_layout_lllp_block will create a block distribution at the
 * lowest level of logical processor which is either socket, core or
 * thread depending on the system architecture. The Block algorithm
 * is the same as the Block distribution performed in srun.
 *
 * Distribution at the lllp:
 * -m hostfile|plane|block|cyclic:block|cyclic
 *
 * The first distribution "hostfile|plane|block|cyclic" is computed
 * in srun. The second distribution "plane|block|cyclic" is computed
 * locally by each slurmd.
 *
 * The input to the lllp distribution algorithms is the gids (tasks
 * ids) generated for the local node.
 *
 * The output is a mapping of the gids onto logical processors
 * (thread/core/socket) which is expressed as cpu_bind masks.
 *
 * Returns SLURM_SUCCESS and fills *masks_p (one bitmap per task,
 * caller owns the array), or SLURM_ERROR if the availability map
 * cannot be built or has too few CPUs for the tasks.
 */
static int _task_layout_lllp_block(launch_tasks_request_msg_t *req,
				   uint32_t node_id, bitstr_t ***masks_p)
{
	int c, i, size, last_taskcount = -1, taskcount = 0;
	uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0;
	int max_tasks = req->tasks_to_launch[(int)node_id];
	int max_cpus = max_tasks * req->cpus_per_task;
	bitstr_t *avail_map;
	bitstr_t **masks = NULL;
	/* per-core / per-socket task counters used to honor
	 * ntasks_per_core and ntasks_per_socket limits */
	int core_inx, pu_per_core, *core_tasks = NULL;
	int sock_inx, pu_per_socket, *socket_tasks = NULL;

	info("_task_layout_lllp_block ");

	avail_map = _get_avail_map(req, &hw_sockets, &hw_cores, &hw_threads);
	if (!avail_map) {
		return SLURM_ERROR;
	}

	size = bit_set_count(avail_map);
	if (size < max_tasks) {
		error("task/affinity: only %d bits in avail_map for %d tasks!",
		      size, max_tasks);
		FREE_NULL_BITMAP(avail_map);
		return SLURM_ERROR;
	}
	if (size < max_cpus) {
		/* Possible result of overcommit */
		i = size / max_tasks;
		info("task/affinity: reset cpus_per_task from %d to %d",
		     req->cpus_per_task, i);
		req->cpus_per_task = i;
	}
	/* from here on, size is the TOTAL number of bits (all PUs),
	 * not the count of set bits */
	size = bit_size(avail_map);

	if ((req->cpu_bind_type & CPU_BIND_ONE_THREAD_PER_CORE) &&
	    (max_cpus > (hw_sockets * hw_cores))) {
		/* More CPUs requested than available cores,
		 * disable core-level binding */
		req->cpu_bind_type &= (~CPU_BIND_ONE_THREAD_PER_CORE);
	}

	*masks_p = xmalloc(max_tasks * sizeof(bitstr_t*));
	masks = *masks_p;

	pu_per_core = hw_threads;
	core_tasks = xmalloc(sizeof(int) * hw_sockets * hw_cores);
	pu_per_socket = hw_cores * hw_threads;
	socket_tasks = xmalloc(sizeof(int) * hw_sockets);

	/* block distribution with oversubscription: keep sweeping the
	 * abstract map until every task has a mask */
	c = 0;
	while (taskcount < max_tasks) {
		/* a full sweep that places no task means no progress
		 * is possible */
		if (taskcount == last_taskcount)
			fatal("_task_layout_lllp_block infinite loop");
		if (taskcount > 0) {
			/* Clear counters to over-subscribe, if necessary */
			memset(core_tasks, 0,
			       (sizeof(int) * hw_sockets * hw_cores));
			memset(socket_tasks, 0, (sizeof(int) * hw_sockets));
		}
		last_taskcount = taskcount;
		/* the abstract map is already laid out in block order,
		 * so just iterate over it */
		for (i = 0; i < size; i++) {
			/* skip unavailable resources */
			if (bit_test(avail_map, i) == 0)
				continue;
			/* enforce per-core task limit */
			core_inx = i / pu_per_core;
			if ((req->ntasks_per_core != 0) &&
			    (core_tasks[core_inx] >= req->ntasks_per_core))
				continue;
			/* enforce per-socket task limit */
			sock_inx = i / pu_per_socket;
			if ((req->ntasks_per_socket != 0) &&
			    (socket_tasks[sock_inx] >=
			     req->ntasks_per_socket))
				continue;

			core_tasks[core_inx]++;
			socket_tasks[sock_inx]++;

			if (!masks[taskcount])
				masks[taskcount] =
					bit_alloc(conf->block_map_size);
			//info("setting %d %d", taskcount, i);
			bit_set(masks[taskcount], i);

			/* skip unrequested threads */
			if (req->cpu_bind_type & CPU_BIND_ONE_THREAD_PER_CORE)
				i += hw_threads - 1;

			if (++c < req->cpus_per_task)
				continue;
			/* Binding to cores, skip remaining of the threads */
			if (!(req->cpu_bind_type &
			      CPU_BIND_ONE_THREAD_PER_CORE) &&
			    ((req->cpu_bind_type & CPU_BIND_TO_CORES) ||
			     (req->ntasks_per_core == 1))) {
				int threads_not_used;
				if (req->cpus_per_task < hw_threads)
					threads_not_used =
						hw_threads -
						req->cpus_per_task;
				else
					threads_not_used =
						req->cpus_per_task %
						hw_threads;
				i += threads_not_used;
			}
			c = 0;
			if (++taskcount >= max_tasks)
				break;
		}
	}
	xfree(core_tasks);
	xfree(socket_tasks);

	/* last step: expand the masks to bind each task
	 * to the requested resource */
	_expand_masks(req->cpu_bind_type, max_tasks, masks,
		      hw_sockets, hw_cores, hw_threads, avail_map);
	FREE_NULL_BITMAP(avail_map);

	return SLURM_SUCCESS;
}