/*
 * Exercise the linked-list API end to end: create/length, insert/get,
 * remove, pop, append, prepend, insert_sorted, print/print_reverse,
 * remove_all, delete.
 *
 * Returns 1 when every check passes; prints a diagnostic and returns 0
 * on the first failure.
 *
 * Fixes over the previous version: several failure messages were
 * copy-paste errors (claiming the wrong expected length, or blaming
 * list_append/list_length for a list_get failure) and the remove_all
 * messages had an unbalanced parenthesis.
 */
int test_list() {
    int value = 0;

    puts("starting tests");
    puts("##########################################");
    puts("starting linked list tests");
    puts("##########################################");

    struct List *list = list_create();
    puts("empty list created");

    if (list_length(list) != 0) { printf("list_length of empty list should be zero\n"); return 0; }
    puts("list_length ok");

    /* Insert value 101 at index 0 and test functions */
    list_insert(list, 0, 101);
    if (list_length(list) != 1) { printf("list_length should be 1\n"); return 0; }
    if (list_get(list, 0, &value) == 0) { printf("Error in list_get (1)\n"); return 0; }
    if (value != 101) { printf("list_get should return value 101\n"); return 0; }

    /* Insert value 202 at index 0 and test functions */
    list_insert(list, 0, 202);
    if (list_length(list) != 2) { printf("list_length should return 2\n"); return 0; }
    if (list_get(list, 0, &value) == 0) { printf("Error in list_get (2)\n"); return 0; }
    if (value != 202) { printf("list_get should return 202\n"); return 0; }
    puts("list_get ok");

    /* Test remove function: removing a valid index succeeds, an
     * out-of-range index fails and leaves the length unchanged. */
    if (list_remove(list, 1) == 0) { printf("Error in list_remove\n"); return 0; }
    if (list_length(list) != 1) { printf("list_length should return 1 (after remove)\n"); return 0; }
    if (list_remove(list, 1) != 0) { printf("Error in list_remove\n"); return 0; }
    if (list_length(list) != 1) { printf("list_length should return 1 (after remove)\n"); return 0; }
    if (list_remove(list, 0) == 0) { printf("Error in list_remove\n"); return 0; }
    if (list_length(list) != 0) { printf("list_length should return 0 (after remove)\n"); return 0; }
    if (list_remove(list, 0) != 0) { printf("Error in list_remove\n"); return 0; }
    if (list_length(list) != 0) { printf("list_length should return 0 (after remove)\n"); return 0; }
    puts("list_remove ok");

    /* Test pop function: pop on an empty list must fail. */
    if (list_pop(list, &value) != 0) { printf("Error in list_pop\n"); return 0; }
    list_append(list, 202);
    if (list_pop(list, &value) == 0) { printf("Error in list_pop\n"); return 0; }
    if (value != 202) { printf("list_pop should return 202\n"); return 0; }
    if (list_length(list) != 0) { printf("list_length should return 0 (after pop)\n"); return 0; }
    puts("list_pop ok");

    /* test list_append(): appended order must be preserved. */
    list_append(list, -5);
    list_append(list, 0);
    list_append(list, 15);
    if (list_length(list) != 3) { printf("list_length should return 3 (after append)\n"); return 0; }
    if (list_get(list, 0, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != -5) { printf("list_get should return -5\n"); return 0; }
    if (list_get(list, 1, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 0) { printf("list_get should return 0\n"); return 0; }
    if (list_get(list, 2, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 15) { printf("list_get should return 15\n"); return 0; }
    /* Drain the list; a get on the now-empty list must fail. */
    if (list_pop(list, &value) == 0) { printf("Error in list_pop\n"); return 0; }
    if (list_pop(list, &value) == 0) { printf("Error in list_pop\n"); return 0; }
    if (list_pop(list, &value) == 0) { printf("Error in list_pop\n"); return 0; }
    if (list_get(list, 0, &value) != 0) { printf("Error in list_get\n"); return 0; }
    puts("list_append ok");

    /* test list_prepend: prepended order is reversed. */
    list_prepend(list, -5);
    list_prepend(list, 0);
    list_prepend(list, 15);
    if (list_length(list) != 3) { printf("list_length should return 3 (after prepend)\n"); return 0; }
    if (list_get(list, 0, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 15) { printf("list_get should return 15\n"); return 0; }
    if (list_get(list, 1, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 0) { printf("list_get should return 0\n"); return 0; }
    if (list_get(list, 2, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != -5) { printf("list_get should return -5\n"); return 0; }
    puts("list_prepend ok");

    /* test list insert: negative index clamps to the front, an index
     * past the end clamps to the back. */
    list_insert(list, -5, 0);
    if (list_length(list) != 4) { printf("list_length should return 4\n"); return 0; }
    if (list_get(list, 0, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 0) { printf("list_get should return 0\n"); return 0; }
    list_insert(list, 2, 100);
    if (list_length(list) != 5) { printf("list_length should return 5\n"); return 0; }
    if (list_get(list, 2, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 100) { printf("list_get should return 100\n"); return 0; }
    list_insert(list, 10, 500);
    if (list_length(list) != 6) { printf("list_length should return 6\n"); return 0; }
    if (list_get(list, 5, &value) != 1) { printf("Error in list_get\n"); return 0; }
    if (value != 500) { printf("list_get should return 500\n"); return 0; }
    puts("list_insert ok");

    /* test insert sorted: build 0,1,2,3,4,6 then sorted-insert -1, 5, 7
     * so the list becomes -1..7 in order. */
    for (int i = 0; i < 6; i++) list_remove(list, 0);
    for (int i = 0; i < 5; i++) list_append(list, i);
    list_append(list, 6);
    if (list_length(list) != 6) { printf("list_length should return 6\n"); return 0; }
    list_insert_sorted(list, -1);
    list_insert_sorted(list, 5);
    list_insert_sorted(list, 7);
    for (int i = -1; i < 8; i++) {
        /* Element at index i+1 must be i; only logs on mismatch. */
        list_get(list, i + 1, &value);
        if (value != i)
            printf("error in list insert sorted\n");
    }
    puts("list_insert_sorted ok");

    /* test print and print reversed (visual check only) */
    puts("print current list, should be sorted");
    list_print(list);
    puts("printing reversed list");
    list_print_reverse(list);
    puts("check print and print_reversed for yourself!");

    /* test list remove all: all-matching list becomes empty. */
    for (int i = 0; i < 9; i++) list_remove(list, 0);
    for (int i = 0; i < 5; i++) list_append(list, 5);
    list_remove_all(list, 5);
    if (list_length(list) != 0) { printf("list_length should return 0 (list remove all doesn't work)\n"); return 0; }

    /* Non-matching element at the front survives. */
    for (int i = 0; i < 9; i++) list_remove(list, 0);
    for (int i = 0; i < 5; i++) list_append(list, 5);
    list_insert(list, -1, 0);
    list_remove_all(list, 5);
    if (list_length(list) != 1) { printf("list_length should return 1 (list remove all doesn't work)\n"); return 0; }

    /* Non-matching element in the middle survives. */
    for (int i = 0; i < 9; i++) list_remove(list, 0);
    for (int i = 0; i < 5; i++) list_append(list, 5);
    list_insert(list, 3, 0);
    list_remove_all(list, 5);
    if (list_length(list) != 1) { printf("list_length should return 1 (list remove all doesn't work)\n"); return 0; }

    /* Non-matching element at the back survives. */
    for (int i = 0; i < 9; i++) list_remove(list, 0);
    for (int i = 0; i < 5; i++) list_append(list, 5);
    list_insert(list, 10, 0);
    list_remove_all(list, 5);
    if (list_length(list) != 1) { printf("list_length should return 1 (list remove all doesn't work)\n"); return 0; }
    puts("list_remove_all ok");

    puts("##########################################");
    puts("all tests of linked lists completed");
    puts("##########################################");
    puts("------------------------------------------");
    list_delete(list);
    return 1;
}
/*
 * Initialize an RTL8139 network card found on the PCI bus.
 *
 * Enables PCI bus mastering, installs the IRQ handler, locates the I/O
 * base from BAR0, resets the chip, allocates rx/tx DMA buffers, programs
 * the interrupt mask and rx/tx configuration, then sends a DHCP discover
 * and decodes one received frame before handing the card to the network
 * worker tasklet.
 *
 * Returns 0 on success, -1 when no RTL8139 was discovered
 * (rtl_device_pci unset).
 */
int init_rtl(void) {
	if (rtl_device_pci) {
		debug_print(NOTICE, "Located an RTL 8139: 0x%x\n", rtl_device_pci);

		/* Make sure bus mastering is on so the card can DMA. */
		uint16_t command_reg = pci_read_field(rtl_device_pci, PCI_COMMAND, 4);
		debug_print(NOTICE, "COMMAND register before: 0x%4x\n", command_reg);
		if (command_reg & (1 << 2)) {
			debug_print(NOTICE, "Bus mastering already enabled.\n");
		} else {
			command_reg |= (1 << 2); /* bit 2 */
			debug_print(NOTICE, "COMMAND register after: 0x%4x\n", command_reg);
			pci_write_field(rtl_device_pci, PCI_COMMAND, 4, command_reg);
			/* Read back to confirm the write took effect. */
			command_reg = pci_read_field(rtl_device_pci, PCI_COMMAND, 4);
			debug_print(NOTICE, "COMMAND register after: 0x%4x\n", command_reg);
		}

		/* Hook the card's interrupt line. */
		rtl_irq = pci_read_field(rtl_device_pci, PCI_INTERRUPT_LINE, 1);
		debug_print(NOTICE, "Interrupt Line: %x\n", rtl_irq);
		irq_install_handler(rtl_irq, rtl_irq_handler);

		uint32_t rtl_bar0 = pci_read_field(rtl_device_pci, PCI_BAR0, 4);
		uint32_t rtl_bar1 = pci_read_field(rtl_device_pci, PCI_BAR1, 4);
		debug_print(NOTICE, "BAR0: 0x%8x\n", rtl_bar0);
		debug_print(NOTICE, "BAR1: 0x%8x\n", rtl_bar1);

		/* BAR0 bit 0 set means an I/O-space BAR; mask off the low
		 * flag bits to get the port base. */
		rtl_iobase = 0x00000000;
		if (rtl_bar0 & 0x00000001) {
			rtl_iobase = rtl_bar0 & 0xFFFFFFFC;
		} else {
			debug_print(NOTICE, "This doesn't seem right! RTL8139 should be using an I/O BAR; this looks like a memory bar.");
		}
		debug_print(NOTICE, "RTL iobase: 0x%x\n", rtl_iobase);

		rx_wait = list_create();

		/* MAC address is readable one byte at a time from the MAC ports. */
		debug_print(NOTICE, "Determining mac address...\n");
		for (int i = 0; i < 6; ++i) {
			mac[i] = inports(rtl_iobase + RTL_PORT_MAC + i);
		}
		debug_print(NOTICE, "%2x:%2x:%2x:%2x:%2x:%2x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

		debug_print(NOTICE, "Enabling RTL8139.\n");
		outportb(rtl_iobase + RTL_PORT_CONFIG, 0x0);

		/* Software reset; bit 0x10 of the command register clears
		 * when the reset completes, so spin until then. */
		debug_print(NOTICE, "Resetting RTL8139.\n");
		outportb(rtl_iobase + RTL_PORT_CMD, 0x10);
		while ((inportb(rtl_iobase + 0x37) & 0x10) != 0) { }
		debug_print(NOTICE, "Done resetting RTL8139.\n");

		/* Allocate 5 one-page transmit buffers (with their physical
		 * addresses for DMA) and pre-fill the 60-byte minimum frame
		 * area with a pad pattern. */
		for (int i = 0; i < 5; ++i) {
			rtl_tx_buffer[i] = (void*)kvmalloc_p(0x1000, &rtl_tx_phys[i]);
			for (int j = 0; j < 60; ++j) {
				rtl_tx_buffer[i][j] = 0xF0;
			}
		}

		/* 0x3000-byte receive ring, zeroed. */
		rtl_rx_buffer = (uint8_t *)kvmalloc_p(0x3000, &rtl_rx_phys);
		memset(rtl_rx_buffer, 0x00, 0x3000);

		debug_print(NOTICE, "Buffers:\n");
		debug_print(NOTICE, " rx 0x%x [phys 0x%x and 0x%x and 0x%x]\n", rtl_rx_buffer, rtl_rx_phys, map_to_physical((uintptr_t)rtl_rx_buffer + 0x1000), map_to_physical((uintptr_t)rtl_rx_buffer + 0x2000));
		for (int i = 0; i < 5; ++i) {
			debug_print(NOTICE, " tx 0x%x [phys 0x%x]\n", rtl_tx_buffer[i], rtl_tx_phys[i]);
		}

		/* Point the card at the receive ring (physical address). */
		debug_print(NOTICE, "Initializing receive buffer.\n");
		outportl(rtl_iobase + RTL_PORT_RBSTART, rtl_rx_phys);

		/* Unmask every interrupt source. */
		debug_print(NOTICE, "Enabling IRQs.\n");
		outports(rtl_iobase + RTL_PORT_IMR,
			0x8000 | /* PCI error */
			0x4000 | /* PCS timeout */
			0x40   | /* Rx FIFO over */
			0x20   | /* Rx underrun */
			0x10   | /* Rx overflow */
			0x08   | /* Tx error */
			0x04   | /* Tx okay */
			0x02   | /* Rx error */
			0x01     /* Rx okay */
			); /* TOK, ROK */

		debug_print(NOTICE, "Configuring transmit\n");
		outportl(rtl_iobase + RTL_PORT_TCR, 0 );

		debug_print(NOTICE, "Configuring receive buffer.\n");
		outportl(rtl_iobase + RTL_PORT_RCR,
			(0)  | /* 8K receive */
			0x08 | /* broadcast */
			0x01   /* all physical */
			);

		/* Command register: 0x08 = receiver enable, 0x04 = transmitter enable. */
		debug_print(NOTICE, "Enabling receive and transmit.\n");
		outportb(rtl_iobase + RTL_PORT_CMD, 0x08 | 0x04);

		debug_print(NOTICE, "Resetting rx stats\n");
		outportl(rtl_iobase + RTL_PORT_RXMISS, 0);

		net_queue = list_create();

#if 1
		{
			/* Transmit a DHCP discover via the next tx descriptor;
			 * writing the size to TXSTAT starts the transmission. */
			debug_print(NOTICE, "Sending DHCP discover\n");
			size_t packet_size = write_dhcp_packet(rtl_tx_buffer[next_tx]);
			outportl(rtl_iobase + RTL_PORT_TXBUF + 4 * next_tx, rtl_tx_phys[next_tx]);
			outportl(rtl_iobase + RTL_PORT_TXSTAT + 4 * next_tx, packet_size);
			next_tx++;
			if (next_tx == 4) {
				next_tx = 0;
			}
		}
		{
			/* Block for one received frame and walk its headers:
			 * Ethernet -> IPv4 -> UDP -> DHCP, logging each layer.
			 * NOTE(review): no validation that the frame actually is
			 * IPv4/UDP/DHCP before casting — assumes the first reply
			 * is the DHCP offer. */
			struct ethernet_packet * eth = (struct ethernet_packet *)rtl_dequeue();
			uint16_t eth_type = ntohs(eth->type);
			debug_print(NOTICE, "Ethernet II, Src: (%2x:%2x:%2x:%2x:%2x:%2x), Dst: (%2x:%2x:%2x:%2x:%2x:%2x) [type=%4x)\n", eth->source[0], eth->source[1], eth->source[2], eth->source[3], eth->source[4], eth->source[5], eth->destination[0], eth->destination[1], eth->destination[2], eth->destination[3], eth->destination[4], eth->destination[5], eth_type);
			struct ipv4_packet * ipv4 = (struct ipv4_packet *)eth->payload;
			uint32_t src_addr = ntohl(ipv4->source);
			uint32_t dst_addr = ntohl(ipv4->destination);
			uint16_t length = ntohs(ipv4->length);
			char src_ip[16];
			char dst_ip[16];
			ip_ntoa(src_addr, src_ip);
			ip_ntoa(dst_addr, dst_ip);
			debug_print(NOTICE, "IP packet [%s → %s] length=%d bytes\n", src_ip, dst_ip, length);
			struct udp_packet * udp = (struct udp_packet *)ipv4->payload;;
			uint16_t src_port = ntohs(udp->source_port);
			uint16_t dst_port = ntohs(udp->destination_port);
			uint16_t udp_len = ntohs(udp->length);
			debug_print(NOTICE, "UDP [%d → %d] length=%d bytes\n", src_port, dst_port, udp_len);
			struct dhcp_packet * dhcp = (struct dhcp_packet *)udp->payload;
			uint32_t yiaddr = ntohl(dhcp->yiaddr);
			char yiaddr_ip[16];
			ip_ntoa(yiaddr, yiaddr_ip);
			debug_print(NOTICE, "DHCP Offer: %s\n", yiaddr_ip);
			free(eth);
		}
#endif

		debug_print(NOTICE, "Card is configured, going to start worker thread now.\n");
		debug_print(NOTICE, "Initializing netif functions\n");
		init_netif_funcs(rtl_get_mac, rtl_get_packet, rtl_send_packet);
		create_kernel_tasklet(net_handler, "[eth]", NULL);
		debug_print(NOTICE, "Back from starting the worker thread.\n");
	} else {
		return -1;
	}
	return 0;
}
/* called from dsl */
/*
 * Sync all dirty state of an objset for the current transaction group:
 * issue the root-block arc_write, sync the special (meta/userused/
 * groupused) dnodes, sync the free and dirty dnode lists, issue every
 * dirty record of the meta-dnode, and sync the intent log.
 * The root zio is issued last via zio_nowait(); pio is its parent.
 */
void dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
	    NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	/* The user/group accounting dnodes only exist on pools with the
	 * userused feature; both are synced together. */
	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	/* Issue every dirty record of the meta-dnode for this txg;
	 * records are popped off the list as they are issued. */
	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list))) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}
/*
 * Manual smoke test for the string linked list: add/nth/last/remove,
 * move_up/move_down, print helpers, and destruction. Results are
 * printed with the expected value so they can be eyeballed.
 */
void test_linked_list() {
	char* word_x = "xylophone";
	char* word_y = "yankee";
	char* word_z = "zebra";
	List* lst = list_create();

	printf("initial list size: %d (expects 0)\n\n", list_length(lst));

	/* Populate and verify order of access by index and via last(). */
	list_add(lst, word_x);
	list_add(lst, word_y);
	list_add(lst, word_z);
	printf("list size after adds: %d (expects 3)\n", list_length(lst));
	printf("text of first item: %s (expects xylophone)\n", (char*) (list_nth(lst, 0)));
	printf("text of second item: %s (expects yankee)\n", (char*) (list_nth(lst, 1)));
	printf("text of third item: %s (expects zebra)\n\n", (char*) (list_nth(lst, 2)));
	printf("text of last item: %s (expects zebra)\n\n", (char*) (list_last(lst)));

	/* Remove from the middle, then keep removing until empty. */
	list_remove(lst, 1);
	printf("list size after remove: %d (expects 2)\n", list_length(lst));
	printf("text of first item: %s (expects xylophone)\n", (char*) (list_nth(lst, 0)));
	printf("text of second item: %s (expects zebra)\n\n", (char*) (list_nth(lst, 1)));

	list_remove(lst, 1);
	printf("list size after remove: %d (expects 1)\n", list_length(lst));
	printf("text of first item: %s (expects xylophone)\n\n", (char*) (list_nth(lst, 0)));

	list_remove(lst, 0);
	printf("list size after remove: %d (expects 0)\n\n", list_length(lst));

	/* Re-populate and exercise the reordering helpers. */
	list_add(lst, word_x);
	list_add(lst, word_y);
	list_add(lst, word_z);
	printf("list: ");
	print_string_list(lst);
	printf("\n");

	list_move_up(lst, 0);
	printf("\nmove up 0: ");
	print_string_list(lst);
	printf("\n (expects xylophone yankee zebra)");

	list_move_up(lst, 1);
	printf("\nmove up 1: ");
	print_string_list(lst);
	printf("\n (expects yankee xylophone zebra)");

	list_move_up(lst, 2);
	printf("\nmove up 2: ");
	print_string_list(lst);
	printf("\n (expects yankee zebra xylophone)\n");

	list_move_down(lst, 2);
	printf("\nmove down 2: ");
	print_string_list(lst);
	printf("\n (expects yankee zebra xylophone)");

	list_move_down(lst, 1);
	printf("\nmove down 1: ");
	print_string_list(lst);
	printf("\n (expects yankee xylophone zebra)");

	list_move_down(lst, 0);
	printf("\nmove down 0: ");
	print_string_list(lst);
	printf("\n (expects xylophone yankee zebra)\n\n");

	/* Moving the only element in either direction is a no-op. */
	list_remove(lst, 0);
	list_remove(lst, 0);
	list_move_up(lst, 0);
	list_move_down(lst, 0);
	printf("one item move: ");
	print_string_list(lst);
	printf("\n (expects zebra)\n\n");

	list_destruct(lst);
	printf("list destructed.\n\n");
}
static void elf_file_read_section_headers(struct elf_file_t *elf_file) { int i, count; struct elf_buffer_t *buffer; struct elf_section_t *section; Elf32_Ehdr *elf_header; /* Create section list */ elf_file->section_list = list_create(); if (!elf_file->section_list) fatal("%s: out of memory", __FUNCTION__); /* Check section size and number */ buffer = &elf_file->buffer; elf_header = elf_file->header; if (!elf_header->e_shnum || elf_header->e_shentsize != sizeof(Elf32_Shdr)) fatal("%s: number of sections is 0 or section size is not %d", elf_file->path, (int) sizeof(Elf32_Shdr)); /* Read section headers */ elf_buffer_seek(buffer, elf_header->e_shoff); for (i = 0; i < elf_header->e_shnum; i++) { /* Allocate section */ section = calloc(1, sizeof(struct elf_section_t)); section->header = elf_buffer_tell(buffer); /* Advance buffer */ count = elf_buffer_read(buffer, NULL, sizeof(Elf32_Shdr)); if (count < sizeof(Elf32_Shdr)) fatal("%s: unexpected end of file while reading section headers", elf_file->path); /* Get section contents, if section type is not SHT_NOBITS (8) */ if (section->header->sh_type != 8) { section->buffer.ptr = buffer->ptr + section->header->sh_offset; section->buffer.size = section->header->sh_size; section->buffer.pos = 0; assert(section->buffer.ptr >= buffer->ptr); if (section->buffer.ptr + section->buffer.size > buffer->ptr + buffer->size) fatal("section %d out of the ELF boundaries (offs=0x%x, size=%u, ELF_size=%u)", i, section->header->sh_offset, section->header->sh_size, buffer->size); } /* Add section to list */ list_add(elf_file->section_list, section); } /* Read string table, and update section names */ assert(elf_header->e_shstrndx < elf_header->e_shnum); elf_file->string_table = list_get(elf_file->section_list, elf_header->e_shstrndx); assert(elf_file->string_table->header->sh_type == 3); for (i = 0; i < list_count(elf_file->section_list); i++) { section = list_get(elf_file->section_list, i); section->name = elf_file->string_table->buffer.ptr + 
section->header->sh_name; } /* Dump section headers */ elf_debug("Section headers:\n"); elf_debug("idx type flags addr offset size link name\n"); for (i = 0; i < 80; i++) elf_debug("-"); elf_debug("\n"); for (i = 0; i < list_count(elf_file->section_list); i++) { section = list_get(elf_file->section_list, i); elf_debug("%-3d %-4u %-5u 0x%-8x 0x%-8x %-9u %-8u %s\n", i, section->header->sh_type, section->header->sh_flags, section->header->sh_addr, section->header->sh_offset, section->header->sh_size, section->header->sh_link, section->name); } elf_debug("\n"); }
/* Perform job initiation work.
 *
 * Looks up the block a queued job should run on, frees any overlapping
 * blocks, optionally modifies images / reboots the block, boots it if
 * necessary, and finally clears JOB_CONFIGURING once the block is up.
 * On unrecoverable problems the job is requeued or failed.
 *
 * Fix: the !bg_record error path dereferenced the NULL bg_record
 * (bg_record->modifying = 0) before unlocking — that write is removed.
 */
static void _start_agent(bg_action_t *bg_action_ptr)
{
	int rc, set_user_rc = SLURM_SUCCESS;
	bg_record_t *bg_record = NULL;
	bg_record_t *found_record = NULL;
	ListIterator itr;
	List delete_list = NULL;
	int requeue_job = 0;
	uint32_t req_job_id = bg_action_ptr->job_ptr->job_id;
	bool block_inited = 0;
	bool delete_it = 0;

	slurm_mutex_lock(&block_state_mutex);
	bg_record = find_bg_record_in_list(bg_lists->main,
					   bg_action_ptr->bg_block_id);

	if (!bg_record) {
		/* Block vanished: requeue the job. (Previously this branch
		 * wrote through the NULL pointer before unlocking.) */
		slurm_mutex_unlock(&block_state_mutex);
		error("block %s not found in bg_lists->main",
		      bg_action_ptr->bg_block_id);
		bg_requeue_job(req_job_id, 1, 0, JOB_BOOT_FAIL, false);
		return;
	}

	if ((bg_record->job_running <= NO_JOB_RUNNING)
	    && !find_job_in_bg_record(bg_record, req_job_id)) {
		bg_record->modifying = 0;
		// bg_reset_block(bg_record); should already happened
		slurm_mutex_unlock(&block_state_mutex);
		debug("job %u finished during the queueing job "
		      "(everything is ok)",
		      req_job_id);
		return;
	}

	if ((bg_record->state == BG_BLOCK_TERM) || bg_record->free_cnt) {
		/* It doesn't appear state of a small block
		   (conn_type) is held on a BGP system so if we to
		   reset it so, just set the reboot flag and handle it
		   later in that code. */
		bg_action_ptr->reboot = 1;
	}

	/* Collect every allocated overlapping block that must be freed
	 * before this block can start; bail out if any of them still has
	 * a running job. */
	delete_list = list_create(NULL);
	itr = list_iterator_create(bg_lists->main);
	while ((found_record = list_next(itr))) {
		if (bg_record == found_record)
			continue;

		if (!blocks_overlap(bg_record, found_record)) {
			debug2("block %s isn't part of %s",
			       found_record->bg_block_id,
			       bg_record->bg_block_id);
			continue;
		}

		if (found_record->job_ptr
		    || (found_record->job_list
			&& list_count(found_record->job_list))) {
			struct job_record *job_ptr = found_record->job_ptr;
			if (!found_record->job_ptr)
				job_ptr = find_job_in_bg_record(
					found_record, NO_VAL);
			error("Trying to start job %u on block %s, "
			      "but there is a job %u running on an overlapping "
			      "block %s it will not end until %ld. "
			      "This should never happen.",
			      req_job_id,
			      bg_record->bg_block_id,
			      job_ptr->job_id,
			      found_record->bg_block_id,
			      job_ptr->end_time);
			requeue_job = 1;
			break;
		}

		debug2("need to make sure %s is free, it's part of %s",
		       found_record->bg_block_id,
		       bg_record->bg_block_id);
		list_push(delete_list, found_record);
	}
	list_iterator_destroy(itr);

	if (requeue_job) {
		FREE_NULL_LIST(delete_list);
		bg_reset_block(bg_record, bg_action_ptr->job_ptr);
		bg_record->modifying = 0;
		slurm_mutex_unlock(&block_state_mutex);
		bg_requeue_job(req_job_id, 0, 0, JOB_BOOT_FAIL, false);
		return;
	}

	slurm_mutex_unlock(&block_state_mutex);

	if (bg_conf->layout_mode == LAYOUT_DYNAMIC)
		delete_it = 1;
	free_block_list(req_job_id, delete_list, delete_it, 1);
	FREE_NULL_LIST(delete_list);

	while (1) {
		slurm_mutex_lock(&block_state_mutex);
		/* Failure will unlock block_state_mutex so no need to
		   unlock before return.  No need to reset modifying
		   here if the block doesn't exist. */
		if (!_make_sure_block_still_exists(bg_action_ptr, bg_record)) {
			error("Problem with deallocating blocks to run job %u "
			      "on block %s",
			      req_job_id, bg_action_ptr->bg_block_id);
			return;
		}
		/* If another thread is freeing this block we need to
		   wait until it is done or we will get into a state
		   where this job will be killed. */
		if (!bg_record->free_cnt)
			break;
		debug("Waiting for block %s to free for job %u. "
		      "%d thread(s) trying to free it",
		      bg_record->bg_block_id, req_job_id,
		      bg_record->free_cnt);
		slurm_mutex_unlock(&block_state_mutex);
		sleep(1);
	}
	/* This was set in the start_job function to close the above
	   window where a job could be mistakenly requeued if another
	   thread is trying to free this block as we are trying to run
	   on it, which is fine since we will reboot it later. */
	bg_record->modifying = 0;

	if ((bg_record->job_running <= NO_JOB_RUNNING)
	    && !find_job_in_bg_record(bg_record, req_job_id)) {
		// bg_reset_block(bg_record); should already happened
		slurm_mutex_unlock(&block_state_mutex);
		debug("job %u already finished before boot", req_job_id);
		return;
	}

	if (bg_record->job_list
	    && (bg_action_ptr->job_ptr->total_cpus != bg_record->cpu_cnt)
	    && (list_count(bg_record->job_list) != 1)) {
		/* We don't allow modification of a block or reboot of
		   a block if we are running multiple jobs on the
		   block. */
		debug2("no reboot");
		goto no_reboot;
	}

	rc = 0;
#ifdef HAVE_BGL
	if (bg_action_ptr->blrtsimage
	    && xstrcasecmp(bg_action_ptr->blrtsimage, bg_record->blrtsimage)) {
		debug3("changing BlrtsImage from %s to %s",
		       bg_record->blrtsimage, bg_action_ptr->blrtsimage);
		xfree(bg_record->blrtsimage);
		bg_record->blrtsimage = xstrdup(bg_action_ptr->blrtsimage);
		rc = 1;
	}
#elif defined HAVE_BGP
	if ((bg_action_ptr->conn_type[0] >= SELECT_SMALL)
	    && (bg_action_ptr->conn_type[0] != bg_record->conn_type[0])) {
		if (bg_conf->slurm_debug_level >= LOG_LEVEL_DEBUG3) {
			char *req_conn_type =
				conn_type_string_full(bg_action_ptr->conn_type);
			char *conn_type =
				conn_type_string_full(bg_record->conn_type);
			debug3("changing small block mode from %s to %s",
			       conn_type, req_conn_type);
			xfree(req_conn_type);
			xfree(conn_type);
		}
		rc = 1;
# ifndef HAVE_BG_FILES
		/* since we don't check state on an emulated system we
		 * have to change it here */
		bg_record->conn_type[0] = bg_action_ptr->conn_type[0];
# endif
	}
#endif

#ifdef HAVE_BG_L_P
	if (bg_action_ptr->linuximage
	    && xstrcasecmp(bg_action_ptr->linuximage, bg_record->linuximage)) {
# ifdef HAVE_BGL
		debug3("changing LinuxImage from %s to %s",
		       bg_record->linuximage, bg_action_ptr->linuximage);
# else
		debug3("changing CnloadImage from %s to %s",
		       bg_record->linuximage, bg_action_ptr->linuximage);
# endif
		xfree(bg_record->linuximage);
		bg_record->linuximage = xstrdup(bg_action_ptr->linuximage);
		rc = 1;
	}
	if (bg_action_ptr->ramdiskimage
	    && xstrcasecmp(bg_action_ptr->ramdiskimage,
			   bg_record->ramdiskimage)) {
# ifdef HAVE_BGL
		debug3("changing RamDiskImage from %s to %s",
		       bg_record->ramdiskimage, bg_action_ptr->ramdiskimage);
# else
		debug3("changing IoloadImage from %s to %s",
		       bg_record->ramdiskimage, bg_action_ptr->ramdiskimage);
# endif
		xfree(bg_record->ramdiskimage);
		bg_record->ramdiskimage = xstrdup(bg_action_ptr->ramdiskimage);
		rc = 1;
	}
#endif
	if (bg_action_ptr->mloaderimage
	    && xstrcasecmp(bg_action_ptr->mloaderimage,
			   bg_record->mloaderimage)) {
		debug3("changing MloaderImage from %s to %s",
		       bg_record->mloaderimage, bg_action_ptr->mloaderimage);
		xfree(bg_record->mloaderimage);
		bg_record->mloaderimage = xstrdup(bg_action_ptr->mloaderimage);
		rc = 1;
	}

	if (rc || bg_action_ptr->reboot) {
		bg_record->modifying = 1;

		/* Increment free_cnt to make sure we don't loose this
		 * block since bg_free_block will unlock
		 * block_state_mutex.
		 */
		bg_record->free_cnt++;
		bg_free_block(bg_record, 1, 1);
		bg_record->free_cnt--;

#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
#ifdef HAVE_BGL
		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_BlrtsImg,
					      bg_record->blrtsimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_BlrtsImg): %s",
			      bg_err_str(rc));

		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_LinuxImg,
					      bg_record->linuximage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_LinuxImg): %s",
			      bg_err_str(rc));

		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_RamdiskImg,
					      bg_record->ramdiskimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_RamdiskImg): %s",
			      bg_err_str(rc));

#elif defined HAVE_BGP
		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_CnloadImg,
					      bg_record->linuximage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_CnloadImg): %s",
			      bg_err_str(rc));

		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_IoloadImg,
					      bg_record->ramdiskimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_IoloadImg): %s",
			      bg_err_str(rc));

		if (bg_action_ptr->conn_type[0] > SELECT_SMALL) {
			char *conn_type = NULL;
			switch (bg_action_ptr->conn_type[0]) {
			case SELECT_HTC_S:
				conn_type = "s";
				break;
			case SELECT_HTC_D:
				conn_type = "d";
				break;
			case SELECT_HTC_V:
				conn_type = "v";
				break;
			case SELECT_HTC_L:
				conn_type = "l";
				break;
			default:
				break;
			}
			/* the option has to be set before the pool can be
			   set */
			if ((rc = bridge_block_modify(
				     bg_record->bg_block_id,
				     RM_MODIFY_Options,
				     conn_type)) != SLURM_SUCCESS)
				error("bridge_set_data(RM_MODIFY_Options): %s",
				      bg_err_str(rc));
		}
#endif
		if ((rc = bridge_block_modify(bg_record->bg_block_id,
					      RM_MODIFY_MloaderImg,
					      bg_record->mloaderimage))
		    != SLURM_SUCCESS)
			error("bridge_block_modify(RM_MODIFY_MloaderImg): %s",
			      bg_err_str(rc));
#endif
		bg_record->modifying = 0;
	}

no_reboot:
	if (bg_record->state == BG_BLOCK_FREE) {
		if ((rc = bridge_block_boot(bg_record)) != SLURM_SUCCESS) {
			char reason[200];

			bg_record->boot_state = 0;
			bg_record->boot_count = 0;

			if (rc == BG_ERROR_INVALID_STATE)
				snprintf(reason, sizeof(reason),
					 "Block %s is in an incompatible "
					 "state. This usually means "
					 "hardware is allocated "
					 "by another block (maybe outside "
					 "of SLURM).",
					 bg_record->bg_block_id);
			else
				snprintf(reason, sizeof(reason),
					 "Couldn't boot block %s: %s",
					 bg_record->bg_block_id,
					 bg_err_str(rc));
			slurm_mutex_unlock(&block_state_mutex);
			requeue_and_error(bg_record, reason);
			return;
		}
	} else if (bg_record->state == BG_BLOCK_BOOTING) {
#ifdef HAVE_BG_FILES
		bg_record->boot_state = 1;
#else
		if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
			list_push(bg_lists->booted, bg_record);
		bg_record->state = BG_BLOCK_INITED;
		last_bg_update = time(NULL);
#endif
	}

	if ((bg_record->job_running <= NO_JOB_RUNNING)
	    && !find_job_in_bg_record(bg_record, req_job_id)) {
		slurm_mutex_unlock(&block_state_mutex);
		debug("job %u finished during the start of the boot "
		      "(everything is ok)",
		      req_job_id);
		return;
	}

	/* Don't reset boot_count, it will be reset when state
	   changes, and needs to outlast a job allocation.
	*/
	/* bg_record->boot_count = 0; */
	if (bg_record->state == BG_BLOCK_INITED) {
		debug("block %s is already ready.", bg_record->bg_block_id);
		/* Just in case reset the boot flags */
		bg_record->boot_state = 0;
		bg_record->boot_count = 0;
		set_user_rc = bridge_block_sync_users(bg_record);
		block_inited = 1;
	}
	slurm_mutex_unlock(&block_state_mutex);

	/* This lock needs to happen after the block_state_mutex to
	   avoid deadlock.
	*/
	if (block_inited && bg_action_ptr->job_ptr) {
		slurmctld_lock_t job_write_lock = {
			NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
		lock_slurmctld(job_write_lock);
		bg_action_ptr->job_ptr->job_state &= (~JOB_CONFIGURING);
		last_job_update = time(NULL);
		unlock_slurmctld(job_write_lock);
	}

	if (set_user_rc == SLURM_ERROR) {
		sleep(2);
		/* wait for the slurmd to begin
		   the batch script, slurm_fail_job()
		   is a no-op if issued prior
		   to the script initiation do clean up just
		   incase the fail job isn't ran */
		(void) slurm_fail_job(req_job_id, JOB_BOOT_FAIL);
	}
}
/* create a slurmd job structure from a launch tasks message */
/*
 * Build and return a stepd_step_rec_t for the given launch request,
 * copying task/step parameters out of the message, creating the eio
 * handle and I/O lists, resolving this node's id in the step, and
 * starting accounting profiling. Returns NULL on invalid uid/gid,
 * bad accounting frequency, or when this node is not in the step's
 * node list. Ownership of the returned record is the caller's
 * (freed via stepd_step_rec_destroy).
 */
extern stepd_step_rec_t *
stepd_step_rec_create(launch_tasks_request_msg_t *msg)
{
	stepd_step_rec_t *job = NULL;
	srun_info_t *srun = NULL;
	slurm_addr_t resp_addr;
	slurm_addr_t io_addr;
	int i, nodeid = NO_VAL;

	xassert(msg != NULL);
	xassert(msg->complete_nodelist != NULL);
	debug3("entering stepd_step_rec_create");

	/* Reject before allocating anything. */
	if (!_valid_uid_gid((uid_t)msg->uid, &(msg->gid), &(msg->user_name)))
		return NULL;

	if (_check_acct_freq_task(msg->job_mem_lim, msg->acctg_freq))
		return NULL;

	job = xmalloc(sizeof(stepd_step_rec_t));
	job->msg = msg;
#ifndef HAVE_FRONT_END
	/* Normal mode: locate this node within the step's node list. */
	nodeid = nodelist_find(msg->complete_nodelist, conf->node_name);
	job->node_name = xstrdup(conf->node_name);
#else
	/* Front-end mode: a single slurmd fronts all nodes. */
	nodeid = 0;
	job->node_name = xstrdup(msg->complete_nodelist);
#endif
	if (nodeid < 0) {
		error("couldn't find node %s in %s",
		      job->node_name, msg->complete_nodelist);
		stepd_step_rec_destroy(job);
		return NULL;
	}

	job->state = SLURMSTEPD_STEP_STARTING;
	job->node_tasks = msg->tasks_to_launch[nodeid];
	job->ntasks = msg->ntasks;
	job->jobid = msg->job_id;
	job->stepid = msg->job_step_id;

	job->uid = (uid_t) msg->uid;
	job->user_name = xstrdup(msg->user_name);
	job->gid = (gid_t) msg->gid;
	job->cwd = xstrdup(msg->cwd);
	job->task_dist = msg->task_dist;

	job->cpu_bind_type = msg->cpu_bind_type;
	job->cpu_bind = xstrdup(msg->cpu_bind);
	job->mem_bind_type = msg->mem_bind_type;
	job->mem_bind = xstrdup(msg->mem_bind);
	job->cpu_freq = msg->cpu_freq;
	job->ckpt_dir = xstrdup(msg->ckpt_dir);
	job->restart_dir = xstrdup(msg->restart_dir);
	job->cpus_per_task = msg->cpus_per_task;

	job->env = _array_copy(msg->envc, msg->env);
	/* Array ids default to the plain job id / NO_VAL and are
	 * overridden below if the environment carries them.
	 * NOTE(review): array_task_id is truncated to uint16_t here —
	 * presumably the field is 16 bits in this version; confirm. */
	job->array_job_id = msg->job_id;
	job->array_task_id = (uint16_t) NO_VAL;
	for (i = 0; i < msg->envc; i++) {
		/*                         1234567890123456789 */
		if (!strncmp(msg->env[i], "SLURM_ARRAY_JOB_ID=", 19))
			job->array_job_id = atoi(msg->env[i] + 19);
		/*                         12345678901234567890 */
		if (!strncmp(msg->env[i], "SLURM_ARRAY_TASK_ID=", 20))
			job->array_task_id = atoi(msg->env[i] + 20);
	}

	job->eio     = eio_handle_create();
	job->sruns   = list_create((ListDelF) _srun_info_destructor);
	job->clients = list_create(NULL); /* FIXME! Needs destructor */
	job->stdout_eio_objs = list_create(NULL); /* FIXME! Needs destructor */
	job->stderr_eio_objs = list_create(NULL); /* FIXME! Needs destructor */
	job->free_incoming = list_create(NULL); /* FIXME! Needs destructor */
	job->incoming_count = 0;
	job->free_outgoing = list_create(NULL); /* FIXME! Needs destructor */
	job->outgoing_count = 0;
	job->outgoing_cache = list_create(NULL); /* FIXME! Needs destructor */

	/* Template environment; fields are filled per task at launch. */
	job->envtp   = xmalloc(sizeof(env_t));
	job->envtp->jobid = -1;
	job->envtp->stepid = -1;
	job->envtp->procid = -1;
	job->envtp->localid = -1;
	job->envtp->nodeid = -1;

	job->envtp->distribution = 0;
	job->envtp->cpu_bind_type = 0;
	job->envtp->cpu_bind = NULL;
	job->envtp->mem_bind_type = 0;
	job->envtp->mem_bind = NULL;
	job->envtp->ckpt_dir = NULL;
	job->envtp->comm_port = msg->resp_port[nodeid % msg->num_resp_port];

	/* Response goes back to the originating address, on a per-node
	 * response port chosen round-robin by nodeid. */
	memcpy(&resp_addr, &msg->orig_addr, sizeof(slurm_addr_t));
	slurm_set_addr(&resp_addr,
		       msg->resp_port[nodeid % msg->num_resp_port],
		       NULL);
	job->user_managed_io = msg->user_managed_io;
	/* NOTE(review): io_addr is only filled in when slurm manages the
	 * I/O; with user_managed_io it is passed to srun_info_create
	 * uninitialized — presumably unused in that mode; confirm. */
	if (!msg->user_managed_io) {
		memcpy(&io_addr,   &msg->orig_addr, sizeof(slurm_addr_t));
		slurm_set_addr(&io_addr,
			       msg->io_port[nodeid % msg->num_io_port],
			       NULL);
	}

	srun = srun_info_create(msg->cred, &resp_addr, &io_addr);

	job->buffered_stdio = msg->buffered_stdio;
	job->labelio = msg->labelio;

	job->profile     = msg->profile;
	job->task_prolog = xstrdup(msg->task_prolog);
	job->task_epilog = xstrdup(msg->task_epilog);

	job->argc    = msg->argc;
	job->argv    = _array_copy(job->argc, msg->argv);

	job->nnodes  = msg->nnodes;
	job->nodeid  = nodeid;
	job->debug   = msg->slurmd_debug;
	job->cpus    = msg->node_cpus;

	/* This needs to happen before acct_gather_profile_startpoll
	   and only really looks at the profile in the job.
	*/
	acct_gather_profile_g_node_step_start(job);
	acct_gather_profile_startpoll(msg->acctg_freq,
				      conf->job_acct_gather_freq);

	job->multi_prog  = msg->multi_prog;
	job->timelimit   = (time_t) -1;
	job->task_flags  = msg->task_flags;
	job->switch_job  = msg->switch_job;
	job->pty         = msg->pty;
	job->open_mode   = msg->open_mode;
	job->options     = msg->options;
	format_core_allocs(msg->cred, conf->node_name, conf->cpus,
			   &job->job_alloc_cores, &job->step_alloc_cores,
			   &job->job_mem, &job->step_mem);
	/* Step-level memory limit takes precedence over the job-level one. */
	if (job->step_mem) {
		jobacct_gather_set_mem_limit(job->jobid, job->stepid,
					     job->step_mem);
	} else if (job->job_mem) {
		jobacct_gather_set_mem_limit(job->jobid, job->stepid,
					     job->job_mem);
	}

#ifdef HAVE_ALPS_CRAY
	/* This is only used for Cray emulation mode where slurmd is used to
	 * launch job steps. On a real Cray system, ALPS is used to launch
	 * the tasks instead of SLURM. SLURM's task launch RPC does NOT
	 * contain the reservation ID, so just use some non-zero value here
	 * for testing purposes. */
	job->resv_id = 1;
	select_g_select_jobinfo_set(msg->select_jobinfo,
				    SELECT_JOBDATA_RESV_ID,
				    &job->resv_id);
#endif

	get_cred_gres(msg->cred, conf->node_name,
		      &job->job_gres_list, &job->step_gres_list);

	list_append(job->sruns, (void *) srun);

	_job_init_task_info(job, msg->global_task_ids[nodeid],
			    msg->ifname, msg->ofname, msg->efname);

	return job;
}
/*
 * dmu_objset_sync - write out an objset's dirty state for the syncing txg.
 *
 * Issues the root-block write, syncs the special (meta/user/group) dnodes,
 * pushes all dirty dnode lists for this txg, and syncs the intent log.
 * All child I/O is parented to the root block zio; nothing is waited on
 * here (zio_nowait throughout) — completion is driven by the caller's pio.
 *
 * called from dsl
 */
void
dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	writeprops_t wp = { 0 };
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	zb.zb_objset = os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;	/* for block ordering; it's level 0 on disk */
	zb.zb_blkid = 0;

	wp.wp_type = DMU_OT_OBJSET;
	wp.wp_level = 0;	/* on-disk BP level; see above */
	wp.wp_copies = os->os_copies;
	wp.wp_oschecksum = os->os_checksum;
	wp.wp_oscompress = os->os_compress;

	/* Free the old root block if it predates this txg. */
	if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg)) {
		(void) dsl_dataset_block_kill(os->os_dsl_dataset,
		    os->os_rootbp, pio, tx);
	}

	arc_release(os->os_phys_buf, &os->os_phys_buf);
	zio = arc_write(pio, os->os_spa, &wp, DMU_OS_IS_L2CACHEABLE(os),
	    tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, NULL, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	os->os_meta_dnode->dn_zio = zio;
	dnode_sync(os->os_meta_dnode, tx);

	os->os_phys->os_flags = os->os_flags;

	if (os->os_userused_dnode &&
	    os->os_userused_dnode->dn_type != DMU_OT_NONE) {
		os->os_userused_dnode->dn_zio = zio;
		dnode_sync(os->os_userused_dnode, tx);
		os->os_groupused_dnode->dn_zio = zio;
		dnode_sync(os->os_groupused_dnode, tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	/* Issue the queued writes for the meta-dnode's level-0 dirty
	 * records of this txg. */
	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
	while ((dr = list_head(list))) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}
/*
 * udpclient - main loop of the UDP tunnel client.
 *
 * Parses positional arguments ([local host] local port, proxy host,
 * [proxy port], remote host, remote port), listens for local TCP
 * connections, and forwards their data over UDP through the proxy,
 * periodically punching the remote NAT with ICMP TTL-exceeded packets.
 * Runs until the global 'running' flag is cleared (SIGINT handler).
 *
 * Returns 0.
 */
int udpclient(int argc, char* argv[])
{
    char* lhost, *lport, *phost, *pport, *rhost, *rport;
    list_t* clients;
    list_t* conn_clients;
    client_t* client;
    client_t* client2;
    socket_t* tcp_serv = NULL;
    socket_t* tcp_sock = NULL;
    socket_t* udp_sock = NULL;
    char data[MSG_MAX_LEN];
    char addrstr[ADDRSTRLEN];
    char pport_s[6];
    struct timeval curr_time;
    struct timeval check_time;
    struct timeval check_interval;
    struct timeval timeout;
    fd_set client_fds;
    fd_set read_fds;
    uint16_t tmp_id;
    uint8_t tmp_type;
    uint16_t tmp_len;
    uint16_t tmp_req_id;
    int num_fds;
    int ret;
    int i;
    int icmp_sock ;
    int timeexc = -1;
    struct sockaddr_in src, dest, rsrc;
    struct hostent* hp;
    uint32_t timeexc_ip;

    signal(SIGINT, &signal_handler);

    /* Parse positional arguments.  An optional host argument is detected
     * by the presence of ':' (58) or '.' (46) in it. */
    i = 0;
    if(index(argv[i], 58) || index(argv[i], 46))
        lhost = argv[i++];
    else
        lhost = NULL;
    lport = argv[i++];
    phost = argv[i++];
    if(index(argv[i], 58) || index(argv[i], 46))
    {
        /* No proxy port supplied; default to "2222". */
        snprintf(pport_s, 5, "2222");
        pport = pport_s;
    }
    else
        pport = argv[i++];
    rhost = argv[i++];
    rport = argv[i++];

    /* Get info about localhost IP */
    if(!lhost){
        char szHostName[255];
        gethostname(szHostName, 255);
        hp = gethostbyname(szHostName);
    }else{
        hp = gethostbyname(lhost);
    }
    /* NOTE(review): hp is dereferenced without a NULL check here and
     * below; a failed lookup would crash.  Confirm and add checks. */
    memset(&rsrc, 0, sizeof(struct sockaddr_in));
    timeexc_ip = *(uint32_t*)hp->h_addr_list[0];
    rsrc.sin_family = AF_INET;
    rsrc.sin_port = 0;
    rsrc.sin_addr.s_addr = timeexc_ip;

    /* IP of destination */
    memset(&src, 0, sizeof(struct sockaddr_in));
    hp = gethostbyname(phost);
    timeexc_ip = *(uint32_t*)hp->h_addr_list[0];
    src.sin_family = AF_INET;
    src.sin_port = 0;
    src.sin_addr.s_addr = timeexc_ip;

    /* IP of where the fake packet (echo request) was going.
     * NOTE(review): dest is never memset, so fields other than sin_addr
     * are uninitialized — verify send_icmp() only uses sin_addr. */
    hp = gethostbyname("3.3.3.3");
    memcpy(&dest.sin_addr, hp->h_addr, hp->h_length);
    inet_pton(AF_INET, "3.3.3.3", &(dest.sin_addr));

    srand(time(NULL));
    next_req_id = rand() % 0xffff;

    /* Create an empty list for the clients */
    clients = list_create(sizeof(client_t), p_client_cmp, p_client_copy,
                          p_client_free);
    ERROR_GOTO(clients == NULL, "Error creating clients list.", done);

    /* Create an empty list for the connecting clients */
    conn_clients = list_create(sizeof(client_t), p_client_cmp, p_client_copy,
                               p_client_free);
    ERROR_GOTO(conn_clients == NULL, "Error creating clients list.", done);

    /* Create a TCP server socket to listen for incoming connections */
    tcp_serv = sock_create(lhost, lport, ipver, SOCK_TYPE_TCP, 1, 1);
    ERROR_GOTO(tcp_serv == NULL, "Error creating TCP socket.", done);
    if(debug_level >= DEBUG_LEVEL1)
    {
        printf("Listening on TCP %s\n",
               sock_get_str(tcp_serv, addrstr, sizeof(addrstr)));
    }

    FD_ZERO(&client_fds);

    /* Initialize all the timers */
    timerclear(&timeout);
    check_interval.tv_sec = 0;
    check_interval.tv_usec = 500000;
    gettimeofday(&check_time, NULL);

    /* open raw socket */
    create_icmp_socket(&icmp_sock);
    if(icmp_sock == -1)
    {
        printf("[main] can't open raw socket\n");
        exit(1);
    }

    while(running)
    {
        if(!timerisset(&timeout))
            timeout.tv_usec = 50000;

        /* Every ~100 iterations, re-punch the remote NAT. */
        if(++timeexc==100)
        {
            timeexc=0;
            /* Send ICMP TTL exceeded to penetrate remote NAT */
            send_icmp(icmp_sock, &rsrc, &src, &dest, 0);
        }

        read_fds = client_fds;
        FD_SET(SOCK_FD(tcp_serv), &read_fds);

        ret = select(FD_SETSIZE, &read_fds, NULL, NULL, &timeout);
        PERROR_GOTO(ret < 0, "select", done);
        num_fds = ret;

        gettimeofday(&curr_time, NULL);

        /* Go through all the clients and check if didn't get an ACK for
         * sent data during the timeout period */
        if(timercmp(&curr_time, &check_time, >))
        {
            for(i = 0; i < LIST_LEN(clients); i++)
            {
                client = list_get_at(clients, i);

                ret = client_check_and_resend(client, curr_time);
                if(ret == -2)
                {
                    disconnect_and_remove_client(CLIENT_ID(client),
                                                 clients, &client_fds);
                    i--;
                    continue;
                }

                ret = client_check_and_send_keepalive(client, curr_time);
                if(ret == -2)
                {
                    disconnect_and_remove_client(CLIENT_ID(client),
                                                 clients, &client_fds);
                    i--;
                }
            }

            timeradd(&curr_time, &check_interval, &check_time);
        }

        if(num_fds == 0)
            continue;

        timeexc=0;

        /* Check if pending TCP connection to accept and create a new
         * client and UDP connection if one is ready */
        if(FD_ISSET(SOCK_FD(tcp_serv), &read_fds))
        {
            tcp_sock = sock_accept(tcp_serv);
            udp_sock = sock_create(phost, pport, ipver,
                                   SOCK_TYPE_UDP, 0, 1);
            client = client_create(next_req_id++, tcp_sock, udp_sock, 1);
            if(!client || !tcp_sock || !udp_sock)
            {
                if(tcp_sock)
                    sock_close(tcp_sock);
                if(udp_sock)
                    sock_close(udp_sock);
            }
            else
            {
                /* The list copies the client, so free the local one and
                 * work with the stored copy (client2). */
                client2 = list_add(conn_clients, client);
                client_free(client);
                client = NULL;

                client_send_hello(client2, rhost, rport, CLIENT_ID(client2));
                client_add_tcp_fd_to_set(client2, &client_fds);
                client_add_udp_fd_to_set(client2, &client_fds);
            }

            sock_free(tcp_sock);
            sock_free(udp_sock);
            tcp_sock = NULL;
            udp_sock = NULL;

            num_fds--;
        }

        /* Check for pending handshakes from UDP connection */
        for(i = 0; i < LIST_LEN(conn_clients) && num_fds > 0; i++)
        {
            client = list_get_at(conn_clients, i);

            if(client_udp_fd_isset(client, &read_fds))
            {
                num_fds--;
                tmp_req_id = CLIENT_ID(client);

                ret = client_recv_udp_msg(client, data, sizeof(data),
                                          &tmp_id, &tmp_type, &tmp_len);
                if(ret == 0)
                    ret = handle_message(client, tmp_id, tmp_type,
                                         data, tmp_len);
                if(ret < 0)
                {
                    disconnect_and_remove_client(tmp_req_id, conn_clients,
                                                 &client_fds);
                    i--;
                }
                else
                {
                    /* Handshake done: promote client from the connecting
                     * list to the established list. */
                    client = list_add(clients, client);
                    list_delete_at(conn_clients, i);
                    client_remove_udp_fd_from_set(client, &read_fds);
                    i--;
                }
            }
        }

        /* Check if data is ready from any of the clients */
        for(i = 0; i < LIST_LEN(clients) && num_fds > 0; i++)
        {
            client = list_get_at(clients, i);

            /* Check for UDP data */
            if(client_udp_fd_isset(client, &read_fds))
            {
                num_fds--;

                ret = client_recv_udp_msg(client, data, sizeof(data),
                                          &tmp_id, &tmp_type, &tmp_len);
                if(ret == 0)
                    ret = handle_message(client, tmp_id, tmp_type,
                                         data, tmp_len);
                if(ret < 0)
                {
                    disconnect_and_remove_client(CLIENT_ID(client),
                                                 clients, &client_fds);
                    i--;
                    continue; /* Don't go to check the TCP connection */
                }
            }

            /* Check for TCP data */
            if(client_tcp_fd_isset(client, &read_fds))
            {
                num_fds--;

                ret = client_recv_tcp_data(client);
                if(ret == 0)
                    ret = client_send_udp_data(client);
#if 0 /* if udptunnel is taking up 100% of cpu, try including this */
                else if(ret == 1)
#ifdef _WIN32
                    _sleep(1);
#else
                    usleep(1000); /* Quick hack so doesn't use 100% of CPU if
                                     data wasn't ready yet (waiting for ack) */
#endif /*WIN32*/
#endif /*0*/
                if(ret < 0)
                {
                    disconnect_and_remove_client(CLIENT_ID(client),
                                                 clients, &client_fds);
                    i--;
                }
            }
        }
    }

  done:
    if(debug_level >= DEBUG_LEVEL1)
        printf("Cleaning up...\n");
    if(tcp_serv)
    {
        sock_close(tcp_serv);
        sock_free(tcp_serv);
    }
    if(udp_sock)
    {
        sock_close(udp_sock);
        sock_free(udp_sock);
    }
    if(clients)
        list_free(clients);
    /* NOTE(review): conn_clients is never freed here — apparent leak on
     * shutdown; confirm against list_free() semantics. */
    if(debug_level >= DEBUG_LEVEL1)
        printf("Goodbye.\n");
    return 0;
}
/*
 * setup_job_cluster_cond_limits - append per-cluster job query conditions
 * to *extra based on job_cond.
 *
 * Resolves reservation names to ids (filling job_cond->resvid_list),
 * adds a t1.id_resv filter, and — if JOB_SUSPENDED is among the requested
 * states — adds a filter on job_db_inx values found in the cluster's
 * suspend table for the requested time window.
 *
 * Returns SLURM_SUCCESS, or SLURM_ERROR if the suspend-table query fails.
 * *extra is created ("... where (") or extended ("... && (") in place.
 */
extern int setup_job_cluster_cond_limits(mysql_conn_t *mysql_conn,
					 slurmdb_job_cond_t *job_cond,
					 char *cluster_name, char **extra)
{
	int set = 0;
	ListIterator itr = NULL;
	char *object = NULL;

	if (!job_cond)
		return SLURM_SUCCESS;

	/* this must be done before resvid_list since we set
	   resvid_list up here */
	if (job_cond->resv_list && list_count(job_cond->resv_list)) {
		char *query = xstrdup_printf(
			"select distinct job_db_inx from \"%s_%s\" where (",
			cluster_name, job_table);
		int my_set = 0;
		MYSQL_RES *result = NULL;
		MYSQL_ROW row;

		/* OR together all requested reservation names. */
		itr = list_iterator_create(job_cond->resv_list);
		while ((object = list_next(itr))) {
			if (my_set)
				xstrcat(query, " || ");
			xstrfmtcat(query, "resv_name='%s'", object);
			my_set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(query, ")");
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(query);
			error("couldn't query the database");
			/* Best effort: fall through and use whatever
			   resvid_list already holds. */
			goto no_resv;
		}
		xfree(query);
		if (!job_cond->resvid_list)
			job_cond->resvid_list =
				list_create(slurm_destroy_char);
		while ((row = mysql_fetch_row(result))) {
			list_append(job_cond->resvid_list, xstrdup(row[0]));
		}
		mysql_free_result(result);
	}
no_resv:
	if (job_cond->resvid_list && list_count(job_cond->resvid_list)) {
		set = 0;
		if (*extra)
			xstrcat(*extra, " && (");
		else
			xstrcat(*extra, " where (");
		itr = list_iterator_create(job_cond->resvid_list);
		while ((object = list_next(itr))) {
			if (set)
				xstrcat(*extra, " || ");
			xstrfmtcat(*extra, "t1.id_resv='%s'", object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(*extra, ")");
	}

	if (job_cond->state_list && list_count(job_cond->state_list)) {
		/* Scan for JOB_SUSPENDED among the requested states;
		   after the loop 'object' is non-NULL iff it was found. */
		itr = list_iterator_create(job_cond->state_list);
		while ((object = list_next(itr))) {
			uint32_t state = (uint32_t)slurm_atoul(object);
			state &= JOB_STATE_BASE;
			if (state == JOB_SUSPENDED)
				break;
		}
		list_iterator_destroy(itr);

		if (object) {
			MYSQL_RES *result = NULL;
			MYSQL_ROW row;
			char *query = xstrdup_printf(
				"select job_db_inx from \"%s_%s\"",
				cluster_name, suspend_table);
			/* Restrict the suspend records to the requested
			   usage window, if any. */
			if (job_cond->usage_start) {
				if (!job_cond->usage_end) {
					xstrfmtcat(query,
						   " where (!time_end "
						   "|| (%d between "
						   "time_start and time_end))",
						   (int)job_cond->usage_start);
				} else {
					xstrfmtcat(query,
						   " where (!time_end "
						   "|| (time_start && "
						   "((%d between time_start "
						   "and time_end) "
						   "|| (time_start between "
						   "%d and %d))))",
						   (int)job_cond->usage_start,
						   (int)job_cond->usage_start,
						   (int)job_cond->usage_end);
				}
			} else if (job_cond->usage_end) {
				xstrfmtcat(query,
					   " where (time_start && "
					   "time_start < %d)",
					   (int)job_cond->usage_end);
			}
			debug3("%d(%s:%d) query\n%s",
			       mysql_conn->conn, THIS_FILE, __LINE__, query);
			result = mysql_db_query_ret(mysql_conn, query, 0);
			xfree(query);
			if (!result)
				return SLURM_ERROR;
			set = 0;
			while ((row = mysql_fetch_row(result))) {
				if (set)
					xstrfmtcat(*extra,
						   " || t1.job_db_inx=%s",
						   row[0]);
				else {
					set = 1;
					if (*extra)
						xstrfmtcat(
							*extra,
							" || (t1.job_db_inx=%s",
							row[0]);
					else
						xstrfmtcat(*extra,
							   " where "
							   "(t1.job_db_inx=%s",
							   row[0]);
				}
			}
			mysql_free_result(result);
			if (set)
				xstrcat(*extra, ")");
		}
	}
	return SLURM_SUCCESS;
}
/*
 * dmu_objset_open_impl - open (instantiate in memory) an objset.
 *
 * spa  IN  - pool the objset lives in
 * ds   IN  - owning dataset, or NULL for the meta-objset (MOS)
 * bp   IN  - root block pointer of the objset
 * osip OUT - receives the new objset_impl_t on success
 *
 * Reads the objset phys block (or zeroes a fresh one if bp is a hole),
 * registers property callbacks on the dataset, and sets up the dirty/free
 * dnode lists, locks, ZIL and special dnodes.
 *
 * Returns 0 on success or an errno (checksum errors become EIO); on
 * failure nothing is left allocated.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_impl_t **osip)
{
	objset_impl_t *osi;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	osi->os_rootbp = bp;
	if (!BP_IS_HOLE(osi->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		zb.zb_objset = ds ? ds->ds_object : 0;
		zb.zb_object = 0;
		zb.zb_level = -1;
		zb.zb_blkid = 0;
		if (DMU_OS_IS_L2CACHEABLE(osi))
			aflags |= ARC_L2CACHE;

		dprintf_bp(osi->os_rootbp, "reading %s", "");
		/*
		 * NB: when bprewrite scrub can change the bp,
		 * and this is called from dmu_objset_open_ds_os, the bp
		 * could change, and we'll need a lock.
		 */
		err = arc_read_nolock(NULL, spa, osi->os_rootbp,
		    arc_getbuf_func, &osi->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(osi, sizeof (objset_impl_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = EIO;
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(osi->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &osi->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(osi->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(osi->os_phys_buf));
			(void) arc_buf_remove_ref(osi->os_phys_buf,
			    &osi->os_phys_buf);
			osi->os_phys_buf = buf;
		}

		osi->os_phys = osi->os_phys_buf->b_data;
		osi->os_flags = osi->os_phys->os_flags;
	} else {
		/* Hole bp: brand-new objset, start from a zeroed phys. */
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		osi->os_phys_buf = arc_buf_alloc(spa, size,
		    &osi->os_phys_buf, ARC_BUFC_METADATA);
		osi->os_phys = osi->os_phys_buf->b_data;
		bzero(osi->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds, "primarycache",
		    primary_cache_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "secondarycache",
			    secondary_cache_changed_cb, osi);
		if (!dsl_dataset_is_snapshot(ds)) {
			if (err == 0)
				err = dsl_prop_register(ds, "checksum",
				    checksum_changed_cb, osi);
			if (err == 0)
				err = dsl_prop_register(ds, "compression",
				    compression_changed_cb, osi);
			if (err == 0)
				err = dsl_prop_register(ds, "copies",
				    copies_changed_cb, osi);
		}
		if (err) {
			VERIFY(arc_buf_remove_ref(osi->os_phys_buf,
			    &osi->os_phys_buf) == 1);
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else {
		/*
		 * It's the meta-objset (ds == NULL); the former
		 * "else if (ds == NULL)" test was always true here.
		 */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
		osi->os_copies = spa_max_replication(spa);
		osi->os_primary_cache = ZFS_CACHE_ALL;
		osi->os_secondary_cache = ZFS_CACHE_ALL;
	}

	osi->os_zil_header = osi->os_phys->os_zil_header;
	osi->os_zil = zil_alloc(&osi->os, &osi->os_zil_header);

	/* Per-txg dirty and free dnode lists, threaded through
	 * dn_dirty_link[] of the matching txg slot. */
	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
	/* User/group accounting dnodes only exist in the new, larger
	 * objset_phys_t layout. */
	if (arc_buf_size(osi->os_phys_buf) >= sizeof (objset_phys_t)) {
		osi->os_userused_dnode = dnode_special_open(osi,
		    &osi->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT);
		osi->os_groupused_dnode = dnode_special_open(osi,
		    &osi->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		VERIFY(NULL == dsl_dataset_set_user_ptr(ds, osi,
		    dmu_objset_evict));
	}

	*osip = osi;
	return (0);
}
/*
 * setup_cluster_list_with_inx - build the per-cluster node-index context
 * used to match jobs against a requested node list.
 *
 * Requires job_cond->used_nodes and exactly one cluster in
 * job_cond->cluster_list.  Reads the cluster's dimensions (forced to 1 on
 * Cray XT), then walks the event table's cluster_nodes records in the
 * usage window, building a local_cluster_t with a bitmap of the requested
 * hosts for each node configuration period.
 *
 * curr_cluster OUT - set to the currently-open (time_end == 0) record.
 * Returns a List of local_cluster_t (caller destroys), or NULL.
 *
 * Fix: the result set of the dimensions query was never freed — it leaked
 * both on the "couldn't get dimensions" error path and when 'result' was
 * reused for the event-table query.  mysql_free_result() added in both
 * places.
 */
extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
					slurmdb_job_cond_t *job_cond,
					void **curr_cluster)
{
	List local_cluster_list = NULL;
	time_t now = time(NULL);
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	hostlist_t temp_hl = NULL;
	hostlist_iterator_t h_itr = NULL;
	char *query = NULL;
	int dims = 0;

	if (!job_cond || !job_cond->used_nodes)
		return NULL;

	if (!job_cond->cluster_list
	    || list_count(job_cond->cluster_list) != 1) {
		error("If you are doing a query against nodes "
		      "you must only have 1 cluster "
		      "you are asking for.");
		return NULL;
	}

	/* get the dimensions of this cluster so we know how to deal
	   with the hostlists */
	query = xstrdup_printf("select dimensions, flags from %s where "
			       "name='%s'",
			       cluster_table,
			       (char *)list_peek(job_cond->cluster_list));

	debug4("%d(%s:%d) query\n%s",
	       mysql_conn->conn, THIS_FILE, __LINE__, query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		return NULL;
	}
	xfree(query);

	if (!(row = mysql_fetch_row(result))) {
		error("Couldn't get the dimensions of cluster '%s'.",
		      (char *)list_peek(job_cond->cluster_list));
		mysql_free_result(result);	/* was leaked here */
		return NULL;
	}

	/* On a Cray System when dealing with hostlists as we are here
	   this always needs to be 1. */
	if (slurm_atoul(row[1]) & CLUSTER_FLAG_CRAYXT)
		dims = 1;
	else
		dims = atoi(row[0]);

	/* Done with the dimensions result; free it before 'result' is
	   reused for the event-table query below (was leaked). */
	mysql_free_result(result);
	result = NULL;

	temp_hl = hostlist_create_dims(job_cond->used_nodes, dims);
	if (hostlist_count(temp_hl) <= 0) {
		error("we didn't get any real hosts to look for.");
		goto no_hosts;
	}
	h_itr = hostlist_iterator_create(temp_hl);

	query = xstrdup_printf("select cluster_nodes, time_start, "
			       "time_end from \"%s_%s\" where node_name='' "
			       "&& cluster_nodes !=''",
			       (char *)list_peek(job_cond->cluster_list),
			       event_table);

	if (job_cond->usage_start) {
		if (!job_cond->usage_end)
			job_cond->usage_end = now;

		xstrfmtcat(query,
			   " && ((time_start < %ld) "
			   "&& (time_end >= %ld || time_end = 0))",
			   job_cond->usage_end, job_cond->usage_start);
	}

	debug3("%d(%s:%d) query\n%s",
	       mysql_conn->conn, THIS_FILE, __LINE__, query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		goto no_hosts;
	}
	xfree(query);

	local_cluster_list = list_create(_destroy_local_cluster);
	while ((row = mysql_fetch_row(result))) {
		char *host = NULL;
		int loc = 0;
		local_cluster_t *local_cluster =
			xmalloc(sizeof(local_cluster_t));
		local_cluster->hl = hostlist_create_dims(row[0], dims);
		local_cluster->start = slurm_atoul(row[1]);
		local_cluster->end   = slurm_atoul(row[2]);
		local_cluster->asked_bitmap =
			bit_alloc(hostlist_count(local_cluster->hl));
		/* Mark which of the requested hosts exist in this node
		   configuration period. */
		while ((host = hostlist_next_dims(h_itr, dims))) {
			if ((loc = hostlist_find(
				     local_cluster->hl, host)) != -1)
				bit_set(local_cluster->asked_bitmap, loc);
			free(host);
		}
		hostlist_iterator_reset(h_itr);
		if (bit_ffs(local_cluster->asked_bitmap) != -1) {
			list_append(local_cluster_list, local_cluster);
			if (local_cluster->end == 0) {
				local_cluster->end = now;
				(*curr_cluster) = local_cluster;
			}
		} else
			_destroy_local_cluster(local_cluster);
	}
	mysql_free_result(result);

	if (!list_count(local_cluster_list)) {
		list_destroy(local_cluster_list);
		local_cluster_list = NULL;
		goto no_hosts;
	}

no_hosts:
	hostlist_iterator_destroy(h_itr);
	hostlist_destroy(temp_hl);

	return local_cluster_list;
}
/*
 * _cluster_get_jobs - fetch all matching job (and step) records from one
 * cluster's tables and append them to sent_list.
 *
 * sent_extra is the caller's WHERE-clause fragment (copied locally);
 * job_fields/step_fields are the pre-built SELECT column lists.
 * For non-admins with PRIVATE_DATA_JOBS set, results are restricted to
 * the user's own associations plus accounts they coordinate.
 *
 * Returns SLURM_SUCCESS (jobs transferred into sent_list) or SLURM_ERROR.
 */
static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
			     slurmdb_user_rec_t *user,
			     slurmdb_job_cond_t *job_cond,
			     char *cluster_name,
			     char *job_fields, char *step_fields,
			     char *sent_extra,
			     bool is_admin, int only_pending, List sent_list)
{
	char *query = NULL;
	char *extra = xstrdup(sent_extra);
	uint16_t private_data = slurm_get_private_data();
	slurmdb_selected_step_t *selected_step = NULL;
	MYSQL_RES *result = NULL, *step_result = NULL;
	MYSQL_ROW row, step_row;
	slurmdb_job_rec_t *job = NULL;
	slurmdb_step_rec_t *step = NULL;
	time_t now = time(NULL);
	List job_list = list_create(slurmdb_destroy_job_rec);
	ListIterator itr = NULL;
	List local_cluster_list = NULL;
	int set = 0;
	char *prefix="t2";	/* alias of the assoc table in the join */
	int rc = SLURM_SUCCESS;
	int last_id = -1, curr_id = -1;
	local_cluster_t *curr_cluster = NULL;

	/* This is here to make sure we are looking at only this user
	 * if this flag is set.  We also include any accounts they may be
	 * coordinator of. */
	if (!is_admin && (private_data & PRIVATE_DATA_JOBS)) {
		query = xstrdup_printf("select lft from \"%s_%s\" "
				       "where user='******'",
				       cluster_name, assoc_table, user->name);
		if (user->coord_accts) {
			slurmdb_coord_rec_t *coord = NULL;
			itr = list_iterator_create(user->coord_accts);
			while ((coord = list_next(itr))) {
				xstrfmtcat(query, " || acct='%s'",
					   coord->name);
			}
			list_iterator_destroy(itr);
		}
		debug3("%d(%s:%d) query\n%s",
		       mysql_conn->conn, THIS_FILE, __LINE__, query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(extra);
			xfree(query);
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);
		set = 0;
		/* OR together an lft/rgt range test for every association
		   the user may view. */
		while ((row = mysql_fetch_row(result))) {
			if (set) {
				xstrfmtcat(extra,
					   " || (%s between %s.lft and %s.rgt)",
					   row[0], prefix, prefix);
			} else {
				set = 1;
				if (extra)
					xstrfmtcat(extra,
						   " && ((%s between %s.lft "
						   "and %s.rgt)",
						   row[0], prefix, prefix);
				else
					xstrfmtcat(extra,
						   " where ((%s between %s.lft "
						   "and %s.rgt)",
						   row[0], prefix, prefix);
			}
		}
		if (set)
			xstrcat(extra,")");
		mysql_free_result(result);
	}

	setup_job_cluster_cond_limits(mysql_conn, job_cond,
				      cluster_name, &extra);

	query = xstrdup_printf("select %s from \"%s_%s\" as t1 "
			       "left join \"%s_%s\" as t2 "
			       "on t1.id_assoc=t2.id_assoc",
			       job_fields, cluster_name, job_table,
			       cluster_name, assoc_table);
	if (extra) {
		xstrcat(query, extra);
		xfree(extra);
	}

	/* Here we want to order them this way in such a way so it is
	   easy to look for duplicates, it is also easy to sort the
	   resized jobs. */
	xstrcat(query, " group by id_job, time_submit desc");

	debug3("%d(%s:%d) query\n%s",
	       mysql_conn->conn, THIS_FILE, __LINE__, query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		rc = SLURM_ERROR;
		goto end_it;
	}
	xfree(query);

	/* Here we set up environment to check used nodes of jobs.
	   Since we store the bitmap of the entire cluster we can use
	   that to set up a hostlist and set up the bitmap to make
	   things work.  This should go before the setup of conds
	   since we could update the start/end time. */
	if (job_cond && job_cond->used_nodes) {
		local_cluster_list = setup_cluster_list_with_inx(
			mysql_conn, job_cond, (void **)&curr_cluster);
		if (!local_cluster_list) {
			rc = SLURM_ERROR;
			goto end_it;
		}
	}

	while ((row = mysql_fetch_row(result))) {
		char *id = row[JOB_REQ_ID];
		bool job_ended = 0;
		int submit = slurm_atoul(row[JOB_REQ_SUBMIT]);

		curr_id = slurm_atoul(row[JOB_REQ_JOBID]);

		/* Rows are grouped per id_job; skip duplicates unless the
		   caller asked for them or the row is a resize record. */
		if (job_cond && !job_cond->duplicates
		    && (curr_id == last_id)
		    && (slurm_atoul(row[JOB_REQ_STATE]) != JOB_RESIZING))
			continue;

		/* check the bitmap to see if this is one of the jobs
		   we are looking for */
		if (!good_nodes_from_inx(local_cluster_list,
					 (void **)&curr_cluster,
					 row[JOB_REQ_NODE_INX], submit)) {
			last_id = curr_id;
			continue;
		}

		job = slurmdb_create_job_rec();
		job->state = slurm_atoul(row[JOB_REQ_STATE]);
		if (curr_id == last_id)
			/* put in reverse so we order by the submit getting
			   larger which it is given to us in reverse
			   order from the database */
			list_prepend(job_list, job);
		else
			list_append(job_list, job);
		last_id = curr_id;

		job->alloc_cpus = slurm_atoul(row[JOB_REQ_ALLOC_CPUS]);
		job->alloc_nodes = slurm_atoul(row[JOB_REQ_ALLOC_NODES]);
		job->associd = slurm_atoul(row[JOB_REQ_ASSOCID]);
		job->resvid = slurm_atoul(row[JOB_REQ_RESVID]);
		job->cluster = xstrdup(cluster_name);

		/* we want a blank wckey if the name is null */
		if (row[JOB_REQ_WCKEY])
			job->wckey = xstrdup(row[JOB_REQ_WCKEY]);
		else
			job->wckey = xstrdup("");
		job->wckeyid = slurm_atoul(row[JOB_REQ_WCKEYID]);

		if (row[JOB_REQ_USER_NAME])
			job->user = xstrdup(row[JOB_REQ_USER_NAME]);
		else
			job->uid = slurm_atoul(row[JOB_REQ_UID]);

		if (row[JOB_REQ_LFT])
			job->lft = slurm_atoul(row[JOB_REQ_LFT]);

		/* Prefer the job row's account, fall back to the assoc's. */
		if (row[JOB_REQ_ACCOUNT] && row[JOB_REQ_ACCOUNT][0])
			job->account = xstrdup(row[JOB_REQ_ACCOUNT]);
		else if (row[JOB_REQ_ACCOUNT1] && row[JOB_REQ_ACCOUNT1][0])
			job->account = xstrdup(row[JOB_REQ_ACCOUNT1]);

		if (row[JOB_REQ_BLOCKID])
			job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);

		job->eligible = slurm_atoul(row[JOB_REQ_ELIGIBLE]);
		job->submit = submit;
		job->start = slurm_atoul(row[JOB_REQ_START]);
		job->end = slurm_atoul(row[JOB_REQ_END]);
		job->timelimit = slurm_atoul(row[JOB_REQ_TIMELIMIT]);

		/* since the job->end could be set later end it here */
		if (job->end) {
			job_ended = 1;
			if (!job->start || (job->start > job->end))
				job->start = job->end;
		}

		if (job_cond && !job_cond->without_usage_truncation
		    && job_cond->usage_start) {
			/* Clamp start/end to the requested usage window
			   and recompute elapsed, subtracting any suspended
			   intervals overlapping the window. */
			if (job->start && (job->start < job_cond->usage_start))
				job->start = job_cond->usage_start;

			if (!job->end || job->end > job_cond->usage_end)
				job->end = job_cond->usage_end;

			if (!job->start)
				job->start = job->end;

			job->elapsed = job->end - job->start;

			if (row[JOB_REQ_SUSPENDED]) {
				MYSQL_RES *result2 = NULL;
				MYSQL_ROW row2;
				/* get the suspended time for this job */
				query = xstrdup_printf(
					"select time_start, time_end from "
					"\"%s_%s\" where "
					"(time_start < %ld && (time_end >= %ld "
					"|| time_end = 0)) && job_db_inx=%s "
					"order by time_start",
					cluster_name, suspend_table,
					job_cond->usage_end,
					job_cond->usage_start,
					id);

				debug4("%d(%s:%d) query\n%s",
				       mysql_conn->conn, THIS_FILE,
				       __LINE__, query);
				/* NOTE(review): on failure here 'query' is
				   not freed and job_list is destroyed and
				   NULLed, yet end_it still calls
				   list_transfer/list_destroy on it since rc
				   stays SLURM_SUCCESS — verify. */
				if (!(result2 = mysql_db_query_ret(
					      mysql_conn,
					      query, 0))) {
					list_destroy(job_list);
					job_list = NULL;
					break;
				}
				xfree(query);
				while ((row2 = mysql_fetch_row(result2))) {
					time_t local_start =
						slurm_atoul(row2[0]);
					time_t local_end =
						slurm_atoul(row2[1]);

					if (!local_start)
						continue;

					if (job->start > local_start)
						local_start = job->start;
					if (job->end < local_end)
						local_end = job->end;

					if ((local_end - local_start) < 1)
						continue;

					job->elapsed -=
						(local_end - local_start);
					job->suspended +=
						(local_end - local_start);
				}
				mysql_free_result(result2);
			}
		} else {
			job->suspended = slurm_atoul(row[JOB_REQ_SUSPENDED]);

			/* fix the suspended number to be correct */
			if (job->state == JOB_SUSPENDED)
				job->suspended = now - job->suspended;
			if (!job->start) {
				job->elapsed = 0;
			} else if (!job->end) {
				job->elapsed = now - job->start;
			} else {
				job->elapsed = job->end - job->start;
			}

			job->elapsed -= job->suspended;
		}

		if ((int)job->elapsed < 0)
			job->elapsed = 0;

		job->jobid = curr_id;
		job->jobname = xstrdup(row[JOB_REQ_NAME]);
		job->gid = slurm_atoul(row[JOB_REQ_GID]);
		job->exitcode = slurm_atoul(row[JOB_REQ_EXIT_CODE]);
		job->derived_ec = slurm_atoul(row[JOB_REQ_DERIVED_EC]);
		job->derived_es = xstrdup(row[JOB_REQ_DERIVED_ES]);

		if (row[JOB_REQ_PARTITION])
			job->partition = xstrdup(row[JOB_REQ_PARTITION]);

		if (row[JOB_REQ_NODELIST])
			job->nodes = xstrdup(row[JOB_REQ_NODELIST]);

		if (!job->nodes || !strcmp(job->nodes, "(null)")) {
			xfree(job->nodes);
			job->nodes = xstrdup("(unknown)");
		}

		job->track_steps = slurm_atoul(row[JOB_REQ_TRACKSTEPS]);
		job->priority = slurm_atoul(row[JOB_REQ_PRIORITY]);
		job->req_cpus = slurm_atoul(row[JOB_REQ_REQ_CPUS]);
		job->requid = slurm_atoul(row[JOB_REQ_KILL_REQUID]);
		job->qosid = slurm_atoul(row[JOB_REQ_QOS]);
		job->show_full = 1;

		/* Steps are skipped entirely for pending-only queries or
		   when the caller asked for jobs without steps. */
		if (only_pending || (job_cond && job_cond->without_steps))
			goto skip_steps;

		if (job_cond && job_cond->step_list
		    && list_count(job_cond->step_list)) {
			set = 0;
			itr = list_iterator_create(job_cond->step_list);
			while ((selected_step = list_next(itr))) {
				if (selected_step->jobid != job->jobid) {
					continue;
				} else if (selected_step->stepid == NO_VAL) {
					job->show_full = 1;
					break;
				} else if (selected_step->stepid == INFINITE)
					selected_step->stepid =
						SLURM_BATCH_SCRIPT;

				if (set)
					xstrcat(extra, " || ");
				else
					xstrcat(extra, " && (");

				/* The stepid could be -2 so use %d not %u */
				xstrfmtcat(extra, "t1.id_step=%d",
					   selected_step->stepid);
				set = 1;
				job->show_full = 0;
			}
			list_iterator_destroy(itr);
			if (set)
				xstrcat(extra, ")");
		}

		query =	xstrdup_printf("select %s from \"%s_%s\" as t1 "
				       "where t1.job_db_inx=%s",
				       step_fields, cluster_name,
				       step_table, id);
		if (extra) {
			xstrcat(query, extra);
			xfree(extra);
		}

		debug4("%d(%s:%d) query\n%s",
		       mysql_conn->conn, THIS_FILE, __LINE__, query);

		if (!(step_result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(query);
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);

		/* Querying the steps in the fashion was faster than
		   doing only 1 query and then matching the steps up
		   later with the job. */
		while ((step_row = mysql_fetch_row(step_result))) {
			/* check the bitmap to see if this is one of the
			   steps we are looking for */
			if (!good_nodes_from_inx(local_cluster_list,
						 (void **)&curr_cluster,
						 step_row[STEP_REQ_NODE_INX],
						 submit))
				continue;

			step = slurmdb_create_step_rec();
			step->tot_cpu_sec = 0;
			step->tot_cpu_usec = 0;
			step->job_ptr = job;
			if (!job->first_step_ptr)
				job->first_step_ptr = step;
			list_append(job->steps, step);
			step->stepid = slurm_atoul(step_row[STEP_REQ_STEPID]);
			/* info("got step %u.%u", */
			/*      job->header.jobnum, step->stepnum); */
			step->state = slurm_atoul(step_row[STEP_REQ_STATE]);
			step->exitcode =
				slurm_atoul(step_row[STEP_REQ_EXIT_CODE]);
			step->ncpus = slurm_atoul(step_row[STEP_REQ_CPUS]);
			step->nnodes = slurm_atoul(step_row[STEP_REQ_NODES]);

			step->ntasks = slurm_atoul(step_row[STEP_REQ_TASKS]);
			step->task_dist =
				slurm_atoul(step_row[STEP_REQ_TASKDIST]);
			if (!step->ntasks)
				step->ntasks = step->ncpus;

			step->start = slurm_atoul(step_row[STEP_REQ_START]);
			step->end = slurm_atoul(step_row[STEP_REQ_END]);

			/* if the job has ended end the step also */
			if (!step->end && job_ended) {
				step->end = job->end;
				step->state = job->state;
			}

			/* Clamp step times to the usage window too. */
			if (job_cond && !job_cond->without_usage_truncation
			    && job_cond->usage_start) {
				if (step->start
				    && (step->start < job_cond->usage_start))
					step->start = job_cond->usage_start;

				if (!step->start && step->end)
					step->start = step->end;

				if (!step->end
				    || (step->end > job_cond->usage_end))
					step->end = job_cond->usage_end;
			}

			/* figure this out by start stop */
			step->suspended =
				slurm_atoul(step_row[STEP_REQ_SUSPENDED]);
			if (!step->end) {
				step->elapsed = now - step->start;
			} else {
				step->elapsed = step->end - step->start;
			}
			step->elapsed -= step->suspended;

			if ((int)step->elapsed < 0)
				step->elapsed = 0;

			step->user_cpu_sec =
				slurm_atoul(step_row[STEP_REQ_USER_SEC]);
			step->user_cpu_usec =
				slurm_atoul(step_row[STEP_REQ_USER_USEC]);
			step->sys_cpu_sec =
				slurm_atoul(step_row[STEP_REQ_SYS_SEC]);
			step->sys_cpu_usec =
				slurm_atoul(step_row[STEP_REQ_SYS_USEC]);
			step->tot_cpu_sec +=
				step->user_cpu_sec + step->sys_cpu_sec;
			step->tot_cpu_usec +=
				step->user_cpu_usec + step->sys_cpu_usec;
			step->stats.vsize_max =
				slurm_atoul(step_row[STEP_REQ_MAX_VSIZE]);
			step->stats.vsize_max_taskid =
				slurm_atoul(step_row[STEP_REQ_MAX_VSIZE_TASK]);
			step->stats.vsize_ave =
				atof(step_row[STEP_REQ_AVE_VSIZE]);
			step->stats.rss_max =
				slurm_atoul(step_row[STEP_REQ_MAX_RSS]);
			step->stats.rss_max_taskid =
				slurm_atoul(step_row[STEP_REQ_MAX_RSS_TASK]);
			step->stats.rss_ave =
				atof(step_row[STEP_REQ_AVE_RSS]);
			step->stats.pages_max =
				slurm_atoul(step_row[STEP_REQ_MAX_PAGES]);
			step->stats.pages_max_taskid =
				slurm_atoul(step_row[STEP_REQ_MAX_PAGES_TASK]);
			step->stats.pages_ave =
				atof(step_row[STEP_REQ_AVE_PAGES]);
			step->stats.cpu_min =
				slurm_atoul(step_row[STEP_REQ_MIN_CPU]);
			step->stats.cpu_min_taskid =
				slurm_atoul(step_row[STEP_REQ_MIN_CPU_TASK]);
			step->stats.cpu_ave =
				atof(step_row[STEP_REQ_AVE_CPU]);
			step->stepname =
				xstrdup(step_row[STEP_REQ_NAME]);
			step->nodes =
				xstrdup(step_row[STEP_REQ_NODELIST]);
			step->stats.vsize_max_nodeid =
				slurm_atoul(step_row[STEP_REQ_MAX_VSIZE_NODE]);
			step->stats.rss_max_nodeid =
				slurm_atoul(step_row[STEP_REQ_MAX_RSS_NODE]);
			step->stats.pages_max_nodeid =
				slurm_atoul(step_row[STEP_REQ_MAX_PAGES_NODE]);
			step->stats.cpu_min_nodeid =
				slurm_atoul(step_row[STEP_REQ_MIN_CPU_NODE]);

			step->requid =
				slurm_atoul(step_row[STEP_REQ_KILL_REQUID]);
		}
		mysql_free_result(step_result);

		if (!job->track_steps) {
			/* If we don't have track_steps we want to see
			   if we have multiple steps.  If we only have
			   1 step check the job name against the step
			   name in most all cases it will be
			   different.  If it is different print out
			   the step separate. */
			if (list_count(job->steps) > 1)
				job->track_steps = 1;
			else if (step && step->stepname && job->jobname) {
				if (strcmp(step->stepname, job->jobname))
					job->track_steps = 1;
			}
		}
	skip_steps:
		/* need to reset here to make the above test valid */
		step = NULL;
	}
	mysql_free_result(result);

end_it:
	if (local_cluster_list)
		list_destroy(local_cluster_list);

	if (rc == SLURM_SUCCESS)
		list_transfer(sent_list, job_list);

	list_destroy(job_list);
	return rc;
}
/*
 * as_mysql_jobacct_process_get_jobs - build the list of jobs matching
 * job_cond for the requesting uid.
 *
 * IN mysql_conn - open accounting-storage connection
 * IN uid        - uid of the requester; used for private-data filtering
 * IN job_cond   - selection conditions (may be NULL)
 * RET List of slurmdb_job_rec_t (caller must destroy), never NULL on the
 *     paths visible here; per-cluster failures are logged, not fatal.
 */
extern List as_mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
					      uid_t uid,
					      slurmdb_job_cond_t *job_cond)
{
	char *extra = NULL;
	char *tmp = NULL, *tmp2 = NULL;   /* job / step column lists for SELECT */
	ListIterator itr = NULL;
	int is_admin=1;
	int i;
	List job_list = NULL;
	uint16_t private_data = 0;
	slurmdb_user_rec_t user;
	int only_pending = 0;
	/* Defaults to the global cluster list; overridden below when the
	 * caller supplied an explicit cluster_list. */
	List use_cluster_list = as_mysql_cluster_list;
	char *cluster_name;

	memset(&user, 0, sizeof(slurmdb_user_rec_t));
	user.uid = uid;

	private_data = slurm_get_private_data();
	/* If jobs are private, a non-operator may still see jobs of
	 * accounts they coordinate. */
	if (private_data & PRIVATE_DATA_JOBS) {
		if (!(is_admin = is_user_min_admin_level(
			      mysql_conn, uid, SLURMDB_ADMIN_OPERATOR))) {
			/* Only fill in the coordinator accounts here we will
			   check them later when we actually try to get the
			   jobs. */
			is_user_any_coord(mysql_conn, &user);
		}
	}

	/* Special case: a query for exactly JOB_PENDING lets the cluster
	 * query skip completed-job tables. */
	if (job_cond
	    && job_cond->state_list && (list_count(job_cond->state_list) == 1)
	    && (slurm_atoul(list_peek(job_cond->state_list)) == JOB_PENDING))
		only_pending = 1;

	setup_job_cond_limits(mysql_conn, job_cond, &extra);

	/* Build comma-separated column name strings from the request
	 * index tables. */
	xfree(tmp);
	xstrfmtcat(tmp, "%s", job_req_inx[0]);
	for(i=1; i<JOB_REQ_COUNT; i++) {
		xstrfmtcat(tmp, ", %s", job_req_inx[i]);
	}

	xfree(tmp2);
	xstrfmtcat(tmp2, "%s", step_req_inx[0]);
	for(i=1; i<STEP_REQ_COUNT; i++) {
		xstrfmtcat(tmp2, ", %s", step_req_inx[i]);
	}

	/* The lock is only taken when iterating the shared global list;
	 * the matching unlock below is guarded by the same condition. */
	if (job_cond
	    && job_cond->cluster_list && list_count(job_cond->cluster_list))
		use_cluster_list = job_cond->cluster_list;
	else
		slurm_mutex_lock(&as_mysql_cluster_list_lock);

	job_list = list_create(slurmdb_destroy_job_rec);

	itr = list_iterator_create(use_cluster_list);
	while ((cluster_name = list_next(itr))) {
		int rc;
		/* A failing cluster is logged and skipped so the rest of
		 * the federation still gets reported. */
		if ((rc = _cluster_get_jobs(mysql_conn, &user, job_cond,
					    cluster_name, tmp, tmp2, extra,
					    is_admin, only_pending, job_list))
		    != SLURM_SUCCESS)
			error("Problem getting jobs for cluster %s",
			      cluster_name);
	}
	list_iterator_destroy(itr);

	if (use_cluster_list == as_mysql_cluster_list)
		slurm_mutex_unlock(&as_mysql_cluster_list_lock);

	xfree(tmp);
	xfree(tmp2);
	xfree(extra);

	return job_list;
}
/*
 * create_dynamic_block - create new block(s) to be used for a new
 * job allocation.
 *
 * IN block_list      - list of existing blocks (sorted/re-sorted here)
 * IN/OUT request     - allocation request; geometry may be modified by
 *                      allocate_block() and is restored from start_geo
 * IN my_block_list   - caller's copy of the current blocks, or NULL
 * IN track_down_nodes - passed through to reset_ba_system()
 * RET - a list of created block(s) or NULL on failure errno is set.
 */
extern List create_dynamic_block(List block_list,
				 select_ba_request_t *request,
				 List my_block_list,
				 bool track_down_nodes)
{
	int rc = SLURM_SUCCESS;

	ListIterator itr, itr2;
	bg_record_t *bg_record = NULL, *found_record = NULL;
	List results = NULL;
	List new_blocks = NULL;
	bitstr_t *my_bitmap = NULL;
	select_ba_request_t blockreq;
	int cnodes = request->procs / bg_conf->cpu_ratio;
	uint16_t start_geo[SYSTEM_DIMENSIONS];

	if (cnodes < bg_conf->smallest_block) {
		error("Can't create this size %d "
		      "on this system ionodes_per_mp is %d",
		      request->procs,
		      bg_conf->ionodes_per_mp);
		goto finished;
	}
	memset(&blockreq, 0, sizeof(select_ba_request_t));
	/* Save the requested geometry; allocate_block() may alter it. */
	memcpy(start_geo, request->geometry, sizeof(start_geo));

	/* We need to lock this just incase a blocks_overlap is called
	   which will in turn reset and set the system as it sees fit.
	*/
	slurm_mutex_lock(&block_state_mutex);
	if (my_block_list) {
		reset_ba_system(track_down_nodes);
		itr = list_iterator_create(my_block_list);
		while ((bg_record = list_next(itr))) {
			if (bg_record->magic != BLOCK_MAGIC) {
				/* This should never happen since we
				   only call this on copies of blocks
				   and we check on this during the
				   copy.
				*/
				error("create_dynamic_block: "
				      "got a block with bad magic?");
				continue;
			}
			if (bg_record->free_cnt) {
				if (bg_conf->slurm_debug_flags
				    & DEBUG_FLAG_BG_PICK) {
					int dim;
					char start_geo[SYSTEM_DIMENSIONS+1];
					char geo[SYSTEM_DIMENSIONS+1];
					for (dim = 0; dim < SYSTEM_DIMENSIONS;
					     dim++) {
						start_geo[dim] = alpha_num[
							bg_record->start[dim]];
						geo[dim] = alpha_num[
							bg_record->geo[dim]];
					}
					start_geo[dim] = '\0';
					geo[dim] = '\0';
					info("not adding %s(%s) %s %s %s %u "
					     "(free_cnt)",
					     bg_record->bg_block_id,
					     bg_record->mp_str,
					     bg_block_state_string(
						     bg_record->state),
					     start_geo, geo,
					     bg_record->cnode_cnt);
				}
				continue;
			}

			if (!my_bitmap) {
				my_bitmap = bit_alloc(
					bit_size(bg_record->mp_bitmap));
			}

			if (!bit_super_set(bg_record->mp_bitmap, my_bitmap)) {
				bit_or(my_bitmap, bg_record->mp_bitmap);

				if (bg_conf->slurm_debug_flags
				    & DEBUG_FLAG_BG_PICK) {
					int dim;
					char start_geo[SYSTEM_DIMENSIONS+1];
					char geo[SYSTEM_DIMENSIONS+1];
					for (dim = 0; dim < SYSTEM_DIMENSIONS;
					     dim++) {
						start_geo[dim] = alpha_num[
							bg_record->start[dim]];
						geo[dim] = alpha_num[
							bg_record->geo[dim]];
					}
					start_geo[dim] = '\0';
					geo[dim] = '\0';
					info("adding %s(%s) %s %s %s %u",
					     bg_record->bg_block_id,
					     bg_record->mp_str,
					     bg_block_state_string(
						     bg_record->state),
					     start_geo, geo,
					     bg_record->cnode_cnt);
				}
				if (check_and_set_mp_list(
					    bg_record->ba_mp_list)
				    == SLURM_ERROR) {
					if (bg_conf->slurm_debug_flags
					    & DEBUG_FLAG_BG_PICK)
						info("something happened in "
						     "the load of %s",
						     bg_record->bg_block_id);
					list_iterator_destroy(itr);
					FREE_NULL_BITMAP(my_bitmap);
					rc = SLURM_ERROR;
					goto finished;
				}
			} else {
				if (bg_conf->slurm_debug_flags
				    & DEBUG_FLAG_BG_PICK) {
					int dim;
					char start_geo[SYSTEM_DIMENSIONS+1];
					char geo[SYSTEM_DIMENSIONS+1];
					for (dim = 0; dim < SYSTEM_DIMENSIONS;
					     dim++) {
						start_geo[dim] = alpha_num[
							bg_record->start[dim]];
						geo[dim] = alpha_num[
							bg_record->geo[dim]];
					}
					start_geo[dim] = '\0';
					geo[dim] = '\0';
					info("not adding %s(%s) %s %s %s %u ",
					     bg_record->bg_block_id,
					     bg_record->mp_str,
					     bg_block_state_string(
						     bg_record->state),
					     start_geo, geo,
					     bg_record->cnode_cnt);
				}
				/* just so we don't look at it later */
				bg_record->free_cnt = -1;
			}
		}
		list_iterator_destroy(itr);
		FREE_NULL_BITMAP(my_bitmap);
	} else {
		reset_ba_system(false);
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
			info("No list was given");
	}

	if (request->avail_mp_bitmap)
		ba_set_removable_mps(request->avail_mp_bitmap, 1);

	/* Sub-midplane ("small") block request: try to carve the block
	 * out of existing blocks before allocating fresh midplanes. */
	if (request->size == 1 && cnodes < bg_conf->mp_cnode_cnt) {
		switch(cnodes) {
#ifdef HAVE_BGL
		case 32:
			blockreq.small32 = 4;
			blockreq.small128 = 3;
			break;
		case 128:
			blockreq.small128 = 4;
			break;
#else
		case 16:
			blockreq.small16 = 2;
			blockreq.small32 = 1;
			blockreq.small64 = 1;
			blockreq.small128 = 1;
			blockreq.small256 = 1;
			break;
		case 32:
			blockreq.small32 = 2;
			blockreq.small64 = 1;
			blockreq.small128 = 1;
			blockreq.small256 = 1;
			break;
		case 64:
			blockreq.small64 = 2;
			blockreq.small128 = 1;
			blockreq.small256 = 1;
			break;
		case 128:
			blockreq.small128 = 2;
			blockreq.small256 = 1;
			break;
		case 256:
			blockreq.small256 = 2;
			break;
#endif
		default:
			error("This size %d is unknown on this system",
			      cnodes);
			goto finished;
			break;
		}

		/* Sort the list so the small blocks are in the order
		 * of ionodes. */
		list_sort(block_list, (ListCmpF)bg_record_cmpf_inc);
		request->conn_type[0] = SELECT_SMALL;
		new_blocks = list_create(destroy_bg_record);

		/* check only blocks that are free and small */
		if (_breakup_blocks(block_list, new_blocks,
				    request, my_block_list,
				    true, true)
		    == SLURM_SUCCESS)
			goto finished;

		/* check only blocks that are free and any size */
		if (_breakup_blocks(block_list, new_blocks,
				    request, my_block_list,
				    true, false)
		    == SLURM_SUCCESS)
			goto finished;

		/* check usable blocks that are small with any state */
		if (_breakup_blocks(block_list, new_blocks,
				    request, my_block_list,
				    false, true)
		    == SLURM_SUCCESS)
			goto finished;

		/* check all usable blocks */
		if (_breakup_blocks(block_list, new_blocks,
				    request, my_block_list,
				    false, false)
		    == SLURM_SUCCESS)
			goto finished;

		/* Re-sort the list back to the original order. */
		list_sort(block_list, (ListCmpF)bg_record_sort_aval_inc);
		list_destroy(new_blocks);
		new_blocks = NULL;
		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
			info("small block not able to be placed inside others");
	}

	if (request->conn_type[0] == SELECT_NAV)
		request->conn_type[0] = SELECT_TORUS;

	//debug("going to create %d", request->size);
	if (!new_ba_request(request)) {
		if (request->geometry[0] != (uint16_t)NO_VAL) {
			char *geo = give_geo(request->geometry);
			error("Problems with request for size %d geo %s",
			      request->size, geo);
			xfree(geo);
		} else {
			error("Problems with request for size %d. "
			      "No geo given.",
			      request->size);
		}
		rc = ESLURM_INTERCONNECT_FAILURE;
		goto finished;
	}

	/* try on free midplanes */
	rc = SLURM_SUCCESS;
	if (results)
		list_flush(results);
	else {
#ifdef HAVE_BGQ
		results = list_create(destroy_ba_mp);
#else
		results = list_create(NULL);
#endif
	}

	rc = allocate_block(request, results);
	/* This could be changed in allocate_block so set it back up */
	memcpy(request->geometry, start_geo, sizeof(start_geo));

	if (rc) {
		rc = SLURM_SUCCESS;
		goto setup_records;
	}

	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
		info("allocate failure for size %d base "
		     "partitions of free midplanes",
		     request->size);
	rc = SLURM_ERROR;

	/* BUGFIX: test the pointer before dereferencing it; the original
	 * order (!list_count(my_block_list) || !my_block_list) called
	 * list_count() on a possibly-NULL list. */
	if (!my_block_list || !list_count(my_block_list))
		goto finished;

	/*Try to put block starting in the smallest of the exisiting blocks*/
	itr = list_iterator_create(my_block_list);
	itr2 = list_iterator_create(my_block_list);
	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
		bool is_small = 0;
		/* never check a block with a job running */
		if (bg_record->free_cnt
		    || bg_record->job_running != NO_JOB_RUNNING)
			continue;

		/* Here we are only looking for the first
		   block on the midplane.  So either the count
		   is greater or equal than
		   bg_conf->mp_cnode_cnt or the first bit is
		   set in the ionode_bitmap.
		*/
		if (bg_record->cnode_cnt < bg_conf->mp_cnode_cnt) {
			bool found = 0;
			if (bit_ffs(bg_record->ionode_bitmap) != 0)
				continue;
			/* Check to see if we have other blocks in
			   this midplane that have jobs running.
			*/
			while ((found_record = list_next(itr2))) {
				if (!found_record->free_cnt
				    && (found_record->job_running
					!= NO_JOB_RUNNING)
				    && bit_overlap(bg_record->mp_bitmap,
						   found_record->mp_bitmap)) {
					found = 1;
					break;
				}
			}
			list_iterator_reset(itr2);
			if (found)
				continue;
			is_small = 1;
		}

		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
			info("removing %s(%s) for request %d",
			     bg_record->bg_block_id,
			     bg_record->mp_str, request->size);

		remove_block(bg_record->ba_mp_list, is_small);
		rc = SLURM_SUCCESS;
		if (results)
			list_flush(results);
		else {
#ifdef HAVE_BGQ
			results = list_create(destroy_ba_mp);
#else
			results = list_create(NULL);
#endif
		}

		rc = allocate_block(request, results);
		/* This could be changed in allocate_block so set it back up */
		memcpy(request->geometry, start_geo, sizeof(start_geo));
		if (rc) {
			rc = SLURM_SUCCESS;
			break;
		}

		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
			info("allocate failure for size %d base partitions",
			     request->size);
		rc = SLURM_ERROR;
	}
	list_iterator_destroy(itr);
	list_iterator_destroy(itr2);

setup_records:
	if (rc == SLURM_SUCCESS) {
		/*set up bg_record(s) here */
		new_blocks = list_create(destroy_bg_record);

		blockreq.save_name = request->save_name;
#ifdef HAVE_BGL
		blockreq.blrtsimage = request->blrtsimage;
#endif
		blockreq.linuximage = request->linuximage;
		blockreq.mloaderimage = request->mloaderimage;
		blockreq.ramdiskimage = request->ramdiskimage;
		memcpy(blockreq.conn_type, request->conn_type,
		       sizeof(blockreq.conn_type));

		add_bg_record(new_blocks, &results, &blockreq, 0, 0);
	}

finished:
	if (request->avail_mp_bitmap
	    && (bit_ffc(request->avail_mp_bitmap) == -1))
		ba_reset_all_removed_mps();
	slurm_mutex_unlock(&block_state_mutex);

	/* reset the ones we mucked with.  BUGFIX: my_block_list may be
	 * NULL (handled explicitly above), so guard the iteration. */
	if (my_block_list) {
		itr = list_iterator_create(my_block_list);
		while ((bg_record = (bg_record_t *) list_next(itr))) {
			if (bg_record->free_cnt == -1)
				bg_record->free_cnt = 0;
		}
		list_iterator_destroy(itr);
	}

	xfree(request->save_name);

	if (results)
		list_destroy(results);
	errno = rc;
	return new_blocks;
}
/* * _event_server_service_connection * * Service a connection from a client to receive event packets. Use * wrapper functions minimally, b/c we want to return errors to the * user instead of exitting with errors. * */ static void _event_server_service_connection(int fd) { int recv_len; struct cerebro_event_server_request req; struct cerebrod_event_connection_data *ecd = NULL; char buf[CEREBRO_MAX_PACKET_LEN]; char event_name_buf[CEREBRO_MAX_EVENT_NAME_LEN+1]; char *event_name_ptr = NULL; int32_t version; int *fdptr = NULL; List connections = NULL; assert(fd >= 0); memset(&req, '\0', sizeof(struct cerebro_event_server_request)); if ((recv_len = receive_data(fd, CEREBRO_EVENT_SERVER_REQUEST_PACKET_LEN, buf, CEREBRO_MAX_PACKET_LEN, CEREBRO_EVENT_SERVER_PROTOCOL_CLIENT_TIMEOUT_LEN, NULL)) < 0) goto cleanup; if (recv_len < sizeof(version)) goto cleanup; if (_event_server_request_check_version(buf, recv_len, &version) < 0) { _event_server_err_only_response(fd, version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_VERSION_INVALID); goto cleanup; } if (recv_len != CEREBRO_EVENT_SERVER_REQUEST_PACKET_LEN) { _event_server_err_only_response(fd, version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_PACKET_INVALID); goto cleanup; } if (_event_server_request_unmarshall(&req, buf, recv_len) < 0) { _event_server_err_only_response(fd, version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_PACKET_INVALID); goto cleanup; } _event_server_request_dump(&req); /* Guarantee ending '\0' character */ memset(event_name_buf, '\0', CEREBRO_MAX_EVENT_NAME_LEN+1); memcpy(event_name_buf, req.event_name, CEREBRO_MAX_EVENT_NAME_LEN); if (!strlen(event_name_buf)) { _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_EVENT_INVALID); goto cleanup; } /* Is it the special event-names request */ if (!strcmp(event_name_buf, CEREBRO_EVENT_NAMES)) { pthread_t thread; pthread_attr_t attr; int *arg; Pthread_attr_init(&attr); Pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
Pthread_attr_setstacksize(&attr, CEREBROD_THREAD_STACKSIZE); arg = Malloc(sizeof(int)); *arg = fd; Pthread_create(&thread, &attr, _respond_with_event_names, (void *)arg); Pthread_attr_destroy(&attr); return; } if (!event_names) { _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_EVENT_INVALID); goto cleanup; } /* Event names is not changeable - so no need for a lock */ if (!(event_name_ptr = list_find_first(event_names, _event_names_compare, event_name_buf))) { _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_EVENT_INVALID); goto cleanup; } if (!(ecd = (struct cerebrod_event_connection_data *)malloc(sizeof(struct cerebrod_event_connection_data)))) { CEREBRO_ERR(("malloc: %s", strerror(errno))); _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_INTERNAL_ERROR); goto cleanup; } ecd->event_name = event_name_ptr; ecd->fd = fd; if (!(fdptr = (int *)malloc(sizeof(int)))) { CEREBRO_ERR(("malloc: %s", strerror(errno))); _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_INTERNAL_ERROR); goto cleanup; } *fdptr = fd; Pthread_mutex_lock(&event_connections_lock); if (!list_append(event_connections, ecd)) { CEREBRO_ERR(("list_append: %s", strerror(errno))); _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_INTERNAL_ERROR); goto cleanup; } if (!(connections = Hash_find(event_connections_index, ecd->event_name))) { if (!(connections = list_create((ListDelF)free))) { CEREBRO_ERR(("list_create: %s", strerror(errno))); _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_INTERNAL_ERROR); goto cleanup; } if (!Hash_insert(event_connections_index, ecd->event_name, connections)) { CEREBRO_ERR(("Hash_insert: %s", strerror(errno))); _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_INTERNAL_ERROR); list_destroy(connections); goto cleanup; } } if 
(!list_append(connections, fdptr)) { CEREBRO_ERR(("list_append: %s", strerror(errno))); _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_INTERNAL_ERROR); goto cleanup; } Pthread_mutex_unlock(&event_connections_lock); /* Clear this pointer so we know it's stored away in a list */ fdptr = NULL; _event_server_err_only_response(fd, req.version, CEREBRO_EVENT_SERVER_PROTOCOL_ERR_SUCCESS); return; cleanup: if (ecd) free(ecd); if (fdptr) free(fdptr); /* ignore potential error, we're in the error path already */ close(fd); return; }
/*
** Perform a reduce action and the shift that must immediately
** follow the reduce.
**
** NOTE: this function is Lemon-generated; the bodies between the
** "#line" directives are the grammar author's action code from
** src/parser.y and must not be edited here.
*/
static void yy_reduce(
  yyParser *yypParser,         /* The parser */
  int yyruleno                 /* Number of the rule by which to reduce */
){
  int yygoto;                     /* The next state */
  int yyact;                      /* The next action */
  YYMINORTYPE yygotominor;        /* The LHS of the rule reduced */
  yyStackEntry *yymsp;            /* The top of the parser's stack */
  int yysize;                     /* Amount to pop the stack */
  ParseARG_FETCH;
  yymsp = &yypParser->yystack[yypParser->yyidx];
#ifndef NDEBUG
  if( yyTraceFILE && yyruleno>=0
        && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
    fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt,
      yyRuleName[yyruleno]);
  }
#endif /* NDEBUG */

  /* Silence complaints from purify about yygotominor being uninitialized
  ** in some cases when it is copied into the stack after the following
  ** switch.  yygotominor is uninitialized when a rule reduces that does
  ** not set the value of its left-hand side nonterminal.  Leaving the
  ** value of the nonterminal uninitialized is utterly harmless as long
  ** as the value is never used.  So really the only thing this code
  ** accomplishes is to quieten purify.
  **
  ** 2007-01-16:  The wireshark project (www.wireshark.org) reports that
  ** without this code, their parser segfaults.  I'm not sure what there
  ** parser is doing to make this happen.  This is the second bug report
  ** from wireshark this week.  Clearly they are stressing Lemon in ways
  ** that it has not been previously stressed...  (SQLite ticket #2172)
  */
  /*memset(&yygotominor, 0, sizeof(yygotominor));*/
  yygotominor = yyzerominor;

  switch( yyruleno ){
  /* Beginning here are the reduction cases.  A typical example
  ** follows:
  **   case 0:
  **  #line <lineno> <grammarfile>
  **     { ... }           // User supplied code
  **  #line <lineno> <thisfile>
  **     break;
  */
      case 0: /* config ::= vars */
#line 30 "src/parser.y"
{ state->settings = yymsp[0].minor.yy13; }
#line 754 "src/parser.c"
        break;
      case 1: /* vars ::= vars assignment */
#line 34 "src/parser.y"
{ yygotominor.yy13 = tst_insert(yymsp[-1].minor.yy13, bdata(yymsp[0].minor.yy7->key->data),
            blength(yymsp[0].minor.yy7->key->data), yymsp[0].minor.yy7);
    }
#line 761 "src/parser.c"
        break;
      /* NOTE(review): case 2 seeds the insert with yygotominor.yy13,
      ** which at this point is the zero value (yyzerominor above) -
      ** presumably tst_insert treats that as an empty tree; confirm
      ** against the grammar author's intent. */
      case 2: /* vars ::= assignment */
#line 39 "src/parser.y"
{ yygotominor.yy13 = tst_insert(yygotominor.yy13, bdata(yymsp[0].minor.yy7->key->data),
            blength(yymsp[0].minor.yy7->key->data), yymsp[0].minor.yy7);
    }
#line 768 "src/parser.c"
        break;
      case 3: /* vars ::= vars EOF */
#line 43 "src/parser.y"
{ yygotominor.yy13 = yymsp[-1].minor.yy13;   yy_destructor(yypParser,1,&yymsp[0].minor);
}
#line 774 "src/parser.c"
        break;
      case 4: /* expr ::= QSTRING */
#line 47 "src/parser.y"
{ yygotominor.yy8 = Value_create(VAL_QSTRING, yymsp[0].minor.yy0); }
#line 779 "src/parser.c"
        break;
      case 5: /* expr ::= NUMBER */
#line 48 "src/parser.y"
{ yygotominor.yy8 = Value_create(VAL_NUMBER, yymsp[0].minor.yy0); }
#line 784 "src/parser.c"
        break;
      case 6: /* expr ::= class */
#line 49 "src/parser.y"
{ yygotominor.yy8 = Value_create(VAL_CLASS, yymsp[0].minor.yy11); }
#line 789 "src/parser.c"
        break;
      case 7: /* expr ::= list */
#line 50 "src/parser.y"
{ yygotominor.yy8 = Value_create(VAL_LIST, yymsp[0].minor.yy46); }
#line 794 "src/parser.c"
        break;
      case 8: /* expr ::= hash */
#line 51 "src/parser.y"
{ yygotominor.yy8 = Value_create(VAL_HASH, yymsp[0].minor.yy13); }
#line 799 "src/parser.c"
        break;
      case 9: /* expr ::= IDENT */
#line 52 "src/parser.y"
{ yygotominor.yy8 = Value_create(VAL_REF, yymsp[0].minor.yy0); }
#line 804 "src/parser.c"
        break;
      case 10: /* assignment ::= IDENT EQ expr */
#line 57 "src/parser.y"
{
        yygotominor.yy7 = malloc(sizeof(Pair));
        yygotominor.yy7->key = yymsp[-2].minor.yy0;
        yygotominor.yy7->value = yymsp[0].minor.yy8;
      yy_destructor(yypParser,5,&yymsp[-1].minor);
}
#line 812 "src/parser.c"
        break;
      case 11: /* class ::= CLASS LPAREN parameters RPAREN */
#line 64 "src/parser.y"
{
        yygotominor.yy11 = calloc(sizeof(Class), 1);
        yygotominor.yy11->id = -1;
        yygotominor.yy11->ident = yymsp[-3].minor.yy0;
        yygotominor.yy11->params = yymsp[-1].minor.yy13;
      yy_destructor(yypParser,7,&yymsp[-2].minor);
  yy_destructor(yypParser,8,&yymsp[0].minor);
}
#line 819 "src/parser.c"
        break;
      case 12: /* parameters ::= parameters COMMA assignment */
#line 69 "src/parser.y"
{ yygotominor.yy13 = tst_insert(yymsp[-2].minor.yy13, bdata(yymsp[0].minor.yy7->key->data),
            blength(yymsp[0].minor.yy7->key->data), yymsp[0].minor.yy7);   yy_destructor(yypParser,9,&yymsp[-1].minor);
}
#line 825 "src/parser.c"
        break;
      case 13: /* parameters ::= parameters assignment */
#line 72 "src/parser.y"
{ yygotominor.yy13 = tst_insert(yymsp[-1].minor.yy13, bdata(yymsp[0].minor.yy7->key->data),
            blength(yymsp[0].minor.yy7->key->data), yymsp[0].minor.yy7);
    }
#line 830 "src/parser.c"
        break;
      case 14: /* parameters ::= */
      case 22: /* hash_elements ::= */ yytestcase(yyruleno==22);
#line 75 "src/parser.y"
{
        yygotominor.yy13 = NULL;
    }
#line 836 "src/parser.c"
        break;
      case 15: /* list ::= LBRACE list_elements RBRACE */
#line 79 "src/parser.y"
{
        yygotominor.yy46 = yymsp[-1].minor.yy46;
      yy_destructor(yypParser,10,&yymsp[-2].minor);
  yy_destructor(yypParser,11,&yymsp[0].minor);
}
#line 843 "src/parser.c"
        break;
      case 16: /* list_elements ::= list_elements COMMA expr */
#line 83 "src/parser.y"
{ yygotominor.yy46 = yymsp[-2].minor.yy46; list_append(yygotominor.yy46, lnode_create(yymsp[0].minor.yy8));
      yy_destructor(yypParser,9,&yymsp[-1].minor);
}
#line 849 "src/parser.c"
        break;
      case 17: /* list_elements ::= list_elements expr */
#line 86 "src/parser.y"
{ yygotominor.yy46 = yymsp[-1].minor.yy46; list_append(yygotominor.yy46, lnode_create(yymsp[0].minor.yy8));
    }
#line 854 "src/parser.c"
        break;
      case 18: /* list_elements ::= */
#line 89 "src/parser.y"
{
        yygotominor.yy46 = list_create(LISTCOUNT_T_MAX);
    }
#line 859 "src/parser.c"
        break;
      case 19: /* hash ::= LBRACKET hash_elements RBRACKET */
#line 93 "src/parser.y"
{
        yygotominor.yy13 = yymsp[-1].minor.yy13;
      yy_destructor(yypParser,12,&yymsp[-2].minor);
  yy_destructor(yypParser,13,&yymsp[0].minor);
}
#line 866 "src/parser.c"
        break;
      case 20: /* hash_elements ::= hash_elements COMMA hash_pair */
#line 97 "src/parser.y"
{ yygotominor.yy13 = tst_insert(yymsp[-2].minor.yy13, bdata(yymsp[0].minor.yy17->key->data),
            blength(yymsp[0].minor.yy17->key->data), yymsp[0].minor.yy17);   yy_destructor(yypParser,9,&yymsp[-1].minor);
}
#line 872 "src/parser.c"
        break;
      case 21: /* hash_elements ::= hash_elements hash_pair */
#line 100 "src/parser.y"
{ yygotominor.yy13 = tst_insert(yymsp[-1].minor.yy13, bdata(yymsp[0].minor.yy17->key->data),
            blength(yymsp[0].minor.yy17->key->data), yymsp[0].minor.yy17);
    }
#line 877 "src/parser.c"
        break;
      case 23: /* hash_pair ::= QSTRING COLON expr */
#line 108 "src/parser.y"
{
        yygotominor.yy17 = malloc(sizeof(Pair));
        yygotominor.yy17->key = yymsp[-2].minor.yy0;
        yygotominor.yy17->value = yymsp[0].minor.yy8;
      yy_destructor(yypParser,14,&yymsp[-1].minor);
}
#line 885 "src/parser.c"
        break;
      default:
        break;
  };
  /* Pop the RHS symbols off the stack, then either push the LHS via the
  ** fast path (NDEBUG, at least one symbol popped) or go through
  ** yy_shift(), which also performs the stack-overflow test. */
  yygoto = yyRuleInfo[yyruleno].lhs;
  yysize = yyRuleInfo[yyruleno].nrhs;
  yypParser->yyidx -= yysize;
  yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto);
  if( yyact < YYNSTATE ){
#ifdef NDEBUG
    /* If we are not debugging and the reduce action popped at least
    ** one element off the stack, then we can push the new element back
    ** onto the stack here, and skip the stack overflow test in yy_shift().
    ** That gives a significant speed improvement. */
    if( yysize ){
      yypParser->yyidx++;
      yymsp -= yysize-1;
      yymsp->stateno = (YYACTIONTYPE)yyact;
      yymsp->major = (YYCODETYPE)yygoto;
      yymsp->minor = yygotominor;
    }else
#endif
    {
      yy_shift(yypParser,yyact,yygoto,&yygotominor);
    }
  }else{
    assert( yyact == YYNSTATE + YYNRULE + 1 );
    yy_accept(yypParser);
  }
}
int udpclient(int argc, char *argv[]) { list_t *clients = NULL; list_t *conn_clients; client_t *client; client_t *tunnel; client_t *client2; char data[MSG_MAX_LEN]; char addrstr[ADDRSTRLEN]; char taddrstr[ADDRSTRLEN]; socket_t *tcp_sock = NULL; socket_t *udp_sock = NULL; socket_t *next_sock = NULL; struct timeval curr_time; struct timeval check_time; struct timeval check_interval; struct timeval timeout; fd_set client_fds; fd_set read_fds; uint16_t tmp_id; uint8_t tmp_type; uint16_t tmp_len; // uint16_t tmp_req_id; int num_fds; uint32_t sourceid; int ret; int i; signal(SIGINT, &signal_handler); i = 0; lhost = (argc - i == 5) ? NULL : argv[i++]; lport = argv[i++]; rport = argv[i++]; phost = argv[i++]; pport = argv[i++]; relays = atoi(argv[i++]); if(debug_level >= DEBUG_LEVEL1) printf("relays need %d \n",relays); /* Check validity of ports (can't check ip's b/c might be host names) */ ERROR_GOTO(!isnum(lport), "Invalid listen port.", done); ERROR_GOTO(!isnum(rport), "Invalid recv port.", done); ERROR_GOTO(!isnum(pport), "Invalid inter port.", done); //ERROR_GOTO(!isnum(rport), "Invalid remote port.", done); srand(inet_addr(lhost)); localid=(rand()); generate_rsakey(lhost); if(debug_level >= DEBUG_LEVEL1) { printf("local id %d \n",localid); } next_req_id = rand() % 0xffff; /* Create an empty list for the clients */ clients = list_create(sizeof(client_t), p_client_cmp, p_client_copy, p_client_free, 1); ERROR_GOTO(clients == NULL, "Error creating clients list.", done); /* Create and empty list for the connecting clients */ conn_clients = list_create(sizeof(client_t), p_client_cmp, p_client_copy, p_client_free, 1); ERROR_GOTO(conn_clients == NULL, "Error creating conn_clients list.", done); relay_clients = list_create(sizeof(client_t), p_client_cmp, p_client_copy, p_client_free, 1); ERROR_GOTO(relay_clients == NULL, "Error creating clients list.", done); /* Create a TCP server socket to listen for incoming connections */ tcp_serv = sock_create(lhost, lport, ipver, 
SOCK_TYPE_TCP, 1, 1); ERROR_GOTO(tcp_serv == NULL, "Error creating TCP socket.", done); udp_serv = sock_create(lhost, rport,ipver, SOCK_TYPE_UDP, 1, 1); ERROR_GOTO(udp_serv == NULL, "Error creating TCP socket.", done); if(debug_level >= DEBUG_LEVEL1) { printf("Listening on TCP %s,UDP %s \n", sock_get_str(tcp_serv, addrstr, sizeof(addrstr)),sock_get_str(udp_serv, taddrstr, sizeof(taddrstr))); } next_sock = sock_create(phost, pport, ipver, SOCK_TYPE_UDP, 0, 1); msg_send_req(next_sock,lhost,rport,0,localid); sock_free(next_sock); next_sock = NULL; FD_ZERO(&client_fds); /* Initialize all the timers */ timerclear(&timeout); check_interval.tv_sec = 0; check_interval.tv_usec = 500000; gettimeofday(&check_time, NULL); while(running) { if(!timerisset(&timeout)) timeout.tv_usec = 50000; read_fds = client_fds; FD_SET(SOCK_FD(tcp_serv), &read_fds); FD_SET(SOCK_FD(udp_serv), &read_fds); ret = select(FD_SETSIZE, &read_fds, NULL, NULL, &timeout); PERROR_GOTO(ret < 0, "select", done); num_fds = ret; gettimeofday(&curr_time, NULL); /* Go through all the clients and check if didn't get an ACK for sent data during the timeout period */ if(timercmp(&curr_time, &check_time, >)) { for(i = 0; i < LIST_LEN(clients); i++) { client = list_get_at(clients, i); ret = client_check_and_resend(client, curr_time); if(ret == -2) { disconnect_and_remove_client(CLIENT_ID(client), clients, &client_fds, 1); i--; continue; } ret = client_check_and_send_keepalive(client, curr_time); if(ret == -2) { disconnect_and_remove_client(CLIENT_ID(client), clients, &client_fds, 1); i--; } } timeradd(&curr_time, &check_interval, &check_time); } if(num_fds == 0) continue; /* Check if pending TCP connection to accept and create a new client and UDP connection if one is ready */ if(FD_ISSET(SOCK_FD(tcp_serv), &read_fds)) { tcp_sock = sock_accept(tcp_serv); if(tcp_sock == NULL) continue; if(SelectMethod(tcp_sock->fd)==-1) { if(debug_level >= DEBUG_LEVEL1) printf("socks version error\n"); return-1; } 
rhost=ParseCommand(tcp_sock->fd); if (0<LIST_LEN(relay_clients)) { tunnel = list_get_at(relay_clients, 0); udp_sock =sock_copy(CLIENT_TCP_SOCK(tunnel)); SOCK_FD(udp_sock)=socket(AF_INET, SOCK_DGRAM, 0); } if(udp_sock == NULL) { sock_close(tcp_sock); sock_free(tcp_sock); continue; } client = client_create(next_req_id++, localid, tcp_sock, udp_sock, 1); memcpy(client->rsakey,tunnel->rsakey,strlen(tunnel->rsakey)); printf("expid rsakey is %s",client->rsakey); if(debug_level >= DEBUG_LEVEL1) printf("create client id %d \n",CLIENT_ID(client)); if(!client || !tcp_sock || !udp_sock) { if(tcp_sock) sock_close(tcp_sock); if(udp_sock) sock_close(udp_sock); } else { client2 = list_add(conn_clients, client, 1); client_free(client); client = NULL; if(debug_level >= DEBUG_LEVEL1) { sock_get_str(CLIENT_TCP_SOCK(client2), addrstr, sizeof(addrstr)); printf("tunnel(%d): local %s ",client2->sourceid, addrstr); sock_get_str(CLIENT_UDP_SOCK(client2), addrstr, sizeof(addrstr)); printf("to %s \n",addrstr); } client_send_hello(client2,rhost,CLIENT_ID(client2)); client_add_tcp_fd_to_set(client2, &client_fds); //client_add_udp_fd_to_set(client2, &client_fds); } sock_free(tcp_sock); sock_free(udp_sock); tcp_sock = NULL; udp_sock = NULL; num_fds--; } /* Check for UDP data */ if(FD_ISSET(SOCK_FD(udp_serv), &read_fds)) { //ret = client_recv_udp_msg(client, data, sizeof(data), // &tmp_id, &tmp_type, &tmp_len,&sourceid); ret = msg_recv_msg(udp_serv, data, sizeof(data), &tmp_id, &tmp_type, &tmp_len,&sourceid); if(debug_level >= DEBUG_LEVEL2) printf("recv msg from %d type %d %d bytes \n ",sourceid,tmp_type,tmp_len); if(ret == 0) ret = handle_message(tmp_id, tmp_type, data, tmp_len,sourceid,clients, conn_clients); /*if(ret < 0) { disconnect_and_remove_client(tmp_id, clients, &client_fds, 1); } */ } /* Check if data is ready from any of the clients */ for(i = 0; i < LIST_LEN(clients); i++) { client = list_get_at(clients, i); /* Check for TCP data */ if(num_fds > 0 && client_tcp_fd_isset(client, 
&read_fds)) { ret = client_recv_tcp_data(client); if(ret == -1) { disconnect_and_remove_client(CLIENT_ID(client), clients, &client_fds, 1); i--; continue; } else if(ret == -2) { client_mark_to_disconnect(client); disconnect_and_remove_client(CLIENT_ID(client), clients, &client_fds, 0); } num_fds--; } /* send any TCP data that was ready */ ret = client_send_udp_data(client); if(ret < 0) { disconnect_and_remove_client(CLIENT_ID(client), clients, &client_fds, 1); i--; } } /* Finally, send any udp data that's still in the queue */ for(i = 0; i < LIST_LEN(clients); i++) { client = list_get_at(clients, i); ret = client_send_udp_data(client); if(ret < 0 || client_ready_to_disconnect(client)) { disconnect_and_remove_client(CLIENT_ID(client), clients, &client_fds, 1); i--; } } } done: if(debug_level >= DEBUG_LEVEL1) printf("Cleaning up...\n"); if(tcp_serv) { sock_close(tcp_serv); sock_free(tcp_serv); } if(udp_serv) { sock_close(udp_serv); sock_free(udp_serv); } if(clients) list_free(clients); if(conn_clients) list_free(conn_clients); if(debug_level >= DEBUG_LEVEL1) printf("Goodbye.\n"); return 0; }
/*
 * Synchronize BG block state to that of currently active jobs.
 * This can recover from slurmctld crashes when block usership
 * changes were queued
 *
 * IN job_list - controller's job list
 * RET SLURM_SUCCESS, or SLURM_ERROR when job_list is missing (or, in the
 *     vestigial branch below, when block_list could not be built).
 */
extern int sync_jobs(List job_list)
{
	ListIterator itr;
	struct job_record *job_ptr = NULL;
	List block_list = NULL, kill_list = NULL;
	static bool run_already = false;
	bg_record_t *bg_record = NULL;

	/* Execute only on initial startup. We don't support bgblock
	 * creation on demand today, so there is no need to re-sync data. */
	if (run_already)
		return SLURM_SUCCESS;
	run_already = true;

	if (!job_list) {
		error("sync_jobs: no job_list");
		return SLURM_ERROR;
	}

	slurm_mutex_lock(&block_state_mutex);
	/* Insure that all running jobs own the specified block */
	itr = list_iterator_create(job_list);
	while ((job_ptr = list_next(itr))) {
		bg_action_t *bg_action_ptr = NULL;

		/* BUGFIX: reset for every job.  Without this, a job whose
		 * bg_block_id or nodes is NULL inherited the record found
		 * for the PREVIOUS job and was synced against the wrong
		 * block instead of being queued for kill. */
		bg_record = NULL;

		if (!IS_JOB_RUNNING(job_ptr) && !IS_JOB_COMPLETING(job_ptr))
			continue;

		bg_action_ptr = xmalloc(sizeof(bg_action_t));
		if (IS_JOB_COMPLETING(job_ptr))
			bg_action_ptr->op = TERM_OP;
		else
			bg_action_ptr->op = START_OP;
		bg_action_ptr->job_ptr = job_ptr;

		get_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_BLOCK_ID,
				   &(bg_action_ptr->bg_block_id));
#ifdef HAVE_BG_L_P
# ifdef HAVE_BGL
		get_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_BLRTS_IMAGE,
				   &(bg_action_ptr->blrtsimage));
# else
		get_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_CONN_TYPE,
				   &(bg_action_ptr->conn_type));
# endif
		get_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_LINUX_IMAGE,
				   &(bg_action_ptr->linuximage));
		get_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_RAMDISK_IMAGE,
				   &(bg_action_ptr->ramdiskimage));
#endif
		get_select_jobinfo(job_ptr->select_jobinfo->data,
				   SELECT_JOBDATA_MLOADER_IMAGE,
				   &(bg_action_ptr->mloaderimage));

		if (bg_action_ptr->bg_block_id == NULL) {
			error("Running job %u has bgblock==NULL",
			      job_ptr->job_id);
		} else if (job_ptr->nodes == NULL) {
			error("Running job %u has nodes==NULL",
			      job_ptr->job_id);
		} else if (!(bg_record = find_bg_record_in_list(
				     bg_lists->main,
				     bg_action_ptr->bg_block_id))) {
			error("Kill job %u belongs to defunct "
			      "bgblock %s",
			      job_ptr->job_id,
			      bg_action_ptr->bg_block_id);
		}

		if (!bg_record) {
			/* Can't fail it just now, we have locks in place. */
			bg_status_add_job_kill_list(job_ptr, &kill_list);
			_destroy_bg_action(bg_action_ptr);
			continue;
		}
		/* _sync_agent will destroy the bg_action_ptr */
		_sync_agent(bg_action_ptr, bg_record);
	}
	list_iterator_destroy(itr);

	/* Collect the blocks with no remaining jobs so their users can
	 * be cleared after the lock is released. */
	block_list = list_create(destroy_bg_record);
	itr = list_iterator_create(bg_lists->main);
	while ((bg_record = list_next(itr))) {
		bg_record_t *rm_record;
		if (bg_record->job_ptr
		    || (bg_record->job_list
			&& list_count(bg_record->job_list)))
			continue;
		rm_record = xmalloc(sizeof(bg_record_t));
		rm_record->magic = BLOCK_MAGIC;
		rm_record->bg_block_id = xstrdup(bg_record->bg_block_id);
		rm_record->mp_str = xstrdup(bg_record->mp_str);
		list_append(block_list, rm_record);
	}
	list_iterator_destroy(itr);
	slurm_mutex_unlock(&block_state_mutex);

	if (kill_list) {
		/* slurmctld is already locked up, so handle this right after
		 * the unlock of block_state_mutex. */
		bg_status_process_kill_job_list(kill_list, JOB_BOOT_FAIL, 1);
		FREE_NULL_LIST(kill_list);
	}

	/* Insure that all other blocks are free of users */
	if (block_list) {
		itr = list_iterator_create(block_list);
		while ((bg_record = list_next(itr))) {
			info("Queue clearing of users of BG block %s",
			     bg_record->bg_block_id);
			term_jobs_on_block(bg_record->bg_block_id);
		}
		list_iterator_destroy(itr);
		FREE_NULL_LIST(block_list);
	} else {
		/* this should never happen,
		 * vestigial logic */
		error("sync_jobs: no block_list");
		return SLURM_ERROR;
	}
	return SLURM_SUCCESS;
}
/*
 * Fan out one partition-info request per federated cluster, then merge
 * all responses into a single partition_info_msg_t.
 *
 * On success returns SLURM_PROTOCOL_SUCCESS and stores the merged
 * message in *part_info_msg_pptr (caller owns/frees it).  If no
 * cluster produced a response, sets errno and returns SLURM_ERROR via
 * slurm_seterrno_ret().
 */
static int _load_fed_parts(slurm_msg_t *req_msg,
			   partition_info_msg_t **part_info_msg_pptr,
			   uint16_t show_flags, char *cluster_name,
			   slurmdb_federation_rec_t *fed)
{
	int cluster_inx = 0, i;
	load_part_resp_struct_t *part_resp;
	partition_info_msg_t *orig_msg = NULL, *new_msg = NULL;
	uint32_t new_rec_cnt;
	slurmdb_cluster_rec_t *cluster;
	ListIterator iter;
	pthread_attr_t load_attr;
	int pthread_count = 0;
	pthread_t *load_thread = NULL;
	load_part_req_struct_t *load_args;
	List resp_msg_list;

	*part_info_msg_pptr = NULL;

	/* Spawn one pthread per cluster to collect partition information */
	resp_msg_list = list_create(NULL);
	/* BUG FIX: was sizeof(pthread_attr_t); the array holds pthread_t
	 * elements (over-allocation on most platforms, but wrong type). */
	load_thread = xmalloc(sizeof(pthread_t) *
			      list_count(fed->cluster_list));
	iter = list_iterator_create(fed->cluster_list);
	while ((cluster = (slurmdb_cluster_rec_t *) list_next(iter))) {
		int retries = 0;
		if ((cluster->control_host == NULL) ||
		    (cluster->control_host[0] == '\0'))
			continue;	/* Cluster down */
		load_args = xmalloc(sizeof(load_part_req_struct_t));
		load_args->cluster = cluster;
		load_args->cluster_inx = cluster_inx++;
		load_args->req_msg = req_msg;
		load_args->resp_msg_list = resp_msg_list;
		load_args->show_flags = show_flags;
		slurm_attr_init(&load_attr);
		if (pthread_attr_setdetachstate(&load_attr,
						PTHREAD_CREATE_JOINABLE))
			error("pthread_attr_setdetachstate error %m");
		/* Retry pthread_create with backoff; give up after
		 * MAX_RETRIES attempts. */
		while (pthread_create(&load_thread[pthread_count], &load_attr,
				      _load_part_thread,
				      (void *) load_args)) {
			error("pthread_create error %m");
			if (++retries > MAX_RETRIES)
				fatal("Can't create pthread");
			usleep(10000);	/* sleep and retry */
		}
		pthread_count++;
		slurm_attr_destroy(&load_attr);
	}
	list_iterator_destroy(iter);

	/* Wait for all pthreads to complete */
	for (i = 0; i < pthread_count; i++)
		pthread_join(load_thread[i], NULL);
	xfree(load_thread);

	/* Maintain a consistent cluster/node ordering */
	list_sort(resp_msg_list, _sort_by_cluster_inx);

	/* Merge the responses into a single response message */
	iter = list_iterator_create(resp_msg_list);
	while ((part_resp = (load_part_resp_struct_t *) list_next(iter))) {
		new_msg = part_resp->new_msg;
		if (!orig_msg) {
			/* First response becomes the accumulator */
			orig_msg = new_msg;
			*part_info_msg_pptr = orig_msg;
		} else {
			/* Merge the node records */
			orig_msg->last_update = MIN(orig_msg->last_update,
						    new_msg->last_update);
			new_rec_cnt = orig_msg->record_count +
				new_msg->record_count;
			if (new_msg->record_count) {
				orig_msg->partition_array =
					xrealloc(orig_msg->partition_array,
						 sizeof(partition_info_t) *
						 new_rec_cnt);
				(void) memcpy(orig_msg->partition_array +
					      orig_msg->record_count,
					      new_msg->partition_array,
					      sizeof(partition_info_t) *
					      new_msg->record_count);
				orig_msg->record_count = new_rec_cnt;
			}
			xfree(new_msg->partition_array);
			xfree(new_msg);
		}
		xfree(part_resp);
	}
	list_iterator_destroy(iter);
	FREE_NULL_LIST(resp_msg_list);

	if (!orig_msg)
		slurm_seterrno_ret(SLURM_ERROR);

	return SLURM_PROTOCOL_SUCCESS;
}
/*
 * Build a stepd_step_rec_t for a batch job launch message.
 *
 * Validates the uid/gid and accounting frequency first; returns NULL on
 * validation failure.  On success returns a newly allocated job record
 * with a single task whose stdin/stdout/stderr are derived from the
 * launch message (stderr defaults to stdout, stdin to /dev/null).
 * Caller owns the returned record.
 */
extern stepd_step_rec_t *
batch_stepd_step_rec_create(batch_job_launch_msg_t *msg)
{
	stepd_step_rec_t *job;
	srun_info_t *srun = NULL;
	char *in_name;

	xassert(msg != NULL);

	debug3("entering batch_stepd_step_rec_create");

	if (!_valid_uid_gid((uid_t)msg->uid, &(msg->gid), &(msg->user_name)))
		return NULL;

	if (_check_acct_freq_task(msg->job_mem, msg->acctg_freq))
		return NULL;

	job = xmalloc(sizeof(stepd_step_rec_t));

	job->state = SLURMSTEPD_STEP_STARTING;
	if (msg->cpus_per_node)
		job->cpus = msg->cpus_per_node[0];
	job->node_tasks = 1;
	job->ntasks = msg->ntasks;
	job->jobid = msg->job_id;
	job->stepid = msg->step_id;
	job->array_job_id = msg->array_job_id;
	job->array_task_id = msg->array_task_id;

	job->batch = true;
	/* This needs to happen before acct_gather_profile_startpoll
	   and only really looks at the profile in the job. */
	acct_gather_profile_g_node_step_start(job);
	/* needed for the jobacct_gather plugin to start */
	acct_gather_profile_startpoll(msg->acctg_freq,
				      conf->job_acct_gather_freq);

	job->multi_prog = 0;
	job->open_mode = msg->open_mode;
	job->overcommit = (bool) msg->overcommit;
	job->node_name = xstrdup(conf->node_name);

	job->uid = (uid_t) msg->uid;
	job->user_name = xstrdup(msg->user_name);
	job->gid = (gid_t) msg->gid;
	job->cwd = xstrdup(msg->work_dir);

	job->ckpt_dir = xstrdup(msg->ckpt_dir);
	job->restart_dir = xstrdup(msg->restart_dir);

	job->env = _array_copy(msg->envc, msg->environment);
	job->eio = eio_handle_create();
	job->sruns = list_create((ListDelF) _srun_info_destructor);
	job->envtp = xmalloc(sizeof(env_t));
	job->envtp->jobid = -1;
	job->envtp->stepid = -1;
	job->envtp->procid = -1;
	job->envtp->localid = -1;
	job->envtp->nodeid = -1;
	job->envtp->distribution = 0;
	job->cpu_bind_type = msg->cpu_bind_type;
	job->cpu_bind = xstrdup(msg->cpu_bind);
	job->envtp->mem_bind_type = 0;
	job->envtp->mem_bind = NULL;
	job->envtp->ckpt_dir = NULL;
	job->envtp->restart_cnt = msg->restart_cnt;

	/* NOTE: a second, redundant "job->cpus = msg->cpus_per_node[0]"
	 * assignment (identical to the one near the top) was removed. */

	format_core_allocs(msg->cred, conf->node_name, conf->cpus,
			   &job->job_alloc_cores, &job->step_alloc_cores,
			   &job->job_mem, &job->step_mem);
	if (job->step_mem)
		jobacct_gather_set_mem_limit(job->jobid, NO_VAL,
					     job->step_mem);
	else if (job->job_mem)
		jobacct_gather_set_mem_limit(job->jobid, NO_VAL,
					     job->job_mem);

	get_cred_gres(msg->cred, conf->node_name,
		      &job->job_gres_list, &job->step_gres_list);

	srun = srun_info_create(NULL, NULL, NULL);

	list_append(job->sruns, (void *) srun);

	if (msg->argc) {
		job->argc = msg->argc;
		job->argv = _array_copy(job->argc, msg->argv);
	} else {
		job->argc = 1;
		/* job script has not yet been written out to disk --
		 * argv will be filled in later by _make_batch_script() */
		job->argv = (char **) xmalloc(2 * sizeof(char *));
	}

	job->task = xmalloc(sizeof(stepd_step_task_info_t *));
	if (msg->std_err == NULL)
		msg->std_err = xstrdup(msg->std_out);

	if (msg->std_in == NULL)
		in_name = xstrdup("/dev/null");
	else
		in_name = fname_create(job, msg->std_in, 0);

	job->task[0] = task_info_create(0, 0, in_name,
					_batchfilename(job, msg->std_out),
					_batchfilename(job, msg->std_err));
	job->task[0]->argc = job->argc;
	job->task[0]->argv = job->argv;

#ifdef HAVE_ALPS_CRAY
	select_g_select_jobinfo_get(msg->select_jobinfo,
				    SELECT_JOBDATA_RESV_ID,
				    &job->resv_id);
#endif

	return job;
}
/*
 * Build (or reuse) the static list of sview_front_end_info_t records
 * from a front_end_info_msg_t.  If nothing changed and a list already
 * exists, the cached list is returned unchanged (update_color path).
 * Returns NULL only on list-creation failure.
 */
static List _create_front_end_info_list(
	front_end_info_msg_t *front_end_info_ptr, int changed)
{
	char *upper = NULL;
	char user[32], time_str[32];
	static List info_list = NULL;	/* cached result across calls */
	List last_list = NULL;
	ListIterator last_list_itr = NULL;
	int i = 0;
	sview_front_end_info_t *sview_front_end_info_ptr = NULL;
	front_end_info_t *front_end_ptr = NULL;

	/* No new data: hand back the cached list as-is */
	if (!changed && info_list)
		goto update_color;

	/* Keep the previous list around so matching records can be
	 * recycled below */
	if (info_list)
		last_list = info_list;

	info_list = list_create(_front_end_info_list_del);
	if (!info_list) {
		g_print("malloc error\n");
		return NULL;
	}
	if (last_list)
		last_list_itr = list_iterator_create(last_list);
	for (i = 0; i < front_end_info_ptr->record_count; i++) {
		front_end_ptr = &(front_end_info_ptr->front_end_array[i]);
		sview_front_end_info_ptr = NULL;

		if (last_list_itr) {
			/* Look for a record for the same front-end node in
			 * the previous list; if found, detach it and clear
			 * its contents, then reuse the struct below.
			 * NOTE(review): this reuse is only safe if
			 * _front_end_info_free() releases the record's
			 * internals but not the struct itself — confirm
			 * against its definition. */
			while ((sview_front_end_info_ptr =
				list_next(last_list_itr))) {
				if (!strcmp(sview_front_end_info_ptr->
					    front_end_name,
					    front_end_ptr->name)) {
					list_remove(last_list_itr);
					_front_end_info_free(
						sview_front_end_info_ptr);
					break;
				}
			}
			list_iterator_reset(last_list_itr);
		}
		if (!sview_front_end_info_ptr)
			sview_front_end_info_ptr =
				xmalloc(sizeof(sview_front_end_info_t));
		sview_front_end_info_ptr->pos = i;
		sview_front_end_info_ptr->front_end_name = front_end_ptr->name;
		sview_front_end_info_ptr->front_end_ptr = front_end_ptr;
		sview_front_end_info_ptr->color_inx = i % sview_colors_cnt;
		/* Front-end nodes span every compute node, so the node
		 * index range covers all records when node info exists */
		if (g_node_info_ptr) {
			sview_front_end_info_ptr->node_inx[0] = 0;
			sview_front_end_info_ptr->node_inx[1] =
				g_node_info_ptr->record_count - 1;
			sview_front_end_info_ptr->node_inx[2] = -1;
		} else
			sview_front_end_info_ptr->node_inx[0] = -1;
		if (front_end_ptr->boot_time) {
			slurm_make_time_str(&front_end_ptr->boot_time,
					    time_str, sizeof(time_str));
			sview_front_end_info_ptr->boot_time =
				xstrdup(time_str);
		}
		if (front_end_ptr->slurmd_start_time) {
			slurm_make_time_str(&front_end_ptr->slurmd_start_time,
					    time_str, sizeof(time_str));
			sview_front_end_info_ptr->slurmd_start_time =
				xstrdup(time_str);
		}
		upper = node_state_string(front_end_ptr->node_state);
		sview_front_end_info_ptr->state = str_tolower(upper);
		/* Format "reason [user@time]" when a complete drain/down
		 * reason (text, time and uid) is available */
		if (front_end_ptr->reason && front_end_ptr->reason_time &&
		    (front_end_ptr->reason_uid != NO_VAL)) {
			struct passwd *pw = NULL;
			if ((pw=getpwuid(front_end_ptr->reason_uid)))
				snprintf(user, sizeof(user), "%s",
					 pw->pw_name);
			else
				snprintf(user, sizeof(user), "Unk(%u)",
					 front_end_ptr->reason_uid);
			slurm_make_time_str(&front_end_ptr->reason_time,
					    time_str, sizeof(time_str));
			sview_front_end_info_ptr->reason =
				xstrdup_printf("%s [%s@%s]",
					       front_end_ptr->reason, user,
					       time_str);
		} else {
			sview_front_end_info_ptr->reason =
				xstrdup(front_end_ptr->reason);
		}
		list_append(info_list, sview_front_end_info_ptr);
	}
	/* Anything left in last_list had no match and is destroyed */
	if (last_list) {
		list_iterator_destroy(last_list_itr);
		list_destroy(last_list);
	}

update_color:
	return info_list;
}
struct opencl_union_device_t *opencl_union_device_create(struct opencl_device_t *parent, struct list_t *devices) { cl_uint i; struct opencl_union_device_t *u; struct opencl_device_t *tmp; u = xcalloc(1, sizeof (struct opencl_union_device_t)); u->parent = parent; int num_devices = list_count(devices); u->devices = list_create(); for (i = 0; i < num_devices; i++) list_add(u->devices, list_get(devices, i)); opencl_debug("[%s] union device contains:", __FUNCTION__); for (i = 0; i < num_devices; i++) { tmp = list_get(devices, i); opencl_debug("[%s] %s = %p", __FUNCTION__, tmp->name, tmp); } *(parent) = *(struct opencl_device_t *)list_get(devices, 0); // just copy over the parameters from someone - we'll do a better job later. parent->name = "Multi2Sim Union Device"; parent->type = CL_DEVICE_TYPE_ACCELERATOR; parent->arch_device_free_func = (opencl_arch_device_free_func_t) opencl_union_device_free; parent->arch_device_mem_alloc_func = (opencl_arch_device_mem_alloc_func_t) opencl_x86_device_mem_alloc; parent->arch_device_mem_free_func = (opencl_arch_device_mem_free_func_t) opencl_x86_device_mem_free; parent->arch_device_mem_read_func = (opencl_arch_device_mem_read_func_t) opencl_x86_device_mem_read; parent->arch_device_mem_write_func = (opencl_arch_device_mem_write_func_t) opencl_x86_device_mem_write; parent->arch_device_mem_copy_func = (opencl_arch_device_mem_copy_func_t) opencl_x86_device_mem_copy; parent->arch_device_preferred_workgroups_func = NULL; /* Call-back functions for architecture-specific program */ parent->arch_program_create_func = (opencl_arch_program_create_func_t) opencl_union_program_create; parent->arch_program_free_func = (opencl_arch_program_free_func_t) opencl_union_program_free; parent->arch_program_valid_binary_func = opencl_union_program_valid_binary; /* Call-back functions for architecture-specific kernel */ parent->arch_kernel_create_func = (opencl_arch_kernel_create_func_t) opencl_union_kernel_create; parent->arch_kernel_free_func = 
(opencl_arch_kernel_free_func_t) opencl_union_kernel_free; parent->arch_kernel_set_arg_func = (opencl_arch_kernel_set_arg_func_t) opencl_union_kernel_set_arg; /* Call-back functions for architecture-specific ND-Range */ parent->arch_ndrange_create_func = (opencl_arch_ndrange_create_func_t) opencl_union_ndrange_create; parent->arch_ndrange_free_func = (opencl_arch_ndrange_free_func_t) opencl_union_ndrange_free; parent->arch_ndrange_init_func = (opencl_arch_ndrange_init_func_t) opencl_union_ndrange_init; parent->arch_ndrange_run_func = (opencl_arch_ndrange_run_func_t) opencl_union_ndrange_run; parent->arch_ndrange_run_partial_func = (opencl_arch_ndrange_run_partial_func_t) opencl_union_ndrange_run_partial; return u; }
/*
 * Populate (or refresh) a popup window with front-end node information,
 * filtered according to the popup's page type and search criteria.
 * Handles three flows: no-change (reuse cached data), load error (show
 * an error label), and fresh data (rebuild the tree view).
 */
extern void specific_info_front_end(popup_info_t *popup_win)
{
	int resv_error_code = SLURM_SUCCESS;
	static front_end_info_msg_t *front_end_info_ptr = NULL;
	static front_end_info_t *front_end_ptr = NULL;
	specific_info_t *spec_info = popup_win->spec_info;
	sview_search_info_t *search_info = spec_info->search_info;
	char error_char[100];
	GtkWidget *label = NULL;
	GtkTreeView *tree_view = NULL;
	List resv_list = NULL;
	List send_resv_list = NULL;
	int changed = 1;
	sview_front_end_info_t *sview_front_end_info_ptr = NULL;
	int i = -1;
	ListIterator itr = NULL;

	/* First call for this popup: set up the display data */
	if (!spec_info->display_widget) {
		setup_popup_info(popup_win, display_data_front_end,
				 SORTID_CNT);
	}

	/* The popup was toggled: discard the widget and rebuild */
	if (spec_info->display_widget && popup_win->toggled) {
		gtk_widget_destroy(spec_info->display_widget);
		spec_info->display_widget = NULL;
		goto display_it;
	}

	resv_error_code = get_new_info_front_end(&front_end_info_ptr,
						 popup_win->force_refresh);
	if (resv_error_code == SLURM_NO_CHANGE_IN_DATA) {
		/* Data unchanged; only rebuild if we have no widget or
		 * were previously showing an error */
		if (!spec_info->display_widget
		    || spec_info->view == ERROR_VIEW)
			goto display_it;
		changed = 0;
	} else if (resv_error_code != SLURM_SUCCESS) {
		/* Load failed: replace the display with an error label */
		if (spec_info->view == ERROR_VIEW)
			goto end_it;
		spec_info->view = ERROR_VIEW;
		if (spec_info->display_widget)
			gtk_widget_destroy(spec_info->display_widget);
		sprintf(error_char, "get_new_info_front_end: %s",
			slurm_strerror(slurm_get_errno()));
		label = gtk_label_new(error_char);
		gtk_table_attach_defaults(popup_win->table, label,
					  0, 1, 0, 1);
		gtk_widget_show(label);
		spec_info->display_widget = gtk_widget_ref(label);
		goto end_it;
	}

display_it:
	resv_list = _create_front_end_info_list(front_end_info_ptr, changed);
	if (!resv_list)
		return;

	if (spec_info->view == ERROR_VIEW && spec_info->display_widget) {
		gtk_widget_destroy(spec_info->display_widget);
		spec_info->display_widget = NULL;
	}
	/* Build the tree view the first time through */
	if (spec_info->type != INFO_PAGE && !spec_info->display_widget) {
		tree_view = create_treeview(local_display_data,
					    &popup_win->grid_button_list);
		gtk_tree_selection_set_mode(
			gtk_tree_view_get_selection(tree_view),
			GTK_SELECTION_MULTIPLE);
		spec_info->display_widget =
			gtk_widget_ref(GTK_WIDGET(tree_view));
		gtk_table_attach_defaults(popup_win->table,
					  GTK_WIDGET(tree_view),
					  0, 1, 0, 1);
		/* since this function sets the model of the tree_view to the
		   treestore we don't really care about the return value */
		create_treestore(tree_view, popup_win->display_data,
				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
	}

	setup_popup_grid_list(popup_win);
	spec_info->view = INFO_VIEW;
	if (spec_info->type == INFO_PAGE) {
		_display_info_front_end(resv_list, popup_win);
		goto end_it;
	}

	/* just linking to another list, don't free the inside, just
	   the list */
	send_resv_list = list_create(NULL);
	itr = list_iterator_create(resv_list);
	i = -1;
	/* Filter the full list down to the records matching this popup's
	 * page type / search criteria */
	while ((sview_front_end_info_ptr = list_next(itr))) {
		i++;
		front_end_ptr = sview_front_end_info_ptr->front_end_ptr;
		switch (spec_info->type) {
		case PART_PAGE:
		case BLOCK_PAGE:
		case NODE_PAGE:
			break;	/* no filtering for these pages */
		case JOB_PAGE:
			if (strcmp(front_end_ptr->name,
				   search_info->gchar_data))
				continue;
			break;
		case RESV_PAGE:
			switch (search_info->search_type) {
			case SEARCH_RESERVATION_NAME:
				if (!search_info->gchar_data)
					continue;
				if (strcmp(front_end_ptr->name,
					   search_info->gchar_data))
					continue;
				break;
			default:
				continue;
			}
			break;
		default:
			g_print("Unknown type %d\n", spec_info->type);
			continue;
		}
		list_push(send_resv_list, sview_front_end_info_ptr);
	}
	list_iterator_destroy(itr);
	post_setup_popup_grid_list(popup_win);

	_update_info_front_end(send_resv_list,
			       GTK_TREE_VIEW(spec_info->display_widget));
	list_destroy(send_resv_list);
end_it:
	popup_win->toggled = 0;
	popup_win->force_refresh = 0;

	return;
}
int main (int argc, char *argv[]) { int opt_char; log_options_t opts = LOG_OPTS_STDERR_ONLY; shares_request_msg_t req_msg; char *temp = NULL; int option_index; bool all_users = 0; static struct option long_options[] = { {"accounts", 1, 0, 'A'}, {"all", 0, 0, 'a'}, {"helpformat",0,0, 'e'}, {"long", 0, 0, 'l'}, {"partition",0, 0, 'm'}, {"cluster", 1, 0, 'M'}, {"clusters", 1, 0, 'M'}, {"noheader", 0, 0, 'n'}, {"format", 1, 0, 'o'}, {"parsable", 0, 0, 'p'}, {"parsable2",0, 0, 'P'}, {"users", 1, 0, 'u'}, {"Users", 0, 0, 'U'}, {"verbose", 0, 0, 'v'}, {"version", 0, 0, 'V'}, {"help", 0, 0, OPT_LONG_HELP}, {"usage", 0, 0, OPT_LONG_USAGE}, {NULL, 0, 0, 0} }; exit_code = 0; long_flag = 0; quiet_flag = 0; verbosity = 0; memset(&req_msg, 0, sizeof(shares_request_msg_t)); slurm_conf_init(NULL); log_init("sshare", opts, SYSLOG_FACILITY_DAEMON, NULL); while((opt_char = getopt_long(argc, argv, "aA:ehlM:no:pPqUu:t:vVm", long_options, &option_index)) != -1) { switch (opt_char) { case (int)'?': fprintf(stderr, "Try \"sshare --help\" " "for more information\n"); exit(1); break; case 'a': all_users = 1; break; case 'A': if (!req_msg.acct_list) req_msg.acct_list = list_create(slurm_destroy_char); slurm_addto_char_list(req_msg.acct_list, optarg); break; case 'e': _help_format_msg(); exit(0); break; case 'h': print_fields_have_header = 0; break; exit(exit_code); break; case 'l': long_flag = 1; break; case 'M': FREE_NULL_LIST(clusters); if (!(clusters = slurmdb_get_info_cluster(optarg))) { print_db_notok(optarg, 0); exit(1); } working_cluster_rec = list_peek(clusters); break; case 'm': options |= PRINT_PARTITIONS; break; case 'n': print_fields_have_header = 0; break; case 'o': xstrfmtcat(opt_field_list, "%s,", optarg); break; case 'p': print_fields_parsable_print = PRINT_FIELDS_PARSABLE_ENDING; break; case 'P': print_fields_parsable_print = PRINT_FIELDS_PARSABLE_NO_ENDING; break; case 'u': if (!strcmp(optarg, "-1")) { all_users = 1; break; } all_users = 0; if (!req_msg.user_list) 
req_msg.user_list = list_create(slurm_destroy_char); _addto_name_char_list(req_msg.user_list, optarg, 0); break; case 'U': options |= PRINT_USERS_ONLY; break; case 'v': quiet_flag = -1; verbosity++; break; case 'V': _print_version(); exit(exit_code); break; case OPT_LONG_HELP: case OPT_LONG_USAGE: _usage(); exit(0); default: exit_code = 1; fprintf(stderr, "getopt error, returned %c\n", opt_char); exit(exit_code); } } if (verbosity) { opts.stderr_level += verbosity; opts.prefix_level = 1; log_alter(opts, 0, NULL); } if (all_users) { if (req_msg.user_list && list_count(req_msg.user_list)) { FREE_NULL_LIST(req_msg.user_list); } if (verbosity) fprintf(stderr, "Users requested:\n\t: all\n"); } else if (verbosity && req_msg.user_list && list_count(req_msg.user_list)) { fprintf(stderr, "Users requested:\n"); ListIterator itr = list_iterator_create(req_msg.user_list); while((temp = list_next(itr))) fprintf(stderr, "\t: %s\n", temp); list_iterator_destroy(itr); } else if (!req_msg.user_list || !list_count(req_msg.user_list)) { struct passwd *pwd = getpwuid(getuid()); if (!req_msg.user_list) req_msg.user_list = list_create(slurm_destroy_char); temp = xstrdup(pwd->pw_name); list_append(req_msg.user_list, temp); if (verbosity) { fprintf(stderr, "Users requested:\n"); fprintf(stderr, "\t: %s\n", temp); } } if (req_msg.acct_list && list_count(req_msg.acct_list)) { if (verbosity) { fprintf(stderr, "Accounts requested:\n"); ListIterator itr = list_iterator_create(req_msg.acct_list); while((temp = list_next(itr))) fprintf(stderr, "\t: %s\n", temp); list_iterator_destroy(itr); } } else { if (req_msg.acct_list && list_count(req_msg.acct_list)) { FREE_NULL_LIST(req_msg.acct_list); } if (verbosity) fprintf(stderr, "Accounts requested:\n\t: all\n"); } if (clusters) exit_code = _multi_cluster(&req_msg); else exit_code = _single_cluster(&req_msg); exit(exit_code); }
/*
 * Refresh and display BlueGene block information for smap.  Loads
 * partition and block data from slurmctld (caching previous responses
 * in static pointers and reusing them on SLURM_NO_CHANGE_IN_DATA),
 * rebuilds the global block_list, associates blocks with SLURM
 * partitions, and prints every block either to curses (text_win) or to
 * stdout in commandline mode.
 */
extern void get_bg_part(void)
{
	int error_code, i, recs=0, count = 0, last_count = -1;
	static partition_info_msg_t *part_info_ptr = NULL;
	static partition_info_msg_t *new_part_ptr = NULL;
	static block_info_msg_t *bg_info_ptr = NULL;
	static block_info_msg_t *new_bg_ptr = NULL;
	uint16_t show_flags = 0;
	partition_info_t part;
	db2_block_info_t *block_ptr = NULL;
	db2_block_info_t *found_block = NULL;
	ListIterator itr;
	List nodelist = NULL;
	bitstr_t *nodes_req = NULL;

	/* Only meaningful on BlueGene clusters */
	if (!(params.cluster_flags & CLUSTER_FLAG_BG))
		return;

	if (params.all_flag)
		show_flags |= SHOW_ALL;
	/* Incremental partition load: pass the last update time so an
	 * unchanged server state returns SLURM_NO_CHANGE_IN_DATA and we
	 * keep the cached message */
	if (part_info_ptr) {
		error_code = slurm_load_partitions(part_info_ptr->last_update,
						   &new_part_ptr, show_flags);
		if (error_code == SLURM_SUCCESS)
			slurm_free_partition_info_msg(part_info_ptr);
		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
			error_code = SLURM_SUCCESS;
			new_part_ptr = part_info_ptr;
		}
	} else {
		error_code = slurm_load_partitions((time_t) NULL,
						   &new_part_ptr, show_flags);
	}

	if (error_code) {
		if (quiet_flag != 1) {
			if (!params.commandline) {
				mvwprintw(text_win, main_ycord, 1,
					  "slurm_load_partitions: %s",
					  slurm_strerror(slurm_get_errno()));
				main_ycord++;
			} else {
				printf("slurm_load_partitions: %s\n",
				       slurm_strerror(slurm_get_errno()));
			}
		}
		return;
	}

	/* Same incremental-load pattern for block info */
	if (bg_info_ptr) {
		error_code = slurm_load_block_info(bg_info_ptr->last_update,
						   &new_bg_ptr, show_flags);
		if (error_code == SLURM_SUCCESS)
			slurm_free_block_info_msg(bg_info_ptr);
		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
			error_code = SLURM_SUCCESS;
			new_bg_ptr = bg_info_ptr;
		}
	} else {
		error_code = slurm_load_block_info((time_t) NULL,
						   &new_bg_ptr, show_flags);
	}
	if (error_code) {
		if (quiet_flag != 1) {
			if (!params.commandline) {
				mvwprintw(text_win, main_ycord, 1,
					  "slurm_load_block: %s",
					  slurm_strerror(slurm_get_errno()));
				main_ycord++;
			} else {
				printf("slurm_load_block: %s\n",
				       slurm_strerror(slurm_get_errno()));
			}
		}
		return;
	}
	if (block_list) {
		/* clear the old list */
		list_flush(block_list);
	} else {
		block_list = list_create(_block_list_del);
	}
	/* Keep the scroll position within the shrinking display */
	if (!params.commandline)
		if ((new_bg_ptr->record_count - text_line_cnt)
		    < (getmaxy(text_win) - 4))
			text_line_cnt--;
	if (params.hl)
		nodes_req = get_requested_node_bitmap();
	for (i = 0; i < new_bg_ptr->record_count; i++) {
		/* Skip blocks that don't overlap the user-requested
		 * midplanes (-n / highlight option) */
		if (nodes_req) {
			int overlap = 0;
			bitstr_t *loc_bitmap = bit_alloc(bit_size(nodes_req));
			inx2bitstr(loc_bitmap,
				   new_bg_ptr->block_array[i].mp_inx);
			overlap = bit_overlap(loc_bitmap, nodes_req);
			FREE_NULL_BITMAP(loc_bitmap);
			if (!overlap)
				continue;
		}
		/* Skip blocks that don't overlap the requested ionodes */
		if (params.io_bit && new_bg_ptr->block_array[i].ionode_str) {
			int overlap = 0;
			bitstr_t *loc_bitmap =
				bit_alloc(bit_size(params.io_bit));
			inx2bitstr(loc_bitmap,
				   new_bg_ptr->block_array[i].ionode_inx);
			overlap = bit_overlap(loc_bitmap, params.io_bit);
			FREE_NULL_BITMAP(loc_bitmap);
			if (!overlap)
				continue;
		}

		block_ptr = xmalloc(sizeof(db2_block_info_t));

		block_ptr->bg_block_name
			= xstrdup(new_bg_ptr->block_array[i].bg_block_id);
		block_ptr->mp_str =
			xstrdup(new_bg_ptr->block_array[i].mp_str);
		block_ptr->nodelist = list_create(_nodelist_del);
		_make_nodelist(block_ptr->mp_str, block_ptr->nodelist);

		block_ptr->state = new_bg_ptr->block_array[i].state;
		memcpy(block_ptr->bg_conn_type,
		       new_bg_ptr->block_array[i].conn_type,
		       sizeof(block_ptr->bg_conn_type));

		if (params.cluster_flags & CLUSTER_FLAG_BGL)
			block_ptr->bg_node_use =
				new_bg_ptr->block_array[i].node_use;

		block_ptr->ionode_str
			= xstrdup(new_bg_ptr->block_array[i].ionode_str);
		block_ptr->cnode_cnt =
			new_bg_ptr->block_array[i].cnode_cnt;

		/* Blocks on the same midplanes share a display letter */
		itr = list_iterator_create(block_list);
		while ((found_block = (db2_block_info_t*)list_next(itr))) {
			if (!strcmp(block_ptr->mp_str,
				    found_block->mp_str)) {
				block_ptr->letter_num =
					found_block->letter_num;
				break;
			}
		}
		list_iterator_destroy(itr);

		if (!found_block) {
			last_count++;
			_marknodes(block_ptr, last_count);
		}

		/* Copy the job ids running on this block */
		block_ptr->job_list =
			list_create(slurm_free_block_job_info);
		if (new_bg_ptr->block_array[i].job_list) {
			block_job_info_t *found_job;
			ListIterator itr = list_iterator_create(
				new_bg_ptr->block_array[i].job_list);
			while ((found_job = list_next(itr))) {
				block_job_info_t *block_job =
					xmalloc(sizeof(block_job_info_t));
				block_job->job_id = found_job->job_id;
				list_append(block_ptr->job_list, block_job);
			}
			list_iterator_destroy(itr);
		}

		if (block_ptr->bg_conn_type[0] >= SELECT_SMALL)
			block_ptr->size = 0;

		list_append(block_list, block_ptr);
	}

	if (!params.no_header)
		_print_header_part();

	if (new_part_ptr)
		recs = new_part_ptr->record_count;
	else
		recs = 0;

	/* Tag each block with the SLURM partition whose nodes contain it */
	for (i = 0; i < recs; i++) {
		part = new_part_ptr->partition_array[i];

		if (!part.nodes || (part.nodes[0] == '\0'))
			continue;	/* empty partition */
		nodelist = list_create(_nodelist_del);
		_make_nodelist(part.nodes, nodelist);

		if (block_list) {
			itr = list_iterator_create(block_list);
			while ((block_ptr = (db2_block_info_t*)
				list_next(itr)) != NULL) {
				if (_in_slurm_partition(
					    nodelist,
					    block_ptr->nodelist)) {
					block_ptr->slurm_part_name
						= xstrdup(part.name);
				}
			}
			list_iterator_destroy(itr);
		}
		list_destroy(nodelist);
	}

	/* Report the BG Blocks */
	if (block_list) {
		itr = list_iterator_create(block_list);
		while ((block_ptr = (db2_block_info_t*)
			list_next(itr)) != NULL) {
			if (params.commandline)
				block_ptr->printed = 1;
			else {
				/* only rows scrolled into view */
				if (count>=text_line_cnt)
					block_ptr->printed = 1;
			}
			_print_rest(block_ptr);
			count++;
		}
		list_iterator_destroy(itr);
	}

	if (params.commandline && params.iterate)
		printf("\n");

	/* Cache the messages for the next incremental load */
	part_info_ptr = new_part_ptr;
	bg_info_ptr = new_bg_ptr;
	return;
}
/*
 * Open (instantiate) an objset from its root block pointer.
 *
 * Reads the objset_phys via the ARC (or zero-fills a fresh one when the
 * root bp is a hole), registers property-change callbacks for datasets,
 * initializes the dirty/free dnode lists, locks, ZIL, and the special
 * meta/userused/groupused dnodes.  On success stores the objset in
 * *osp and returns 0; on failure frees everything allocated here and
 * returns the error.  Caller must hold ds_opening_lock when ds != NULL.
 */
int dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
	objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_PUSHPAGE);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		/* Existing objset: read the phys block through the ARC */
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		/* Hole bp: brand-new objset; allocate a zeroed phys
		 * buffer of the version-appropriate size */
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off). Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!dsl_dataset_is_snapshot(ds)) {
			/* Each registration is attempted only while err
			 * is still 0, so the first failure short-circuits
			 * the rest */
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
		}
		if (err != 0) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf));
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset.
		 * NOTE(review): "ds == NULL" is redundant here (it is the
		 * negation of the enclosing "if (ds)") — a plain else
		 * would be equivalent. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = 0;
		os->os_logbias = 0;
		os->os_sync = 0;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	/* Snapshots don't get a ZIL header of their own */
	if (ds == NULL || !dsl_dataset_is_snapshot(ds))
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	DMU_META_DNODE(os) = dnode_special_open(os,
	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
	    &os->os_meta_dnode);
	/* user/group accounting dnodes only exist in the larger,
	 * post-USERSPACE phys layout */
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		DMU_USERUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
		    &os->os_userused_dnode);
		DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
		    &os->os_groupused_dnode);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_objset == NULL);
		ds->ds_objset = os;
		mutex_exit(&ds->ds_lock);
	}

	*osp = os;
	return (0);
}
/*
 * Create a small BG block record carved out of bg_record, restricted to
 * the given ionodes and holding 'size' c-nodes.  Copies user/image
 * fields from the parent record and derives the midplane from the
 * parent's ba_mp_list (falling back to its node string, with error
 * logging, when no ba_mp is available).  Returns the newly allocated
 * record; caller owns it.
 */
extern bg_record_t *create_small_record(bg_record_t *bg_record,
					bitstr_t *ionodes, int size)
{
	bg_record_t *found_record = NULL;
	ba_mp_t *new_ba_mp = NULL;
	ba_mp_t *ba_mp = NULL;
	char bitstring[BITSIZE];

	found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
	found_record->magic = BLOCK_MAGIC;
	found_record->job_running = NO_JOB_RUNNING;
	found_record->user_name = xstrdup(bg_record->user_name);
	found_record->user_uid = bg_record->user_uid;
	found_record->ba_mp_list = list_create(destroy_ba_mp);

	if (bg_record->ba_mp_list)
		ba_mp = list_peek(bg_record->ba_mp_list);

	if (!ba_mp) {
		/* Degenerate inputs: derive a midplane name from the
		 * parent's node string, or synthesize one from the
		 * (zeroed) start coordinates */
		if (bg_record->mp_str) {
			hostlist_t hl = hostlist_create(bg_record->mp_str);
			char *host = hostlist_shift(hl);
			hostlist_destroy(hl);
			found_record->mp_str = xstrdup(host);
			free(host);
			error("you gave me a list with no ba_mps "
			      "using %s", found_record->mp_str);
		} else {
			char tmp_char[SYSTEM_DIMENSIONS+1];
			int dim;
			for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
				tmp_char[dim] =
					alpha_num[found_record->start[dim]];
			tmp_char[dim] = '\0';
			found_record->mp_str = xstrdup_printf(
				"%s%s",
				bg_conf->slurm_node_prefix,
				tmp_char);
			error("you gave me a record with no ba_mps "
			      "and no nodes either using %s",
			      found_record->mp_str);
		}
	} else {
		new_ba_mp = ba_copy_mp(ba_mp);
		/* We need to have this node wrapped in Q to handle
		   wires correctly when creating around the midplane. */
		ba_setup_mp(new_ba_mp, false, true);

		new_ba_mp->used = BA_MP_USED_TRUE;
		list_append(found_record->ba_mp_list, new_ba_mp);
		found_record->mp_count = 1;
		found_record->mp_str = xstrdup_printf(
			"%s%s",
			bg_conf->slurm_node_prefix, new_ba_mp->coord_str);
	}
	/* Inherit the per-architecture image settings from the parent */
#ifdef HAVE_BGL
	found_record->node_use = SELECT_COPROCESSOR_MODE;
	found_record->blrtsimage = xstrdup(bg_record->blrtsimage);
#endif
#ifdef HAVE_BG_L_P
	found_record->linuximage = xstrdup(bg_record->linuximage);
	found_record->ramdiskimage = xstrdup(bg_record->ramdiskimage);
#endif
	found_record->mloaderimage = xstrdup(bg_record->mloaderimage);

	process_nodes(found_record, false);

	/* Small blocks always use SELECT_SMALL connectivity */
	found_record->conn_type[0] = SELECT_SMALL;

	xassert(bg_conf->cpu_ratio);
	found_record->cpu_cnt = bg_conf->cpu_ratio * size;
	found_record->cnode_cnt = size;

	found_record->ionode_bitmap = bit_copy(ionodes);
	bit_fmt(bitstring, BITSIZE, found_record->ionode_bitmap);
	found_record->ionode_str = xstrdup(bitstring);

	found_record->mp_used_bitmap = bit_alloc(node_record_count);

	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
		info("made small block of %s[%s]",
		     found_record->mp_str, found_record->ionode_str);
	return found_record;
}
int main (int argc, char *argv[]) { int error_code = SLURM_SUCCESS, opt_char; log_options_t opts = LOG_OPTS_STDERR_ONLY; shares_request_msg_t req_msg; shares_response_msg_t *resp_msg = NULL; char *temp = NULL; int option_index; bool all_users = 0; static struct option long_options[] = { {"accounts", 1, 0, 'A'}, {"all", 0, 0, 'a'}, {"long", 0, 0, 'l'}, {"cluster", 1, 0, 'M'}, {"clusters", 1, 0, 'M'}, {"noheader", 0, 0, 'h'}, {"parsable", 0, 0, 'p'}, {"parsable2",0, 0, 'P'}, {"users", 1, 0, 'u'}, {"verbose", 0, 0, 'v'}, {"version", 0, 0, 'V'}, {"help", 0, 0, OPT_LONG_HELP}, {"usage", 0, 0, OPT_LONG_USAGE}, {NULL, 0, 0, 0} }; exit_code = 0; long_flag = 0; quiet_flag = 0; verbosity = 0; memset(&req_msg, 0, sizeof(shares_request_msg_t)); log_init("sshare", opts, SYSLOG_FACILITY_DAEMON, NULL); while((opt_char = getopt_long(argc, argv, "aA:hlM:npPqu:t:vV", long_options, &option_index)) != -1) { switch (opt_char) { case (int)'?': fprintf(stderr, "Try \"sshare --help\" " "for more information\n"); exit(1); break; case 'a': all_users = 1; break; case 'A': if (!req_msg.acct_list) req_msg.acct_list = list_create(slurm_destroy_char); slurm_addto_char_list(req_msg.acct_list, optarg); break; case 'h': print_fields_have_header = 0; break; exit(exit_code); break; case 'l': long_flag = 1; break; case 'M': if (clusters) list_destroy(clusters); if (!(clusters = slurmdb_get_info_cluster(optarg))) { print_db_notok(optarg, 0); exit(1); } working_cluster_rec = list_peek(clusters); break; case 'n': print_fields_have_header = 0; break; case 'p': print_fields_parsable_print = PRINT_FIELDS_PARSABLE_ENDING; break; case 'P': print_fields_parsable_print = PRINT_FIELDS_PARSABLE_NO_ENDING; break; case 'u': if (!strcmp(optarg, "-1")) { all_users = 1; break; } all_users = 0; if (!req_msg.user_list) req_msg.user_list = list_create(slurm_destroy_char); _addto_name_char_list(req_msg.user_list, optarg, 0); break; case 'v': quiet_flag = -1; verbosity++; break; case 'V': _print_version(); exit(exit_code); 
break; case OPT_LONG_HELP: case OPT_LONG_USAGE: _usage(); exit(0); default: exit_code = 1; fprintf(stderr, "getopt error, returned %c\n", opt_char); exit(exit_code); } } if (verbosity) { opts.stderr_level += verbosity; opts.prefix_level = 1; log_alter(opts, 0, NULL); } if (all_users) { if (req_msg.user_list && list_count(req_msg.user_list)) { list_destroy(req_msg.user_list); req_msg.user_list = NULL; } if (verbosity) fprintf(stderr, "Users requested:\n\t: all\n"); } else if (verbosity && req_msg.user_list && list_count(req_msg.user_list)) { fprintf(stderr, "Users requested:\n"); ListIterator itr = list_iterator_create(req_msg.user_list); while((temp = list_next(itr))) fprintf(stderr, "\t: %s\n", temp); list_iterator_destroy(itr); } else if (!req_msg.user_list || !list_count(req_msg.user_list)) { struct passwd *pwd = getpwuid(getuid()); if (!req_msg.user_list) req_msg.user_list = list_create(slurm_destroy_char); temp = xstrdup(pwd->pw_name); list_append(req_msg.user_list, temp); if (verbosity) { fprintf(stderr, "Users requested:\n"); fprintf(stderr, "\t: %s\n", temp); } } if (req_msg.acct_list && list_count(req_msg.acct_list)) { fprintf(stderr, "Accounts requested:\n"); ListIterator itr = list_iterator_create(req_msg.acct_list); while((temp = list_next(itr))) fprintf(stderr, "\t: %s\n", temp); list_iterator_destroy(itr); } else { if (req_msg.acct_list && list_count(req_msg.acct_list)) { list_destroy(req_msg.acct_list); req_msg.acct_list = NULL; } if (verbosity) fprintf(stderr, "Accounts requested:\n\t: all\n"); } error_code = _get_info(&req_msg, &resp_msg); if (req_msg.acct_list) list_destroy(req_msg.acct_list); if (req_msg.user_list) list_destroy(req_msg.user_list); if (error_code) { slurm_perror("Couldn't get shares from controller"); exit(error_code); } /* do stuff with it */ process(resp_msg); slurm_free_shares_response_msg(resp_msg); exit(exit_code); }
/*
 * _copy_allocation - handle the smap "copy" command: duplicate an
 * existing allocated block "count" times.
 *
 * IN com - command string, e.g. "copy A 3" (letter optional, count
 *          optional; with no letter the most recent block is copied).
 * IN/OUT allocated_blocks - list the new allocated_block_t's are
 *          appended to.
 *
 * RET 1 on success, 0 on failure (error_string is filled in).
 */
static int _copy_allocation(char *com, List allocated_blocks)
{
	ListIterator results_i;
	allocated_block_t *allocated_block = NULL;
	allocated_block_t *temp_block = NULL;
	select_ba_request_t *request = NULL;

	int i = 1, j;
	int len = strlen(com);
	char letter = '\0';
	int count = 1;
	int *geo = NULL, *geo_ptr = NULL;

	/* look for the space after copy */
	while ((com[i-1] != ' ') && (i < len))
		i++;

	if (i <= len) {
		/* Here we are looking for a real number for the count
		 * instead of the params.cluster_base so atoi is ok */
		if ((com[i] >= '0') && (com[i] <= '9'))
			count = atoi(com+i);
		else {
			letter = com[i];
			i++;
			if (com[i] != '\n') {
				/* skip to the (optional) count argument */
				while ((com[i-1] != ' ') && (i < len))
					i++;
				if ((com[i] >= '0') && (com[i] <= '9'))
					count = atoi(com+i);
			}
		}
	}

	/* Find the block matching "letter"; temp_block tracks the last
	 * block seen so that with no letter we copy the newest one. */
	results_i = list_iterator_create(allocated_blocks);
	while ((allocated_block = list_next(results_i)) != NULL) {
		temp_block = allocated_block;
		if (allocated_block->letter != letter)
			continue;
		break;
	}
	list_iterator_destroy(results_i);

	if (!letter)
		allocated_block = temp_block;

	if (!allocated_block) {
		memset(error_string, 0, 255);
		sprintf(error_string,
			"Could not find requested record to copy");
		return 0;
	}

	for (i = 0; i < count; i++) {
		request = (select_ba_request_t*)
			xmalloc(sizeof(select_ba_request_t));
		for (j = 0; j < params.cluster_dims; j++) {
			request->geometry[j] = allocated_block->request->
				geometry[j];
			request->conn_type[j] = allocated_block->request->
				conn_type[j];
		}
		request->size = allocated_block->request->size;
		request->rotate = allocated_block->request->rotate;
		request->elongate = allocated_block->request->elongate;
		request->deny_pass = allocated_block->request->deny_pass;
#ifndef HAVE_BGL
		request->small16 = allocated_block->request->small16;
		request->small64 = allocated_block->request->small64;
		request->small256 = allocated_block->request->small256;
#endif
		request->small32 = allocated_block->request->small32;
		request->small128 = allocated_block->request->small128;

		request->rotate_count = 0;
		request->elongate_count = 0;
		request->elongate_geos = list_create(NULL);
		request->avail_mp_bitmap = NULL;

		/* BUG FIX: the old code iterated request->elongate_geos
		 * (the empty list created just above), so the source
		 * block's elongated geometries were never copied.
		 * Iterate the source block's list instead. */
		results_i = list_iterator_create(
			allocated_block->request->elongate_geos);
		while ((geo_ptr = list_next(results_i)) != NULL) {
			geo = xmalloc(sizeof(int) * params.cluster_dims);
			for (j = 0; j < params.cluster_dims; j++)
				geo[j] = geo_ptr[j];
			list_append(request->elongate_geos, geo);
		}
		list_iterator_destroy(results_i);

		if ((allocated_block = _make_request(request)) == NULL) {
			memset(error_string, 0, 255);
			sprintf(error_string,
				"Problem with the copy\n"
				"Are you sure there is enough room for it?");
			/* NOTE(review): this frees only the struct; the
			 * elongate_geos list (and its geos) leak here —
			 * confirm whether _make_request consumed them
			 * before adding a deeper cleanup. */
			xfree(request);
			return 0;
		}
		list_append(allocated_blocks, allocated_block);
	}
	return 1;
}