int pkg_perform(const char *pkg) { char *cp; FILE *pkg_in; package_t plist; const char *full_pkg, *suffix; char *allocated_pkg; int retval; /* Break the package name into base and desired suffix (if any) */ if ((cp = strrchr(pkg, '.')) != NULL) { allocated_pkg = xmalloc(cp - pkg + 1); memcpy(allocated_pkg, pkg, cp - pkg); allocated_pkg[cp - pkg] = '\0'; suffix = cp + 1; full_pkg = pkg; pkg = allocated_pkg; } else { allocated_pkg = NULL; full_pkg = pkg; suffix = "tgz"; } /* Preliminary setup */ sanity_check(); if (Verbose && !PlistOnly) printf("Creating package %s\n", pkg); get_dash_string(&Comment); get_dash_string(&Desc); if (IS_STDIN(Contents)) pkg_in = stdin; else { pkg_in = fopen(Contents, "r"); if (!pkg_in) errx(2, "unable to open contents file '%s' for input", Contents); } plist.head = plist.tail = NULL; /* Stick the dependencies, if any, at the top */ if (Pkgdeps) register_depends(&plist, Pkgdeps, 0); /* * Put the build dependencies after the dependencies. * This works due to the evaluation order in pkg_add. */ if (BuildPkgdeps) register_depends(&plist, BuildPkgdeps, 1); /* Put the conflicts directly after the dependencies, if any */ if (Pkgcfl) { if (Verbose && !PlistOnly) printf("Registering conflicts:"); while (Pkgcfl) { cp = strsep(&Pkgcfl, " \t\n"); if (*cp) { add_plist(&plist, PLIST_PKGCFL, cp); if (Verbose && !PlistOnly) printf(" %s", cp); } } if (Verbose && !PlistOnly) printf(".\n"); } /* Slurp in the packing list */ append_plist(&plist, pkg_in); if (pkg_in != stdin) fclose(pkg_in); /* Prefix should override the packing list */ if (Prefix) { delete_plist(&plist, FALSE, PLIST_CWD, NULL); add_plist_top(&plist, PLIST_CWD, Prefix); } /* * Run down the list and see if we've named it, if not stick in a name * at the top. 
*/ if (find_plist(&plist, PLIST_NAME) == NULL) { add_plist_top(&plist, PLIST_NAME, basename_of(pkg)); } /* Make first "real contents" pass over it */ check_list(&plist, basename_of(pkg)); /* * We're just here for to dump out a revised plist for the FreeBSD ports * hack. It's not a real create in progress. */ if (PlistOnly) { write_plist(&plist, stdout, realprefix); retval = TRUE; } else { #ifdef BOOTSTRAP warnx("Package building is not supported in bootstrap mode"); retval = FALSE; #else retval = pkg_build(pkg, full_pkg, suffix, &plist); #endif } /* Cleanup */ free(Comment); free(Desc); free_plist(&plist); free(allocated_pkg); return retval; }
/*
 * Select the device-tree blob out of a (possibly concatenated) dtb
 * image [dtb_img, dtb_img + dtb_len) that best matches the SoC this
 * kernel is running on.  The hardware identity is read from
 * /proc/device-tree/qcom,msm-id.
 *
 * On success, a freshly xmalloc'd copy of the chosen DTB is returned
 * through *dtb_buf / *dtb_length (caller frees) and 1 is returned.
 * Returns 0 when the id file cannot be read or no compatible DTB is
 * found.  An exact soc_rev/board_rev match wins immediately; otherwise
 * the newest DTB that is still older than the running hardware is used.
 */
static int victara_choose_dtb(const char *dtb_img, off_t dtb_len, char **dtb_buf, off_t *dtb_length)
{
    char *dtb = (char*)dtb_img;
    char *dtb_end = dtb + dtb_len;
    FILE *f;
    struct msm_id devid, dtb_id;
    char *bestmatch_tag = NULL;           /* start of best "older rev" DTB so far */
    size_t id_read = 0;
    uint32_t bestmatch_tag_size;          /* only meaningful when bestmatch_tag != NULL */
    uint32_t bestmatch_soc_rev_id = INVALID_SOC_REV_ID;
    uint32_t bestmatch_board_rev_id = INVALID_SOC_REV_ID;

    f = fopen("/proc/device-tree/qcom,msm-id", "r");
    if(!f)
    {
        fprintf(stderr, "DTB: Couldn't open /proc/device-tree/qcom,msm-id!\n");
        return 0;
    }

    /* NOTE(review): a short/failed read is not detected here; only the
     * board_rev field is guarded by the id_read check below.  Consider
     * failing when id_read < 12. */
    id_read = fread(&devid, 1, sizeof(struct msm_id), f);
    fclose(f);

    /* Device-tree properties are stored in FDT (big-endian) byte order;
     * convert to host order before comparing. */
    devid.platform_id = fdt32_to_cpu(devid.platform_id);
    devid.hardware_id = fdt32_to_cpu(devid.hardware_id);
    devid.soc_rev = fdt32_to_cpu(devid.soc_rev);
    /* The three fields above occupy 12 bytes; presumably an msm-id entry
     * of exactly 12 bytes predates board_rev — TODO confirm layout. */
    if(id_read > 12)
        devid.board_rev = fdt32_to_cpu(devid.board_rev);
    else
        devid.board_rev = 0;

    printf("DTB: platform %u hw %u soc 0x%x board %u\n",
           devid.platform_id, devid.hardware_id, devid.soc_rev, devid.board_rev);

    /* Walk the image, one concatenated DTB at a time. */
    while(dtb + sizeof(struct fdt_header) < dtb_end)
    {
        uint32_t dtb_soc_rev_id;   /* unused */
        struct fdt_header dtb_hdr;
        uint32_t dtb_size;

        /* the DTB could be unaligned, so extract the header,
         * and operate on it separately */
        memcpy(&dtb_hdr, dtb, sizeof(struct fdt_header));
        if (fdt_check_header((const void *)&dtb_hdr) != 0 ||
            (dtb + fdt_totalsize((const void *)&dtb_hdr) > dtb_end))
        {
            fprintf(stderr, "DTB: Invalid dtb header!\n");
            break;
        }
        dtb_size = fdt_totalsize(&dtb_hdr);

        if(victara_dtb_compatible(dtb, &devid, &dtb_id))
        {
            if (dtb_id.soc_rev == devid.soc_rev &&
                dtb_id.board_rev == devid.board_rev)
            {
                /* Exact revision match: copy it out and stop searching. */
                *dtb_buf = xmalloc(dtb_size);
                memcpy(*dtb_buf, dtb, dtb_size);
                *dtb_length = dtb_size;
                printf("DTB: match 0x%x %u, my id 0x%x %u, len %u\n",
                       dtb_id.soc_rev, dtb_id.board_rev,
                       devid.soc_rev, devid.board_rev, dtb_size);
                return 1;
            }
            else if(dtb_id.soc_rev <= devid.soc_rev &&
                    dtb_id.board_rev < devid.board_rev)
            {
                /* Candidate for "best older revision": keep the one with
                 * the highest (soc_rev, board_rev) seen so far. */
                if((bestmatch_soc_rev_id == INVALID_SOC_REV_ID) ||
                   (bestmatch_soc_rev_id < dtb_id.soc_rev) ||
                   (bestmatch_soc_rev_id == dtb_id.soc_rev &&
                    bestmatch_board_rev_id < dtb_id.board_rev))
                {
                    bestmatch_tag = dtb;
                    bestmatch_tag_size = dtb_size;
                    bestmatch_soc_rev_id = dtb_id.soc_rev;
                    bestmatch_board_rev_id = dtb_id.board_rev;
                }
            }
        }

        /* goto the next device tree if any */
        dtb += dtb_size;

        // try to skip padding in standalone dtb.img files
        while(dtb < dtb_end && *dtb == 0)
            ++dtb;
    }

    if(bestmatch_tag)
    {
        /* No exact match; fall back to the best older-revision DTB. */
        printf("DTB: bestmatch 0x%x %u, my id 0x%x %u\n",
               bestmatch_soc_rev_id, bestmatch_board_rev_id,
               devid.soc_rev, devid.board_rev);
        *dtb_buf = xmalloc(bestmatch_tag_size);
        memcpy(*dtb_buf, bestmatch_tag, bestmatch_tag_size);
        *dtb_length = bestmatch_tag_size;
        return 1;
    }
    return 0;
}
/*
 * Stand-alone test driver for the select/ba (block allocation) code:
 * sets up maximum-verbosity logging, initializes the node grid from the
 * (NULL => default) slurm configuration, attempts one rotatable,
 * elongatable 7x4x2 torus allocation, then dumps the dimension-0 wiring
 * state for the first row of nodes.  Earlier experiments are kept below
 * commented out, for reference.
 */
int main(int argc, char** argv)
{
	select_ba_request_t *request = xmalloc(sizeof(select_ba_request_t));
	log_options_t log_opts = LOG_OPTS_INITIALIZER;
	int debug_level = 5;
	List results;
	// List results2;
	// int i,j;

	/* Route every log channel to the same (maximum) verbosity. */
	log_opts.stderr_level = debug_level;
	log_opts.logfile_level = debug_level;
	log_opts.syslog_level = debug_level;

	log_alter(log_opts, LOG_DAEMON, "/dev/null");

	/* Zeroed dimensions let ba_init size the grid from the config. */
	DIM_SIZE[X]=0;
	DIM_SIZE[Y]=0;
	DIM_SIZE[Z]=0;

	slurm_conf_reinit(NULL);
	ba_init(NULL, 1);

	/* [010x831] */
	/* results = list_create(NULL); */
	/* request->geometry[0] = 9; */
	/* request->geometry[1] = 3; */
	/* request->geometry[2] = 2; */
	/* request->start[0] = 0; */
	/* request->start[1] = 1; */
	/* request->start[2] = 0; */
	/* request->start_req = 1; */
	/* // request->size = 16; */
	/* request->rotate = 0; */
	/* request->elongate = 0; */
	/* request->conn_type = SELECT_TORUS; */
	/* new_ba_request(request); */
	/* print_ba_request(request); */
	/* if (!allocate_block(request, results)) { */
	/* 	debug("couldn't allocate %c%c%c", */
	/* 	      alpha_num[request->geometry[0]], */
	/* 	      alpha_num[request->geometry[1]], */
	/* 	      alpha_num[request->geometry[2]]); */
	/* } */
	/* FREE_NULL_LIST(results); */

	/* /\* [001x801] *\/ */
	/* results = list_create(NULL); */
	/* request->geometry[0] = 9; */
	/* request->geometry[1] = 1; */
	/* request->geometry[2] = 1; */
	/* request->start[0] = 0; */
	/* request->start[1] = 0; */
	/* request->start[2] = 1; */
	/* request->start_req = 1; */
	/* // request->size = 1; */
	/* request->rotate = 0; */
	/* request->elongate = 0; */
	/* request->conn_type = SELECT_TORUS; */
	/* new_ba_request(request); */
	/* print_ba_request(request); */
	/* if (!allocate_block(request, results)) { */
	/* 	debug("couldn't allocate %c%c%c", */
	/* 	      request->geometry[0], */
	/* 	      request->geometry[1], */
	/* 	      request->geometry[2]); */
	/* } */
	/* FREE_NULL_LIST(results); */

	/* [001x801] */
	/* The one allocation actually exercised: a 7x4x2 torus that the
	 * allocator is free to rotate and elongate, starting anywhere. */
	results = list_create(NULL);
	request->geometry[0] = 7;
	request->geometry[1] = 4;
	request->geometry[2] = 2;
	request->start[0] = 0;
	request->start[1] = 0;
	request->start[2] = 0;
	request->start_req = 0;
	// request->size = 1;
	request->rotate = 1;
	request->elongate = 1;
	request->conn_type[0] = SELECT_TORUS;
	new_ba_request(request);
	print_ba_request(request);
	if (!allocate_block(request, results)) {
		debug("couldn't allocate %c%c%c",
		      request->geometry[0],
		      request->geometry[1],
		      request->geometry[2]);
	}
	FREE_NULL_LIST(results);

	/* Dump per-node switch wiring; the y/z extents are clamped to 1
	 * so only the first row (y=0, z=0) is printed. */
	int dim,j;
	int x,y,z;
	int startx=0;
	int starty=0;
	int startz=0;
	int endx=DIM_SIZE[X];
	int endy=1;//DIM_SIZE[Y]
	int endz=1;//DIM_SIZE[Z]
	for(x=startx;x<endx;x++) {
		for(y=starty;y<endy;y++) {
			for(z=startz;z<endz;z++) {
				ba_mp_t *curr_node = &(ba_main_grid[x][y][z]);
				info("Node %c%c%c Used = %d",
				     alpha_num[x],alpha_num[y],alpha_num[z],
				     curr_node->used);
				/* only dimension 0 is examined here */
				for(dim=0;dim<1;dim++) {
					info("Dim %d",dim);
					ba_switch_t *wire =
						&curr_node->axis_switch[dim];
					/* internal port -> internal target ->
					 * external wire's destination node/port */
					for(j=0;j<NUM_PORTS_PER_NODE;j++)
						info("\t%d -> %d -> %c%c%c %d "
						     "Used = %d",
						     j, wire->int_wire[j].
						     port_tar,
						     alpha_num[wire->ext_wire[
						     wire->int_wire[j].
						     port_tar].
						     mp_tar[X]],
						     alpha_num[wire->ext_wire[
						     wire->int_wire[j].
						     port_tar].
						     mp_tar[Y]],
						     alpha_num[wire->ext_wire[
						     wire->int_wire[j].
						     port_tar].
						     mp_tar[Z]],
						     wire->ext_wire[
						     wire->int_wire[j].
						     port_tar].
						     port_tar,
						     wire->int_wire[j].used);
				}
			}
		}
	}

	/* FREE_NULL_LIST(results); */
	/* ba_fini(); */
	/* delete_ba_request(request); */

	return 0;
}
static char * get_btrfs_fs_prefix (const char *mount_path) { struct btrfs_ioctl_ino_lookup_args args; struct stat st; int fd; grub_uint64_t tree_id, inode_id; char *ret = NULL; fd = open (mount_path, O_RDONLY); if (fd < 0) return NULL; memset (&args, 0, sizeof(args)); args.objectid = GRUB_BTRFS_TREE_ROOT_OBJECTID; if (ioctl (fd, BTRFS_IOC_INO_LOOKUP, &args) < 0) goto fail; tree_id = args.treeid; if (fstat (fd, &st) < 0) goto fail; inode_id = st.st_ino; while (tree_id != GRUB_BTRFS_ROOT_VOL_OBJECTID || inode_id != GRUB_BTRFS_TREE_ROOT_OBJECTID) { const char *name; size_t namelen; struct btrfs_ioctl_search_args sargs; char *old; memset (&sargs, 0, sizeof(sargs)); if (inode_id == GRUB_BTRFS_TREE_ROOT_OBJECTID) { struct grub_btrfs_root_backref *br; sargs.key.tree_id = 1; sargs.key.min_objectid = tree_id; sargs.key.max_objectid = tree_id; sargs.key.min_offset = 0; sargs.key.max_offset = ~0ULL; sargs.key.min_transid = 0; sargs.key.max_transid = ~0ULL; sargs.key.min_type = GRUB_BTRFS_ITEM_TYPE_ROOT_BACKREF; sargs.key.max_type = GRUB_BTRFS_ITEM_TYPE_ROOT_BACKREF; sargs.key.nr_items = 1; if (ioctl (fd, BTRFS_IOC_TREE_SEARCH, &sargs) < 0) goto fail; if (sargs.key.nr_items == 0) goto fail; tree_id = sargs.buf[2]; br = (struct grub_btrfs_root_backref *) (sargs.buf + 4); inode_id = grub_le_to_cpu64 (br->inode_id); name = br->name; namelen = grub_le_to_cpu16 (br->n); } else { struct grub_btrfs_inode_ref *ir; sargs.key.tree_id = tree_id; sargs.key.min_objectid = inode_id; sargs.key.max_objectid = inode_id; sargs.key.min_offset = 0; sargs.key.max_offset = ~0ULL; sargs.key.min_transid = 0; sargs.key.max_transid = ~0ULL; sargs.key.min_type = GRUB_BTRFS_ITEM_TYPE_INODE_REF; sargs.key.max_type = GRUB_BTRFS_ITEM_TYPE_INODE_REF; if (ioctl (fd, BTRFS_IOC_TREE_SEARCH, &sargs) < 0) goto fail; if (sargs.key.nr_items == 0) goto fail; inode_id = sargs.buf[2]; ir = (struct grub_btrfs_inode_ref *) (sargs.buf + 4); name = ir->name; namelen = grub_le_to_cpu16 (ir->n); } old = ret; ret = xmalloc 
(namelen + (old ? strlen (old) : 0) + 2); ret[0] = '/'; memcpy (ret + 1, name, namelen); if (old) { strcpy (ret + 1 + namelen, old); free (old); } else ret[1+namelen] = '\0'; } if (!ret) ret = xstrdup ("/"); close (fd); return ret; fail: free (ret); close (fd); return NULL; }
/*
 * Server side of the SSH2 fixed-group Diffie-Hellman key exchange
 * (group1/group14 with SHA-1).  Generates the server DH keypair, reads
 * the client's public value 'e', derives the shared secret K, computes
 * the exchange hash H, signs H with the host key and sends
 * SSH2_MSG_KEXDH_REPLY, then derives the session keys.  All errors go
 * through fatal()/packet_disconnect() — this function does not return
 * an error status.
 */
void
kexdh_server(Kex *kex)
{
	BIGNUM *shared_secret = NULL, *dh_client_pub = NULL;
	DH *dh;
	Key *server_host_public, *server_host_private;
	u_char *kbuf, *hash, *signature = NULL, *server_host_key_blob = NULL;
	u_int sbloblen, klen, hashlen, slen;
	int kout;

	/* generate server DH public key */
	switch (kex->kex_type) {
	case KEX_DH_GRP1_SHA1:
		dh = dh_new_group1();
		break;
	case KEX_DH_GRP14_SHA1:
		dh = dh_new_group14();
		break;
	default:
		fatal("%s: Unexpected KEX type %d", __func__, kex->kex_type);
	}
	dh_gen_key(dh, kex->we_need * 8);

	debug("expecting SSH2_MSG_KEXDH_INIT");
	packet_read_expect(SSH2_MSG_KEXDH_INIT);

	/* Load the host key pair used to authenticate this server. */
	if (kex->load_host_public_key == NULL ||
	    kex->load_host_private_key == NULL)
		fatal("Cannot load hostkey");
	server_host_public = kex->load_host_public_key(kex->hostkey_type);
	if (server_host_public == NULL)
		fatal("Unsupported hostkey type %d", kex->hostkey_type);
	server_host_private = kex->load_host_private_key(kex->hostkey_type);

	/* key, cert */
	if ((dh_client_pub = BN_new()) == NULL)
		fatal("dh_client_pub == NULL");
	/* Read the client's DH public value 'e' from the KEXDH_INIT. */
	packet_get_bignum2(dh_client_pub);
	packet_check_eom();

#ifdef DEBUG_KEXDH
	fprintf(stderr, "dh_client_pub= ");
	BN_print_fp(stderr, dh_client_pub);
	fprintf(stderr, "\n");
	debug("bits %d", BN_num_bits(dh_client_pub));
#endif

#ifdef DEBUG_KEXDH
	DHparams_print_fp(stderr, dh);
	fprintf(stderr, "pub= ");
	BN_print_fp(stderr, dh->pub_key);
	fprintf(stderr, "\n");
#endif

	/* Reject out-of-range / degenerate client public values. */
	if (!dh_pub_is_valid(dh, dh_client_pub))
		packet_disconnect("bad client public DH value");

	/* Derive the shared secret K = e^y mod p. */
	klen = DH_size(dh);
	kbuf = xmalloc(klen);
	if ((kout = DH_compute_key(kbuf, dh_client_pub, dh)) < 0)
		fatal("DH_compute_key: failed");
#ifdef DEBUG_KEXDH
	dump_digest("shared secret", kbuf, kout);
#endif
	if ((shared_secret = BN_new()) == NULL)
		fatal("kexdh_server: BN_new failed");
	if (BN_bin2bn(kbuf, kout, shared_secret) == NULL)
		fatal("kexdh_server: BN_bin2bn failed");
	/* scrub the raw secret bytes before releasing the buffer */
	memset(kbuf, 0, klen);
	free(kbuf);

	key_to_blob(server_host_public, &server_host_key_blob, &sbloblen);

	/* calc H */
	kex_dh_hash(
	    kex->client_version_string,
	    kex->server_version_string,
	    buffer_ptr(&kex->peer), buffer_len(&kex->peer),
	    buffer_ptr(&kex->my), buffer_len(&kex->my),
	    server_host_key_blob, sbloblen,
	    dh_client_pub,
	    dh->pub_key,
	    shared_secret,
	    &hash, &hashlen
	);
	BN_clear_free(dh_client_pub);

	/* save session id := H */
	if (kex->session_id == NULL) {
		kex->session_id_len = hashlen;
		kex->session_id = xmalloc(kex->session_id_len);
		memcpy(kex->session_id, hash, kex->session_id_len);
	}

	/* sign H */
	kex->sign(server_host_private, server_host_public, &signature, &slen,
	    hash, hashlen);

	/* destroy_sensitive_data(); */

	/* send server hostkey, DH pubkey 'f' and signed H */
	packet_start(SSH2_MSG_KEXDH_REPLY);
	packet_put_string(server_host_key_blob, sbloblen);
	packet_put_bignum2(dh->pub_key);	/* f */
	packet_put_string(signature, slen);
	packet_send();

	free(signature);
	free(server_host_key_blob);
	/* have keys, free DH */
	DH_free(dh);

	kex_derive_keys_bn(kex, hash, hashlen, shared_secret);
	BN_clear_free(shared_secret);
	kex_finish(kex);
}
/*
 * Post-process the relations produced by the quadratic-sieve sieving
 * phase: read them from the savefile, discard singletons/duplicates,
 * and build the list of cycles through the partial relations.
 */
void qs_filter_relations(sieve_conf_t *conf) {

	/* Perform all of the postprocessing on the list
	   of relations from the sieving phase. There are
	   two main jobs, reading in all the relations that
	   will be used and then determining the list of
	   cycles in which partial relations appear. Care
	   should be taken to avoid wasting huge amounts of
	   memory */

	msieve_obj *obj = conf->obj;
	uint32 *hashtable = conf->cycle_hashtable;
	cycle_t *table = conf->cycle_table;
	uint32 num_poly_factors = conf->num_poly_factors;
	uint32 num_derived_poly = 1 << (num_poly_factors - 1);
	uint32 *final_poly_index;
	uint32 num_relations, num_cycles, num_poly;
	la_col_t *cycle_list;
	relation_t *relation_list;

	uint32 i, passes, start;
	uint32 curr_a_idx, curr_poly_idx, curr_rel;
	uint32 curr_expected, curr_saved, curr_cycle;
	uint32 total_poly_a;
	uint32 poly_saved;
	uint32 cycle_bins[NUM_CYCLE_BINS+1] = {0};
	char buf[LINE_BUF_SIZE];

	/* Rather than reading all the relations in and
	   then removing singletons, read only the large
	   primes of each relation into an initial list,
	   remove the singletons, and then only read in
	   the relations that survive. This avoids reading
	   in useless relations (and usually the polynomials
	   they would need) */

	savefile_open(&obj->savefile, SAVEFILE_READ);

	/* Pass 1: collect only each relation's two large primes.
	   The list grows geometrically (3/2) as needed. */
	relation_list = (relation_t *)xmalloc(10000 * sizeof(relation_t));
	curr_rel = 10000;
	i = 0;
	total_poly_a = 0;

	/* skip over the first line */
	savefile_read_line(buf, sizeof(buf), &obj->savefile);

	while (!savefile_eof(&obj->savefile)) {
		char *start;

		switch (buf[0]) {
		case 'A':
			/* 'A' lines introduce a new polynomial 'a' value */
			total_poly_a++;
			break;

		case 'R':
			/* 'R' lines are relations; the large primes
			   follow the 'L' marker */
			start = strchr(buf, 'L');
			if (start != NULL) {
				uint32 prime1, prime2;
				read_large_primes(start, &prime1, &prime2);
				if (i == curr_rel) {
					curr_rel = 3 * curr_rel / 2;
					relation_list = (relation_t *)xrealloc(
							relation_list,
							curr_rel *
							sizeof(relation_t));
				}
				/* poly_idx temporarily stores the
				   relation's ordinal position in the file */
				relation_list[i].poly_idx = i;
				relation_list[i].large_prime[0] = prime1;
				relation_list[i].large_prime[1] = prime2;
				i++;
			}
			break;
		}

		savefile_read_line(buf, sizeof(buf), &obj->savefile);
	}
	num_relations = i;
	num_relations = purge_singletons(obj, relation_list, num_relations,
					table, hashtable);
	relation_list = (relation_t *)xrealloc(relation_list,
					num_relations * sizeof(relation_t));

	/* Now we know how many relations to expect. Also
	   initialize the lists of polynomial 'a' and 'b' values */

	num_poly = 10000;
	conf->poly_list = (poly_t *)xmalloc(num_poly * sizeof(poly_t));
	conf->poly_a_list = (mp_t *)xmalloc(total_poly_a * sizeof(mp_t));
	final_poly_index = (uint32 *)xmalloc(num_derived_poly *
					sizeof(uint32));

	/* initialize the running counts of relations and polynomials */

	i = 0;
	curr_expected = 0;
	curr_saved = 0;
	curr_rel = (uint32)(-1);
	curr_poly_idx = (uint32)(-1);
	curr_a_idx = (uint32)(-1);
	poly_saved = 0;
	logprintf(obj, "attempting to read %u relations\n", num_relations);

	/* Pass 2: read in the relations and the polynomials they use
	   at the same time. */

	savefile_rewind(&obj->savefile);

	while (curr_expected < num_relations) {
		char *tmp;
		relation_t *r;

		/* read in the next entity */
		if (savefile_eof(&obj->savefile))
			break;
		savefile_read_line(buf, sizeof(buf), &obj->savefile);

		switch (buf[0]) {
		case 'A':
			/* Read in a new 'a' value */
			curr_a_idx++;
			read_poly_a(conf, buf);
			mp_copy(&conf->curr_a, conf->poly_a_list+curr_a_idx);

			/* build all of the 'b' values associated with it */
			build_derived_poly(conf);

			/* all 'b' values start off unused */
			memset(final_poly_index, -1, num_derived_poly *
							sizeof(uint32));
			break;

		case 'R':
			/* handle a new relation. First find the
			   large primes; these will determine
			   if a relation is full or partial */
			tmp = strchr(buf, 'L');
			if (tmp == NULL)
				break;

			/* Check if this relation is needed. If it
			   survived singleton removal then its ordinal
			   ID will be in the next entry of relation_list.
			   First move up the index of relation_list
			   until the relation index to check is >= the
			   one we have (it may have gotten behind because
			   relations were corrupted) */
			curr_rel++;
			while (curr_expected < num_relations &&
				relation_list[curr_expected].poly_idx <
						curr_rel) {
				curr_expected++;
			}

			/* now check if the relation should be saved */
			if (curr_expected >= num_relations ||
			    relation_list[curr_expected].poly_idx != curr_rel)
				break;

			curr_expected++;

			/* convert the ASCII text of the relation to a
			   relation_t, verifying correctness in the
			   process */
			r = relation_list + curr_saved;
			if (read_relation(conf, buf, r) != 0) {
				logprintf(obj, "failed to read relation %d\n",
						curr_expected - 1);
				break;
			}
			curr_saved++;

			/* if necessary, save the b value corresponding
			   to this relation */
			if (final_poly_index[r->poly_idx] == (uint32)(-1)) {
				if (i == num_poly) {
					num_poly *= 2;
					conf->poly_list = (poly_t *)xrealloc(
							conf->poly_list,
							num_poly *
							sizeof(poly_t));
				}
				conf->poly_list[i].a_idx = curr_a_idx;
				signed_mp_copy(&(conf->curr_b[r->poly_idx]),
						&(conf->poly_list[i].b));
				final_poly_index[r->poly_idx] = i;
				r->poly_idx = i++;
			}
			else {
				/* reuse the previously-saved 'b' value */
				r->poly_idx = final_poly_index[r->poly_idx];
			}
			break;  /* done with this relation */
		}
	}

	/* update the structures with the counts of relations
	   and polynomials actually recovered */

	num_relations = curr_saved;
	logprintf(obj, "recovered %u relations\n", num_relations);
	logprintf(obj, "recovered %u polynomials\n", i);

	savefile_close(&obj->savefile);
	free(final_poly_index);
	conf->poly_list = (poly_t *)xrealloc(conf->poly_list,
					i * sizeof(poly_t));

	/* begin the cycle generation process by purging
	   duplicate relations. For the sake of consistency,
	   always rebuild the graph afterwards */

	num_relations = purge_duplicate_relations(obj,
				relation_list, num_relations);

	memset(hashtable, 0, sizeof(uint32) << LOG2_CYCLE_HASH);
	conf->vertices = 0;
	conf->components = 0;
	conf->cycle_table_size = 1;

	/* only partial relations (distinct large primes) become
	   graph edges; full relations are trivial cycles */
	for (i = 0; i < num_relations; i++) {
		relation_t *r = relation_list + i;
		if (r->large_prime[0] != r->large_prime[1]) {
			add_to_cycles(conf, r->large_prime[0],
					r->large_prime[1]);
		}
	}

	/* compute the number of cycles to expect. Note that
	   this number includes cycles from both full and partial
	   relations (the cycle for a full relation is trivial) */

	num_cycles = num_relations + conf->components - conf->vertices;

	/* The idea behind the cycle-finding code is this: the
	   graph is composed of a bunch of connected components,
	   and each component contains one or more cycles. To
	   find the cycles, you build the 'spanning tree' for
	   each component. Think of the spanning tree as a binary
	   tree; there are no cycles in it because leaves are
	   only connected to a common root and not to each other.
	   Any time you connect together two leaves of the tree,
	   though, a cycle is formed. So, for a spanning tree
	   like this:

	              1
	              o
	             / \
	          2 o   o 3
	           / \   \
	          o   o   o
	          4   5   6

	   if you connect leaves 4 and 5 you get a cycle (4-2-5).
	   If you connect leaves 4 and 6 you get another cycle
	   (4-2-1-3-6) that will reuse two of the nodes in the
	   first cycle. It's this reuse that makes double large
	   primes so powerful.

	   For our purposes, every edge in the tree above
	   represents a partial relation. Every edge that would
	   create a cycle comes from another partial relation.
	   So to find all the cycles, you begin with the roots
	   of all of the connected components, and then iterate
	   through the list of partial relations until all have
	   been 'processed'. A partial relation is considered
	   processed when one or both of its primes is in the
	   tree. If one prime is present then the relation gets
	   added to the tree; if both primes are present then
	   the relation creates one cycle but is *not* added to
	   the tree.

	   It's really great to see such simple ideas do something
	   so complicated as finding cycles (and doing it very
	   quickly) */

	/* First traverse the entire graph and remove any vertices
	   that are not the roots of connected components (i.e.
	   remove any primes whose cycle_t entry does not point
	   to itself) */

	for (i = 0; i < (1 << LOG2_CYCLE_HASH); i++) {
		uint32 offset = hashtable[i];

		while (offset != 0) {
			cycle_t *entry = table + offset;
			if (offset != entry->data)
				entry->data = 0;
			offset = entry->next;
		}
	}

	logprintf(obj, "attempting to build %u cycles\n", num_cycles);
	cycle_list = (la_col_t *)xmalloc(num_cycles * sizeof(la_col_t));

	/* keep going until either all cycles are found, all
	   relations are processed, or cycles stop arriving.
	   Normally these conditions all occur at the same time */

	for (start = passes = curr_cycle = 0; start < num_relations &&
			curr_cycle < num_cycles; passes++) {

		/* The list of relations up to index 'start' is
		   considered processed. For all relations past
		   that... */

		uint32 start_cycles = curr_cycle;

		for (i = start; i < num_relations &&
				curr_cycle < num_cycles; i++) {

			cycle_t *entry1, *entry2;
			relation_t rtmp = relation_list[i];

			if (rtmp.large_prime[0] == rtmp.large_prime[1]) {

				/* this is a full relation, and forms a
				   cycle just by itself. Move it to position
				   'start' of the relation list and increment
				   'start'. The relation is now frozen at
				   that position */

				la_col_t *c = cycle_list + curr_cycle++;
				relation_list[i] = relation_list[start];
				relation_list[start] = rtmp;

				/* build a trivial cycle for the relation */

				c->cycle.num_relations = 1;
				c->cycle.list = (uint32 *)
						xmalloc(sizeof(uint32));
				c->cycle.list[0] = start++;
				continue;
			}

			/* retrieve the cycle_t entries associated
			   with the large primes in relation r. */

			entry1 = get_table_entry(table, hashtable,
						rtmp.large_prime[0], 0);
			entry2 = get_table_entry(table, hashtable,
						rtmp.large_prime[1], 0);

			/* if both vertices do not point to other
			   vertices, then neither prime has been added
			   to the graph yet, and r must remain
			   unprocessed */

			if (entry1->data == 0 && entry2->data == 0)
				continue;

			/* if one or the other prime is part of the
			   graph, add r to the graph. The vertex not in
			   the graph points to the vertex that is, and
			   this entry also points to the relation that
			   is associated with rtmp.

			   If both primes are in the graph, recover the
			   cycle this generates */

			if (entry1->data == 0) {
				entry1->data = entry2 - table;
				entry1->count = start;
			}
			else if (entry2->data == 0) {
				entry2->data = entry1 - table;
				entry2->count = start;
			}
			else {
				la_col_t *c = cycle_list + curr_cycle;
				c->cycle.list = NULL;
				enumerate_cycle(obj, c, table, entry1,
						entry2, start);
				if (c->cycle.list)
					curr_cycle++;
			}

			/* whatever happened above, the relation is
			   processed now; move it to position 'start'
			   of the relation list and increment 'start'.
			   The relation is now frozen at that position */

			relation_list[i] = relation_list[start];
			relation_list[start++] = rtmp;
		}

		/* If this pass did not find any new cycles, then
		   we've reached steady state and are finished */

		if (curr_cycle == start_cycles)
			break;
	}
	num_cycles = curr_cycle;

	logprintf(obj, "found %u cycles in %u passes\n", num_cycles, passes);

	/* sort the list of cycles so that the cycles with
	   the largest number of relations will come last.
	   If the linear algebra code skips any cycles it
	   can easily skip the most dense cycles */

	qsort(cycle_list, (size_t)num_cycles, sizeof(la_col_t), sort_cycles);

	conf->relation_list = relation_list;
	conf->num_relations = num_relations;
	conf->cycle_list = cycle_list;
	conf->num_cycles = num_cycles;

	/* print out a histogram of cycle lengths for infor-
	   mational purposes */

	for (i = 0; i < num_cycles; i++) {
		num_relations = cycle_list[i].cycle.num_relations;

		if (num_relations >= NUM_CYCLE_BINS)
			cycle_bins[NUM_CYCLE_BINS]++;
		else
			cycle_bins[num_relations - 1]++;
	}
	logprintf(obj, "distribution of cycle lengths:\n");
	for (i = 0; i < NUM_CYCLE_BINS; i++) {
		if (cycle_bins[i]) {
			logprintf(obj, "   length %d : %d\n",
					i + 1, cycle_bins[i]);
		}
	}
	/* i == NUM_CYCLE_BINS here: the overflow bin */
	if (cycle_bins[i])
		logprintf(obj, "   length %u+: %u\n", i + 1, cycle_bins[i]);
	/* NOTE(review): indexes cycle_list[num_cycles-1]; presumably
	   num_cycles > 0 always holds at this point — verify */
	logprintf(obj, "largest cycle: %u relations\n",
			cycle_list[num_cycles-1].cycle.num_relations);
}
/*
 * Ask the server for the list of games and return it as an xmalloc'd
 * array of struct nhnet_game; *count receives the number of entries.
 *
 * "done" selects completed games, "show_all" widens the query (both are
 * passed through to the server verbatim).  Returns NULL when the
 * connection is unavailable (api_entry fails) or the server reply is
 * malformed; in the malformed case *count is set to 0.
 */
struct nhnet_game *
nhnet_list_games(int done, int show_all, int *count)
{
    int i, has_amulet;
    json_t *jmsg, *jarr, *jobj;
    struct nhnet_game *gb;
    struct nhnet_game *gamebuf = NULL;
    const char *plname, *plrole, *plrace, *plgend, *plalign, *level_desc,
        *death;

    if (!api_entry())
        return NULL;

    /* "limit" 0 — no cap on the number of games returned. */
    jmsg = json_pack("{si,si,si}", "limit", 0, "completed", done,
                     "show_all", show_all);
    jmsg = send_receive_msg("list_games", jmsg);
    if (json_unpack(jmsg, "{so!}", "games", &jarr) == -1 ||
        !json_is_array(jarr)) {
        print_error("Incorrect return object in nhnet_list_games");
        *count = 0;
    } else {
        *count = json_array_size(jarr);
        gamebuf = xmalloc(*count * sizeof (struct nhnet_game));
        for (i = 0; i < *count; i++) {
            gb = &gamebuf[i];
            /* zero-fill so the strncpy'd fixed-size buffers below stay
             * NUL-terminated even when the source is truncated */
            memset(gb, 0, sizeof (struct nhnet_game));
            jobj = json_array_get(jarr, i);
            /* a malformed entry is skipped (left zeroed) */
            if (json_unpack
                (jobj, "{si,si,si,ss,ss,ss,ss,ss*}", "gameid", &gb->gameid,
                 "status", &gb->status, "playmode", &gb->i.playmode,
                 "plname", &plname, "plrole", &plrole, "plrace", &plrace,
                 "plgend", &plgend, "plalign", &plalign) == -1) {
                print_error("Invalid game info object.");
                continue;
            }
            strncpy(gb->i.name, plname, PL_NSIZ - 1);
            strncpy(gb->i.plrole, plrole, PLRBUFSZ - 1);
            strncpy(gb->i.plrace, plrace, PLRBUFSZ - 1);
            strncpy(gb->i.plgend, plgend, PLRBUFSZ - 1);
            strncpy(gb->i.plalign, plalign, PLRBUFSZ - 1);

            /* extra fields depend on the game's state */
            if (gb->status == LS_SAVED) {
                json_unpack(jobj, "{ss,si,si,si*}", "level_desc",
                            &level_desc, "moves", &gb->i.moves, "depth",
                            &gb->i.depth, "has_amulet", &has_amulet);
                gb->i.has_amulet = has_amulet;
                strncpy(gb->i.level_desc, level_desc,
                        sizeof (gb->i.level_desc) - 1);
            } else if (gb->status == LS_DONE) {
                json_unpack(jobj, "{ss,si,si*}", "death", &death, "moves",
                            &gb->i.moves, "depth", &gb->i.depth);
                strncpy(gb->i.death, death, sizeof (gb->i.death) - 1);
            }
        }
    }

    json_decref(jmsg);
    api_exit();
    return gamebuf;
}
/*
 * One-time X11 initialization: open the display, load the font and
 * cursors, detect the NumLock modifier, probe the SHAPE and XRandR
 * extensions, then set up every screen (colors, GC, root event mask,
 * key grabs, EWMH) and adopt all pre-existing viewable windows as
 * managed clients.  Exits the process on fatal errors.
 */
static void setup_display(void) {
	XGCValues gv;
	XSetWindowAttributes attr;
	XColor dummy;
	XModifierKeymap *modmap;
	/* used in scanning windows (XQueryTree) */
	unsigned int i, j, nwins;
	Window dw1, dw2, *wins;
	XWindowAttributes winattr;

	LOG_ENTER("setup_display()");

	dpy = XOpenDisplay(opt_display);
	if (!dpy) {
		LOG_ERROR("can't open display %s\n", opt_display);
		exit(1);
	}
	XSetErrorHandler(handle_xerror);
	/* XSynchronize(dpy, True); */

	/* Standard & EWMH atoms */
	ewmh_init();

	/* requested font first, compile-time default as fallback */
	font = XLoadQueryFont(dpy, opt_font);
	if (!font) font = XLoadQueryFont(dpy, DEF_FONT);
	if (!font) {
		LOG_ERROR("couldn't find a font to use: try starting with -fn fontname\n");
		exit(1);
	}

	move_curs = XCreateFontCursor(dpy, XC_fleur);
	resize_curs = XCreateFontCursor(dpy, XC_plus);

	/* find out which modifier is NumLock - we'll use this when grabbing
	 * every combination of modifiers we can think of */
	modmap = XGetModifierMapping(dpy);
	for (i = 0; i < 8; i++) {
		for (j = 0; j < (unsigned int)modmap->max_keypermod; j++) {
			if (modmap->modifiermap[i*modmap->max_keypermod+j]
					== XKeysymToKeycode(dpy, XK_Num_Lock)) {
				numlockmask = (1<<i);
				LOG_DEBUG("XK_Num_Lock is (1<<0x%02x)\n", i);
			}
		}
	}
	XFreeModifiermap(modmap);

	/* set up GC parameters - same for each screen */
	gv.function = GXinvert;   /* invert-draw so outlines erase themselves */
	gv.subwindow_mode = IncludeInferiors;
	gv.line_width = 1;  /* opt_bw */
	gv.font = font->fid;

	/* set up root window attributes - same for each screen */
	attr.event_mask = ChildMask | EnterWindowMask | ColormapChangeMask;

	/* SHAPE extension? */
#ifdef SHAPE
	{
		int e_dummy;
		have_shape = XShapeQueryExtension(dpy, &shape_event, &e_dummy);
	}
#endif
	/* Xrandr extension? */
#ifdef RANDR
	{
		int e_dummy;
		have_randr = XRRQueryExtension(dpy, &randr_event_base, &e_dummy);
		if (!have_randr) {
			LOG_DEBUG("XRandR is not supported on this display.\n");
		}
	}
#endif

	/* now set up each screen in turn */
	num_screens = ScreenCount(dpy);
	if (num_screens < 0) {
		LOG_ERROR("Can't count screens\n");
		exit(1);
	}
	screens = xmalloc(num_screens * sizeof(ScreenInfo));
	for (i = 0; i < (unsigned int)num_screens; i++) {
		char *ds, *colon, *dot;

		/* set up a per-screen DISPLAY environment variable to
		 * hand to spawned clients ("DISPLAY=host:d.s") */
		ds = DisplayString(dpy);
		/* set up DISPLAY environment variable to use */
		colon = strrchr(ds, ':');
		screens[i].display = xmalloc(14 + strlen(ds));
		strcpy(screens[i].display, "DISPLAY=");
		strcat(screens[i].display, ds);
		if (colon && num_screens > 1) {
			/* rewrite the screen suffix for this screen number */
			colon = strrchr(screens[i].display, ':');
			dot = strchr(colon, '.');
			if (!dot)
				dot = colon + strlen(colon);
			snprintf(dot, 5, ".%d", i);
		}

		screens[i].screen = i;
		screens[i].root = RootWindow(dpy, i);
#ifdef RANDR
		if (have_randr) {
			XRRSelectInput(dpy, screens[i].root, RRScreenChangeNotifyMask);
		}
#endif
#ifdef VWM
		screens[i].vdesk = KEY_TO_VDESK(XK_1);
#endif

		XAllocNamedColor(dpy, DefaultColormap(dpy, i), opt_fg, &screens[i].fg, &dummy);
		XAllocNamedColor(dpy, DefaultColormap(dpy, i), opt_bg, &screens[i].bg, &dummy);
#ifdef VWM
		XAllocNamedColor(dpy, DefaultColormap(dpy, i), opt_fc, &screens[i].fc, &dummy);
#endif

		screens[i].invert_gc = XCreateGC(dpy, screens[i].root,
				GCFunction | GCSubwindowMode | GCLineWidth | GCFont, &gv);

		XChangeWindowAttributes(dpy, screens[i].root, CWEventMask, &attr);
		grab_keys_for_screen(&screens[i]);
		screens[i].docks_visible = 1;

		/* scan all the windows on this screen and adopt any that
		 * are viewable and not override-redirect */
		LOG_XENTER("XQueryTree(screen=%d)", i);
		XQueryTree(dpy, screens[i].root, &dw1, &dw2, &wins, &nwins);
		LOG_XDEBUG("%d windows\n", nwins);
		LOG_XLEAVE();
		for (j = 0; j < nwins; j++) {
			XGetWindowAttributes(dpy, wins[j], &winattr);
			if (!winattr.override_redirect && winattr.map_state == IsViewable)
				make_new_client(wins[j], &screens[i]);
		}
		XFree(wins);

		ewmh_init_screen(&screens[i]);
	}
	ewmh_set_net_active_window(NULL);

	LOG_LEAVE();
}
/*
 * Allocate 'size' bytes of kernel memory using the native allocator of
 * whichever platform this driver is compiled for.  'flags' may include
 * BTK_ALLOC_SWAPPABLE to request pageable (rather than pinned/non-paged)
 * memory on the platforms that distinguish the two (AIX, NT).
 *
 * Returns the allocated pointer, or NULL on failure (platform allocators
 * here either return NULL or are assumed to by the caller).
 *
 * Side effect: adds 'size' to the module-wide running total
 * btk_alloc_total_g under kmem_mutex; btk_mem_free() is expected to
 * subtract the same amount.
 */
void *
btk_mem_alloc(
    size_t      size,
    bt_data32_t flags
    )
{
    void *kmem_p = NULL;

    FUNCTION("btk_mem_alloc");
    LOG_UNKNOWN_UNIT;
    FENTRY;

#if defined (_AIX)
    /* pinned_heap = non-swappable; 0 selects the default (pageable) heap */
    kmem_p = xmalloc(size, ALIGN_LONG,
                     (flags & BTK_ALLOC_SWAPPABLE) ? 0 : pinned_heap);
#elif defined(__hpux)
    /*
    ** sys_memall() allocates a number of virtual memory pages (NBPG=4K).
    ** Since the driver calls btk_mem_alloc() to get each interrupt
    ** registration structure this is wasteful of lockable system memory.
    ** Eventually it would be a good idea to add another layer of memory
    ** management to clean this up.
    */
    kmem_p = sys_memall(size);
#elif defined(__sun)
    /* KM_NOSLEEP: fail with NULL rather than block if memory is short */
    kmem_p = kmem_alloc( size, KM_NOSLEEP);
#elif defined(__sgi)
    /*
    ** Does not request cache-aligned or physically contiguous memory.
    ** If the memory is going to be DMA'd in to or out of (i.e. write
    ** or read respectively) then kmem_alloc() must be called directly
    ** with the appropriate flag for cache-alignment. In addition to
    ** cache-aligning the buffer, cache flushing issues must be taken
    ** into consideration and are unique for the platform in question.
    */
    kmem_p = kmem_alloc(size, 0);
#elif defined(__vxworks)
    kmem_p = malloc(size);
#elif defined(BT_uCOS)
    kmem_p = malloc(size);
#elif defined(_NTDDK_)
    /* The requested memory is not cache aligned or physically
       contiguous.  Don't use for DMA.  Also, don't use for a buffer
       to be shared with user space unless size < physical page size
       because the user virtual address won't be valid across
       non-contiguous pages. */
    kmem_p = ExAllocatePool(
                 (flags & BTK_ALLOC_SWAPPABLE) ? PagedPool : NonPagedPool,
                 size);
#elif defined(__linux__)
    kmem_p = kmalloc(size, GFP_KERNEL);
#elif defined(__lynxos)
    kmem_p = sysbrk(size);
#endif /* _AIX, __hpux, __sun, __sgi, __vxworks, _NTDDK_ */

    /* Protect btk_alloc_total_g accesses.  This mutex may not be held
       during the preceeding memory allocation to avoid doing the
       allocations at a raised interrupt level. */
    btk_mutex_enter(&kmem_mutex);
    /* NOTE(review): the total is incremented even when the platform
       allocator returned NULL — confirm callers treat a NULL return as
       "nothing to free" so the running total stays consistent. */
    btk_alloc_total_g += size;  /* running total of kmem */
    /* NOTE(review): this check only fires if btk_alloc_total_g is a
       signed type — verify its declaration. */
    if (btk_alloc_total_g < 0) {
        FATAL_STR("Allocated kernel memory went negative.\n");
    }
    TRC_MSG((BT_TRC_ALLOC),
            (LOG_FMT "ptr " PTR_FMT "; size %d; total %d.\n",
            LOG_ARG, kmem_p, (int) size, btk_alloc_total_g));
    btk_mutex_exit(&kmem_mutex);

    FEXIT(kmem_p);
    return(kmem_p);
}
/*
 * Read the stream FH line by line, tokenize each non-empty line into a
 * bdeque via blex(), collect the deques into a growable array, print
 * them all, then walk each deque applying any defined operators found
 * in it and print the (partially built) result deque.
 *
 * FH: open input stream; read until EOF.
 * No return value; output goes to stdout via printf/bdeque_print.
 *
 * NOTE(review): this is test/scaffolding code — several FIXMEs below
 * are the original author's; results of operator applications are not
 * yet pushed back into the deque.
 */
void bparse_test( FILE *FH ) {
	char *s;
	size_t bd_sz = 0, i;
	/* NOTE(review): xmalloc(0) — zero-byte initial allocation; assumes
	   xmalloc/xrealloc accept size 0 — confirm against their impl. */
	struct bdeque **bd=xmalloc(bd_sz), /* Array of bdeque pointers */
		*deque;
	/* NOTE(review): feof() before read is the classic antipattern; the
	   last b_gets() after EOF presumably returns an empty/valid buffer
	   — verify b_gets semantics at EOF. */
	while ( !feof(FH) ) {
		/* Get a line from the file */
		s=b_gets(FH,INPUT_BLOCKSIZE);
		/* Remove comments */
		strtrunc(s,comment_chars);
		/* Remove trailing new line */
		chomp(s);
		/* Tokenize line and load into a deque */
		deque = blex(s);
		if ( bdeque_count( deque ) > 0 ) {
			/* Grow the bdeque pointer array by one and append the
			   new deque to the end */
			struct bdeque **temp = xrealloc(bd,sizeof(*bd)*(++bd_sz));
			bd = temp;
			bd[bd_sz-1] = deque;
		}
	}
	printf("<<DEQUE PRINT>>\n");
	/* Print the bdeques */
	for (i=0; i<bd_sz; i++)
		bdeque_print(bd[i]);
	printf("<<END DEQUE PRINT>>\n");
	for (i=0; i<bd_sz; i++) {
		struct bdeque *deque = bd[i],
			*dupdeque = bdeque_create();
		struct d_elem *node = deque->head;
		/* Keep track of operation count. If the line isn't 'done' and we
		   can't do operations on it, we don't want to loop on it forever */
		/* NOTE(review): ops is declared but never incremented or read. */
		unsigned int ops = 0;
		while ( node != NULL ) {
			struct d_elem *retnode = NULL; // Node returned by the current operation
			int def = is_def(*node->var->value);
			if ( def != -1 ) {
				if ( defs[def].operands == 2 ) {
					retnode = defs[def].op( node->prev, node->next ); // Perform the binary operation of the current defined operator and place the result into retnode.
					//bdeque_npush( dupdeque, defs[def].op( node->prev, node->next ) ); // FIXME: I don't put my return value back into the deque.
				} else if ( defs[def].operands == 1 ) {
					//struct d_elem *retval = defs[def].op( node->next );
					retnode = defs[def].op( node->next ); // Perform the unary operation of the current defined operator and place the result into retnode.
					//defs[def].op( node->next );
					//if ( retval != NULL )
					//	bdeque_npush( dupdeque, retval );
				} else if ( defs[def].operands == 0 ) {
					defs[def].op( node->next );
				} else {
					bdeque_npush( dupdeque, d_elem_copy(node) );
				}
			}
			//else {
			//bdeque_npush( dupdeque, d_elem_copy(node) );
			//}
			/*
			int def = is_def(*node->var->value);
			if ( def != -1 ) {
				if ( defs[def].operands == 1 )
					b_PRINT( defs[def].op( node->next ) );
				else if ( defs[def].operands == 2 )
					b_PRINT( defs[def].op( node->prev, node->next ) );
			}
			*/
			if ( node->next == NULL )
				break;
			//else if ( node->next->prev == node )
			/* NOTE(review): empty body — retnode is computed but never
			   consumed (see FIXME above). */
			if ( retnode != NULL ) {
			}
			node = node->next;
		}
		bdeque_print( dupdeque );
	}
}
int main(int argc, char *argv[]) { struct sigaction act; int argn = 1, ret; { const char *home = getenv("HOME"); if (home) { char *conffile = xmalloc(strlen(home) + sizeof(CONFIG_FILE) + 2); strcpy(conffile, home); strcat(conffile, "/" CONFIG_FILE); xconfig_parse_file(evilwm_options, conffile); free(conffile); } } ret = xconfig_parse_cli(evilwm_options, argc, argv, &argn); if (ret == XCONFIG_MISSING_ARG) { fprintf(stderr, "%s: missing argument to `%s'\n", argv[0], argv[argn]); exit(1); } else if (ret == XCONFIG_BAD_OPTION) { if (0 == strcmp(argv[argn], "-h") || 0 == strcmp(argv[argn], "--help")) { helptext(); exit(0); #ifdef STDIO } else if (0 == strcmp(argv[argn], "-V") || 0 == strcmp(argv[argn], "--version")) { LOG_INFO("evilwm version " VERSION "\n"); exit(0); #endif } else { helptext(); exit(1); } } if (opt_grabmask1) grabmask1 = parse_modifiers(opt_grabmask1); if (opt_grabmask2) grabmask2 = parse_modifiers(opt_grabmask2); if (opt_altmask) altmask = parse_modifiers(opt_altmask); wm_exit = 0; act.sa_handler = handle_signal; sigemptyset(&act.sa_mask); act.sa_flags = 0; sigaction(SIGTERM, &act, NULL); sigaction(SIGINT, &act, NULL); sigaction(SIGHUP, &act, NULL); setup_display(); event_main_loop(); /* Quit Nicely */ while (clients_stacking_order) remove_client(clients_stacking_order->data); XSetInputFocus(dpy, PointerRoot, RevertToPointerRoot, CurrentTime); if (font) XFreeFont(dpy, font); { int i; for (i = 0; i < num_screens; i++) { ewmh_deinit_screen(&screens[i]); XFreeGC(dpy, screens[i].invert_gc); XInstallColormap(dpy, DefaultColormap(dpy, i)); } } free(screens); XCloseDisplay(dpy); return 0; }
dnstcppkt::dnstcppkt () : inbufsize (2048 - MALLOCRESV), inbufpos (0), inbufused (0), inbuf (static_cast<u_char *> (xmalloc (inbufsize))) { }
/* call-seq: stmt.execute
 *
 * Executes the current prepared statement, returns +result+.
 *
 * Each Ruby argument in argv is converted to a MYSQL_BIND buffer:
 * nil, Fixnum/Bignum, Float, String, Time/DateTime, Date and
 * BigDecimal are supported; anything else falls into the default
 * switch arm.  For a SELECT the rows are fetched (and cached, unless
 * the :stream query option is set); for a non-SELECT, Qnil is
 * returned.  Raises Mysql2::Error on any client/server failure.
 */
static VALUE execute(int argc, VALUE *argv, VALUE self) {
  MYSQL_BIND *bind_buffers = NULL;
  unsigned long *length_buffers = NULL;
  unsigned long bind_count;
  long i;
  MYSQL_STMT *stmt;
  MYSQL_RES *metadata;
  VALUE current;
  VALUE resultObj;
  VALUE *params_enc;
  int is_streaming;
#ifdef HAVE_RUBY_ENCODING_H
  rb_encoding *conn_enc;
#endif

  GET_STATEMENT(self);
  GET_CLIENT(stmt_wrapper->client);

#ifdef HAVE_RUBY_ENCODING_H
  conn_enc = rb_to_encoding(wrapper->encoding);
#endif

  /* Scratch space for string encoding exports, allocate on the stack.
     Also keeps the exported VALUEs visible to the GC while bound. */
  params_enc = alloca(sizeof(VALUE) * argc);

  stmt = stmt_wrapper->stmt;

  bind_count = mysql_stmt_param_count(stmt);
  if (argc != (long)bind_count) {
    rb_raise(cMysql2Error, "Bind parameter count (%ld) doesn't match number of arguments (%d)", bind_count, argc);
  }

  // setup any bind variables in the query
  if (bind_count > 0) {
    /* xcalloc zero-fills, so untouched MYSQL_BIND fields are 0/NULL */
    bind_buffers = xcalloc(bind_count, sizeof(MYSQL_BIND));
    length_buffers = xcalloc(bind_count, sizeof(unsigned long));

    for (i = 0; i < argc; i++) {
      bind_buffers[i].buffer = NULL;
      params_enc[i] = Qnil;

      switch (TYPE(argv[i])) {
        case T_NIL:
          bind_buffers[i].buffer_type = MYSQL_TYPE_NULL;
          break;
        case T_FIXNUM:
#if SIZEOF_INT < SIZEOF_LONG
          /* LP64: a Fixnum may exceed 32 bits, bind as 64-bit */
          bind_buffers[i].buffer_type = MYSQL_TYPE_LONGLONG;
          bind_buffers[i].buffer = xmalloc(sizeof(long long int));
          *(long*)(bind_buffers[i].buffer) = FIX2LONG(argv[i]);
#else
          bind_buffers[i].buffer_type = MYSQL_TYPE_LONG;
          bind_buffers[i].buffer = xmalloc(sizeof(int));
          *(long*)(bind_buffers[i].buffer) = FIX2INT(argv[i]);
#endif
          break;
        case T_BIGNUM:
          bind_buffers[i].buffer_type = MYSQL_TYPE_LONGLONG;
          bind_buffers[i].buffer = xmalloc(sizeof(long long int));
          *(LONG_LONG*)(bind_buffers[i].buffer) = rb_big2ll(argv[i]);
          break;
        case T_FLOAT:
          bind_buffers[i].buffer_type = MYSQL_TYPE_DOUBLE;
          bind_buffers[i].buffer = xmalloc(sizeof(double));
          *(double*)(bind_buffers[i].buffer) = NUM2DBL(argv[i]);
          break;
        case T_STRING:
          {
            params_enc[i] = argv[i];
#ifdef HAVE_RUBY_ENCODING_H
            /* re-encode into the connection's encoding before binding */
            params_enc[i] = rb_str_export_to_enc(params_enc[i], conn_enc);
#endif
            set_buffer_for_string(&bind_buffers[i], &length_buffers[i], params_enc[i]);
          }
          break;
        default:
          // TODO: what Ruby type should support MYSQL_TYPE_TIME
          if (CLASS_OF(argv[i]) == rb_cTime || CLASS_OF(argv[i]) == cDateTime) {
            MYSQL_TIME t;
            VALUE rb_time = argv[i];

            bind_buffers[i].buffer_type = MYSQL_TYPE_DATETIME;
            bind_buffers[i].buffer = xmalloc(sizeof(MYSQL_TIME));

            memset(&t, 0, sizeof(MYSQL_TIME));
            t.neg = 0;
            t.second_part = FIX2INT(rb_funcall(rb_time, intern_usec, 0));
            t.second = FIX2INT(rb_funcall(rb_time, intern_sec, 0));
            t.minute = FIX2INT(rb_funcall(rb_time, intern_min, 0));
            t.hour = FIX2INT(rb_funcall(rb_time, intern_hour, 0));
            t.day = FIX2INT(rb_funcall(rb_time, intern_day, 0));
            t.month = FIX2INT(rb_funcall(rb_time, intern_month, 0));
            t.year = FIX2INT(rb_funcall(rb_time, intern_year, 0));

            *(MYSQL_TIME*)(bind_buffers[i].buffer) = t;
          } else if (CLASS_OF(argv[i]) == cDate) {
            MYSQL_TIME t;
            VALUE rb_time = argv[i];

            bind_buffers[i].buffer_type = MYSQL_TYPE_DATE;
            bind_buffers[i].buffer = xmalloc(sizeof(MYSQL_TIME));

            memset(&t, 0, sizeof(MYSQL_TIME));
            t.second_part = 0;
            t.neg = 0;
            t.day = FIX2INT(rb_funcall(rb_time, intern_day, 0));
            t.month = FIX2INT(rb_funcall(rb_time, intern_month, 0));
            t.year = FIX2INT(rb_funcall(rb_time, intern_year, 0));

            *(MYSQL_TIME*)(bind_buffers[i].buffer) = t;
          } else if (CLASS_OF(argv[i]) == cBigDecimal) {
            bind_buffers[i].buffer_type = MYSQL_TYPE_NEWDECIMAL;

            // DECIMAL are represented with the "string representation of the
            // original server-side value", see
            // https://dev.mysql.com/doc/refman/5.7/en/c-api-prepared-statement-type-conversions.html
            // This should be independent of the locale used both on the server
            // and the client side.
            VALUE rb_val_as_string = rb_funcall(argv[i], intern_to_s, 0);

            params_enc[i] = rb_val_as_string;
#ifdef HAVE_RUBY_ENCODING_H
            params_enc[i] = rb_str_export_to_enc(params_enc[i], conn_enc);
#endif
            set_buffer_for_string(&bind_buffers[i], &length_buffers[i], params_enc[i]);
          }
          break;
      }
    }

    // copies bind_buffers into internal storage
    if (mysql_stmt_bind_param(stmt, bind_buffers)) {
      FREE_BINDS;
      rb_raise_mysql2_stmt_error(stmt_wrapper);
    }
  }

  /* run the statement without holding the GVL so other Ruby threads
     can make progress during the round trip */
  if ((VALUE)rb_thread_call_without_gvl(nogvl_execute, stmt, RUBY_UBF_IO, 0) == Qfalse) {
    FREE_BINDS;
    rb_raise_mysql2_stmt_error(stmt_wrapper);
  }

  FREE_BINDS;

  metadata = mysql_stmt_result_metadata(stmt);
  if (metadata == NULL) {
    if (mysql_stmt_errno(stmt) != 0) {
      // either CR_OUT_OF_MEMORY or CR_UNKNOWN_ERROR. both fatal.
      wrapper->active_thread = Qnil;
      rb_raise_mysql2_stmt_error(stmt_wrapper);
    }
    // no data and no error, so query was not a SELECT
    return Qnil;
  }

  current = rb_hash_dup(rb_iv_get(stmt_wrapper->client, "@query_options"));
  (void)RB_GC_GUARD(current);
  Check_Type(current, T_HASH);

  is_streaming = (Qtrue == rb_hash_aref(current, sym_stream));
  if (!is_streaming) {
    // receive the whole result set from the server
    if (mysql_stmt_store_result(stmt)) {
      mysql_free_result(metadata);
      rb_raise_mysql2_stmt_error(stmt_wrapper);
    }
    wrapper->active_thread = Qnil;
  }

  resultObj = rb_mysql_result_to_obj(stmt_wrapper->client, wrapper->encoding, current, metadata, self);

  if (!is_streaming) {
    // cache all result
    rb_funcall(resultObj, intern_each, 0);
  }

  return resultObj;
}
static void setDolp(Char *cp) { Char *dp; size_t i; if (dolmod.len == 0 || dolmcnt == 0) { dolp = cp; return; } cp = Strsave(cp); for (i = 0; i < dolmod.len; i++) { int didmod = 0; /* handle s// [eichin:19910926.0510EST] */ if(dolmod.s[i] == 's') { Char delim; Char *lhsub, *rhsub, *np; size_t lhlen = 0, rhlen = 0; delim = dolmod.s[++i]; if (!delim || letter(delim) || Isdigit(delim) || any(" \t\n", delim)) { seterror(ERR_BADSUBST); break; } lhsub = &dolmod.s[++i]; while(dolmod.s[i] != delim && dolmod.s[++i]) { lhlen++; } dolmod.s[i] = 0; rhsub = &dolmod.s[++i]; while(dolmod.s[i] != delim && dolmod.s[++i]) { rhlen++; } dolmod.s[i] = 0; strip(lhsub); strip(rhsub); strip(cp); dp = cp; do { dp = Strstr(dp, lhsub); if (dp) { ptrdiff_t diff = dp - cp; size_t len = (Strlen(cp) + 1 - lhlen + rhlen); np = xmalloc(len * sizeof(Char)); (void) Strncpy(np, cp, diff); (void) Strcpy(np + diff, rhsub); (void) Strcpy(np + diff + rhlen, dp + lhlen); dp = np + diff + 1; xfree(cp); cp = np; cp[--len] = '\0'; didmod = 1; if (diff >= len) break; } else { /* should this do a seterror? */ break; } } while (dol_flag_a != 0); /* * restore dolmod for additional words */ dolmod.s[i] = rhsub[-1] = (Char) delim; } else { do { if ((dp = domod(cp, dolmod.s[i])) != NULL) { didmod = 1; if (Strcmp(cp, dp) == 0) { xfree(cp); cp = dp; break; } else { xfree(cp); cp = dp; } } else break; } while (dol_flag_a != 0); } if (didmod && dolmcnt != INT_MAX) dolmcnt--; #ifdef notdef else break; #endif } addla(cp); dolp = STRNULL; if (seterr) stderror(ERR_OLD); }
/*--------------------------------------------------------------------*/
static int32 read_relation(sieve_conf_t *conf, char *relation_buf,
			relation_t *r) {

	/* Convert an ascii representation of a sieve relation
	   into a relation_t. Also verify that the relation is
	   not somehow corrupted.

	   Note that the parsing here is not really bulletproof;
	   if you want bulletproof, build an XML spec and then
	   a parser for *that*

	   Line format (hex fields): "R <offset> <poly_idx> <fb>... L <lp>..."
	   Returns 0 on success, a distinct negative code for each parse
	   failure, and -6 (after freeing r->fb_offsets) if verification
	   of the trial factorization fails. */

	uint32 i, j, k;
	uint32 num_factors;
	char *next_field;
	uint32 sieve_offset;
	uint32 sign_of_offset;
	uint32 fb_offsets[64];
	signed_mp_t t0, t1;
	uint32 num_poly_factors = conf->num_poly_factors;
	uint32 *poly_factors = conf->poly_factors;
	uint32 num_derived_poly = 1 << (num_poly_factors - 1);
	uint32 poly_index;

	/* read the sieve offset (which may be negative) */

	relation_buf += 2;	/* skip the 2-char line-type prefix */
	sign_of_offset = POSITIVE;
	if (*relation_buf == '-') {
		sign_of_offset = NEGATIVE;
		relation_buf++;
	}
	if (!isxdigit(*relation_buf))
		return -1;
	sieve_offset = strtoul(relation_buf, &next_field, 16);
	relation_buf = next_field;
	while (isspace(*relation_buf))
		relation_buf++;

	/* read the polynomial index, make sure that
	   0 <= poly_index < num_derived_poly */

	if (!isxdigit(*relation_buf))
		return -2;
	poly_index = strtoul(relation_buf, &next_field, 16);
	relation_buf = next_field;
	if (poly_index >= num_derived_poly)
		return -3;
	while (isspace(*relation_buf))
		relation_buf++;

	/* keep reading factors (really offsets into the factor
	   base) until an 'L' is encountered or a failure occurs */

	num_factors = 0;
	while (num_factors < 64 && *relation_buf != 'L') {
		if (isxdigit(*relation_buf)) {
			i = strtoul(relation_buf, &next_field, 16);

			/* factor base offsets must be sorted into
			   ascending order */

			if (num_factors > 0 && i < fb_offsets[num_factors-1])
				return -4;
			else
				fb_offsets[num_factors++] = i;
			relation_buf = next_field;
		}
		else {
			break;
		}
		while (isspace(*relation_buf))
			relation_buf++;
	}
	if (*relation_buf != 'L')
		return -5;

	/* start filling the relation_t; the sign is packed into
	   the top bit of sieve_offset */

	r->sieve_offset = sieve_offset | (sign_of_offset << 31);
	r->poly_idx = poly_index;
	r->num_factors = num_factors + num_poly_factors;
	r->fb_offsets = (uint32 *)xmalloc(r->num_factors * sizeof(uint32));
	read_large_primes(relation_buf, r->large_prime, r->large_prime + 1);

	/* combine the factors of the sieve value with
	   the factors of the polynomial 'a' value; the
	   linear algebra code has to know about both.
	   Because both lists are sorted, this is just
	   a merge operation */

	i = j = k = 0;
	while (i < num_factors && j < num_poly_factors) {
		if (fb_offsets[i] < poly_factors[j]) {
			r->fb_offsets[k++] = fb_offsets[i++];
		}
		else if (fb_offsets[i] > poly_factors[j]) {
			r->fb_offsets[k++] = poly_factors[j++];
		}
		else {
			/* equal offsets are kept as a duplicated pair so
			   the factor's multiplicity is preserved */
			r->fb_offsets[k] = fb_offsets[i++];
			r->fb_offsets[k+1] = poly_factors[j++];
			k += 2;
		}
	}
	while (i < num_factors)
		r->fb_offsets[k++] = fb_offsets[i++];
	while (j < num_poly_factors)
		r->fb_offsets[k++] = poly_factors[j++];

	/* The relation has now been read in; verify that
	   it really works. First compute
	   (a * sieve_offset + b)^2 - n */

	mp_mul_1(&conf->curr_a, sieve_offset, &t0.num);
	t0.sign = sign_of_offset;
	signed_mp_add(&t0, &(conf->curr_b[r->poly_idx]), &t0);
	signed_mp_mul(&t0, &t0, &t1);
	mp_copy(conf->n, &t0.num);
	t0.sign = POSITIVE;
	signed_mp_sub(&t1, &t0, &t1);

	if (t1.sign == POSITIVE) {
		/* the first factor in the list must not be -1
		   (fb offset 0 encodes the factor -1) */
		if (r->fb_offsets[0] == 0)
			goto read_failed;
	}
	else {
		/* otherwise the first factor in the list
		   must equal -1 */
		if (r->fb_offsets[0] != 0)
			goto read_failed;
	}

	/* make sure that the list of factors we have
	   for this relation represent the complete
	   factorization */

	for (i = 0; i < r->num_factors; i++) {
		uint32 prime;
		uint32 fb_offset = r->fb_offsets[i];

		if (i > 0 && fb_offset == 0)
			break;
		if (fb_offset == 0 || fb_offset >= conf->fb_size)
			continue;
		prime = conf->factor_base[fb_offset].prime;
		if (mp_divrem_1(&t1.num, prime, &t1.num) != 0)
			break;
	}
	/* divide out the (up to two) large primes; "> 1" skips the
	   sentinel value used when a large prime is absent */
	if (r->large_prime[0] > 1)
		if (mp_divrem_1(&t1.num, r->large_prime[0], &t1.num) != 0)
			goto read_failed;
	if (r->large_prime[1] > 1)
		if (mp_divrem_1(&t1.num, r->large_prime[1], &t1.num) != 0)
			goto read_failed;
	if (mp_is_one(&t1.num))
		return 0;

read_failed:
	free(r->fb_offsets);
	return -6;
}
/*
 * Convert delimiter separated pathnames (e.g. PATH) or single file pathname
 * (e.g. c:/foo, c:\bar) to NutC format. If we are handed a string that
 * _NutPathToNutc() fails to convert, just return the path we were handed
 * and assume the caller will know what to do with it (It was probably
 * a mistake to try and convert it anyway due to some of the bizarre things
 * that might look like pathnames in makefiles).
 *
 * Returns a freshly savestring()'d result; the caller owns it.
 */
char *
convert_path_to_nutc(char *path)
{
    int  count;			/* count of path elements */
    char *nutc_path; 		/* new NutC path */
    int  nutc_path_len;		/* length of buffer to allocate for new path */
    char *pathp;		/* pointer to nutc_path used to build it */
    char *etok;			/* token separator for old path */
    char *p;			/* points to element of old path */
    char sep;			/* what flavor of separator used in old path */
    char *rval;

    /* is this a multi-element path ?
     * The (etok - p) == 1 case is a drive-letter prefix like "c:";
     * such an element spans two delimiters and counts once. */
    for (p = path, etok = strpbrk(p, ":;"), count = 0;
	 etok;
	 etok = strpbrk(p, ":;"))
	if ((etok - p) == 1) {
	    if (*(etok - 1) == ';' ||
		*(etok - 1) == ':') {
		p = ++etok;
		continue;	/* ignore empty bucket */
	    } else
		/* NOTE: assignment in condition is intentional — advance
		   past the drive letter to the element's real terminator */
		if (etok = strpbrk(etok+1, ":;"))
		    /* found one to count, handle drive letter */
		    p = ++etok, count++;
		else
		    /* all finished, force abort */
		    p += strlen(p);
	} else
	    /* found another one, no drive letter */
	    p = ++etok, count++;

    if (count) {
	count++;	/* x1;x2;x3 <- need to count x3 */

	/*
	 * Hazard a guess on how big the buffer needs to be.
	 * We have to convert things like c:/foo to /c=/foo.
	 */
	nutc_path_len = strlen(path) + (count*2) + 1;
	nutc_path = xmalloc(nutc_path_len);
	pathp = nutc_path;
	*pathp = '\0';

	/*
	 * Loop through PATH and convert one element of the path at a
	 * time. Single file pathnames will fail this and fall
	 * to the logic below loop.
	 */
	for (p = path, etok = strpbrk(p, ":;");
	     etok;
	     etok = strpbrk(p, ":;")) {

	    /* don't trip up on device specifiers or empty path slots */
	    if ((etok - p) == 1)
		if (*(etok - 1) == ';' ||
		    *(etok - 1) == ':') {
		    p = ++etok;
		    continue;
		} else if ((etok = strpbrk(etok+1, ":;")) == NULL)
		    break;	/* thing found was a WINDOWS32 pathname */

	    /* save separator */
	    sep = *etok;

	    /* terminate the current path element -- temporarily */
	    *etok = '\0';

#ifdef __NUTC__
	    /* convert to NutC format */
	    if (_NutPathToNutc(p, pathp, 0) == FALSE) {
		free(nutc_path);
		rval = savestring(path, strlen(path));
		return rval;
	    }
#else
	    /* hand-rolled fallback: "c:/foo" -> "/c=/foo" */
	    *pathp++ = '/';
	    *pathp++ = p[0];
	    *pathp++ = '=';
	    *pathp++ = '/';
	    strcpy(pathp, &p[2]);
#endif

	    pathp += strlen(pathp);
	    *pathp++ = ':';	/* use Unix style path separator for new path */
	    *pathp   = '\0';	/* make sure we are null terminated */

	    /* restore path separator */
	    *etok = sep;

	    /* point p to first char of next path element */
	    p = ++etok;

	}
    } else {
	nutc_path_len = strlen(path) + 3;
	nutc_path = xmalloc(nutc_path_len);
	pathp = nutc_path;
	*pathp = '\0';
	p = path;
    }

    /*
     * OK, here we handle the last element in PATH (e.g. c of a;b;c)
     * or the path was a single filename and will be converted
     * here. Note, testing p here assures that we don't trip up
     * on paths like a;b; which have trailing delimiter followed by
     * nothing.
     */
    if (*p != '\0') {
#ifdef __NUTC__
	if (_NutPathToNutc(p, pathp, 0) == FALSE) {
	    free(nutc_path);
	    rval = savestring(path, strlen(path));
	    return rval;
	}
#else
	*pathp++ = '/';
	*pathp++ = p[0];
	*pathp++ = '=';
	*pathp++ = '/';
	strcpy(pathp, &p[2]);
#endif
    } else
	*(pathp-1) = '\0'; /* we're already done, don't leave trailing : */

    rval = savestring(nutc_path, strlen(nutc_path));
    free(nutc_path);
    return rval;
}
/*--------------------------------------------------------------------*/ static void enumerate_cycle(msieve_obj *obj, la_col_t *c, cycle_t *table, cycle_t *entry1, cycle_t *entry2, uint32 final_relation) { /* given two entries out of the hashtable, corresponding to two distinct primes, generate the list of relations that participate in the cycle that these two primes have just created. final_relation is the relation to which the two primes belong, and the completed cycle is packed into 'c' */ uint32 traceback1[100]; uint32 traceback2[100]; uint32 num1, num2; uint32 i, j; /* Follow each cycle_t back up the graph until the root component for this cycle is reached. For each prime encountered along the way, save the offset of the relation containing that prime */ num1 = 0; while (entry1 != table + entry1->data) { if (num1 >= 100) { logprintf(obj, "warning: cycle too long, " "skipping it\n"); return; } traceback1[num1++] = entry1->count; entry1 = table + entry1->data; } num2 = 0; while (entry2 != table + entry2->data) { if (num2 >= 100) { logprintf(obj, "warning: cycle too long, " "skipping it\n"); return; } traceback2[num2++] = entry2->count; entry2 = table + entry2->data; } /* Now walk backwards through the lists, until either one list runs out or a relation is encountered that does not appear in both lists */ while (num1 > 0 && num2 > 0) { if (traceback1[num1 - 1] != traceback2[num2 - 1]) break; num1--; num2--; } /* Now that we know how many relations are in the cycle, allocate space to remember them */ c->cycle.num_relations = num1 + num2 + 1; c->cycle.list = (uint32 *)xmalloc(c->cycle.num_relations * sizeof(uint32)); /* Combine the two lists of relations */ for (i = 0; i < num1; i++) c->cycle.list[i] = traceback1[i]; for (j = 0; j < num2; j++, i++) c->cycle.list[i] = traceback2[j]; /* Add the relation that created the cycle in the first place */ c->cycle.list[i] = final_relation; }
/*
 * Query the cluster's event table for node/cluster events inside the
 * window [curr_start, curr_end) (excluding MAINT-flagged states, which
 * are handled with reservations elsewhere) and build the cluster-wide
 * usage record for that window.
 *
 * Returns the allocated local_cluster_usage_t for the cluster (caller
 * frees), or NULL on query failure / no registration rows.  Rows with
 * an empty node_name are cluster registration entries; those with a
 * non-zero state mark slurmctld downtime and are appended to
 * cluster_down_list (also caller-owned) instead.
 */
static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
						   char *cluster_name,
						   time_t curr_start,
						   time_t curr_end,
						   List cluster_down_list)
{
	local_cluster_usage_t *c_usage = NULL;
	char *query = NULL;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	int i = 0;
	ListIterator d_itr = NULL;
	local_cluster_usage_t *loc_c_usage;

	char *event_req_inx[] = {
		"node_name",
		"time_start",
		"time_end",
		"state",
		"tres",
	};
	char *event_str = NULL;
	enum {
		EVENT_REQ_NAME,
		EVENT_REQ_START,
		EVENT_REQ_END,
		EVENT_REQ_STATE,
		EVENT_REQ_TRES,
		EVENT_REQ_COUNT
	};

	/* build the comma-separated select column list */
	xstrfmtcat(event_str, "%s", event_req_inx[i]);
	for(i=1; i<EVENT_REQ_COUNT; i++) {
		xstrfmtcat(event_str, ", %s", event_req_inx[i]);
	}

	/* first get the events during this time.  All that is
	 * except things with the maintainance flag set in the
	 * state.  We handle those later with the reservations.
	 */
	query = xstrdup_printf("select %s from \"%s_%s\" where "
			       "!(state & %d) && (time_start < %ld "
			       "&& (time_end >= %ld "
			       "|| time_end = 0)) "
			       "order by node_name, time_start",
			       event_str, cluster_name, event_table,
			       NODE_STATE_MAINT,
			       curr_end, curr_start);
	xfree(event_str);

	if (debug_flags & DEBUG_FLAG_DB_USAGE)
		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		return NULL;
	}
	xfree(query);

	d_itr = list_iterator_create(cluster_down_list);
	while ((row = mysql_fetch_row(result))) {
		time_t row_start = slurm_atoul(row[EVENT_REQ_START]);
		time_t row_end = slurm_atoul(row[EVENT_REQ_END]);
		uint16_t state = slurm_atoul(row[EVENT_REQ_STATE]);
		int seconds;

		/* clamp the event to the rollup window */
		if (row_start < curr_start)
			row_start = curr_start;

		if (!row_end || row_end > curr_end)
			row_end = curr_end;

		/* Don't worry about it if the time is less
		 * than 1 second.
		 */
		if ((seconds = (row_end - row_start)) < 1)
			continue;

		/* this means we are a cluster registration entry */
		if (!row[EVENT_REQ_NAME][0]) {
			local_cluster_usage_t *loc_c_usage;

			/* if the cpu count changes we will
			 * only care about the last cpu count but
			 * we will keep a total of the time for
			 * all cpus to get the correct cpu time
			 * for the entire period.
			 */
			if (state || !c_usage) {
				loc_c_usage = xmalloc(
					sizeof(local_cluster_usage_t));
				loc_c_usage->start = row_start;
				loc_c_usage->loc_tres =
					list_create(_destroy_local_tres_usage);
				/* If this has a state it means the
				   slurmctld went down and we should
				   put this on the list and remove any
				   jobs from this time that were
				   running later.
				*/
				if (state)
					list_append(cluster_down_list,
						    loc_c_usage);
				else
					c_usage = loc_c_usage;
			} else
				loc_c_usage = c_usage;

			loc_c_usage->end = row_end;

			_add_tres_2_list(loc_c_usage->loc_tres,
					 row[EVENT_REQ_TRES], seconds);

			continue;
		}

		/* only record down time for the cluster we
		   are looking for.  If it was during this
		   time period we would already have it.
		*/
		if (c_usage) {
			time_t local_start = row_start;
			time_t local_end = row_end;
			int seconds;	/* shadows the outer 'seconds' */
			if (c_usage->start > local_start)
				local_start = c_usage->start;
			if (c_usage->end < local_end)
				local_end = c_usage->end;
			seconds = (local_end - local_start);
			if (seconds > 0) {
				_add_tres_time_2_list(c_usage->loc_tres,
						      row[EVENT_REQ_TRES],
						      TIME_DOWN,
						      seconds, 0);

				/* Now remove this time if there was a
				   disconnected slurmctld during the
				   down time.
				*/
				list_iterator_reset(d_itr);
				while ((loc_c_usage = list_next(d_itr))) {
					/* NOTE(review): temp_end/temp_start
					   hold time_t values in int — OK only
					   while time_t fits; and the first
					   comparison below tests local_start
					   but assigns temp_start (initialized
					   from row_start).  Looks
					   inconsistent — confirm intent. */
					int temp_end = row_end;
					int temp_start = row_start;
					if (loc_c_usage->start > local_start)
						temp_start = loc_c_usage->start;
					if (loc_c_usage->end < temp_end)
						temp_end = loc_c_usage->end;
					seconds = (temp_end - temp_start);
					if (seconds < 1)
						continue;

					_remove_job_tres_time_from_cluster(
						loc_c_usage->loc_tres,
						c_usage->loc_tres,
						seconds);
					/* info("Node %s was down for " */
					/*      "%d seconds while " */
					/*      "cluster %s's slurmctld " */
					/*      "wasn't responding", */
					/*      row[EVENT_REQ_NAME], */
					/*      seconds, cluster_name); */
				}
			}
		}
	}
	mysql_free_result(result);

	list_iterator_destroy(d_itr);

	return c_usage;
}
/* Allocate a new variant structure, and set up default values for all the fields. */ static struct gdbarch_tdep * new_variant (void) { struct gdbarch_tdep *var; int r; char buf[20]; var = xmalloc (sizeof (*var)); memset (var, 0, sizeof (*var)); var->frv_abi = FRV_ABI_EABI; var->num_gprs = 64; var->num_fprs = 64; var->num_hw_watchpoints = 0; var->num_hw_breakpoints = 0; /* By default, don't supply any general-purpose or floating-point register names. */ var->register_names = (char **) xmalloc ((frv_num_regs + frv_num_pseudo_regs) * sizeof (char *)); for (r = 0; r < frv_num_regs + frv_num_pseudo_regs; r++) var->register_names[r] = ""; /* Do, however, supply default names for the known special-purpose registers. */ var->register_names[pc_regnum] = "pc"; var->register_names[lr_regnum] = "lr"; var->register_names[lcr_regnum] = "lcr"; var->register_names[psr_regnum] = "psr"; var->register_names[ccr_regnum] = "ccr"; var->register_names[cccr_regnum] = "cccr"; var->register_names[tbr_regnum] = "tbr"; /* Debug registers. */ var->register_names[brr_regnum] = "brr"; var->register_names[dbar0_regnum] = "dbar0"; var->register_names[dbar1_regnum] = "dbar1"; var->register_names[dbar2_regnum] = "dbar2"; var->register_names[dbar3_regnum] = "dbar3"; /* iacc0 (Only found on MB93405.) */ var->register_names[iacc0h_regnum] = "iacc0h"; var->register_names[iacc0l_regnum] = "iacc0l"; var->register_names[iacc0_regnum] = "iacc0"; /* fsr0 (Found on FR555 and FR501.) */ var->register_names[fsr0_regnum] = "fsr0"; /* acc0 - acc7. The architecture provides for the possibility of many more (up to 64 total), but we don't want to make that big of a hole in the G packet. If we need more in the future, we'll add them elsewhere. */ for (r = acc0_regnum; r <= acc7_regnum; r++) { char *buf; buf = xstrprintf ("acc%d", r - acc0_regnum); var->register_names[r] = buf; } /* accg0 - accg7: These are one byte registers. The remote protocol provides the raw values packed four into a slot. 
accg0123 and accg4567 correspond to accg0 - accg3 and accg4-accg7 respectively. We don't provide names for accg0123 and accg4567 since the user will likely not want to see these raw values. */ for (r = accg0_regnum; r <= accg7_regnum; r++) { char *buf; buf = xstrprintf ("accg%d", r - accg0_regnum); var->register_names[r] = buf; } /* msr0 and msr1. */ var->register_names[msr0_regnum] = "msr0"; var->register_names[msr1_regnum] = "msr1"; /* gner and fner registers. */ var->register_names[gner0_regnum] = "gner0"; var->register_names[gner1_regnum] = "gner1"; var->register_names[fner0_regnum] = "fner0"; var->register_names[fner1_regnum] = "fner1"; return var; }
/*
 * Roll one cluster's job/event/reservation records up into hourly usage
 * rows.  Walks [start, end) one hour at a time; for each hour it tallies
 * per-cluster, per-association, per-wckey and per-reservation TRES usage,
 * writes the corresponding *_usage_hour_table rows, and finally runs the
 * configured purge/archive pass.
 *
 * IN mysql_conn   - open accounting storage connection
 * IN cluster_name - cluster whose tables are rolled up
 * IN start, end   - rollup window (hour-aligned by the caller)
 * IN archive_data - whether purged records should be archived
 * RET SLURM_SUCCESS or SLURM_ERROR
 */
extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
				  char *cluster_name,
				  time_t start, time_t end,
				  uint16_t archive_data)
{
	int rc = SLURM_SUCCESS;
	int add_sec = 3600;	/* one rollup step = one hour */
	int i=0;
	time_t now = time(NULL);
	time_t curr_start = start;
	time_t curr_end = curr_start + add_sec;
	char *query = NULL;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	ListIterator a_itr = NULL;
	ListIterator c_itr = NULL;
	ListIterator w_itr = NULL;
	ListIterator r_itr = NULL;
	/* Per-hour scratch lists, flushed at the bottom of each iteration. */
	List assoc_usage_list = list_create(_destroy_local_id_usage);
	List cluster_down_list = list_create(_destroy_local_cluster_usage);
	List wckey_usage_list = list_create(_destroy_local_id_usage);
	List resv_usage_list = list_create(_destroy_local_resv_usage);
	uint16_t track_wckey = slurm_get_track_wckey();
	local_cluster_usage_t *loc_c_usage = NULL;
	local_cluster_usage_t *c_usage = NULL;
	local_resv_usage_t *r_usage = NULL;
	local_id_usage_t *a_usage = NULL;
	local_id_usage_t *w_usage = NULL;
	/* char start_char[20], end_char[20]; */
	char *job_req_inx[] = {
		"job.job_db_inx",
		"job.id_job",
		"job.id_assoc",
		"job.id_wckey",
		"job.array_task_pending",
		"job.time_eligible",
		"job.time_start",
		"job.time_end",
		"job.time_suspended",
		"job.cpus_req",
		"job.id_resv",
		"job.tres_alloc",
		"SUM(step.consumed_energy)"
	};
	char *job_str = NULL;
	enum {
		JOB_REQ_DB_INX,
		JOB_REQ_JOBID,
		JOB_REQ_ASSOCID,
		JOB_REQ_WCKEYID,
		JOB_REQ_ARRAY_PENDING,
		JOB_REQ_ELG,
		JOB_REQ_START,
		JOB_REQ_END,
		JOB_REQ_SUSPENDED,
		JOB_REQ_RCPU,
		JOB_REQ_RESVID,
		JOB_REQ_TRES,
		JOB_REQ_ENERGY,
		JOB_REQ_COUNT
	};
	char *suspend_req_inx[] = {
		"time_start",
		"time_end"
	};
	char *suspend_str = NULL;
	enum {
		SUSPEND_REQ_START,
		SUSPEND_REQ_END,
		SUSPEND_REQ_COUNT
	};
	char *resv_req_inx[] = {
		"id_resv",
		"assoclist",
		"flags",
		"tres",
		"time_start",
		"time_end"
	};
	char *resv_str = NULL;
	enum {
		RESV_REQ_ID,
		RESV_REQ_ASSOCS,
		RESV_REQ_FLAGS,
		RESV_REQ_TRES,
		RESV_REQ_START,
		RESV_REQ_END,
		RESV_REQ_COUNT
	};

	/* Build the comma-separated column lists used by the queries
	 * below; enum order must match the array order. */
	i=0;
	xstrfmtcat(job_str, "%s", job_req_inx[i]);
	for(i=1; i<JOB_REQ_COUNT; i++) {
		xstrfmtcat(job_str, ", %s", job_req_inx[i]);
	}

	i=0;
	xstrfmtcat(suspend_str, "%s", suspend_req_inx[i]);
	for(i=1; i<SUSPEND_REQ_COUNT; i++) {
		xstrfmtcat(suspend_str, ", %s", suspend_req_inx[i]);
	}

	i=0;
	xstrfmtcat(resv_str, "%s", resv_req_inx[i]);
	for(i=1; i<RESV_REQ_COUNT; i++) {
		xstrfmtcat(resv_str, ", %s", resv_req_inx[i]);
	}

/* 	info("begin start %s", slurm_ctime2(&curr_start)); */
/* 	info("begin end %s", slurm_ctime2(&curr_end)); */
	a_itr = list_iterator_create(assoc_usage_list);
	c_itr = list_iterator_create(cluster_down_list);
	w_itr = list_iterator_create(wckey_usage_list);
	r_itr = list_iterator_create(resv_usage_list);
	while (curr_start < end) {
		int last_id = -1;
		int last_wckeyid = -1;
		int seconds = 0;
		int tot_time = 0;

		if (debug_flags & DEBUG_FLAG_DB_USAGE)
			DB_DEBUG(mysql_conn->conn,
				 "%s curr hour is now %ld-%ld",
				 cluster_name, curr_start, curr_end);
/* 		info("start %s", slurm_ctime2(&curr_start)); */
/* 		info("end %s", slurm_ctime2(&curr_end)); */

		c_usage = _setup_cluster_usage(mysql_conn, cluster_name,
					       curr_start, curr_end,
					       cluster_down_list);

		/* Now get the reservations during this time.
		 *
		 * If a reservation has the IGNORE_JOBS flag we don't
		 * have an easy way to distinguish the cpus of a job not
		 * running in the reservation, but on its cpus, so we
		 * just ignore those reservations for accounting
		 * purposes.
		 *
		 * BUG FIX: the original concatenated
		 * "...!(flags & %u)" directly with "order by ...",
		 * yielding malformed SQL ("%u)order by"); a separating
		 * space has been added. */
		query = xstrdup_printf("select %s from \"%s_%s\" where "
				       "(time_start < %ld && time_end >= %ld) "
				       "&& !(flags & %u) "
				       "order by time_start",
				       resv_str, cluster_name, resv_table,
				       curr_end, curr_start,
				       RESERVE_FLAG_IGN_JOBS);

		if (debug_flags & DEBUG_FLAG_DB_USAGE)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);

		if (c_usage)
			xassert(c_usage->loc_tres);

		/* If a reservation overlaps another reservation we
		   total up everything here as if they didn't, but when
		   calculating the total time for a cluster we will
		   remove the extra time received.  This may result in
		   unexpected results with association based reports
		   since the association is given the total amount of
		   time of each reservation, thus equaling more time
		   than is available.  Job/Cluster/Reservation reports
		   should be fine though since we really don't over
		   allocate resources.

		   Unless one reservation completely overlaps the other
		   we have no idea how many cpus should be removed since
		   this could be a heterogeneous system.  The same
		   problem exists for reservations created with
		   ignore_jobs, which allows jobs that aren't supposed
		   to be there to keep running.
		*/
		while ((row = mysql_fetch_row(result))) {
			time_t row_start = slurm_atoul(row[RESV_REQ_START]);
			time_t row_end = slurm_atoul(row[RESV_REQ_END]);
			uint32_t row_flags = slurm_atoul(row[RESV_REQ_FLAGS]);
			int seconds;	/* intentionally shadows outer */

			/* Clamp the reservation to the current hour. */
			if (row_start < curr_start)
				row_start = curr_start;
			if (!row_end || row_end > curr_end)
				row_end = curr_end;

			/* Don't worry about it if the time is less
			 * than 1 second. */
			if ((seconds = (row_end - row_start)) < 1)
				continue;

			r_usage = xmalloc(sizeof(local_resv_usage_t));
			r_usage->id = slurm_atoul(row[RESV_REQ_ID]);
			r_usage->local_assocs =
				list_create(slurm_destroy_char);
			slurm_addto_char_list(r_usage->local_assocs,
					      row[RESV_REQ_ASSOCS]);
			r_usage->loc_tres =
				list_create(_destroy_local_tres_usage);
			_add_tres_2_list(r_usage->loc_tres,
					 row[RESV_REQ_TRES], seconds);
			r_usage->start = row_start;
			r_usage->end = row_end;
			list_append(resv_usage_list, r_usage);

			/* Since this reservation was added to the
			   cluster and only certain people could run
			   there we will use this as allocated time on
			   the system.  If the reservation was a
			   maintenance then we add the time to planned
			   down time.

			   Only record time for the clusters that have
			   registered; this continue should rarely if
			   ever happen. */
			if (!c_usage)
				continue;

			_add_time_tres_list(c_usage->loc_tres,
					    r_usage->loc_tres,
					    (row_flags & RESERVE_FLAG_MAINT) ?
					    TIME_PDOWN : TIME_ALLOC, 0, 0);
		}
		mysql_free_result(result);

		/* now get the jobs during this time only  */
		query = xstrdup_printf("select %s from \"%s_%s\" as job "
				       "left outer join \"%s_%s\" as step on "
				       "job.job_db_inx=step.job_db_inx "
				       "and (step.id_step>=0) "
				       "where (job.time_eligible < %ld && "
				       "(job.time_end >= %ld || "
				       "job.time_end = 0)) "
				       "group by job.job_db_inx "
				       "order by job.id_assoc, "
				       "job.time_eligible",
				       job_str, cluster_name, job_table,
				       cluster_name, step_table,
				       curr_end, curr_start);

		if (debug_flags & DEBUG_FLAG_DB_USAGE)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);

		while ((row = mysql_fetch_row(result))) {
			uint32_t job_id = slurm_atoul(row[JOB_REQ_JOBID]);
			uint32_t assoc_id = slurm_atoul(row[JOB_REQ_ASSOCID]);
			uint32_t wckey_id = slurm_atoul(row[JOB_REQ_WCKEYID]);
			uint32_t array_pending =
				slurm_atoul(row[JOB_REQ_ARRAY_PENDING]);
			uint32_t resv_id = slurm_atoul(row[JOB_REQ_RESVID]);
			time_t row_eligible = slurm_atoul(row[JOB_REQ_ELG]);
			time_t row_start = slurm_atoul(row[JOB_REQ_START]);
			time_t row_end = slurm_atoul(row[JOB_REQ_END]);
			uint32_t row_rcpu = slurm_atoul(row[JOB_REQ_RCPU]);
			List loc_tres = NULL;
			uint64_t row_energy = 0;
			int loc_seconds = 0;
			seconds = 0;

			if (row[JOB_REQ_ENERGY])
				row_energy =
					slurm_atoull(row[JOB_REQ_ENERGY]);
			/* Clamp the job's run time to the current hour;
			 * a job with no start but an end is counted at
			 * its end. */
			if (row_start && (row_start < curr_start))
				row_start = curr_start;
			if (!row_start && row_end)
				row_start = row_end;
			if (!row_end || row_end > curr_end)
				row_end = curr_end;
			if (!row_start || ((row_end - row_start) < 1))
				goto calc_cluster;

			seconds = (row_end - row_start);

			if (slurm_atoul(row[JOB_REQ_SUSPENDED])) {
				MYSQL_RES *result2 = NULL;
				MYSQL_ROW row2;
				/* Get the suspended intervals for this
				 * job and deduct them from seconds. */
				query = xstrdup_printf(
					"select %s from \"%s_%s\" where "
					"(time_start < %ld && (time_end >= %ld "
					"|| time_end = 0)) && job_db_inx=%s "
					"order by time_start",
					suspend_str, cluster_name,
					suspend_table,
					curr_end, curr_start,
					row[JOB_REQ_DB_INX]);

				debug4("%d(%s:%d) query\n%s",
				       mysql_conn->conn, THIS_FILE,
				       __LINE__, query);
				if (!(result2 = mysql_db_query_ret(
					      mysql_conn,
					      query, 0))) {
					rc = SLURM_ERROR;
					goto end_it;
				}
				xfree(query);
				while ((row2 = mysql_fetch_row(result2))) {
					time_t local_start = slurm_atoul(
						row2[SUSPEND_REQ_START]);
					time_t local_end = slurm_atoul(
						row2[SUSPEND_REQ_END]);

					if (!local_start)
						continue;

					if (row_start > local_start)
						local_start = row_start;
					if (row_end < local_end)
						local_end = row_end;
					tot_time = (local_end - local_start);
					if (tot_time < 1)
						continue;

					seconds -= tot_time;
				}
				mysql_free_result(result2);
			}
			if (seconds < 1) {
				debug4("This job (%u) was suspended "
				       "the entire hour", job_id);
				continue;
			}

			/* Rows are ordered by id_assoc, so a change in
			 * id means a new association record. */
			if (last_id != assoc_id) {
				a_usage = xmalloc(sizeof(local_id_usage_t));
				a_usage->id = assoc_id;
				list_append(assoc_usage_list, a_usage);
				last_id = assoc_id;
				/* a_usage->loc_tres is made later,
				   don't do it here. */
			}

			/* Short circuit this so we don't get a
			 * pointer. */
			if (!track_wckey)
				last_wckeyid = wckey_id;

			/* do the wckey calculation */
			if (last_wckeyid != wckey_id) {
				list_iterator_reset(w_itr);
				while ((w_usage = list_next(w_itr)))
					if (w_usage->id == wckey_id)
						break;

				if (!w_usage) {
					w_usage = xmalloc(
						sizeof(local_id_usage_t));
					w_usage->id = wckey_id;
					list_append(wckey_usage_list,
						    w_usage);
					w_usage->loc_tres = list_create(
						_destroy_local_tres_usage);
				}
				last_wckeyid = wckey_id;
			}

			/* do the cluster allocated calculation */
		calc_cluster:

			/* Jobs that never ran (no a_usage) get a
			 * throwaway TRES list so the cluster math below
			 * still works. */
			if (!a_usage)
				loc_tres = list_create(
					_destroy_local_tres_usage);
			else {
				if (!a_usage->loc_tres)
					a_usage->loc_tres = list_create(
						_destroy_local_tres_usage);
				loc_tres = a_usage->loc_tres;
			}

			_add_tres_time_2_list(loc_tres, row[JOB_REQ_TRES],
					      TIME_ALLOC, seconds, 0);
			if (w_usage)
				_add_tres_time_2_list(w_usage->loc_tres,
						      row[JOB_REQ_TRES],
						      TIME_ALLOC, seconds,
						      0);

			_add_time_tres(loc_tres, TIME_ALLOC, TRES_ENERGY,
				       row_energy, 0);
			if (w_usage)
				_add_time_tres(w_usage->loc_tres,
					       TIME_ALLOC, TRES_ENERGY,
					       row_energy, 0);

			/* Now figure out if there was a disconnected
			   slurmctld during this job; that overlap must
			   not be double counted as both down and
			   allocated time. */
			list_iterator_reset(c_itr);
			while ((loc_c_usage = list_next(c_itr))) {
				int temp_end = row_end;
				int temp_start = row_start;
				if (loc_c_usage->start > temp_start)
					temp_start = loc_c_usage->start;
				if (loc_c_usage->end < temp_end)
					temp_end = loc_c_usage->end;
				loc_seconds = (temp_end - temp_start);
				if (loc_seconds < 1)
					continue;

				_remove_job_tres_time_from_cluster(
					loc_c_usage->loc_tres,
					loc_tres, loc_seconds);
			}

			/* first figure out the reservation */
			if (resv_id) {
				if (seconds <= 0) {
					if (!a_usage)
						FREE_NULL_LIST(loc_tres);
					continue;
				}
				/* Since we have already added the
				   entire reservation as used time on
				   the cluster we only need to
				   calculate the used time for the
				   reservation and then divvy up the
				   unused time over the associations
				   able to run in the reservation.
				   Eligible time is ignored here since
				   the job may also be able to run
				   outside the reservation. */
				list_iterator_reset(r_itr);
				while ((r_usage = list_next(r_itr))) {
					int temp_end, temp_start;

					/* A changed reservation makes a
					   new record with the same id,
					   so check every record. */
					if (r_usage->id != resv_id)
						continue;

					temp_end = row_end;
					temp_start = row_start;
					if (r_usage->start > temp_start)
						temp_start = r_usage->start;
					if (r_usage->end < temp_end)
						temp_end = r_usage->end;
					loc_seconds =
						(temp_end - temp_start);
					if (loc_seconds > 0)
						_add_time_tres_list(
							r_usage->loc_tres,
							loc_tres,
							TIME_ALLOC,
							loc_seconds, 1);
				}
				if (!a_usage)
					FREE_NULL_LIST(loc_tres);
				continue;
			}

			/* Only record time for the clusters that have
			   registered; this continue should rarely if
			   ever happen. */
			if (!c_usage) {
				if (!a_usage)
					FREE_NULL_LIST(loc_tres);
				continue;
			}

			if (row_start && (seconds > 0)) {
				_add_job_alloc_time_to_cluster(
					c_usage->loc_tres,
					loc_tres);
			}

			/* The loc_tres isn't needed after this */
			if (!a_usage)
				FREE_NULL_LIST(loc_tres);

			/* now reserved time */
			if (!row_start || (row_start >= c_usage->start)) {
				int temp_end = row_start;
				int temp_start = row_eligible;
				if (c_usage->start > temp_start)
					temp_start = c_usage->start;
				if (c_usage->end < temp_end)
					temp_end = c_usage->end;
				loc_seconds = (temp_end - temp_start);
				if (loc_seconds > 0) {
					/* Pending array tasks aren't
					   in the database as proper
					   job records yet, so account
					   for them here. */
					if (array_pending)
						loc_seconds *=
							array_pending;
					_add_time_tres(c_usage->loc_tres,
						       TIME_RESV, TRES_CPU,
						       loc_seconds *
						       row_rcpu,
						       0);
				}
			}
		}
		mysql_free_result(result);

		/* Now figure out how much more to add to the
		   associations that could have run in the
		   reservation. */
		list_iterator_reset(r_itr);
		while ((r_usage = list_next(r_itr))) {
			ListIterator t_itr;
			local_tres_usage_t *loc_tres;

			if (!r_usage->loc_tres ||
			    !list_count(r_usage->loc_tres))
				continue;

			t_itr = list_iterator_create(r_usage->loc_tres);
			while ((loc_tres = list_next(t_itr))) {
				int64_t idle = loc_tres->total_time -
					loc_tres->time_alloc;
				char *assoc = NULL;
				ListIterator tmp_itr = NULL;

				if (idle <= 0)
					break;	/* since this will be
						 * the same for all
						 * TRES */

				/* Divide the idle time over all
				   associations able to run in the
				   reservation. */
				seconds = idle /
					list_count(r_usage->local_assocs);

				tmp_itr = list_iterator_create(
					r_usage->local_assocs);
				while ((assoc = list_next(tmp_itr))) {
					uint32_t associd =
						slurm_atoul(assoc);
					if ((last_id != associd) &&
					    !(a_usage = list_find_first(
						      assoc_usage_list,
						      _find_id_usage,
						      &associd))) {
						a_usage = xmalloc(
							sizeof(local_id_usage_t));
						a_usage->id = associd;
						list_append(
							assoc_usage_list,
							a_usage);
						last_id = associd;
						a_usage->loc_tres =
							list_create(
								_destroy_local_tres_usage);
					}

					_add_time_tres(a_usage->loc_tres,
						       TIME_ALLOC,
						       loc_tres->id,
						       seconds, 0);
				}
				list_iterator_destroy(tmp_itr);
			}
			list_iterator_destroy(t_itr);
		}

		/* Now apply the down time from the slurmctld
		   disconnects. */
		if (c_usage) {
			list_iterator_reset(c_itr);
			while ((loc_c_usage = list_next(c_itr))) {
				local_tres_usage_t *loc_tres;
				ListIterator tmp_itr =
					list_iterator_create(
						loc_c_usage->loc_tres);
				while ((loc_tres = list_next(tmp_itr)))
					_add_time_tres(
						c_usage->loc_tres,
						TIME_DOWN,
						loc_tres->id,
						loc_tres->total_time,
						0);
				list_iterator_destroy(tmp_itr);
			}

			if ((rc = _process_cluster_usage(
				     mysql_conn, cluster_name,
				     curr_start, curr_end, now,
				     c_usage)) != SLURM_SUCCESS) {
				goto end_it;
			}
		}

		/* Write the per-association usage rows. */
		list_iterator_reset(a_itr);
		while ((a_usage = list_next(a_itr)))
			_create_id_usage_insert(cluster_name, ASSOC_TABLES,
						curr_start, now,
						a_usage, &query);
		if (query) {
			if (debug_flags & DEBUG_FLAG_DB_USAGE)
				DB_DEBUG(mysql_conn->conn,
					 "query\n%s", query);
			rc = mysql_db_query(mysql_conn, query);
			xfree(query);
			if (rc != SLURM_SUCCESS) {
				error("Couldn't add assoc hour rollup");
				goto end_it;
			}
		}

		if (!track_wckey)
			goto end_loop;

		/* Write the per-wckey usage rows. */
		list_iterator_reset(w_itr);
		while ((w_usage = list_next(w_itr)))
			_create_id_usage_insert(cluster_name, WCKEY_TABLES,
						curr_start, now,
						w_usage, &query);
		if (query) {
			if (debug_flags & DEBUG_FLAG_DB_USAGE)
				DB_DEBUG(mysql_conn->conn,
					 "query\n%s", query);
			rc = mysql_db_query(mysql_conn, query);
			xfree(query);
			if (rc != SLURM_SUCCESS) {
				error("Couldn't add wckey hour rollup");
				goto end_it;
			}
		}

	end_loop:
		/* Reset all per-hour state and advance the window. */
		_destroy_local_cluster_usage(c_usage);
		_destroy_local_id_usage(a_usage);
		_destroy_local_id_usage(w_usage);
		_destroy_local_resv_usage(r_usage);
		c_usage = NULL;
		r_usage = NULL;
		a_usage = NULL;
		w_usage = NULL;
		list_flush(assoc_usage_list);
		list_flush(cluster_down_list);
		list_flush(wckey_usage_list);
		list_flush(resv_usage_list);
		curr_start = curr_end;
		curr_end = curr_start + add_sec;
	}
end_it:
	xfree(query);
	xfree(suspend_str);
	xfree(job_str);
	xfree(resv_str);
	_destroy_local_cluster_usage(c_usage);
	_destroy_local_id_usage(a_usage);
	_destroy_local_id_usage(w_usage);
	_destroy_local_resv_usage(r_usage);
	if (a_itr)
		list_iterator_destroy(a_itr);
	if (c_itr)
		list_iterator_destroy(c_itr);
	if (w_itr)
		list_iterator_destroy(w_itr);
	if (r_itr)
		list_iterator_destroy(r_itr);

	FREE_NULL_LIST(assoc_usage_list);
	FREE_NULL_LIST(cluster_down_list);
	FREE_NULL_LIST(wckey_usage_list);
	FREE_NULL_LIST(resv_usage_list);
/* 	info("stop start %s", slurm_ctime2(&curr_start)); */
/* 	info("stop end %s", slurm_ctime2(&curr_end)); */

	/* go check to see if we archive and purge */
	if (rc == SLURM_SUCCESS)
		rc = _process_purge(mysql_conn, cluster_name, archive_data,
				    SLURMDB_PURGE_HOURS);

	return rc;
}
/* Fetch the top-ten list from the server (or locally when the network
 * connection is inactive).  On success returns an xmalloc'd array of
 * *out_len (+1 zeroed sentinel) entries and copies the server's status
 * message into statusbuf; returns NULL on protocol errors.
 *
 * Fix: statusbuf is not pre-zeroed, so the strncpy from the server
 * message could leave it unterminated; it is now explicitly
 * NUL-terminated. */
struct nh_topten_entry *
nhnet_get_topten(int *out_len, char *statusbuf, const char *player,
		 int top, int around, nh_bool own)
{
    struct nh_topten_entry *ttlist;
    json_t *jmsg, *jarr, *jobj;
    const char *msg, *plrole, *plrace, *plgend, *plalign, *name, *death,
	       *entrytxt;
    int len, i, highlight;

    /* Not connected: fall back to the local implementation. */
    if (!nhnet_active())
	return nh_get_topten(out_len, statusbuf, player, top, around, own);

    *out_len = 0;
    if (!api_entry())
	return NULL;

    jmsg = json_pack("{ss,si,si,si}", "player", player ? player : "",
		     "top", top, "around", around, "own", own);
    jmsg = send_receive_msg("get_topten", jmsg);
    if (json_unpack(jmsg, "{so,ss!}", "toplist", &jarr, "msg", &msg) == -1 ||
	!json_is_array(jarr)) {
	print_error("Incorrect return object in nhnet_get_topten");
	ttlist = NULL;
    } else {
	len = json_array_size(jarr);
	strncpy(statusbuf, msg, BUFSZ - 1);
	statusbuf[BUFSZ - 1] = '\0';	/* strncpy may not terminate */
	*out_len = len;
	/* One extra zeroed entry acts as an end-of-list sentinel; the
	 * memset also guarantees the strncpy'd fields below stay
	 * NUL-terminated. */
	ttlist = xmalloc((len + 1) * sizeof (struct nh_topten_entry));
	memset(ttlist, 0, (len + 1) * sizeof (struct nh_topten_entry));
	for (i = 0; i < len; i++) {
	    jobj = json_array_get(jarr, i);
	    json_unpack(jobj,
			"{si,si,si,si,si,si,si,si,si,si,si,si,si,ss,ss,ss,ss,ss,ss,ss,si!}",
			"rank", &ttlist[i].rank,
			"points", &ttlist[i].points,
			"maxlvl", &ttlist[i].maxlvl,
			"hp", &ttlist[i].hp,
			"maxhp", &ttlist[i].maxhp,
			"deaths", &ttlist[i].deaths,
			"ver_major", &ttlist[i].ver_major,
			"ver_minor", &ttlist[i].ver_minor,
			"patchlevel", &ttlist[i].patchlevel,
			"deathdate", &ttlist[i].deathdate,
			"birthdate", &ttlist[i].birthdate,
			"moves", &ttlist[i].moves,
			"end_how", &ttlist[i].end_how,
			"plrole", &plrole,
			"plrace", &plrace,
			"plgend", &plgend,
			"plalign", &plalign,
			"name", &name,
			"death", &death,
			"entrytxt", &entrytxt,
			"highlight", &highlight);
	    strncpy(ttlist[i].plrole, plrole, PLRBUFSZ - 1);
	    strncpy(ttlist[i].plrace, plrace, PLRBUFSZ - 1);
	    strncpy(ttlist[i].plgend, plgend, PLRBUFSZ - 1);
	    strncpy(ttlist[i].plalign, plalign, PLRBUFSZ - 1);
	    strncpy(ttlist[i].name, name, PL_NSIZ - 1);
	    strncpy(ttlist[i].death, death, BUFSZ - 1);
	    strncpy(ttlist[i].entrytxt, entrytxt, BUFSZ - 1);
	    ttlist[i].highlight = highlight;
	}
    }
    json_decref(jmsg);
    api_exit();
    return ttlist;
}
int getopt_main(int argc, char *argv[]) { char *optstr=NULL; char *name=NULL; int opt; int compatible=0; init_longopt(); if (getenv("GETOPT_COMPATIBLE")) compatible=1; if (argc == 1) { if (compatible) { /* For some reason, the original getopt gave no error when there were no arguments. */ printf(" --\n"); exit(0); } else error_msg_and_die("missing optstring argument"); } if (argv[1][0] != '-' || compatible) { quote=0; optstr=xmalloc(strlen(argv[1])+1); strcpy(optstr,argv[1]+strspn(argv[1],"-+")); argv[1]=argv[0]; exit(generate_output(argv+1,argc-1,optstr,long_options)); } while ((opt=getopt_long(argc,argv,shortopts,longopts,NULL)) != EOF) switch (opt) { case 'a': alternative=1; break; case 'o': if (optstr) free(optstr); optstr=xmalloc(strlen(optarg)+1); strcpy(optstr,optarg); break; case 'l': add_long_options(optarg); break; case 'n': if (name) free(name); name=xmalloc(strlen(optarg)+1); strcpy(name,optarg); break; case 'q': quiet_errors=1; break; case 'Q': quiet_output=1; break; case 's': set_shell(optarg); break; case 'T': exit(4); case 'u': quote=0; break; default: show_usage(); } if (!optstr) { if (optind >= argc) error_msg_and_die("missing optstring argument"); else { optstr=xmalloc(strlen(argv[optind])+1); strcpy(optstr,argv[optind]); optind++; } } if (name) argv[optind-1]=name; else argv[optind-1]=argv[0]; exit(generate_output(argv+optind-1,argc-optind+1,optstr,long_options)); }
/* Map a partition device path OS_DEV (with stat data ST) to the path of
 * its whole-disk device.  Returns a malloc'd path, or NULL if realpath
 * fails.  *IS_PART is set to 1 when OS_DEV named a partition; note it is
 * only written on the non-block-device path or when a partition is
 * recognized, so the caller should pre-initialize it to 0 -- TODO confirm
 * against callers.
 *
 * Fix: the PATH_MAX buffer was leaked when realpath() failed; it is now
 * freed before returning NULL.  */
char *
grub_util_part_to_disk (const char *os_dev, struct stat *st, int *is_part)
{
  char *path;

  if (! S_ISBLK (st->st_mode))
    {
      *is_part = 0;
      return xstrdup (os_dev);
    }

  path = xmalloc (PATH_MAX);

  if (! realpath (os_dev, path))
    {
      /* Fix: don't leak PATH on realpath failure.  */
      free (path);
      return NULL;
    }

  if (strncmp ("/dev/", path, 5) == 0)
    {
      char *p = path + 5;

      /* If this is an IDE disk.  */
      if (strncmp ("ide/", p, 4) == 0)
	{
	  p = strstr (p, "part");
	  if (p)
	    {
	      *is_part = 1;
	      strcpy (p, "disc");
	    }
	  return path;
	}

      /* If this is a SCSI disk.  */
      if (strncmp ("scsi/", p, 5) == 0)
	{
	  p = strstr (p, "part");
	  if (p)
	    {
	      *is_part = 1;
	      strcpy (p, "disc");
	    }
	  return path;
	}

      /* If this is a DAC960 disk.  */
      if (strncmp ("rd/c", p, 4) == 0)
	{
	  /* /dev/rd/c[0-9]+d[0-9]+(p[0-9]+)? */
	  p = strchr (p, 'p');
	  if (p)
	    {
	      *is_part = 1;
	      *p = '\0';
	    }
	  return path;
	}

      /* If this is a Mylex AcceleRAID Array.  */
      if (strncmp ("rs/c", p, 4) == 0)
	{
	  /* /dev/rd/c[0-9]+d[0-9]+(p[0-9]+)? */
	  p = strchr (p, 'p');
	  if (p)
	    {
	      *is_part = 1;
	      *p = '\0';
	    }
	  return path;
	}

      /* If this is a CCISS disk.  */
      if (strncmp ("cciss/c", p, sizeof ("cciss/c") - 1) == 0)
	{
	  /* /dev/cciss/c[0-9]+d[0-9]+(p[0-9]+)? */
	  p = strchr (p, 'p');
	  if (p)
	    {
	      *is_part = 1;
	      *p = '\0';
	    }
	  return path;
	}

      /* If this is an AOE disk.  */
      if (strncmp ("etherd/e", p, sizeof ("etherd/e") - 1) == 0)
	{
	  /* /dev/etherd/e[0-9]+\.[0-9]+(p[0-9]+)? */
	  p = strchr (p, 'p');
	  if (p)
	    {
	      *is_part = 1;
	      *p = '\0';
	    }
	  return path;
	}

      /* If this is a Compaq Intelligent Drive Array.  */
      if (strncmp ("ida/c", p, sizeof ("ida/c") - 1) == 0)
	{
	  /* /dev/ida/c[0-9]+d[0-9]+(p[0-9]+)? */
	  p = strchr (p, 'p');
	  if (p)
	    {
	      *is_part = 1;
	      *p = '\0';
	    }
	  return path;
	}

      /* If this is an I2O disk.  */
      if (strncmp ("i2o/hd", p, sizeof ("i2o/hd") - 1) == 0)
	{
	  /* /dev/i2o/hd[a-z]([0-9]+)? */
	  if (p[sizeof ("i2o/hda") - 1])
	    *is_part = 1;
	  p[sizeof ("i2o/hda") - 1] = '\0';
	  return path;
	}

      /* If this is a MultiMediaCard (MMC).  */
      if (strncmp ("mmcblk", p, sizeof ("mmcblk") - 1) == 0)
	{
	  /* /dev/mmcblk[0-9]+(p[0-9]+)? */
	  p = strchr (p, 'p');
	  if (p)
	    {
	      *is_part = 1;
	      *p = '\0';
	    }
	  return path;
	}

      if (strncmp ("md", p, 2) == 0 && p[2] >= '0' && p[2] <= '9')
	{
	  char *ptr = p + 2;
	  while (*ptr >= '0' && *ptr <= '9')
	    ptr++;
	  if (*ptr)
	    *is_part = 1;
	  *ptr = 0;
	  return path;
	}

      if (strncmp ("nbd", p, 3) == 0 && p[3] >= '0' && p[3] <= '9')
	{
	  char *ptr = p + 3;
	  while (*ptr >= '0' && *ptr <= '9')
	    ptr++;
	  if (*ptr)
	    *is_part = 1;
	  *ptr = 0;
	  return path;
	}

      /* If this is an IDE, SCSI or Virtio disk.  */
      if (strncmp ("vdisk", p, 5) == 0
	  && p[5] >= 'a' && p[5] <= 'z')
	{
	  /* /dev/vdisk[a-z][0-9]* */
	  if (p[6])
	    *is_part = 1;
	  p[6] = '\0';
	  return path;
	}
      if ((strncmp ("hd", p, 2) == 0
	   || strncmp ("vd", p, 2) == 0
	   || strncmp ("sd", p, 2) == 0)
	  && p[2] >= 'a' && p[2] <= 'z')
	{
	  char *pp = p + 2;
	  while (*pp >= 'a' && *pp <= 'z')
	    pp++;
	  if (*pp)
	    *is_part = 1;
	  /* /dev/[hsv]d[a-z]+[0-9]* */
	  *pp = '\0';
	  return path;
	}

      /* If this is a Xen virtual block device.  */
      if ((strncmp ("xvd", p, 3) == 0) && p[3] >= 'a' && p[3] <= 'z')
	{
	  char *pp = p + 3;
	  while (*pp >= 'a' && *pp <= 'z')
	    pp++;
	  if (*pp)
	    *is_part = 1;
	  /* /dev/xvd[a-z]+[0-9]* */
	  *pp = '\0';
	  return path;
	}

      /* If this is a loop device */
      if ((strncmp ("loop", p, 4) == 0) && p[4] >= '0' && p[4] <= '9')
	{
	  char *pp = p + 4;
	  while (*pp >= '0' && *pp <= '9')
	    pp++;
	  if (*pp == 'p')
	    *is_part = 1;
	  /* /dev/loop[0-9]+p[0-9]* */
	  *pp = '\0';
	  return path;
	}

      /* If this is a NVMe device */
      if ((strncmp ("nvme", p, 4) == 0) && p[4] >= '0' && p[4] <= '9')
	{
	  char *pp = p + 4;
	  while (*pp >= '0' && *pp <= '9')
	    pp++;
	  if (*pp == 'n')
	    pp++;
	  while (*pp >= '0' && *pp <= '9')
	    pp++;
	  if (*pp == 'p')
	    *is_part = 1;
	  /* /dev/nvme[0-9]+n[0-9]+p[0-9]* */
	  *pp = '\0';
	  return path;
	}
    }

  return path;
}
/*
 * (Re)initialize PAM for the userauth method currently being tried.
 * Tears down any PAM state left over from a previous method, allocates a
 * fresh pam_stuff record on the authctxt, calls pam_start() with a
 * method-derived service name, and sets PAM_RHOST/PAM_TTY.  Any failure
 * is fatal (fatal() does not return).  Reads the globals __pam_msg,
 * utmp_len, options and compat20.
 */
void new_start_pam(Authctxt *authctxt, struct pam_conv *conv)
{
	int		retval;
	pam_handle_t	*pamh;
	const char	*rhost, *svc;
	char		*user = NULL;
	pam_stuff	*pam;

	if (authctxt == NULL)
		fatal("Internal error during userauth");
	if (compat20 && authctxt->method == NULL)
		fatal("Userauth method unknown while starting PAM");

	/* PAM service selected here -- name depends on the userauth
	 * method (e.g. password vs. keyboard-interactive). */
	svc = derive_pam_svc_name(authctxt->method);
	debug2("Starting PAM service %s for method %s", svc,
	    get_method_name(authctxt));

	if (authctxt->user != NULL)
		user = authctxt->user;

	/* Cleanup previous PAM state: unhook the old fatal-cleanup
	 * handler first so it cannot run twice, then run it now. */
	if (authctxt->pam != NULL) {
		fatal_remove_cleanup(&do_pam_cleanup_proc, authctxt->pam);
		do_pam_cleanup_proc(authctxt->pam);
	}

	pam = xmalloc(sizeof(pam_stuff));
	(void) memset(pam, 0, sizeof(pam_stuff));

	/*
	 * pam->last_pam_retval has to be and is considered
	 * along with pam->state.
	 *
	 * pam->state = 0; -> no PAM auth, account, etc, work
	 * done yet.  (Set by memset() above.)
	 *
	 * pam->last_pam_retval = PAM_SUCCESS; -> meaningless at
	 * this point.
	 *
	 * See finish_userauth_do_pam() below.
	 */
	pam->authctxt = authctxt;
	pam->last_pam_retval = PAM_SUCCESS;
	authctxt->pam = pam;

	/* Free any previously stored text/error PAM prompts */
	if (__pam_msg) {
		xfree(__pam_msg);
		__pam_msg = NULL;
	}

	if ((retval = pam_start(svc, user, conv, &pamh)) != PAM_SUCCESS) {
		fatal("PAM initialization failed during %s userauth",
		    get_method_name(authctxt));
	}

	/* Register cleanup for the new PAM state; runs on fatal(). */
	fatal_add_cleanup((void (*)(void *)) &do_pam_cleanup_proc,
	    (void *) authctxt->pam);

	rhost = get_remote_name_or_ip(utmp_len,
	    options.verify_reverse_mapping);
	if ((retval = pam_set_item(pamh, PAM_RHOST, rhost)) != PAM_SUCCESS) {
		(void) pam_end(pamh, retval);
		fatal("Could not set PAM_RHOST item during %s userauth",
		    get_method_name(authctxt));
	}

	if ((retval = pam_set_item(pamh, PAM_TTY, "sshd")) != PAM_SUCCESS) {
		(void) pam_end(pamh, retval);
		fatal("Could not set PAM_TTY item during %s userauth",
		    get_method_name(authctxt));
	}

	/* Publish the handle only after it is fully configured. */
	authctxt->pam->h = pamh;
}
/* Simple LZO1X round-trip demo: read the "LICENSE" file, compress it,
 * decompress it, and verify the sizes match.  Returns 0 on success or
 * on setup errors, 2 on compression failure, 1 on decompression
 * failure (codes kept from the original).
 *
 * Fixes: fread() result is now checked, and buffers/file handles are
 * released on every early-exit path.  (Comments translated from
 * Korean.) */
int _tmain(int argc, _TCHAR* argv[])
{
	lzo_bytep in;
	lzo_bytep out;
	lzo_voidp wrkmem;
	lzo_uint in_len;
	lzo_uint out_len;
	lzo_uint new_len;

	/* Initialize the LZO library. */
	if (lzo_init() != LZO_E_OK) {
		printf("lzo_init() failed\n");
		return 0;
	}

	/* Open the input file and find its size.
	 * NOTE(review): ftell() can return -1 on error -- TODO check. */
	FILE* fp = 0;
	errno_t err = fopen_s(&fp, "LICENSE", "rb");
	if (err != 0)
		return 0;
	fseek(fp, 0L, SEEK_END);
	in_len = ftell(fp);
	fseek(fp, 0L, SEEK_SET);

	/* Allocate buffers.  Small inputs can grow when compressed, so
	 * the output buffer is slightly larger than the input. */
	in = (lzo_bytep)xmalloc(in_len);
	out_len = in_len + in_len / 16 + 64 + 3;
	out = (lzo_bytep)xmalloc(out_len);
	wrkmem = (lzo_voidp)xmalloc(LZO1X_1_MEM_COMPRESS);
	if (in == NULL || out == NULL || wrkmem == NULL) {
		printf("out of memory\n");
		fclose(fp);
		return 0;
	}

	/* Read the file contents; fix: check for short reads. */
	if (in_len > 0 && fread(in, in_len, 1, fp) != 1) {
		printf("read error\n");
		fclose(fp);
		lzo_free(wrkmem);
		lzo_free(out);
		lzo_free(in);
		return 0;
	}
	fclose(fp);

	/* Compress. */
	int r = lzo1x_1_compress(in, in_len, out, &out_len, wrkmem);
	if (r == LZO_E_OK)
		printf("compressed %lu bytes into %lu bytes\n",
		       (unsigned long)in_len, (unsigned long)out_len);
	else {
		/* this should NEVER happen */
		printf("internal error - compression failed: %d\n", r);
		lzo_free(wrkmem);
		lzo_free(out);
		lzo_free(in);
		return 2;
	}

	/* check for an incompressible block */
	if (out_len >= in_len) {
		printf("This block contains incompressible data.\n");
		lzo_free(wrkmem);
		lzo_free(out);
		lzo_free(in);
		return 0;
	}

	/* Decompress and verify the round trip. */
	new_len = in_len;
	r = lzo1x_decompress(out, out_len, in, &new_len, NULL);
	if (r == LZO_E_OK && new_len == in_len)
		printf("decompressed %lu bytes back into %lu bytes\n",
		       (unsigned long)out_len, (unsigned long)in_len);
	else {
		/* this should NEVER happen */
		printf("internal error - decompression failed: %d\n", r);
		lzo_free(wrkmem);
		lzo_free(out);
		lzo_free(in);
		return 1;
	}

	lzo_free(wrkmem);
	lzo_free(out);
	lzo_free(in);
	printf("Simple compression test passed.\n");
	return 0;
}
/* Create a new finfo record for FNAME and open the underlying file.
 * PNG files (".png" extension) are opened via libpng far enough to read
 * their text chunks (looking for the FITSFILE keyword that names the
 * originating FITS file); anything else is treated as FITS and resolved
 * against the JS9_DATAPATH search path.  Returns the new (or cached)
 * record, or NULL on failure. */
static Finfo FinfoNew(char *fname)
{
  int i, len;
  char *e=NULL;
  char *f=NULL;
  char *s=NULL;
  unsigned char header[8];
  Finfo finfo;

  /* sanity check */
  if( !fname ) return NULL;
  /* return existing finfo, if possible */
  if( (finfo=FinfoLookup(fname)) ) return finfo;
  /* allocate record */
  if( !(finfo = (Finfo)xcalloc(sizeof(FinfoRec), 1)) ){
    fprintf(stderr, "ERROR: can't allocate rec for image\n");
    return NULL;
  }
  /* save file name */
  finfo->fname = xstrdup(fname);
  /* check for file type: only the ".png" extension is special-cased */
  if( (s = strrchr(fname, '.')) && !strcasecmp(s, ".png") ){
    /* its a PNG */
    finfo->ftype = FTYPE_PNG;
  } else {
    /* assume FITS type */
    finfo->ftype = FTYPE_FITS;
  }
  /* open file */
  switch(finfo->ftype){
  case FTYPE_PNG:
    /* code taken from "PNG: The Definitive Guide" by Greg Roelofs,
       Chapter 13 "Reading PNG Images" */
    /* set data path */
    datapath = getenv("JS9_DATAPATH");
    /* look for path of the PNG file.
       NOTE(review): s is xfree'd in the FITS branch below but not here
       -- if Find() returns allocated memory this leaks; confirm. */
    s = Find(fname, "r", NULL, datapath);
    if( s && *s ){
      if( !(finfo->fp = fopen(s, "rb")) ){
	fprintf(stderr, "ERROR: can't open PNG file '%s'\n", fname);
	goto error;
      }
      /* read and validate the 8-byte PNG signature.
	 NOTE(review): fread result unchecked; a short read is caught
	 by png_sig_cmp() below in practice. */
      fread(header, 1, 8, finfo->fp);
      if( png_sig_cmp(header, 0, 8) ){
	fprintf(stderr, "ERROR: not recognized as a PNG file '%s'\n", fname);
	goto error;
      }
      /* initialize stuff */
      finfo->png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING,
					      NULL, NULL, NULL);
      if( !finfo->png_ptr ){
	fprintf(stderr, "ERROR: png_create_read_struct failed '%s'\n", fname);
	goto error;
      }
      finfo->info_ptr = png_create_info_struct(finfo->png_ptr);
      if( !finfo->info_ptr ){
	fprintf(stderr, "ERROR: png_create_info_struct failed '%s'\n", fname);
	goto error;
      }
      /* libpng reports errors by longjmp'ing back here */
      if( setjmp(png_jmpbuf(finfo->png_ptr)) ){
	fprintf(stderr, "ERROR: during png init_io '%s'\n", fname);
	goto error;
      }
      png_init_io(finfo->png_ptr, finfo->fp);
      png_set_sig_bytes(finfo->png_ptr, 8);
      png_read_info(finfo->png_ptr, finfo->info_ptr);
      /* get the text chunks that come before the image */
      if( png_get_text(finfo->png_ptr, finfo->info_ptr,
		       &(finfo->text_ptr), &(finfo->num_text)) > 0 ){
	/* process all known PNG keywords */
	for(i=0; i<finfo->num_text; i++){
	  if( !strcmp(finfo->text_ptr[i].key, FITSFILE) ){
	    finfo->fitsfile = xstrdup(finfo->text_ptr[i].text);
	    /* remove the extension that was used to generate png */
	    s = strchr(finfo->fitsfile, '[');
	    if( s ){
	      *s = '\0';
	    }
	  }
	}
      }
    } else {
      fprintf(stderr, "ERROR: can't locate PNG file for '%s' (%s)\n",
	      fname, datapath);
      goto error;
    }
    break;
  case FTYPE_FITS:
    /* fits file can have an extension */
    f = FileRoot(fname);
    /* set data path */
    datapath = getenv("JS9_DATAPATH");
    /* look for path of the FITS file */
    s = Find(f, "r", NULL, datapath);
    xfree(f);
    if( s && *s ){
      len = strlen(s) + 1;
      /* construct full path to fits file + extension */
      e = FileExtension(fname);
      if( e ){
	len += strlen(e);
      }
      finfo->fitsfile = xmalloc(len);
      strcpy(finfo->fitsfile, s);
      if( e ){
	strcat(finfo->fitsfile, e);
      }
      xfree(s);
    } else {
      fprintf(stderr, "ERROR: can't locate FITS file for '%s' (%s)\n",
	      fname, datapath);
      goto error;
    }
    break;
  default:
    fprintf(stderr, "ERROR: unknown file type '%s'\n", fname);
    goto error;
    break;
  }
  /* add this finfo to end of list of existing finfos */
  FinfoListAdd(&finfohead, finfo);
  /* return the news */
  return finfo;
error:
  /* free up struct and return nothing */
  _FinfoFree(finfo);
  return NULL;
}
int easy_pkcs7_sign(const char *content, size_t len, char **signature, size_t *signature_len, const char *key_file, const char *cert_file) { FILE *f; X509 *certificate; STACK_OF(X509) *c, *cert_chain; EVP_PKEY *private_key; char *tmp_sig; BIO *out, *in; PKCS7 *p7; int status; OpenSSL_add_all_algorithms(); ERR_load_crypto_strings(); status = -1; private_key = NULL; cert_chain = NULL; in = NULL; c = file_to_certs(cert_file); if (sk_X509_num(c) != 1) { warnx("More then one certificate in the certificate file"); goto cleanup; } certificate = sk_X509_value(c, 0); /* Compute ex_kusage */ X509_check_purpose(certificate, -1, 0); if (check_ca(certificate)) { warnx("CA keys are not valid for signatures"); goto cleanup; } if (certificate->ex_xkusage != pkg_key_usage) { warnx("Certificate must have CODE SIGNING " "and EMAIL PROTECTION property"); goto cleanup; } if (cert_chain_file) cert_chain = file_to_certs(cert_chain_file); if ((f = fopen(key_file, "r")) == NULL) { warn("Failed to open private key file %s", key_file); goto cleanup; } private_key = PEM_read_PrivateKey(f, NULL, ssl_pass_cb, NULL); fclose(f); if (private_key == NULL) { warnx("Can't read private key: %s", key_file); goto cleanup; } if (X509_check_private_key(certificate, private_key) != 1) { warnx("The private key %s doesn't match the certificate %s", key_file, cert_file); goto cleanup; } in = BIO_new_mem_buf(__UNCONST(content), len); p7 = PKCS7_sign(certificate, private_key, cert_chain, in, PKCS7_DETACHED|PKCS7_NOATTR|PKCS7_BINARY); if (p7 == NULL) { warnx("Failed to create signature structure"); goto cleanup; } out = BIO_new(BIO_s_mem()); PEM_write_bio_PKCS7(out, p7); *signature_len = BIO_get_mem_data(out, &tmp_sig); *signature = xmalloc(*signature_len); memcpy(*signature, tmp_sig, *signature_len); BIO_free_all(out); PKCS7_free(p7); status = 0; cleanup: sk_X509_free(c); sk_X509_free(cert_chain); EVP_PKEY_free(private_key); BIO_free(in); return status; }
enum cmd_retval cmd_set_buffer_exec(struct cmd *self, struct cmd_q *cmdq) { struct args *args = self->args; struct paste_buffer *pb; char *pdata, *cause; const char *bufname; size_t psize, newsize; bufname = NULL; if (args_has(args, 'n')) { if (args->argc > 0) { cmdq_error(cmdq, "don't provide data with n flag"); return (CMD_RETURN_ERROR); } if (args_has(args, 'b')) bufname = args_get(args, 'b'); if (bufname == NULL) { pb = paste_get_top(); if (pb == NULL) { cmdq_error(cmdq, "no buffer"); return (CMD_RETURN_ERROR); } bufname = pb->name; } if (paste_rename(bufname, args_get(args, 'n'), &cause) != 0) { cmdq_error(cmdq, "%s", cause); free(cause); return (CMD_RETURN_ERROR); } return (CMD_RETURN_NORMAL); } if (args->argc != 1) { cmdq_error(cmdq, "no data specified"); return (CMD_RETURN_ERROR); } psize = 0; pdata = NULL; pb = NULL; if ((newsize = strlen(args->argv[0])) == 0) return (CMD_RETURN_NORMAL); if (args_has(args, 'b')) { bufname = args_get(args, 'b'); pb = paste_get_name(bufname); } else if (args_has(args, 'a')) { pb = paste_get_top(); if (pb != NULL) bufname = pb->name; } if (args_has(args, 'a') && pb != NULL) { psize = pb->size; pdata = xmalloc(psize); memcpy(pdata, pb->data, psize); } pdata = xrealloc(pdata, 1, psize + newsize); memcpy(pdata + psize, args->argv[0], newsize); psize += newsize; if (paste_set(pdata, psize, bufname, &cause) != 0) { cmdq_error(cmdq, "%s", cause); free(pdata); free(cause); return (CMD_RETURN_ERROR); } return (CMD_RETURN_NORMAL); }
/* Attempts to open 'name' with the specified 'open_mode'.  On success, stores
 * the new log into '*filep' and returns NULL; otherwise returns the error and
 * stores NULL into '*filep'.
 *
 * Whether the file will be locked using lockfile_lock() depends on 'locking':
 * use true to lock it, false not to lock it, or -1 to lock it only if
 * 'open_mode' is a mode that allows writing.
 */
struct ovsdb_error *
ovsdb_log_open(const char *name, enum ovsdb_log_open_mode open_mode,
               int locking, struct ovsdb_log **filep)
{
    struct lockfile *lockfile;
    struct ovsdb_error *error;
    struct ovsdb_log *file;
    struct stat s;
    FILE *stream;
    int flags;
    int fd;

    *filep = NULL;

    ovs_assert(locking == -1 || locking == false || locking == true);
    /* -1 means "lock only when the mode allows writing". */
    if (locking < 0) {
        locking = open_mode != OVSDB_LOG_READ_ONLY;
    }
    if (locking) {
        int retval = lockfile_lock(name, &lockfile);
        if (retval) {
            error = ovsdb_io_error(retval, "%s: failed to lock lockfile",
                                   name);
            goto error;
        }
    } else {
        lockfile = NULL;
    }

    /* Translate the open mode into open(2) flags. */
    if (open_mode == OVSDB_LOG_READ_ONLY) {
        flags = O_RDONLY;
    } else if (open_mode == OVSDB_LOG_READ_WRITE) {
        flags = O_RDWR;
    } else if (open_mode == OVSDB_LOG_CREATE) {
#ifndef _WIN32
        if (stat(name, &s) == -1 && errno == ENOENT
            && lstat(name, &s) == 0 && S_ISLNK(s.st_mode)) {
            /* 'name' is a dangling symlink.  We want to create the file that
             * the symlink points to, but POSIX says that open() with O_EXCL
             * must fail with EEXIST if the named file is a symlink.  So, we
             * have to leave off O_EXCL and accept the race. */
            flags = O_RDWR | O_CREAT;
        } else {
            flags = O_RDWR | O_CREAT | O_EXCL;
        }
#else
        flags = O_RDWR | O_CREAT | O_EXCL;
#endif
    } else {
        OVS_NOT_REACHED();
    }
#ifdef _WIN32
    flags = flags | O_BINARY;
#endif
    fd = open(name, flags, 0666);
    if (fd < 0) {
        const char *op = open_mode == OVSDB_LOG_CREATE ? "create" : "open";
        error = ovsdb_io_error(errno, "%s: %s failed", name, op);
        goto error_unlock;
    }

    if (!fstat(fd, &s) && s.st_size == 0) {
        /* It's (probably) a new file so fsync() its parent directory to ensure
         * that its directory entry is committed to disk. */
        fsync_parent_dir(name);
    }

    /* Binary mode matters on Windows; harmless elsewhere. */
    stream = fdopen(fd, open_mode == OVSDB_LOG_READ_ONLY ? "rb" : "w+b");
    if (!stream) {
        error = ovsdb_io_error(errno, "%s: fdopen failed", name);
        goto error_close;
    }

    /* Fresh logs start positioned at offset 0 in read mode. */
    file = xmalloc(sizeof *file);
    file->name = xstrdup(name);
    file->lockfile = lockfile;
    file->stream = stream;
    file->prev_offset = 0;
    file->offset = 0;
    file->read_error = NULL;
    file->write_error = false;
    file->mode = OVSDB_LOG_READ;
    *filep = file;
    return NULL;

error_close:
    close(fd);
error_unlock:
    /* lockfile_unlock() on NULL is fine when we never locked. */
    lockfile_unlock(lockfile);
error:
    return error;
}
/* Create "opt_name=opt_value" string */
/* Decodes one DHCP option's raw bytes into a freshly xmalloc'd
 * "NAME=value[ value...]" string according to the option's TYPE_MASK type.
 * Caller frees the returned string. */
static NOINLINE char *xmalloc_optname_optval(uint8_t *option, const struct dhcp_option *type_p, const char *opt_name)
{
	unsigned upper_length;
	int len, type, optlen;
	uint16_t val_u16;
	int16_t val_s16;
	uint32_t val_u32;
	int32_t val_s32;
	char *dest, *ret;

	/* option points to OPT_DATA, need to go back and get OPT_LEN */
	len = option[OPT_LEN - OPT_DATA];
	type = type_p->flags & TYPE_MASK;
	optlen = dhcp_option_lengths[type];
	/* Worst-case string length for (len / optlen) elements of this type. */
	upper_length = len_of_option_as_string[type] * (len / optlen);

	/* +2: '=' and terminating NUL. */
	dest = ret = xmalloc(upper_length + strlen(opt_name) + 2);
	dest += sprintf(ret, "%s=", opt_name);

	/* Emit one decoded element per iteration, space-separated. */
	while (len >= optlen) {
		switch (type) {
		case OPTION_IP_PAIR:
			dest += sprint_nip(dest, "", option);
			*dest++ = '/';
			option += 4;
			optlen = 4;
			/* fall through: print the second IP of the pair */
		case OPTION_IP:	/* Works regardless of host byte order. */
			dest += sprint_nip(dest, "", option);
			break;
		case OPTION_BOOLEAN:
			dest += sprintf(dest, *option ? "yes" : "no");
			break;
		case OPTION_U8:
			dest += sprintf(dest, "%u", *option);
			break;
		case OPTION_U16:
			move_from_unaligned16(val_u16, option);
			dest += sprintf(dest, "%u", ntohs(val_u16));
			break;
		case OPTION_S16:
			move_from_unaligned16(val_s16, option);
			/* NOTE(review): ntohs() yields an unsigned 16-bit value, so
			 * negative values print as 0..65535 here — confirm intended. */
			dest += sprintf(dest, "%d", ntohs(val_s16));
			break;
		case OPTION_U32:
			move_from_unaligned32(val_u32, option);
			dest += sprintf(dest, "%lu", (unsigned long) ntohl(val_u32));
			break;
		case OPTION_S32:
			move_from_unaligned32(val_s32, option);
			dest += sprintf(dest, "%ld", (long) ntohl(val_s32));
			break;
		case OPTION_STRING:
			memcpy(dest, option, len);
			dest[len] = '\0';
			return ret;	 /* Short circuit this case */
		case OPTION_STATIC_ROUTES: {
			/* Option binary format:
			 * mask [one byte, 0..32]
			 * ip [big endian, 0..4 bytes depending on mask]
			 * router [big endian, 4 bytes]
			 * may be repeated
			 *
			 * We convert it to a string "IP/MASK ROUTER IP2/MASK2 ROUTER2"
			 */
			const char *pfx = "";

			while (len >= 1 + 4) { /* mask + 0-byte ip + router */
				uint32_t nip;
				uint8_t *p;
				unsigned mask;
				int bytes;

				mask = *option++;
				if (mask > 32)
					break;
				len--;

				/* Reassemble the destination prefix: only the
				 * significant (mask+7)/8 bytes are on the wire. */
				nip = 0;
				p = (void*) &nip;
				bytes = (mask + 7) / 8; /* 0 -> 0, 1..8 -> 1, 9..16 -> 2 etc */
				while (--bytes >= 0) {
					*p++ = *option++;
					len--;
				}

				if (len < 4)
					break;

				/* print ip/mask */
				dest += sprint_nip(dest, pfx, (void*) &nip);
				pfx = " ";
				dest += sprintf(dest, "/%u ", mask);
				/* print router */
				dest += sprint_nip(dest, "", option);
				option += 4;
				len -= 4;
			}

			return ret;
		}
#if ENABLE_FEATURE_UDHCP_RFC3397
		case OPTION_STR1035:
			/* unpack option into dest; use ret for prefix (i.e., "optname=") */
			dest = dname_dec(option, len, ret);
			if (dest) {
				free(ret);
				return dest;
			}
			/* error. return "optname=" string */
			return ret;
#endif
		}
		/* Advance to the next fixed-size element. */
		option += optlen;
		len -= optlen;
		if (len <= 0)
			break;
		*dest++ = ' ';
		*dest = '\0';
	}
	return ret;
}