/*
 * Create a keyboard layout object with the given name.
 * Both 128-entry scancode tables are zero-initialized.
 * Returns NULL if name is NULL or on allocation failure.
 */
km_layout *km_createLayout(const char *name) {
	km_layout *temp;
	int i;

	/* Guard against a NULL name before calling strlen() on it. */
	if (!name)
		return NULL;

	temp = (km_layout *)mm_alloc(sizeof(km_layout));
	if (temp) {
		temp->name = (char *)mm_alloc(strlen(name) + 1);
		if (!temp->name) {
			mm_free(temp);
			return NULL;
		}
		strcpy(temp->name, name);

		/* Clear both translation tables. */
		for (i = 0; i < 128; i++) {
			temp->layout_noCaps[i] = 0;
			temp->layout_caps[i] = 0;
		}
	}
	return temp;
}
/** * Creates a void image with the given name **/ ld_image *ld_createVoidImage(const char *name) { if(!name) return NULL; ld_image *image = mm_alloc(sizeof(ld_image)); if(image) { image->name = (const char *)mm_alloc((strlen(name) + 1) * sizeof(char)); if(!image->name) { mm_free(image); return NULL; } strcpy((char *)image->name, (char *)name); image->type = ld_imageTypeUnknown; image->entry = NULL; image->next = NULL; } return image; }
Bool namespace_recognition_swstart(struct SWSTART *params) { uint32 i ; UNUSED_PARAM(struct SWSTART *, params) ; HQASSERT(recognition_table == NULL, "recognition_table is not NULL") ; if ((recognition_table = mm_alloc(mm_pool_fixed, sizeof(struct Recognition_fltr_ctxt), MM_ALLOC_CLASS_XML_NAMESPACE)) == NULL) return error_handler(VMERROR) ; recognition_table->table = mm_alloc(mm_pool_fixed, sizeof(struct NSRecognitionEntry*) * NAMESPACE_RECOGNITION_HASHTABLESIZE, MM_ALLOC_CLASS_XML_NAMESPACE) ; if (recognition_table->table == NULL) { namespace_recognition_finish(); return error_handler(VMERROR) ; } /* Initialize the table structure. */ recognition_table->num_entries = 0 ; for (i=0; i < NAMESPACE_RECOGNITION_HASHTABLESIZE; i++) { recognition_table->table[i] = NULL ; } return TRUE ; }
/*
 * Read the volume header from a device and optionally decrypt it.
 *
 * hook     - device hook to read through.
 * header   - receives a newly allocated buffer with the (decrypted) header;
 *            freed and set to NULL on any failure.
 * out_key  - if non-NULL and decryption succeeds, receives the header key
 *            (ownership transfers to the caller).
 * password - if non-NULL, the header is decrypted with a key derived from it;
 *            if NULL, the header is returned exactly as read.
 *
 * Returns ST_OK, or an ST_* error code.
 */
int io_read_header(dev_hook *hook, dc_header **header, xts_key **out_key, dc_pass *password)
{
	xts_key *hdr_key = NULL;
	/* header I/O covers at least one full device sector */
	int      hdr_len = max(sizeof(dc_header), hook->bps);
	int      resl;

	/* allocate memory for header */
	if ( (*header = mm_alloc(hdr_len, MEM_SECURE | MEM_SUCCESS)) == NULL ) return ST_NOMEM;

	do
	{
		/* read volume header */
		if ( (resl = io_hook_rw(hook, *header, hdr_len, 0, 1)) != ST_OK ) break;
		/* decrypt volume header */
		if (password != NULL)
		{
			/* allocate memory for header key */
			if ( (hdr_key = mm_alloc(sizeof(xts_key), MEM_SECURE)) == NULL ) {
				resl = ST_NOMEM; break;
			}
			/* try to decrypt header */
			if (cp_decrypt_header(hdr_key, *header, password) == 0) {
				resl = ST_PASS_ERR; break;
			}
			/* save decrypted header and key */
			if (out_key != NULL) {
				/* hand key ownership to caller; clear the local so it
				   is not freed below */
				*out_key = hdr_key; hdr_key = NULL;
			}
		}
	} while (0);

	if (resl != ST_OK) {
		/* on failure the caller must not see a stale header buffer */
		mm_free(*header); *header = NULL;
	}
	if (hdr_key != NULL) mm_free(hdr_key);
	return resl;
}
static int load(struct kr_module *module, const char *path) { auto_fclose FILE *fp = fopen(path, "r"); if (fp == NULL) { DEBUG_MSG(NULL, "reading '%s' failed: %s\n", path, strerror(errno)); return kr_error(errno); } else { DEBUG_MSG(NULL, "reading '%s'\n", path); } /* Create pool and copy itself */ mm_ctx_t _pool = { .ctx = mp_new(4096), .alloc = (mm_alloc_t) mp_alloc }; mm_ctx_t *pool = mm_alloc(&_pool, sizeof(*pool)); if (!pool) { return kr_error(ENOMEM); } memcpy(pool, &_pool, sizeof(*pool)); /* Load file to map */ struct kr_zonecut *hints = mm_alloc(pool, sizeof(*hints)); kr_zonecut_init(hints, (const uint8_t *)(""), pool); module->data = hints; return load_map(hints, fp); }
/*! \brief Ensure the packet's RR bookkeeping arrays can hold \a count records. */
static int pkt_rr_array_alloc(knot_pkt_t *pkt, uint16_t count)
{
	/* Nothing to do if the current capacity already suffices. */
	if (count <= pkt->rrset_allocd) {
		return KNOT_EOK;
	}

	/* Grow both arrays to the next size step. */
	size_t grown = NEXT_RR_COUNT(count);
	knot_rrinfo_t *new_info = mm_alloc(&pkt->mm, sizeof(knot_rrinfo_t) * grown);
	if (new_info == NULL) {
		return KNOT_ENOMEM;
	}
	knot_rrset_t *new_rr = mm_alloc(&pkt->mm, sizeof(knot_rrset_t) * grown);
	if (new_rr == NULL) {
		mm_free(&pkt->mm, new_info);
		return KNOT_ENOMEM;
	}

	/* Preserve the entries already stored. */
	memcpy(new_info, pkt->rr_info, pkt->rrset_allocd * sizeof(knot_rrinfo_t));
	memcpy(new_rr, pkt->rr, pkt->rrset_allocd * sizeof(knot_rrset_t));

	/* Swap in the new arrays and release the old ones. */
	mm_free(&pkt->mm, pkt->rr);
	mm_free(&pkt->mm, pkt->rr_info);
	pkt->rr = new_rr;
	pkt->rr_info = new_info;
	pkt->rrset_allocd = grown;

	return KNOT_EOK;
}
static void _cmdline_process(int argc, char *const *argv) { int opt = 0; while ((opt = getopt(argc, argv, "frRSu:g:i:o:e:p:")) != -1) { if (opt == 'u') { if (!strisnum(optarg)) _usage_invalid_opt_arg(opt, optarg, argv); config.uid = strtoul(optarg, NULL, 10); } else if (opt == 'g') { if (!strisnum(optarg)) _usage_invalid_opt_arg(opt, optarg, argv); config.gid = strtoul(optarg, NULL, 10); } else if (opt == 'i') { config.flags |= CONFIG_FL_REDIR_IN; if (!(config.inf_name = mm_alloc(strlen(optarg) + 1))) _failure("malloc"); strcpy(config.inf_name, optarg); } else if (opt == 'o') { config.flags |= CONFIG_FL_REDIR_OUT; if (!(config.outf_name = mm_alloc(strlen(optarg) + 1))) _failure("malloc"); strcpy(config.outf_name, optarg); } else if (opt == 'e') { config.flags |= CONFIG_FL_REDIR_ERR; if (!(config.errf_name = mm_alloc(strlen(optarg) + 1))) _failure("malloc"); strcpy(config.errf_name, optarg); } else if (opt == 'p') { config.flags |= CONFIG_FL_PIDF_CREATE; if (!(config.pidf_name = mm_alloc(strlen(optarg) + 1))) _failure("malloc"); strcpy(config.pidf_name, optarg); } else if (opt == 'f') { config.flags |= CONFIG_FL_PIDF_FORCE; } else if (opt == 'r') { config.flags |= CONFIG_FL_PROC_RESTART; } else if (opt == 'R') { config.flags |= CONFIG_FL_PROC_RESTART; config.flags |= CONFIG_FL_PROC_RSTIGN; } else if (opt == 'S') { config.flags |= CONFIG_FL_PROC_RSTUSIG; } else { _usage_invalid_opt(optopt, argv); } } if ((argc - optind) < 1) _usage(argv); _config_set_req(argv[optind]); _config_set_opt(argv, optind); }
/*
 * Parse a conjunction compound ("[ CONJ ] { PREP ... }") starting at argv[0],
 * chaining a new request entry onto 'req' and recursing into the
 * prepositional parser for the remaining arguments.
 * On any failure the whole request chain rooted at 'req' is destroyed and
 * NULL is returned.
 */
static struct usched_client_request *_parse_conj_compound(struct usched_client_request *req, int argc, char **argv) {
	int ret = 0;

	/* Process conjuction */
	if ((ret = _parse_get_conj(argv[0])) < 0) {
		usage_client_error_set(USCHED_USAGE_CLIENT_ERR_INVALID_CONJ, argv[0]);
		goto _conj_error;
	}

	req->conj = ret;

	/* Pre-validate the logic of conjunctions:
	 *
	 * - After an UNTIL conjunction, only the AND conjuction is accepted
	 * - After a WHILE conjunction, only the AND conjunction is accepted
	 */
	if (req->prev && (req->conj != USCHED_CONJ_AND)) {
		if ((req->prev->conj == USCHED_CONJ_UNTIL) || (req->prev->conj == USCHED_CONJ_WHILE)) {
			usage_client_error_set(USCHED_USAGE_CLIENT_ERR_UNEXPECT_CONJ, argv[0]);
			goto _conj_error;
		}
	}

	debug_printf(DEBUG_INFO, "CONJ: %d\n", req->conj);

	/* We need at least 4 args to consider the request (the [ CONJ ] and the { PREP [ ARG ADVERB | ADVERB ARG ] }) */
	if (argc < 4) {
		usage_client_error_set(USCHED_USAGE_CLIENT_ERR_INSUFF_ARGS, NULL);
		goto _conj_error;
	}

	/* Allocate the next entry */
	if (!(req->next = mm_alloc(sizeof(struct usched_client_request))))
		goto _conj_error;

	memset(req->next, 0, sizeof(struct usched_client_request));

	/* Duplicate SUBJECT to the next entry. strdup() shall not be used to take advantage of libfsma */
	if (!(req->next->subj = mm_alloc(strlen(req->subj) + 1)))
		goto _conj_error;

	strcpy(req->next->subj, req->subj);

	/* Propagate the operation and credentials to the chained entry. */
	req->next->op = req->op;
	req->next->uid = req->uid;
	req->next->gid = req->gid;
	req->next->prev = req;

	/* Continue parsing from the token after the conjunction. */
	return _parse_prep_compound(req->next, argc - 1, &argv[1]);

_conj_error:
	/* Destroys the entire chain, including any partially built next entry. */
	parse_client_req_destroy(req);

	return NULL;
}
// Build the descriptor for tile (bi, bj) of the C_H x C_W problem,
// clamping the tile extents at the matrix edges and allocating its
// cost/backtrack buffers.
blk_t blk_get(off_t bi, off_t bj, bool device=false) {
	blk_t blk;

	blk.bi = bi;
	blk.bj = bj;

	// Tile height/width, clamped so edge tiles do not run past the matrix.
	blk.mi = C_H - bi * B_H;
	if (blk.mi > B_H)
		blk.mi = B_H;
	blk.mj = C_W - bj * B_W;
	if (blk.mj > B_W)
		blk.mj = B_W;

	blk.in[0] = &g_in[0][bi * B_H]; // XXX: depends whether we're on device
	blk.in[1] = &g_in[1][bj * B_W];

	blk.wr_back = false;
	blk.cost = (TC *)mm_alloc(bi, bj, true);
	blk.back = (TB *)mm_alloc(bi, bj, false);

	return blk;
}
/*
 * Encrypt and write a volume header to a device.
 *
 * hook    - device hook to write through.
 * header  - plaintext header to write (not modified; a copy is encrypted).
 * hdr_key - existing header key, or NULL to derive a fresh key from
 *           'password' with a newly generated salt.
 * password - passphrase used only when hdr_key is NULL.
 *
 * Returns ST_OK, or an ST_* error code.
 */
int io_write_header(dev_hook *hook, dc_header *header, xts_key *hdr_key, dc_pass *password)
{
	u8         salt[PKCS5_SALT_SIZE];
	/* header I/O covers at least one full device sector */
	int        hdr_len = max(sizeof(dc_header), hook->bps);
	dc_header *hcopy   = NULL;
	xts_key   *h_key   = hdr_key;
	int        resl;

	do
	{
		/* work on a secure copy so the caller's header stays plaintext */
		if ( (hcopy = mm_alloc(hdr_len, MEM_SECURE | MEM_SUCCESS)) == NULL ) {
			resl = ST_NOMEM; break;
		}
		memcpy(hcopy, header, sizeof(dc_header));

		if (h_key == NULL) {
			if ( (h_key = mm_alloc(sizeof(xts_key), MEM_SECURE | MEM_SUCCESS)) == NULL ) {
				resl = ST_NOMEM; break;
			}
		}
		if (hdr_key == NULL)
		{
			/* add volume header to random pool because RNG not
			   have sufficient entropy at boot time */
			cp_rand_add_seed(header, sizeof(dc_header));
			/* generate new salt */
			cp_rand_bytes(salt, PKCS5_SALT_SIZE);
			/* copy salt to header */
			memcpy(hcopy->salt, salt, PKCS5_SALT_SIZE);
			/* init new header key */
			cp_set_header_key(h_key, salt, header->alg_1, password);
		} else {
			/* save original salt */
			memcpy(salt, header->salt, PKCS5_SALT_SIZE);
		}
		/* calc header CRC */
		hcopy->hdr_crc = crc32(pv(&hcopy->version), DC_CRC_AREA_SIZE);
		/* encrypt header with new key */
		xts_encrypt(pv(hcopy), pv(hcopy), sizeof(dc_header), 0, h_key);
		/* restore original salt (the salt is stored unencrypted) */
		memcpy(hcopy->salt, salt, PKCS5_SALT_SIZE);
		/* fill the gap with random numbers */
		if (hdr_len > sizeof(dc_header)) {
			cp_rand_bytes(pv(hcopy + 1), hdr_len - sizeof(dc_header));
		}
		/* write new header */
		resl = io_hook_rw(hook, hcopy, hdr_len, 0, 0);
	} while (0);

	/* prevent leaks */
	burn(salt, sizeof(salt));

	/* free resources; only free the key if we allocated it here */
	if (h_key != NULL && h_key != hdr_key) mm_free(h_key);
	if (hcopy != NULL) mm_free(hcopy);
	return resl;
}
/* ---------------------------------------------------------------------- */
/* Allocate an array of 'number' XREFOBJs from the PDF structure pool,
   mark every slot uninitialised, and attach it to 'xreftab'. */
XREFOBJ *pdf_allocxrefobj( PDFCONTEXT *pdfc , XREFTAB *xreftab , int32 number )
{
  XREFOBJ *objs ;
  int32 i ;
  PDFXCONTEXT *pdfxc ;

  PDF_CHECK_MC( pdfc ) ;
  PDF_GET_XC( pdfxc ) ;

  HQASSERT( xreftab , "xreftab NULL in pdf_allocxrefobj.\n" ) ;
  HQASSERT( number > 0 , "number of objects must be +ve.\n" ) ;

  objs = mm_alloc( pdfxc->mm_structure_pool ,
                   number * sizeof( XREFOBJ ) ,
                   MM_ALLOC_CLASS_PDF_XREF ) ;
  if ( objs == NULL ) {
    (void)error_handler( VMERROR );
    return NULL ;
  }

  /* Every slot starts out uninitialised. */
  for ( i = 0 ; i < number ; ++i )
    objs[ i ].objuse = XREF_Uninitialised ;

  xreftab->xrefobj = objs ;
  return xreftab->xrefobj ;
}
/*
 * Create a timestamped backup copy of the daemon serialization file,
 * named "<file>-<unix time>-<pid>".
 * Returns 0 on success, -1 on failure with errno set by the failing call.
 */
int marshal_daemon_backup(void) {
	int errsv = 0;
	/* Room for the base name plus the "-<time>-<pid>" suffix. */
	size_t len = strlen(rund.config.core.serialize_file) + 50;
	char *file_bak = NULL;

	if (!(file_bak = mm_alloc(len))) {
		errsv = errno;
		log_warn("marshal_daemon_backup(): mm_alloc(): %s\n", strerror(errno));
		errno = errsv;
		return -1;
	}

	/* snprintf() always NUL-terminates, so no pre-zeroing (and no len - 1)
	 * is required. */
	snprintf(file_bak, len, "%s-%lu-%u", rund.config.core.serialize_file, (unsigned long) time(NULL), (unsigned int) getpid());

	if (fsop_cp(rund.config.core.serialize_file, file_bak, 8192) < 0) {
		errsv = errno;
		log_warn("marshal_daemon_backup(): fsop_cp(): %s\n", strerror(errno));
		mm_free(file_bak);
		errno = errsv;
		return -1;
	}

	mm_free(file_bak);

	return 0;
}
/**
 * @brief Return an ECIES public key as binary data (compressed point form).
 * @param key the input ECIES key pair.
 * @param olen a pointer to store the length of the returned key (may be NULL).
 * @return NULL on failure, or a pointer to the raw public key
 *   (caller frees with mm_free()).
 */
uchr_t * deprecated_ecies_key_public_bin(EC_KEY *key, size_t *olen) {

	uchr_t *result;
	size_t rlen, blen = 512;
	const EC_POINT *point;
	const EC_GROUP *group;

	if (!(point = EC_KEY_get0_public_key_d(key))) {
		log_info("No public key available. {%s}", ssl_error_string(MEMORYBUF(256), 256));
		return NULL;
	}
	else if (!(group = EC_KEY_get0_group_d(key))) {
		log_info("No group available. {%s}", ssl_error_string(MEMORYBUF(256), 256));
		return NULL;
	}
	else if (!(result = mm_alloc(blen))) {
		log_info("Error allocating space for ECIES public key.");
		return NULL;
	}
	/* EC_POINT_point2oct() returns a size_t; 0 signals failure, so compare
	 * for equality — "<= 0" on an unsigned value only ever tests == 0. */
	else if ((rlen = EC_POINT_point2oct_d(group, point, POINT_CONVERSION_COMPRESSED, result, blen, NULL)) == 0) {
		log_info("Unable to extract the public key. {%s}", ssl_error_string(MEMORYBUF(256), 256));
		mm_free(result);
		return NULL;
	}

	if (olen) {
		*olen = rlen;
	}

	return result;
}
/* ---------------------------------------------------------------------- */
/* Allocate a new xref section for the given byte offset and append it to
   the tail of the context's xref section list. */
XREFSEC *pdf_allocxrefsec( PDFCONTEXT *pdfc , Hq32x2 byteoffset )
{
  XREFSEC *newsec ;
  XREFSEC **linkp ;
  PDFXCONTEXT *pdfxc ;

  PDF_CHECK_MC( pdfc ) ;
  PDF_GET_XC( pdfxc ) ;

  newsec = mm_alloc( pdfxc->mm_structure_pool ,
                     sizeof( XREFSEC ) ,
                     MM_ALLOC_CLASS_PDF_XREF ) ;
  if ( newsec == NULL ) {
    (void)error_handler( VMERROR );
    return NULL ;
  }

  newsec->byteoffset = byteoffset ;
  newsec->xrefnxt = NULL ;
  newsec->xreftab = NULL ;

  /* Walk the link pointers to the end of the list and append there
     (covers the empty-list case without a special branch). */
  linkp = & pdfxc->xrefsec ;
  while ( *linkp != NULL )
    linkp = & (*linkp)->xrefnxt ;
  *linkp = newsec ;

  return newsec ;
}
/* Register a namespace recognition entry for 'uri', or replace the
   callbacks of an existing entry for the same URI. */
Bool namespace_recognition_add(
      const xmlGIStr *uri,
      XMLDocContextCreate f_create_context,
      XMLDocContextDestroy f_destroy_context)
{
  struct NSRecognitionEntry *entry ;
  uint32 bucket ;

  HQASSERT(recognition_table != NULL, "recognition_table is NULL") ;

  entry = find_recognition_entry(recognition_table, uri, &bucket) ;

  if (entry == NULL) {
    /* First time this URI is seen: push a fresh entry onto its bucket. */
    entry = mm_alloc(mm_pool_temp, sizeof(struct NSRecognitionEntry),
                     MM_ALLOC_CLASS_XML_NAMESPACE) ;
    if (entry == NULL)
      return error_handler(VMERROR) ;

    entry->uri = uri ;
    entry->next = recognition_table->table[bucket] ;
    recognition_table->table[bucket] = entry ;
    recognition_table->num_entries++ ;
  }
  /* An existing entry simply has its callbacks clobbered below. */

  entry->f_create_context = f_create_context ;
  entry->f_destroy_context = f_destroy_context ;

  return TRUE ;
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Parse a (possibly compressed) domain name from packet wire data.
 *
 * \param pkt    Packet wire data.
 * \param pos    In/out read position; advanced past the name on success.
 * \param maxpos End of the readable region within the packet.
 * \param mm     Memory context used for the result (may be NULL → malloc).
 *
 * \return Newly allocated decompressed name, or NULL on error.
 */
_public_ knot_dname_t *knot_dname_parse(const uint8_t *pkt, size_t *pos, size_t maxpos, mm_ctx_t *mm)
{
	if (pkt == NULL || pos == NULL)
		return NULL;

	const uint8_t *name = pkt + *pos;
	const uint8_t *endp = pkt + maxpos;
	int parsed = knot_dname_wire_check(name, endp, pkt);
	if (parsed < 0) {
		return NULL;
	}

	/* Calculate decompressed length. */
	int decompressed_len = knot_dname_realsize(name, pkt);
	if (decompressed_len < 1) {
		return NULL;
	}

	/* Allocate space for the name. */
	knot_dname_t *res = mm_alloc(mm, decompressed_len);
	if (res) {
		/* Unpack name (expand compression pointers). */
		if (knot_dname_unpack(res, name, decompressed_len, pkt) > 0) {
			*pos += parsed;
		} else {
			/* Release through the same memory context the name was
			 * allocated from; a plain free() would mismatch a pooled
			 * allocator. */
			mm_free(mm, res);
			res = NULL;
		}
	}

	return res;
}
/**
 * @brief Create a new magma folder object.
 * @param foldernum the numerical id of this folder.
 * @param parent the folder id of this folder's containing parent folder.
 * @param order the order number of this folder in its parent folder.
 * @param name a managed string with the name of the target folder.
 * @return NULL on failure or a pointer to the newly allocated magma folder object on success.
 *
 * The folder struct, a placer_t header for the name, and the name bytes
 * themselves are packed into a single allocation:
 *   [ magma_folder_t | placer_t ] (16-aligned) [ name bytes + NUL ] (8-aligned)
 */
magma_folder_t * magma_folder_alloc(uint64_t foldernum, uint64_t parent, uint32_t order, stringer_t *name) {

	magma_folder_t *result;

	/* One allocation covers the struct+placer region and the name region. */
	if (!(result = mm_alloc(align(16, sizeof(magma_folder_t) + sizeof(placer_t)) + align(8, st_length_get(name) + 1)))) {
		log_pedantic("Unable to allocate %zu bytes for a folder structure.", align(16, sizeof(magma_folder_t) + sizeof(placer_t)) + align(8, st_length_get(name) + 1));
		return NULL;
	}
	else if (rwlock_init(&(result->lock), NULL)) {
		log_pedantic("Unable to initialize the magma folder thread lock.");
		mm_free(result);
		return NULL;
	}

	result->foldernum = foldernum;
	result->parent = parent;
	result->order = order;

	/* The name placer lives immediately after the folder struct... */
	result->name = (placer_t *)((chr_t *)result + sizeof(magma_folder_t));
	((placer_t *)result->name)->opts = PLACER_T | JOINTED | STACK | FOREIGNDATA;
	((placer_t *)result->name)->length = st_length_get(name);
	/* ...and its data pointer targets the aligned tail of the block,
	 * where the name bytes are copied. */
	((placer_t *)result->name)->data = (chr_t *)result + align(16, sizeof(magma_folder_t) + sizeof(placer_t));
	mm_copy(st_data_get(result->name), st_data_get(name), st_length_get(name));

	return result;
}
/**
 * Create a region info object for tracking the background, and link it
 * onto the head of the supplied list on success.
 */
static Bool region_info_create(DL_STATE *page, RegionInfo **head,
                               COLORANTINDEX ci, Bool processColorant,
                               Bool virtualColorant)
{
  /* Only need region maps for process colorants and any virtual spot
     colorants.  The regionInfos are used to avoid compositing for
     overprinting. */
  RegionInfo *info = mm_alloc(mm_pool_temp, sizeof(RegionInfo),
                              MM_ALLOC_CLASS_REGION);

  if ( info == NULL )
    return error_handler(VMERROR);

  info->ci = ci;
  info->processColorant = processColorant;
  info->virtualColorant = virtualColorant;
  info->currentObject = FALSE;
  info->currentObjectWhite = FALSE;
  info->bitGrid = NULL;
  info->next = *head;

  if ( !region_map_create(page, &info->bitGrid) ) {
    mm_free(mm_pool_temp, info, sizeof(RegionInfo));
    return FALSE;
  }

  *head = info;
  return TRUE;
}
/**
 * @brief Allocate and initialize a new user alert message object.
 * @param alertnum the numerical id of the user alert.
 * @param type a pointer to a managed string containing the type of the alert (e.g. "warning" or "alert").
 * @param message a pointer to a managed string containing the alert message.
 * @param created the UTC timecode for when the alert message was created.
 * @return NULL on error or a pointer to the newly initialized user alert message object on success.
 *
 * The alert struct, two placer_t headers (type, message), and the string
 * bytes are packed into a single allocation:
 *   [ meta_alert_t | placer_t | placer_t ] (16-aligned)
 *   [ type bytes + NUL ] (8-aligned) [ message bytes + NUL ] (8-aligned)
 */
meta_alert_t * alert_alloc(uint64_t alertnum, stringer_t *type, stringer_t *message, uint64_t created) {

	meta_alert_t *result;

	/* One allocation covers the struct+placers region and both string regions. */
	if (!(result = mm_alloc(align(16, sizeof(meta_alert_t) + sizeof(placer_t) + sizeof(placer_t)) + align(8, st_length_get(type) + 1) +
		align(8, st_length_get(message) + 1)))) {
		log_pedantic("Unable to allocate %zu bytes for an alert structure.", align(16, sizeof(meta_alert_t) + sizeof(placer_t) + sizeof(placer_t)) +
			align(8, st_length_get(type) + 1) + align(8, st_length_get(message) + 1));
		return NULL;
	}

	result->alertnum = alertnum;
	result->created = created;

	/* The two placers live immediately after the alert struct. */
	result->type = (placer_t *)((chr_t *)result + sizeof(meta_alert_t));
	result->message = (placer_t *)((chr_t *)result + sizeof(meta_alert_t) + sizeof(placer_t));

	/* Type string: placer points into the aligned tail of the block. */
	((placer_t *)result->type)->opts = PLACER_T | JOINTED | STACK | FOREIGNDATA;
	((placer_t *)result->type)->length = st_length_get(type);
	((placer_t *)result->type)->data = (chr_t *)result + align(16, sizeof(meta_alert_t) + sizeof(placer_t) + sizeof(placer_t));

	/* Message string: placed after the (aligned) type bytes. */
	((placer_t *)result->message)->opts = PLACER_T | JOINTED | STACK | FOREIGNDATA;
	((placer_t *)result->message)->length = st_length_get(message);
	((placer_t *)result->message)->data = (chr_t *)result + align(16, sizeof(meta_alert_t) + sizeof(placer_t) + sizeof(placer_t)) +
		align(8, st_length_get(type) + 1);

	mm_copy(st_data_get(result->type), st_data_get(type), st_length_get(type));
	mm_copy(st_data_get(result->message), st_data_get(message), st_length_get(message));

	return result;
}
/*
 * "Visits" the given handle.
 * First determines whether it actually points to a structure in the
 * old MM. If that structure has not been reached before:
 * - Creates a copy of the structure in the new MM.
 * - Overwrites the first word of the old structure with the handle of
 *   the new object (a forward pointer), so the references can be
 *   updated in the later GC phase.
 * - Marks the old structure as already reached.
 */
static void mm_gc_visit(MM *old_mm, MM *new_mm, Obj *ptr) {
#if MM_VALGRIND_COMPATIBILITY
	VALGRIND_MAKE_MEM_DEFINED(ptr, sizeof(Obj));
#endif
	if (!mm_is_handle(old_mm, *ptr)) {
		/* Not a handle: nothing to relocate. */
		return;
	}
	Obj handle_src = *ptr;
	Obj *src = OBJ_HANDLE_TO_PTR(handle_src);
	if (!(*src & OBJ_FLAG_REACH)) {
		/* Not reached before. */
		/* Create a copy of the structure in the new space. */
		int size = mm_structure_size(handle_src);
		Obj handle_dst = mm_alloc(new_mm, size);
		Obj *dst = OBJ_HANDLE_TO_PTR(handle_dst);
		memcpy(dst, src, size * sizeof(Obj));
		/* Overwrite the first object of the old structure with the
		 * "forward pointer", tagged as reached. */
		*src = handle_dst | OBJ_FLAG_REACH;
	}
	/* Redirect the reference to the relocated structure (the forward
	 * pointer stored in *src), stripping the REACH tag from the copy. */
	*ptr = *src;
	OBJ_UNSET_FLAG_REACH(*ptr);
	/* Preserve the CONTINUE flag carried by the original handle. */
	if (handle_src & OBJ_FLAG_CONTINUE) {
		OBJ_SET_FLAG_CONTINUE(*ptr);
	}
}
/*
 * Launch INX_CHECK_MTHREADS cursor-converter threads, join them all, and
 * report success only if every thread launched, joined, and returned true.
 */
bool_t check_inx_cursor_mthread(check_inx_opt_t *opts) {

	bool_t result = true;
	void *outcome = NULL;
	pthread_t *threads = NULL;

	if (!INX_CHECK_MTHREADS) {
		return true;
	}
	else if (!(threads = mm_alloc(sizeof(pthread_t) * INX_CHECK_MTHREADS))) {
		return false;
	}

	for (uint64_t counter = 0; counter < INX_CHECK_MTHREADS; counter++) {
		if (thread_launch(threads + counter, &check_inx_cursor_mthread_cnv, opts)) {
			result = false;
		}
	}

	for (uint64_t counter = 0; counter < INX_CHECK_MTHREADS; counter++) {
		/* Reset before each join: if thread_result() fails without touching
		 * outcome, a stale pointer from the previous iteration would
		 * otherwise be freed a second time below. */
		outcome = NULL;
		if (thread_result(*(threads + counter), &outcome) || !outcome || !*(bool_t *)outcome) {
			result = false;
		}
		if (outcome) {
			mm_free(outcome);
		}
	}

	mm_free(threads);

	return result;
}
/* Parse a client instruction argument vector into a request chain.
 * Returns NULL with errno set on failure. */
struct usched_client_request *parse_client_instruction_array(int argc, char **argv) {
	struct usched_client_request *request = NULL;
	int saved_errno = 0;

	/* At least one argument is required to form a request. */
	if (argc < 1) {
		errno = EINVAL;
		return NULL;
	}

	request = mm_alloc(sizeof(struct usched_client_request));

	if (!request) {
		saved_errno = errno;
		log_warn("parse_client_instruction_array(): mm_alloc(): %s\n", strerror(errno));
		errno = saved_errno;
		return NULL;
	}

	memset(request, 0, sizeof(struct usched_client_request));

	/* Record the caller's credentials on the request. */
	request->uid = getuid();
	debug_printf(DEBUG_INFO, "UID: %u\n", request->uid);

	request->gid = getgid();
	debug_printf(DEBUG_INFO, "GID: %u\n", request->gid);

	/* Hand off to the operation parser. NOTE(review): assumes the parser
	 * releases the request chain itself on failure — verify. */
	if (!_parse_op_compound(request, argc, argv))
		return NULL;

	return request;
}
/* * Create a new mm_struct and populate it with a temporary stack * vm_area_struct. We don't have enough context at this point to set the stack * flags, permissions, and offset, so we use temporary values. We'll update * them later in setup_arg_pages(). */ int bprm_mm_init(struct linux_binprm *bprm) { int err; struct mm_struct *mm = NULL; bprm->mm = mm = mm_alloc(); err = -ENOMEM; if (!mm) goto err; err = init_new_context(current, mm); if (err) goto err; err = __bprm_mm_init(bprm); if (err) goto err; return 0; err: if (mm) { bprm->mm = NULL; mmdrop(mm); } return err; }
static void hook_dump_entry() { PLDR_DATA_TABLE_ENTRY table; entry_hook *ehook; ExAcquireFastMutex(&dump_sync); if (dump_imgbase != NULL && (table = find_image(dump_imgbase))) { if (table->BaseDllName.Buffer != NULL && table->EntryPoint != NULL && img_cmp(&table->BaseDllName, L"dump_") || img_cmp(&table->BaseDllName, L"hiber_")) { if (ehook = mm_alloc(sizeof(entry_hook), 0)) { memcpy(ehook->code, jmp_code, sizeof(jmp_code)); ppv(ehook->code + DEST_OFF)[0] = dump_driver_entry; ppv(ehook->code + PARM_OFF)[0] = ehook; ehook->old_entry = table->EntryPoint; table->EntryPoint = pv(ehook->code); } } dump_imgbase = NULL; } ExReleaseFastMutex(&dump_sync); }
/**
 * @brief
 *  Serialize an EC public key to be shared.
 * @param
 *  key a pointer to the EC key pair to have its public key serialized.
 * @param
 *  outsize a pointer to a variable that will receive the length of the
 *  serialized key on success.
 * @return
 *  a pointer to the serialized EC public key on success, or NULL on failure.
 */
unsigned char * _serialize_ec_pubkey(EC_KEY *key, size_t *outsize) {

	unsigned char *buf = NULL;
	stringer_t *pub;

	if (!key || !outsize) {
		RET_ERROR_PTR(ERR_BAD_PARAM, NULL);
	}

	/* A secp256k1 compressed public point is 33 bytes. */
	if (!(pub = secp256k1_public_get(key, MANAGEDBUF(33)))) {
		PUSH_ERROR_OPENSSL();
		RET_ERROR_PTR(ERR_UNSPEC, "unable to serialize EC public key");
	}

	/* Check the allocation before copying into it. */
	if (!(buf = mm_alloc(33))) {
		RET_ERROR_PTR(ERR_UNSPEC, "unable to allocate buffer for serialized EC public key");
	}

	memmove(buf, st_data_get(pub), st_length_get(pub));
	*outsize = st_length_get(pub);

	return buf;
}
/* Build a 1000-node linked list of immediates off handle_start, verify a
 * checksum over it, then rewire the tail and a middle node — exercising the
 * GC's handling of relinked structures. */
void test_gc_build_structure(MM *mm, Obj handle_start) {
	Obj handle_prev = handle_start;
	Obj hdl;
	uint64 suma;  /* running checksum of the list's head slots */
	int i;

	/* Append 1000 two-slot cells: slot 0 = immediate i, slot 1 = link. */
	for (i = 1; i <= 1000; i++) {
		Obj handle_current = mm_alloc(mm, 2);
		mm_set(handle_current, 0, MK_IMMEDIATE(i));
		mm_set(handle_prev, 1, handle_current);
		handle_prev = handle_current;
	}
	/* Terminate the list with a non-handle immediate sentinel. */
	mm_set(handle_prev, 1, MK_IMMEDIATE(9999));

	{
		/* Walk the list summing slot 0 of every cell; 500544 is the
		 * expected encoded sum for this fixed construction. */
		suma = 0;
		hdl = handle_start;
		while (hdl & OBJ_FLAG_HANDLE) {
			suma += OBJ_HANDLE_TO_PTR(hdl)[0];
			hdl = OBJ_HANDLE_TO_PTR(hdl)[1];
		}
		assert(IMM_VALUE(suma) == 500544);
	}

	/* Advance ten links into the list, then point the (former) tail at the
	 * node after 'hdl' and clobber hdl's link slot with an immediate. */
	hdl = handle_start;
	for (i = 1; i <= 10; i++) {
		hdl = OBJ_HANDLE_TO_PTR(hdl)[1];
	}
	mm_set(handle_prev, 1, OBJ_HANDLE_TO_PTR(hdl)[1]);
	mm_set(hdl, 1, MK_IMMEDIATE(18));
}
/*
 * Build a "#line <n> \"<file>\"" directive string for the current input
 * position, with backslashes and double quotes in the filename escaped.
 * Returns EMPTY when no input filename is set (or in parser debug mode).
 */
char *
hashline_number(void)
{
	/* do not print line numbers if we are in debug mode */
	if (input_filename
#ifdef YYDEBUG
		&& !base_yydebug
#endif
		)
	{
		/* Allocation size: the literal template, a generous upper bound on
		 * the decimal digits of an int (bits * 10/3), and the filename.
		 * "* 2" here is for escaping '\' and '"' below */
		char *line = mm_alloc(strlen("\n#line %d \"%s\"\n") + sizeof(int) * CHAR_BIT * 10 / 3 + strlen(input_filename) * 2);
		char *src, *dest;

		sprintf(line, "\n#line %d \"", base_yylineno);
		src = input_filename;
		dest = line + strlen(line);
		/* Copy the filename, backslash-escaping '\' and '"'. */
		while (*src)
		{
			if (*src == '\\' || *src == '"')
				*dest++ = '\\';
			*dest++ = *src++;
		}
		*dest = '\0';
		/* Close the quoted filename and terminate the directive. */
		strcat(dest, "\"\n");

		return line;
	}
	return EMPTY;
}
/* Create a HAT-trie with the given bucket size, copying the supplied memory
 * context into the trie so all internal allocations go through it.
 * Returns NULL on allocation failure. */
hattrie_t* hattrie_create_n(unsigned bucket_size, const mm_ctx_t *mm)
{
    hattrie_t* T = mm_alloc((mm_ctx_t *)mm, sizeof(hattrie_t));
    if (T == NULL) {
        /* Check before writing through T: the original memcpy'd into a
         * potentially NULL pointer. */
        return NULL;
    }
    memcpy(&T->mm, mm, sizeof(mm_ctx_t));
    hattrie_init(T, bucket_size);
    return T;
}
/*
 * Allocate a structure of the indicated size (in Objs).
 * If there is no more room inside the memory manager's current blocks,
 * one more block is appended.
 *
 * Note: despite appearances, this function MAY invoke the garbage
 * collector — when adding a block would exceed gc_threshold, it runs
 * mm_gc() and retries the allocation.
 */
Obj mm_alloc(MM *mm, int obj_size) {
	assert(1 <= obj_size && obj_size <= BLOCK_CAPACITY);
	Block *last_block = mm->blocks[mm->nblocks - 1];
	if (last_block->block_size + obj_size >= BLOCK_CAPACITY) {
		/* Over the threshold: collect garbage and retry. */
		if (mm->nblocks + 1 > mm->gc_threshold) {
			mm_gc(mm);
			return mm_alloc(mm, obj_size);
		}
		/* Note:
		 * The comparison above uses ">=" against BLOCK_CAPACITY to make
		 * sure the last object slot of every block stays free, so the
		 * size of a structure can be determined by looking only at
		 * OBJ_SET_FLAG_CONTINUE (without also checking whether we
		 * already ran past the end of the block).
		 */
		last_block = malloc(sizeof(Block));
		mm_init_block(last_block);
		mm_grow_blocks_if_full(mm);
		mm->nblocks += 1;
		mm->blocks[mm->nblocks - 1] = last_block;
	}
	/* Carve the structure out of the tail of the last block. */
	Obj *obj = &last_block->buffer[last_block->block_size];
	mm_init_structure(obj, obj_size);
	last_block->block_size += obj_size;
	return OBJ_PTR_TO_HANDLE(obj);
}
int main(int argc, char *argv[]) { plan_lazy(); knot_mm_t pool; mm_ctx_mempool(&pool, MM_DEFAULT_BLKSIZE); char *dbid = test_mkdtemp(); ok(dbid != NULL, "make temporary directory"); /* Random keys. */ unsigned nkeys = 10000; char **keys = mm_alloc(&pool, sizeof(char*) * nkeys); for (unsigned i = 0; i < nkeys; ++i) { keys[i] = str_key_rand(KEY_MAXLEN, &pool); } /* Sort random keys. */ str_key_sort(keys, nkeys); /* Execute test set for all backends. */ struct knot_db_lmdb_opts lmdb_opts = KNOT_DB_LMDB_OPTS_INITIALIZER; lmdb_opts.path = dbid; struct knot_db_trie_opts trie_opts = KNOT_DB_TRIE_OPTS_INITIALIZER; knot_db_test_set(nkeys, keys, &lmdb_opts, knot_db_lmdb_api(), &pool); knot_db_test_set(nkeys, keys, &trie_opts, knot_db_trie_api(), &pool); /* Cleanup. */ mp_delete(pool.ctx); test_rm_rf(dbid); free(dbid); return 0; }