/*
 * hfs_create()
 *
 * This is the create() entry in the inode_operations structure for
 * regular HFS directories.  The purpose is to create a new file in
 * a directory and return a corresponding inode, given the inode for
 * the directory and the name (and its length) of the new file.
 *
 * Returns 0 on success with *result set to the new inode, or a
 * negative errno with *result left NULL.
 */
int hfs_create(struct inode * dir, const char * name, int len, int mode,
	       struct inode ** result)
{
	struct hfs_cat_entry *entry = HFS_I(dir)->entry;
	struct hfs_cat_entry *new;
	struct hfs_cat_key key;
	int error;

	*result = NULL;

	/* build the key, checking against reserved names */
	if (build_key(&key, dir, name, len)) {
		error = -EEXIST;
	} else {
		/* try to create the file; a file created without owner
		 * write permission is marked locked on disk */
		error = hfs_cat_create(entry, &key,
				       (mode & S_IWUSR) ? 0 : HFS_FIL_LOCK,
				       HFS_SB(dir->i_sb)->s_type,
				       HFS_SB(dir->i_sb)->s_creator, &new);
	}

	if (!error) {
		update_dirs_plus(entry, 0);
		/* create an inode for the new file */
		*result = __hfs_iget(new, HFS_I(dir)->file_type, 0);
		if (!(*result)) {
			/* XXX correct error? */
			error = -EIO;
		}
	}
	/* NOTE(review): this function is truncated in the visible chunk --
	 * the trailing `return error;` and closing brace lie outside it. */
/*
 * handle_vals()
 *
 * Fold N_VALS samples into the aggregation entry identified by
 * (plugin, plugin_inst, type_inst).  The entry is created on first
 * sight, and its periodic flush timer armed.  AG_TYPE_SUM accumulates
 * only the total; AG_TYPE_AVG also counts samples so a mean can be
 * computed at flush time.
 */
static void handle_vals(ag_t *ag, const char *plugin, const char *plugin_inst,
                        const char *type_inst, double *vals, int n_vals)
{
    char key[1024];

    /* was a hard-coded 1024 duplicating the array size; keep the two
     * in sync automatically */
    build_key(key, sizeof key, plugin, plugin_inst, type_inst);

    ag_key_entry_t *entry = g_hash_table_lookup(ag->keys, key);
    if (!entry) {
        DEBUG("creating entry for %s", key);
        entry = ag_key_entry_new(ag, key);
        /* key the table off entry->key: the stack buffer `key` dies
         * when this function returns */
        g_hash_table_insert(ag->keys, entry->key, entry);
        evtimer_set(&entry->flush_ev, flush_cb, entry);
        evtimer_add(&entry->flush_ev, &entry->interval);
    }

    DEBUG("handle_vals called");
    DEBUG("key: %s", key);

    int n;
    for (n = 0; n < n_vals; n++) {
        DEBUG("val: %g", vals[n]);
        switch (ag->type) {
        case AG_TYPE_SUM:
            entry->total += vals[n];
            break;
        case AG_TYPE_AVG:
            entry->total += vals[n];
            entry->count++;
            break;
        default:
            /* unknown aggregation type: ignore the sample (matches the
             * original switch, which fell through silently) */
            break;
        }
    }
    DEBUG("total: %g", entry->total);
}
bool memcache::set_begin(const char* key, size_t dlen, time_t timeout /* = 0 */, unsigned short flags /* = 0 */) { if (dlen == 0) { logger_error("dlen == 0, invalid"); return false; } content_length_ = dlen; length_ = 0; const string& kbuf = build_key(key, strlen(key)); req_line_.format("set %s %u %d %d\r\n", kbuf.c_str(), flags, (int) timeout, (int) dlen); bool has_tried = false; AGAIN: if (open() == false) return false; if (conn_->write(req_line_) == -1) { close(); if (retry_ && !has_tried) { has_tried = true; goto AGAIN; } ebuf_.format("write set(%s) error", key); return false; } return true; }
/* Serialize one KeyPackage: device info, crypto-module info, then the
 * key itself, appended under KEYP in that order. */
static int
build_keypackage (pskc_key_t *kp, xmlNodePtr keyp)
{
  build_deviceinfo (kp, keyp);
  build_cryptomoduleinfo (kp, keyp);
  build_key (kp, keyp);

  return PSKC_OK;
}
bool memcache::set(const char* key, size_t klen, time_t timeout /* = 0 */) { string buf; unsigned short flags; if (get(key, klen, buf, &flags) == false) return false; const string& kbuf = build_key(key, klen); return set(kbuf, buf.c_str(), buf.length(), timeout, flags); }
YcsbKey& build_rmw_key() {
  // Draw a uniformly random record in [0, initial_table_size_) and map
  // its sequence number to a (worker, ordinal) pair.
  const auto seq = rnd_record_select_.uniform_within(0, initial_table_size_ - 1);
  auto per_worker = local_key_counter_->key_counter_;
  if (per_worker == 0) {
    // Unbalanced load: this worker loaded nothing, so consult the only
    // loader's counter instead.
    per_worker = channel_->peek_local_key_counter(engine_, 0);
  }
  ASSERT_ND(per_worker > 0);
  return build_key(seq / per_worker, seq % per_worker);
}
/* Conforms to RFC4510 re: Criticality, original RFC2891 spec is broken
 * Also see ITS#7253 for discussion
 *
 * Parse the server-side-sorting request control: validate the control
 * value, decode the BER sequence of sort keys, and attach a sort_ctrl
 * to the operation.  The LDAP result code is left in rs->sr_err and
 * also returned.
 */
static int sss_parseCtrl(
	Operation	*op,
	SlapReply	*rs,
	LDAPControl	*ctrl )
{
	BerElementBuffer	berbuf;
	BerElement		*ber;
	ber_tag_t		tag;
	ber_len_t		len;
	int			i;
	sort_ctrl		*sc;

	rs->sr_err = LDAP_PROTOCOL_ERROR;

	/* reject duplicate, absent, or empty control values */
	if ( op->o_ctrlflag[sss_cid] > SLAP_CONTROL_IGNORED ) {
		rs->sr_text = "sorted results control specified multiple times";
	} else if ( BER_BVISNULL( &ctrl->ldctl_value ) ) {
		rs->sr_text = "sorted results control value is absent";
	} else if ( BER_BVISEMPTY( &ctrl->ldctl_value ) ) {
		rs->sr_text = "sorted results control value is empty";
	} else {
		rs->sr_err = LDAP_SUCCESS;
	}
	if ( rs->sr_err != LDAP_SUCCESS )
		return rs->sr_err;

	/* record the client's criticality on the operation */
	op->o_ctrlflag[sss_cid] = ctrl->ldctl_iscritical ?
		SLAP_CONTROL_CRITICAL : SLAP_CONTROL_NONCRITICAL;

	ber = (BerElement *)&berbuf;
	ber_init2( ber, &ctrl->ldctl_value, 0 );

	/* pre-count the keys so sort_ctrl can be allocated in one shot;
	 * sort_ctrl already embeds one sort_key, hence the (i-1) */
	i = count_key( ber );

	sc = op->o_tmpalloc( sizeof(sort_ctrl) +
		(i-1) * sizeof(sort_key), op->o_tmpmemctx );
	sc->sc_nkeys = i;
	op->o_controls[sss_cid] = sc;

	/* peel off initial sequence */
	ber_scanf( ber, "{" );

	i = 0;
	do {
		/* build_key fills rs->sr_err/sr_text on failure */
		if ( build_key( ber, rs, &sc->sc_keys[i] ) != LDAP_SUCCESS )
			break;
		i++;
		tag = ber_peek_tag( ber, &len );
	} while ( tag != LBER_DEFAULT );

	return rs->sr_err;
}
// Map a numeric region id onto its key string and probe by key.  Any
// exception while building or looking up the key is reported as
// "region does not exist".
static bool RegionExists(const int id)
{
  try
  {
    char keybuf[500];
    build_key(id, keybuf);
    return RegionExists(keybuf);
  }
  catch (...)
  {
    return false;
  }
}
/*
 * update_ext()
 *
 * Given a (struct hfs_fork) write an extent record back to disk.
 */
static void update_ext(struct hfs_fork *fork, struct hfs_extent *ext)
{
	struct hfs_ext_key target;
	struct hfs_brec brec;

	/* Only extents with a non-zero starting allocation block have a
	 * record in the extents B-tree; presumably the initial extent is
	 * stored in the catalog entry instead -- TODO confirm. */
	if (ext->start) {
		build_key(&target, fork, ext->start);

		/* find the on-disk record and overwrite it in place */
		if (!hfs_bfind(&brec, fork->entry->mdb->ext_tree,
			       HFS_BKEY(&target), HFS_BFIND_WRITE)) {
			write_extent(brec.data, ext);
			hfs_brec_relse(&brec, NULL);
		}
	}
}
/*
 * new_extent()
 *
 * Description:
 *   Adds a new extent record to a fork, extending its physical length.
 * Input Variable(s):
 *   struct hfs_fork *fork: the fork to extend
 *   struct hfs_extent *ext: the current last extent for 'fork'
 *   hfs_u16 ablock: the number of allocation blocks in 'fork'.
 *   hfs_u16 start: first allocation block to add to 'fork'.
 *   hfs_u16 len: the number of allocation blocks to add to 'fork'.
 *   hfs_u32 ablksz: number of sectors in an allocation block.
 * Output Variable(s):
 *   NONE
 * Returns:
 *   (struct hfs_extent *) the new extent or NULL
 * Preconditions:
 *   'fork' points to a valid (struct hfs_fork)
 *   'ext' point to a valid (struct hfs_extent) which is the last in 'fork'
 *   'ablock', 'start', 'len' and 'ablksz' are what they claim to be.
 * Postconditions:
 *   If NULL is returned then no changes have been made to 'fork'.
 *   If the return value is non-NULL that it is the extent that has been
 *   added to 'fork' both in memory and on disk.  The 'psize' field of
 *   'fork' has been updated to reflect the new physical size.
 *
 * NOTE(review): 'ablksz' is not referenced in this body -- confirm
 * whether it is still needed by callers of this interface.
 */
static struct hfs_extent *new_extent(struct hfs_fork *fork,
				     struct hfs_extent *ext,
				     hfs_u16 ablock, hfs_u16 start,
				     hfs_u16 len, hfs_u16 ablksz)
{
	struct hfs_raw_extent raw;
	struct hfs_ext_key key;
	int error;

	if (fork->entry->cnid == htonl(HFS_EXT_CNID)) {
		/* Limit extents tree to the record in the MDB */
		return NULL;
	}

	if (!HFS_NEW(ext->next)) {
		return NULL;
	}
	/* link the freshly allocated extent at the tail of the in-memory
	 * list and drop the reference on the old tail */
	ext->next->prev = ext;
	ext->next->next = NULL;
	ext = ext->next;
	relse_ext(ext->prev);

	/* describe the new run: one descriptor used, two spare */
	ext->start = ablock;
	ext->block[0] = start;
	ext->length[0] = len;
	ext->block[1] = 0;
	ext->length[1] = 0;
	ext->block[2] = 0;
	ext->length[2] = 0;
	ext->end = ablock + len - 1;
	ext->count = 1;

	/* persist the record in the extents B-tree */
	write_extent(&raw, ext);
	build_key(&key, fork, ablock);

	error = hfs_binsert(fork->entry->mdb->ext_tree,
			    HFS_BKEY(&key), &raw, sizeof(raw));
	if (error) {
		/* insertion failed: unlink and discard the new extent so
		 * 'fork' is left unchanged, per the postconditions */
		ext->prev->next = NULL;
		HFS_DELETE(ext);
		return NULL;
	}
	set_cache(fork, ext);
	return ext;
}
/* Core functionality of our getter functions: fetch DATA from the memcached
 * given by CACHE_VOID and identified by KEY.  Indicate success in FOUND and
 * use a temporary sub-pool of POOL for allocations.
 *
 * DATA and SIZE are allocated in POOL itself (not the sub-pool) so they
 * survive this function.  The sub-pool only holds the constructed key. */
static svn_error_t *
memcache_internal_get(char **data,
                      apr_size_t *size,
                      svn_boolean_t *found,
                      void *cache_void,
                      const void *key,
                      apr_pool_t *pool)
{
  memcache_t *cache = cache_void;
  apr_status_t apr_err;
  const char *mc_key;
  apr_pool_t *subpool;

  if (key == NULL)
    {
      *found = FALSE;
      return SVN_NO_ERROR;
    }

  subpool = svn_pool_create(pool);
  SVN_ERR(build_key(&mc_key, cache, key, subpool));

  apr_err = apr_memcache_getp(cache->memcache,
                              pool,
                              mc_key,
                              data,
                              size,
                              NULL /* ignore flags */);

  /* mc_key is dead after the call, so release the sub-pool on every
   * path -- the original leaked it until POOL cleanup when the lookup
   * failed with an unexpected error. */
  svn_pool_destroy(subpool);

  if (apr_err == APR_NOTFOUND)
    {
      *found = FALSE;
      return SVN_NO_ERROR;
    }
  else if (apr_err != APR_SUCCESS || !*data)
    return svn_error_wrap_apr(apr_err,
                              _("Unknown memcached error while reading"));

  *found = TRUE;
  return SVN_NO_ERROR;
}
/* Keep at most one feature per target id, preferring the higher score.
 * Features with a single target are deduplicated through
 * 'target_to_elem'; a newcomer either gets included, replaces a
 * lower-scoring predecessor, or is deleted.  Features with multiple
 * targets are added to 'trees' unconditionally. */
static void filter_targetbest(GtFeatureNode *current_feature, GtDlist *trees,
                              GtHashmap *target_to_elem)
{
  unsigned long num_of_targets;
  GtDlistelem *previous_elem;
  GtStr *first_target_id;
  const char *target;
  int had_err;
  gt_assert(current_feature && trees);
  target = gt_feature_node_get_attribute(current_feature, TARGET_STRING);
  gt_assert(target);
  /* extract only the first target id; the remaining out-parameters of
   * the attribute parser are not needed here */
  first_target_id = gt_str_new();
  had_err = gt_gff3_parser_parse_target_attributes(target, &num_of_targets,
                                                   first_target_id, NULL,
                                                   NULL, "", 0, NULL);
  gt_assert(!had_err);
  if (num_of_targets == 1) {
    GtStr *key = gt_str_new();
    build_key(key, current_feature, first_target_id);
    if (!(previous_elem = gt_hashmap_get(target_to_elem, gt_str_get(key)))) {
      /* element with this target_id not included yet -> include it */
      include_feature(trees, target_to_elem, current_feature, key);
    }
    else {
      GtFeatureNode *previous_feature = gt_dlistelem_get_data(previous_elem);
      /* element with this target_id included already -> compare them */
      if (gt_feature_node_get_score(current_feature) >
          gt_feature_node_get_score(previous_feature)) {
        /* current feature is better -> replace previous feature */
        replace_previous_elem(previous_elem, current_feature, trees,
                              target_to_elem, key);
      }
      else /* current feature is not better -> remove it */
        gt_genome_node_delete((GtGenomeNode*) current_feature);
    }
    gt_str_delete(key);
  }
  else
    gt_dlist_add(trees, current_feature);
  gt_str_delete(first_target_id);
}
/* Core functionality of our setter functions: store LENGTH bytes of DATA
 * to be identified by KEY in the memcached given by CACHE_VOID.  Use
 * SCRATCH_POOL for temporary allocations. */
static svn_error_t *
memcache_internal_set(void *cache_void,
                      const void *key,
                      const char *data,
                      apr_size_t len,
                      apr_pool_t *scratch_pool)
{
  memcache_t *cache = cache_void;
  const char *mc_key;
  apr_status_t apr_err;

  SVN_ERR(build_key(&mc_key, cache, key, scratch_pool));

  /* flags and timeout are both 0: no client flags, no expiration */
  apr_err = apr_memcache_set(cache->memcache, mc_key, (char *)data, len,
                             0, 0);

  /* ### Maybe write failures should be ignored (but logged)? */
  if (apr_err != APR_SUCCESS)
    return svn_error_wrap_apr(apr_err,
                              _("Unknown memcached error while writing"));

  return SVN_NO_ERROR;
}
bool memcache::del(const char* key, size_t klen) { bool has_tried = false; const string& kbuf = build_key(key, klen); AGAIN: if (open() == false) return false; req_line_.format("delete %s\r\n", kbuf.c_str()); if (conn_->write(req_line_) < 0) { if (retry_ && !has_tried) { has_tried = true; goto AGAIN; } ebuf_.format("write (%s) error", req_line_.c_str()); return false; } // DELETED|NOT_FOUND\r\n if (conn_->gets(res_line_) == false) { if (retry_ && !has_tried) { has_tried = true; goto AGAIN; } ebuf_.format("reply for(%s) error", req_line_.c_str()); return false; } if (res_line_.compare("DELETED", false) != 0 && res_line_.compare("NOT_FOUND", false) != 0) { ebuf_.format("reply(%s) for (%s) error", res_line_.c_str(), req_line_.c_str()); return false; } return true; }
/*
 * delete_extent()
 *
 * Description:
 *   Deletes an extent record from a fork, reducing its physical length.
 * Input Variable(s):
 *   struct hfs_fork *fork: the fork
 *   struct hfs_extent *ext: the current last extent for 'fork'
 * Output Variable(s):
 *   NONE
 * Returns:
 *   void
 * Preconditions:
 *   'fork' points to a valid (struct hfs_fork)
 *   'ext' point to a valid (struct hfs_extent) which is the last in 'fork'
 *    and which is not also the first extent in 'fork'.
 * Postconditions:
 *   The extent record has been removed if possible, and a warning has been
 *   printed otherwise.
 */
static void delete_extent(struct hfs_fork *fork, struct hfs_extent *ext)
{
	struct hfs_mdb *mdb = fork->entry->mdb;
	struct hfs_ext_key key;
	int error;

	/* never leave the fork's cache pointing at the dying extent */
	if (fork->cache == ext) {
		set_cache(fork, ext->prev);
	}
	ext->prev->next = NULL;

	if (ext->count != 1) {
		hfs_warn("hfs_truncate: extent has count %d.\n", ext->count);
	}

	/* return the allocation blocks of all three descriptors to the
	 * volume bitmap, highest descriptor first */
	lock_bitmap(mdb);
	error = hfs_clear_vbm_bits(mdb, ext->block[2], ext->length[2]);
	if (error) {
		hfs_warn("hfs_truncate: error %d freeing blocks.\n", error);
	}
	error = hfs_clear_vbm_bits(mdb, ext->block[1], ext->length[1]);
	if (error) {
		hfs_warn("hfs_truncate: error %d freeing blocks.\n", error);
	}
	error = hfs_clear_vbm_bits(mdb, ext->block[0], ext->length[0]);
	if (error) {
		hfs_warn("hfs_truncate: error %d freeing blocks.\n", error);
	}
	unlock_bitmap(mdb);

	/* drop the record from the extents B-tree */
	build_key(&key, fork, ext->start);

	error = hfs_bdelete(mdb->ext_tree, HFS_BKEY(&key));
	if (error) {
		hfs_warn("hfs_truncate: error %d deleting an extent.\n", error);
	}

	HFS_DELETE(ext);
}
/* Load one XML node into the in-memory registry tree rooted at PARENT.
 * Element names are single letters encoding the payload kind:
 *   'x' hex-encoded binary value, 'n' numeric value, 's' string value,
 *   'k' subkey (recursed into).  The "n"/"t"/"c" attributes carry the
 *   value/key name, the registry type number, and the key class.
 *
 * NOTE(review): the xmlChar* results of xmlNodeGetContent() (contents,
 * type, keycls and the per-case re-reads) are never passed to xmlFree,
 * so each call appears to leak -- confirm against libxml2 ownership
 * rules and the process lifetime expectations of this loader. */
void load_reg_key( regkey_t *parent, xmlNode *node )
{
	xmlAttr *e;
	xmlChar *contents = NULL;
	const char *type = NULL;
	const char *keycls = NULL;
	unicode_string_t name, data;
	xmlNode *n;
	regval_t *val;
	ULONG size;
	regkey_t *key;

	/* only single-character element names are meaningful here */
	if (!node->name[0] || node->name[1])
		return;

	/* gather the name ("n"), type ("t") and class ("c") attributes */
	for ( e = node->properties; e; e = e->next )
	{
		if (!strcmp( (const char*)e->name, "n"))
			contents = xmlNodeGetContent( (xmlNode*) e );
		else if (!strcmp( (const char*)e->name, "t"))
			type = (const char*) xmlNodeGetContent( (xmlNode*) e );
		else if (!strcmp( (const char*)e->name, "c"))
			keycls = (const char*) xmlNodeGetContent( (xmlNode*) e );
	}

	/* a name is mandatory for every entry */
	if (!contents)
		return;

	name.copy( contents );

	switch (node->name[0])
	{
	case 'x':
		// value stored as hex
		// default type is binary
		if (type == NULL)
			type = "3";
		contents = xmlNodeGetContent( node );
		/* first pass sizes the buffer, second pass fills it */
		size = hex_to_binary( contents, 0, NULL );
		val = new regval_t( &name, atoi(type), size );
		hex_to_binary( contents, size, val->data );
		parent->values.append( val );
		break;
	case 'n':
		// number
		// default type is REG_DWORD
		if (type == NULL)
			type = "4";
		contents = xmlNodeGetContent( node );
		size = sizeof (ULONG);
		val = new regval_t( &name, atoi(type), size );
		number_to_binary( contents, size, val->data );
		parent->values.append( val );
		break;
	case 's':
		// value stored as a string
		// default type is REG_SZ
		if (type == NULL)
			type = "1";
		data.copy( xmlNodeGetContent( node ) );
		/* store the string plus a two-byte terminator */
		val = new regval_t( &name, atoi(type), data.Length + 2 );
		memcpy( val->data, data.Buffer, data.Length );
		memset( val->data + data.Length, 0, 2 );
		parent->values.append( val );
		break;
	case 'k':
		// key: create (or find) the subkey and recurse into its
		// children
		key = build_key( parent, &name );
		key->cls.copy( keycls );
		for (n = node->children; n; n = n->next)
			load_reg_key( key, n );
		break;
	}
}
int memcache::get_begin(const void* key, size_t klen, unsigned short* flags)
{
	// Send "get <key>" and parse the VALUE header line.  Returns the
	// body length (> 0) on success, 0 when the key is absent, -1 on
	// I/O or protocol errors.  The caller reads the body afterwards.
	content_length_ = 0;
	length_ = 0;

	bool has_tried = false;
	const string& kbuf = build_key((const char*) key, klen);
	req_line_.format("get %s\r\n", kbuf.c_str());

AGAIN:
	if (open() == false)
		return -1;
	if (conn_->write(req_line_) < 0)
	{
		// drop the broken connection; retry at most once
		close();
		if (retry_ && !has_tried)
		{
			has_tried = true;
			goto AGAIN;
		}
		ebuf_.format("write get(%s) error", kbuf.c_str());
		return -1;
	}

	// read the server's response line
	if (conn_->gets(res_line_) == false)
	{
		close();
		if (retry_ && !has_tried)
		{
			has_tried = true;
			goto AGAIN;
		}
		ebuf_.format("reply for get(%s) error", kbuf.c_str());
		return -1;
	}
	else if (res_line_.compare("END", false) == 0)
	{
		ebuf_.format("not found");
		return 0;
	}
	else if (error_happen(res_line_.c_str()))
	{
		close();
		return -1;
	}

	// VALUE {key} {flags} {bytes}\r\n
	ACL_ARGV* tokens = acl_argv_split(res_line_.c_str(), " \t");
	if (tokens->argc < 4 || strcasecmp(tokens->argv[0], "VALUE") != 0)
	{
		close();
		ebuf_.format("server error for get(%s), value: %s",
			kbuf.c_str(), res_line_.c_str());
		acl_argv_free(tokens);
		return -1;
	}
	if (flags)
		*flags = (unsigned short) atoi(tokens->argv[2]);
	content_length_ = atoi(tokens->argv[3]);
	acl_argv_free(tokens);

	// if the server reports a zero-length body, treat the key as absent
	if (content_length_ == 0)
		return 0;
	return content_length_;
}
// Key layout: "q:<queue name>:s" addresses a queue's size counter.
string build_queue_size_key(const string& queue_name)
{
	return build_key("q:%s:s", queue_name.c_str());
}
// Key layout: "j:<job uid>" addresses a single job record.
string build_job_key(const string& job_uid)
{
	return build_key("j:%s", job_uid.c_str());
}
bool memcache::set(const char* key, size_t klen, const void* dat, size_t dlen, time_t timeout /* = 0 */, unsigned short flags /* = 0 */) { const string& kbuf = build_key(key, klen); return set(kbuf, dat, dlen, timeout, flags); }
// Translate the numeric region id into its cache key, then delete by key.
static bool Remove(const int id)
{
  char keybuf[500];
  build_key(id, keybuf);
  return Remove(keybuf);
}
/*
 * find_ext()
 *
 * Given a pointer to a (struct hfs_file) and an allocation block
 * number in the file, find the extent record containing that block.
 * Returns a pointer to the extent record on success or NULL on failure.
 * The 'cache' field of 'fil' also points to the extent so it has a
 * reference count of at least 2.
 *
 * Callers must check that fil != NULL
 */
static struct hfs_extent * find_ext(struct hfs_fork *fork, int alloc_block)
{
	struct hfs_cat_entry *entry = fork->entry;
	struct hfs_btree *tr= entry->mdb->ext_tree;
	struct hfs_ext_key target, *key;
	struct hfs_brec brec;
	struct hfs_extent *ext, *ptr;
	int tmp;

	/* negative block numbers map to the fork's first extent */
	if (alloc_block < 0) {
		ext = &fork->first;
		goto found;
	}

	/* walk the in-memory list, starting from the cached extent when
	 * it lies at or before the wanted block */
	ext = fork->cache;
	if (!ext || (alloc_block < ext->start)) {
		ext = &fork->first;
	}
	while (ext->next && (alloc_block > ext->end)) {
		ext = ext->next;
	}
	if ((alloc_block <= ext->end) && (alloc_block >= ext->start)) {
		goto found;
	}

	/* time to read more extents */
	if (!HFS_NEW(ext)) {
		goto bail3;
	}

	build_key(&target, fork, alloc_block);

	/* less-than-or-equal search: get the record that covers (or
	 * precedes) the wanted block */
	tmp = hfs_bfind(&brec, tr, HFS_BKEY(&target), HFS_BFIND_READ_LE);
	if (tmp < 0) {
		goto bail2;
	}

	key = (struct hfs_ext_key *)brec.key;
	/* the LE search may land on another file or the other fork */
	if ((hfs_get_nl(key->FNum) != hfs_get_nl(target.FNum)) ||
	    (key->FkType != fork->fork)) {
		goto bail1;
	}

	read_extent(ext, brec.data, hfs_get_hs(key->FABN));
	hfs_brec_relse(&brec, NULL);

	if ((alloc_block > ext->end) && (alloc_block < ext->start)) {
		/* something strange happened */
		goto bail2;
	}

	/* insert the new extent into the in-memory list, keeping it
	 * sorted by starting block */
	ptr = fork->cache;
	if (!ptr || (alloc_block < ptr->start)) {
		ptr = &fork->first;
	}
	while (ptr->next && (alloc_block > ptr->end)) {
		ptr = ptr->next;
	}

	if (ext->start == ptr->start) {
		/* somebody beat us to it. */
		HFS_DELETE(ext);
		ext = ptr;
	} else if (ext->start < ptr->start) {
		/* insert just before ptr */
		ptr->prev->next = ext;
		ext->prev = ptr->prev;
		ext->next = ptr;
		ptr->prev = ext;
	} else {
		/* insert at end */
		ptr->next = ext;
		ext->prev = ptr;
	}

found:
	++ext->count; /* for return value */
	set_cache(fork, ext);
	return ext;

bail1:
	hfs_brec_relse(&brec, NULL);
bail2:
	HFS_DELETE(ext);
bail3:
	return NULL;
}