/* Determine the auto-props and mime-type for the local file PATH.
 *
 * Set *PROPERTIES to a new hash (allocated in POOL) mapping property
 * names to svn_string_t values, populated from the [auto-props] section
 * of the runtime config (if enabled) plus detected svn:mime-type and
 * svn:executable.  Set *MIMETYPE to the detected mime-type, or NULL if
 * none was determined.  MAGIC_COOKIE, if non-NULL, enables a libmagic
 * fallback for binary detection.  CTX supplies the config and the
 * user-configured mime-types map. */
svn_error_t *
svn_client__get_auto_props(apr_hash_t **properties,
                           const char **mimetype,
                           const char *path,
                           svn_magic__cookie_t *magic_cookie,
                           svn_client_ctx_t *ctx,
                           apr_pool_t *pool)
{
  svn_config_t *cfg;
  svn_boolean_t use_autoprops;
  auto_props_baton_t autoprops;

  /* initialisation */
  autoprops.properties = apr_hash_make(pool);
  autoprops.filename = svn_dirent_basename(path, pool);
  autoprops.pool = pool;
  autoprops.mimetype = NULL;
  autoprops.have_executable = FALSE;
  /* The output hash is handed to the caller immediately; the enumerator
     below fills it in place. */
  *properties = autoprops.properties;

  cfg = ctx->config ? apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG,
                                   APR_HASH_KEY_STRING) : NULL;

  /* check that auto props is enabled */
  SVN_ERR(svn_config_get_bool(cfg, &use_autoprops,
                              SVN_CONFIG_SECTION_MISCELLANY,
                              SVN_CONFIG_OPTION_ENABLE_AUTO_PROPS, FALSE));

  /* search for auto props: run every [auto-props] pattern against the
     file's basename via AUTO_PROPS_ENUMERATOR. */
  if (use_autoprops)
    svn_config_enumerate2(cfg, SVN_CONFIG_SECTION_AUTO_PROPS,
                          auto_props_enumerator, &autoprops, pool);

  /* if mimetype has not been set by auto-props, sniff the file itself */
  if (! autoprops.mimetype)
    {
      SVN_ERR(svn_io_detect_mimetype2(&autoprops.mimetype, path,
                                      ctx->mimetypes_map, pool));

      /* If we got no mime-type, or if it is "application/octet-stream",
       * try to get the mime-type from libmagic. */
      if (magic_cookie &&
          (!autoprops.mimetype ||
           strcmp(autoprops.mimetype, "application/octet-stream") == 0))
        {
          const char *magic_mimetype;

          /* Since libmagic usually treats UTF-16 files as "text/plain",
           * svn_magic__detect_binary_mimetype() will return NULL for such
           * files. This is fine for now since we currently don't support
           * UTF-16-encoded text files (issue #2194).
           * Once we do support UTF-16 this code path will fail to detect
           * them as text unless the svn_io_detect_mimetype2() call above
           * returns "text/plain" for them. */
          SVN_ERR(svn_magic__detect_binary_mimetype(&magic_mimetype,
                                                    path, magic_cookie,
                                                    pool, pool));
          if (magic_mimetype)
            autoprops.mimetype = magic_mimetype;
        }

      if (autoprops.mimetype)
        apr_hash_set(autoprops.properties, SVN_PROP_MIME_TYPE,
                     strlen(SVN_PROP_MIME_TYPE),
                     svn_string_create(autoprops.mimetype, pool));
    }

  /* if executable has not been set by auto-props, check the file's
     filesystem permission bits */
  if (! autoprops.have_executable)
    {
      svn_boolean_t executable = FALSE;
      SVN_ERR(svn_io_is_file_executable(&executable, path, pool));
      if (executable)
        apr_hash_set(autoprops.properties, SVN_PROP_EXECUTABLE,
                     strlen(SVN_PROP_EXECUTABLE),
                     svn_string_create("", pool));
    }

  *mimetype = autoprops.mimetype;
  return SVN_NO_ERROR;
}
/* Compute blame for TARGET between START and END (resolved via
 * PEG_REVISION) and report each line through RECEIVER/RECEIVER_BATON.
 * DIFF_OPTIONS controls the line-diff; IGNORE_MIME_TYPE suppresses the
 * binary-file refusal; INCLUDE_MERGED_REVISIONS additionally tracks
 * lines through merges.  Returns SVN_ERR_CLIENT_BAD_REVISION for
 * unspecified or reversed revision ranges. */
svn_error_t *
svn_client_blame5(const char *target,
                  const svn_opt_revision_t *peg_revision,
                  const svn_opt_revision_t *start,
                  const svn_opt_revision_t *end,
                  const svn_diff_file_options_t *diff_options,
                  svn_boolean_t ignore_mime_type,
                  svn_boolean_t include_merged_revisions,
                  svn_client_blame_receiver3_t receiver,
                  void *receiver_baton,
                  svn_client_ctx_t *ctx,
                  apr_pool_t *pool)
{
  struct file_rev_baton frb;
  svn_ra_session_t *ra_session;
  svn_revnum_t start_revnum, end_revnum;
  svn_client__pathrev_t *end_loc;
  struct blame *walk, *walk_merged = NULL;
  apr_pool_t *iterpool;
  svn_stream_t *last_stream;
  svn_stream_t *stream;
  const char *target_abspath_or_url;

  if (start->kind == svn_opt_revision_unspecified
      || end->kind == svn_opt_revision_unspecified)
    return svn_error_create
      (SVN_ERR_CLIENT_BAD_REVISION, NULL, NULL);

  if (svn_path_is_url(target))
    target_abspath_or_url = target;
  else
    SVN_ERR(svn_dirent_get_absolute(&target_abspath_or_url, target, pool));

  /* Get an RA plugin for this filesystem object. */
  SVN_ERR(svn_client__ra_session_from_path2(&ra_session, &end_loc,
                                            target, NULL, peg_revision, end,
                                            ctx, pool));
  end_revnum = end_loc->rev;

  SVN_ERR(svn_client__get_revision_number(&start_revnum, NULL, ctx->wc_ctx,
                                          target_abspath_or_url, ra_session,
                                          start, pool));

  if (end_revnum < start_revnum)
    return svn_error_create
      (SVN_ERR_CLIENT_BAD_REVISION, NULL,
       _("Start revision must precede end revision"));

  /* Initialize the file-revision walk state. */
  frb.start_rev = start_revnum;
  frb.end_rev = end_revnum;
  frb.target = target;
  frb.ctx = ctx;
  frb.diff_options = diff_options;
  frb.ignore_mime_type = ignore_mime_type;
  frb.include_merged_revisions = include_merged_revisions;
  frb.last_filename = NULL;
  frb.last_original_filename = NULL;
  frb.chain = apr_palloc(pool, sizeof(*frb.chain));
  frb.chain->blame = NULL;
  frb.chain->avail = NULL;
  frb.chain->pool = pool;
  if (include_merged_revisions)
    {
      frb.merged_chain = apr_palloc(pool, sizeof(*frb.merged_chain));
      frb.merged_chain->blame = NULL;
      frb.merged_chain->avail = NULL;
      frb.merged_chain->pool = pool;
    }

  SVN_ERR(svn_ra_get_repos_root2(ra_session, &frb.repos_root_url, pool));

  frb.mainpool = pool;
  /* The callback will flip the following two pools, because it needs
     information from the previous call.  Obviously, it can't rely on
     the lifetime of the pool provided by get_file_revs. */
  frb.lastpool = svn_pool_create(pool);
  frb.currpool = svn_pool_create(pool);
  if (include_merged_revisions)
    {
      frb.filepool = svn_pool_create(pool);
      frb.prevfilepool = svn_pool_create(pool);
    }

  /* Collect all blame information.
     We need to ensure that we get one revision before the start_rev,
     if available, so that we can know what was actually changed in the
     start revision. */
  SVN_ERR(svn_ra_get_file_revs2(ra_session, "",
                                start_revnum - (start_revnum > 0 ? 1 : 0),
                                end_revnum, include_merged_revisions,
                                file_rev_handler, &frb, pool));

  if (end->kind == svn_opt_revision_working)
    {
      /* If the local file is modified we have to call the handler on the
         working copy file with keywords unexpanded */
      svn_wc_status3_t *status;

      SVN_ERR(svn_wc_status3(&status, ctx->wc_ctx, target_abspath_or_url,
                             pool, pool));

      if (status->text_status != svn_wc_status_normal)
        {
          apr_hash_t *props;
          svn_stream_t *wcfile;
          svn_string_t *keywords;
          svn_stream_t *tempfile;
          const char *temppath;
          apr_hash_t *kw = NULL;

          SVN_ERR(svn_wc_prop_list2(&props, ctx->wc_ctx,
                                    target_abspath_or_url, pool, pool));

          SVN_ERR(svn_stream_open_readonly(&wcfile, target, pool, pool));

          keywords = apr_hash_get(props, SVN_PROP_KEYWORDS,
                                  APR_HASH_KEY_STRING);

          if (keywords)
            SVN_ERR(svn_subst_build_keywords2(&kw, keywords->data, NULL,
                                              NULL, 0, NULL, pool));

          /* Normalize EOLs and contract keywords so the working file
             diffs cleanly against repository text. */
          wcfile = svn_subst_stream_translated(wcfile, "\n", TRUE, kw, FALSE,
                                               pool);

          SVN_ERR(svn_stream_open_unique(&tempfile, &temppath, NULL,
                                         svn_io_file_del_on_pool_cleanup,
                                         pool, pool));

          SVN_ERR(svn_stream_copy3(wcfile, tempfile, ctx->cancel_func,
                                   ctx->cancel_baton, pool));

          SVN_ERR(add_file_blame(frb.last_filename, temppath, frb.chain,
                                 NULL, frb.diff_options, pool));

          frb.last_filename = temppath;
        }
    }

  /* Report the blame to the caller. */

  /* The callback has to have been called at least once. */
  SVN_ERR_ASSERT(frb.last_filename != NULL);

  /* Create a pool for the iteration below. */
  iterpool = svn_pool_create(pool);

  /* Open the last file and get a stream. */
  SVN_ERR(svn_stream_open_readonly(&last_stream, frb.last_filename,
                                   pool, pool));
  stream = svn_subst_stream_translated(last_stream, "\n", TRUE, NULL,
                                       FALSE, pool);

  /* Perform optional merged chain normalization. */
  if (include_merged_revisions)
    {
      /* If we never created any blame for the original chain, create it
         now, with the most recent changed revision.  This could occur if
         a file was created on a branch and then merged to another branch.
         This is semantically a copy, and we want to use the revision on
         the branch as the most recently changed revision.  ### Is this
         really what we want to do here?  Do the semantics of copy
         change? */
      if (!frb.chain->blame)
        frb.chain->blame = blame_create(frb.chain, frb.rev, 0);

      normalize_blames(frb.chain, frb.merged_chain, pool);
      walk_merged = frb.merged_chain->blame;
    }

  /* Process each blame item.  WALK and WALK_MERGED advance in lockstep
     (the merged chain was normalized to the same segmentation above). */
  for (walk = frb.chain->blame; walk; walk = walk->next)
    {
      apr_off_t line_no;
      svn_revnum_t merged_rev;
      const char *merged_path;
      apr_hash_t *merged_rev_props;

      if (walk_merged)
        {
          merged_rev = walk_merged->rev->revision;
          merged_rev_props = walk_merged->rev->rev_props;
          merged_path = walk_merged->rev->path;
        }
      else
        {
          merged_rev = SVN_INVALID_REVNUM;
          merged_rev_props = NULL;
          merged_path = NULL;
        }

      /* Emit lines [walk->start, next segment's start); the last segment
         runs until EOF. */
      for (line_no = walk->start;
           !walk->next || line_no < walk->next->start;
           ++line_no)
        {
          svn_boolean_t eof;
          svn_stringbuf_t *sb;

          svn_pool_clear(iterpool);
          SVN_ERR(svn_stream_readline(stream, &sb, "\n", &eof, iterpool));
          if (ctx->cancel_func)
            SVN_ERR(ctx->cancel_func(ctx->cancel_baton));
          if (!eof || sb->len)
            {
              if (walk->rev)
                SVN_ERR(receiver(receiver_baton, start_revnum, end_revnum,
                                 line_no, walk->rev->revision,
                                 walk->rev->rev_props, merged_rev,
                                 merged_rev_props, merged_path,
                                 sb->data, FALSE, iterpool));
              else
                /* No revision info: line is local-only (modified WC). */
                SVN_ERR(receiver(receiver_baton, start_revnum, end_revnum,
                                 line_no, SVN_INVALID_REVNUM,
                                 NULL, SVN_INVALID_REVNUM,
                                 NULL, NULL,
                                 sb->data, TRUE, iterpool));
            }
          if (eof) break;
        }

      if (walk_merged)
        walk_merged = walk_merged->next;
    }

  SVN_ERR(svn_stream_close(stream));

  svn_pool_destroy(frb.lastpool);
  svn_pool_destroy(frb.currpool);
  if (include_merged_revisions)
    {
      svn_pool_destroy(frb.filepool);
      svn_pool_destroy(frb.prevfilepool);
    }
  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
static int cipher_is_blacklisted(const char *cipher, const char **psource) { *psource = apr_hash_get(BLCNames, cipher, APR_HASH_KEY_STRING); return !!*psource; }
/* Query GPDB for the set of segment hosts and build the permanent host
 * table used by the monitor.
 *
 * On success, *HOST_TABLE points at a heap-allocated array (never freed;
 * lives for the process) and *HOSTCNT holds its length.  GLOBAL_POOL is
 * used only for the per-host mutexes so they outlive this function; all
 * other APR allocations come from a local pool destroyed before return.
 * OPT is forwarded to the hadoop-host discovery.  Fatals out (no return)
 * on pool/alloc failure or when no valid hosts were found.
 *
 * Fixes vs. previous revision: the CHECKMEM() calls after each
 * "addressinfo_tail->next = calloc(...)" previously checked the already
 * non-NULL tail pointer instead of the freshly allocated node, letting an
 * OOM slip through to a NULL dereference; the hosts array allocation is
 * now also checked. */
void gpdb_get_hostlist(int* hostcnt, host_t** host_table, apr_pool_t* global_pool, mmon_options_t* opt)
{
	apr_pool_t* pool;
	PGconn* conn = 0;
	PGresult* result = 0;
	int rowcount, i;
	unsigned int unique_hosts = 0;
	apr_hash_t* htab;
	struct hostinfo_holder_t* hostinfo_holder = NULL;
	host_t* hosts = NULL;
	int e;

	// Result columns: 0 -- hostname, 1 -- address, 2 -- is_master, 3 -- datadir
	const char *QUERY = "SELECT distinct hostname, address, case when content < 0 then 1 else 0 end as is_master, MAX(fselocation) as datadir FROM pg_filespace_entry "
			"JOIN gp_segment_configuration on (dbid = fsedbid) WHERE fsefsoid = (select oid from pg_filespace where fsname='pg_system') "
			"GROUP BY (hostname, address, is_master) order by hostname";

	if (0 != (e = apr_pool_create_alloc(&pool, NULL)))
	{
		gpmon_fatalx(FLINE, e, "apr_pool_create_alloc failed");
	}

	const char* errmsg = gpdb_exec(&conn, &result, QUERY);

	TR2((QUERY));
	TR2(("\n"));

	if (errmsg)
	{
		gpmon_warning(FLINE, "GPDB error %s\n\tquery: %s\n", errmsg, QUERY);
	}
	else
	{
		// hash of hostnames to addresses
		htab = apr_hash_make(pool);

		rowcount = PQntuples(result);

		for (i = 0; i < rowcount; i++)
		{
			char* curr_hostname = PQgetvalue(result, i, 0);
			hostinfo_holder = apr_hash_get(htab, curr_hostname, APR_HASH_KEY_STRING);
			if (!hostinfo_holder)
			{
				// First row for this hostname: create its record.
				hostinfo_holder = apr_pcalloc(pool, sizeof(struct hostinfo_holder_t));
				CHECKMEM(hostinfo_holder);
				apr_hash_set(htab, curr_hostname, APR_HASH_KEY_STRING, hostinfo_holder);
				hostinfo_holder->hostname = curr_hostname;
				hostinfo_holder->is_master = atoi(PQgetvalue(result, i, 2));
				hostinfo_holder->datadir = PQgetvalue(result, i, 3);

				// use permanent (malloc'd) memory for the address list -- stored for duration
				// populate 1st on list and save to head and tail
				hostinfo_holder->addressinfo_head = hostinfo_holder->addressinfo_tail = calloc(1, sizeof(addressinfo_holder_t));
				CHECKMEM(hostinfo_holder->addressinfo_tail);

				// first is the hostname
				hostinfo_holder->addressinfo_tail->address = strdup(hostinfo_holder->hostname);
				CHECKMEM(hostinfo_holder->addressinfo_tail->address);

				// add a 2nd to the list
				hostinfo_holder->addressinfo_tail->next = calloc(1, sizeof(addressinfo_holder_t));
				CHECKMEM(hostinfo_holder->addressinfo_tail->next); /* check the new node, not the tail */
				hostinfo_holder->addressinfo_tail = hostinfo_holder->addressinfo_tail->next;

				// second is address
				hostinfo_holder->addressinfo_tail->address = strdup(PQgetvalue(result, i, 1));
				CHECKMEM(hostinfo_holder->addressinfo_tail->address);

				// one for hostname one for address
				hostinfo_holder->address_count = 2;
			}
			else
			{
				// Known hostname: append this row's address to its list.
				// permanent memory for address list -- stored for duration
				hostinfo_holder->addressinfo_tail->next = calloc(1, sizeof(addressinfo_holder_t));
				CHECKMEM(hostinfo_holder->addressinfo_tail->next); /* check the new node, not the tail */
				hostinfo_holder->addressinfo_tail = hostinfo_holder->addressinfo_tail->next;

				hostinfo_holder->addressinfo_tail->address = strdup(PQgetvalue(result, i, 1));
				CHECKMEM(hostinfo_holder->addressinfo_tail->address);

				hostinfo_holder->address_count++;
			}
		}

		// if we have any appliance specific hosts such as hadoop nodes add them to the hash table
		if (get_appliance_hosts_and_add_to_hosts(pool, htab))
		{
			TR0(("Not an appliance: checking for SW Only hadoop hosts.\n"));
			get_hadoop_hosts_and_add_to_hosts(pool, htab, opt); // Not an appliance, so check for SW only hadoop nodes.
		}

		unique_hosts = apr_hash_count(htab);

		// allocate memory for host list (not freed ever)
		hosts = calloc(unique_hosts, sizeof(host_t));
		CHECKMEM(hosts);

		apr_hash_index_t* hi;
		void* vptr;
		int hostcounter = 0;
		for (hi = apr_hash_first(0, htab); hi; hi = apr_hash_next(hi))
		{
			// sanity check: never write past the array we sized above
			if ((unsigned int)hostcounter >= unique_hosts)
			{
				gpmon_fatalx(FLINE, 0, "host counter exceeds unique hosts");
			}

			apr_hash_this(hi, 0, 0, &vptr);
			hostinfo_holder = vptr;

			// Copy out of the temp pool: the hash and holders die with POOL.
			hosts[hostcounter].hostname = strdup(hostinfo_holder->hostname);
			hosts[hostcounter].data_dir = strdup(hostinfo_holder->datadir);
			if (hostinfo_holder->smon_dir)
			{
				hosts[hostcounter].smon_bin_location = strdup(hostinfo_holder->smon_dir);
			}
			hosts[hostcounter].is_master = hostinfo_holder->is_master;
			hosts[hostcounter].addressinfo_head = hostinfo_holder->addressinfo_head;
			hosts[hostcounter].addressinfo_tail = hostinfo_holder->addressinfo_tail;
			hosts[hostcounter].address_count = hostinfo_holder->address_count;
			hosts[hostcounter].connection_hostname.current = hosts[hostcounter].addressinfo_head;
			hosts[hostcounter].snmp_hostname.current = hosts[hostcounter].addressinfo_head;

			if (hostinfo_holder->is_hdm)
				hosts[hostcounter].is_hdm = 1;

			if (hostinfo_holder->is_hdw)
				hosts[hostcounter].is_hdw = 1;

			if (hostinfo_holder->is_etl)
				hosts[hostcounter].is_etl = 1;

			if (hostinfo_holder->is_hbw)
				hosts[hostcounter].is_hbw = 1;

			if (hostinfo_holder->is_hdc)
				hosts[hostcounter].is_hdc = 1;

			apr_thread_mutex_create(&hosts[hostcounter].mutex, APR_THREAD_MUTEX_UNNESTED, global_pool); // use the global pool so the mutexes last beyond this function

			hostcounter++;
		}

		*hostcnt = hostcounter;
	}

	apr_pool_destroy(pool);
	PQclear(result);
	PQfinish(conn);

	if (!hosts || *hostcnt < 1)
	{
		gpmon_fatalx(FLINE, 0, "no valid hosts found");
	}

	*host_table = hosts;
}
/* Compute blame for TARGET (a repository path) over FRB->start_rev ..
 * FRB->end_rev using the RA layer RA_LIB/RA_SESSION, accumulating the
 * result into FRB->blame.  Directories and binary files are rejected
 * with an error.  All allocation happens in FRB->mainpool except the
 * per-revision scratch work, which rotates between FRB->currpool and
 * FRB->lastpool. */
static svn_error_t *
do_blame (const char *target,
          svn_ra_plugin_t *ra_lib,
          void *ra_session,
          struct file_rev_baton *frb)
{
  struct log_message_baton lmb;
  apr_array_header_t *condensed_targets;
  apr_file_t *file;
  svn_stream_t *stream;
  struct rev *rev;
  svn_node_kind_t kind;
  apr_pool_t *pool = frb->mainpool;

  SVN_ERR (ra_lib->check_path (ra_session, target, frb->end_rev, &kind,
                               pool));

  if (kind == svn_node_dir)
    return svn_error_createf (SVN_ERR_CLIENT_IS_DIRECTORY, NULL,
                              ("URL '%s' refers to a directory"), target);

  /* The session is rooted at TARGET itself, so the log target is "". */
  condensed_targets = apr_array_make (pool, 1, sizeof (const char *));
  (*((const char **)apr_array_push (condensed_targets))) = "";

  lmb.path = apr_pstrcat(pool, "/", target, NULL);
  lmb.eldest = NULL;
  lmb.pool = pool;

  /* Accumulate revision metadata by walking the revisions
     backwards; this allows us to follow moves/copies
     correctly. */
  SVN_ERR (ra_lib->get_log (ra_session,
                            condensed_targets,
                            frb->end_rev,
                            frb->start_rev,
                            TRUE,
                            FALSE,
                            log_message_receiver,
                            &lmb,
                            pool));

  /* Inspect the first revision's change metadata; if there are any
     prior revisions, compute a new starting revision/path.  If no
     revisions were selected, no blame is assigned.  A modified item
     certainly has a prior revision.  It is reasonable for an added
     item to have none, but anything else is unexpected. */
  if (!lmb.eldest)
    {
      /* No revisions in range: synthesize a single entry at end_rev and
         prepend an "unassigned" sentinel rev so no line gets blamed. */
      lmb.eldest = apr_palloc (pool, sizeof (*rev));
      lmb.eldest->revision = frb->end_rev;
      lmb.eldest->path = lmb.path;
      lmb.eldest->next = NULL;
      rev = apr_palloc (pool, sizeof (*rev));
      rev->revision = SVN_INVALID_REVNUM;
      rev->author = NULL;
      rev->date = NULL;
      frb->blame = blame_create (frb, rev, 0);
    }
  else if (lmb.action == 'M' || SVN_IS_VALID_REVNUM (lmb.copyrev))
    {
      /* Modified (or copied): there is a predecessor; prepend it, plus an
         "unassigned" sentinel so pre-range lines stay unblamed. */
      rev = apr_palloc (pool, sizeof (*rev));
      if (SVN_IS_VALID_REVNUM (lmb.copyrev))
        rev->revision = lmb.copyrev;
      else
        rev->revision = lmb.eldest->revision - 1;
      rev->path = lmb.path;
      rev->next = lmb.eldest;
      lmb.eldest = rev;
      rev = apr_palloc (pool, sizeof (*rev));
      rev->revision = SVN_INVALID_REVNUM;
      rev->author = NULL;
      rev->date = NULL;
      frb->blame = blame_create (frb, rev, 0);
    }
  else if (lmb.action == 'A')
    {
      /* Added in the eldest revision: every line starts blamed to it. */
      frb->blame = blame_create (frb, lmb.eldest, 0);
    }
  else
    return svn_error_createf (APR_EGENERAL, NULL,
                              ("Revision action '%c' for "
                               "revision %ld of '%s' "
                               "lacks a prior revision"),
                              lmb.action, lmb.eldest->revision,
                              lmb.eldest->path);

  /* Walk the revision list in chronological order, downloading each
     fulltext, diffing it with its predecessor, and accumulating the
     blame information into frb->blame.  Use two iteration pools rather
     than one, because the diff routines need to look at a sliding
     window of revisions.  Two pools gives us a ring buffer of sorts. */
  for (rev = lmb.eldest; rev; rev = rev->next)
    {
      const char *tmp;
      const char *temp_dir;
      apr_hash_t *props;
      svn_string_t *mimetype;

      apr_pool_clear (frb->currpool);
      SVN_ERR (svn_io_temp_dir (&temp_dir, frb->currpool));
      SVN_ERR (svn_io_open_unique_file (&file, &tmp,
                                        svn_path_join (temp_dir, "tmp",
                                                       frb->currpool),
                                        ".tmp", FALSE, frb->currpool));
      /* Ensure the temp file is removed when currpool is cleared. */
      apr_pool_cleanup_register (frb->currpool, file, cleanup_tempfile,
                                 apr_pool_cleanup_null);

      stream = svn_stream_from_aprfile (file, frb->currpool);
      /* rev->path has a leading '/'; the RA layer wants it stripped. */
      SVN_ERR (ra_lib->get_file(ra_session, rev->path + 1, rev->revision,
                                stream, NULL, &props, frb->currpool));
      SVN_ERR (svn_stream_close (stream));
      SVN_ERR (svn_io_file_close (file, frb->currpool));

      /* If this file has a non-textual mime-type, bail out. */
      if (props &&
          ((mimetype = apr_hash_get (props, SVN_PROP_MIME_TYPE,
                                     sizeof (SVN_PROP_MIME_TYPE) - 1))))
        {
          if (svn_mime_type_is_binary (mimetype->data))
            return svn_error_createf
              (SVN_ERR_CLIENT_IS_BINARY_FILE, 0,
               ("Cannot calculate blame information for binary file '%s'"),
               target);
        }

      if (frb->last_filename)
        {
          frb->rev = rev;
          SVN_ERR (add_file_blame (frb->last_filename, tmp, frb));
        }

      frb->last_filename = tmp;
      /* Swap the two scratch pools; last becomes current for next pass. */
      {
        apr_pool_t *tmppool = frb->currpool;
        frb->currpool = frb->lastpool;
        frb->lastpool = tmppool;
      }
    }

  return SVN_NO_ERROR;
}
/* Look up the pattern-part array registered under DEF in the global
 * fend_definition table; returns NULL when DEF is unknown. */
apr_array_header_t* fend_get_pattern(const char* def)
{
    return apr_hash_get(fend_definition, def, APR_HASH_KEY_STRING);
}
/* Parse one LINE of devices.cnf and, when it names a device type we
 * monitor (Spidey/Locust hadoop nodes, ETL hosts), add a hostinfo record
 * for its primary hostname to HTAB (allocated from TMP_POOL).  Lines for
 * unmonitored devices, duplicates, and malformed lines are skipped.
 * LINE is modified in place (comment stripping, tokenizing).
 *
 * Fix vs. previous revision: the post-comment-strip check retested the
 * pointer for NULL ("if (!line)"), which can never fire since stripping a
 * comment only writes a NUL into the buffer; it now skips *empty* lines,
 * which is what the stripping can actually produce. */
void process_line_in_devices_cnf(apr_pool_t* tmp_pool, apr_hash_t* htab, char* line)
{
	if (!line)
	{
		gpmon_warningx(FLINE, 0, "Line in devices file is null, skipping");
		return;
	}

	char* host;
	char* device;
	char* category;
	char primary_hostname[64];

	char* location = strchr(line, '#');
	if (location)
	{
		*location = 0; // remove comments from the line
	}

	// skip lines that are empty (or became empty after comment removal)
	if (*line == '\0')
	{
		return;
	}

	// we do these in reverse order so inserting null chars does not prevent finding other tokens
	if (find_token_in_config_string(line, &host, "Host"))
	{
		return;
	}

	if (find_token_in_config_string(line, &device, "Device"))
	{
		return;
	}

	if (find_token_in_config_string(line, &category, "Category"))
	{
		return;
	}

	int monitored_device = 0;
	int hostType = 0;

	// V1 appliance device codes
	if (strcmp(device, "Spidey0001") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDW;
	}

	if (strcmp(device, "Spidey0002") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDM;
	}

	if (strcmp(device, "Spidey0003") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HBW;
	}

	if (strcmp(device, "EtlHost") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_ETL;
	}

	// For V2
	if (strcmp(device, "Locust0001") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDW;
	}

	if (strcmp(device, "Locust0002") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDM;
	}

	if (strcmp(device, "Locust0003") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDC;
	}

	if (strcmp(device, "EtlHostV2") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_ETL;
	}

	// segment host, switch, etc ... we are only adding additional hosts required for performance monitoring
	if (!monitored_device)
		return;

	// the Host field may be a comma-separated list; the primary hostname is
	// the first entry (bounded copy, always NUL-terminated)
	strncpy(primary_hostname, host, sizeof(primary_hostname));
	primary_hostname[sizeof(primary_hostname) - 1] = 0;
	location = strchr(primary_hostname, ',');
	if (location)
		*location = 0;

	struct hostinfo_holder_t* hostinfo_holder = apr_hash_get(htab, primary_hostname, APR_HASH_KEY_STRING);
	if (hostinfo_holder)
	{
		gpmon_warningx(FLINE, 0, "Host '%s' is duplicated in devices.cnf", primary_hostname);
		return;
	}

	// OK Lets add this record at this point
	hostinfo_holder = apr_pcalloc(tmp_pool, sizeof(struct hostinfo_holder_t));
	CHECKMEM(hostinfo_holder);
	apr_hash_set(htab, primary_hostname, APR_HASH_KEY_STRING, hostinfo_holder);

	initializeHostInfoDataFromFileEntry(tmp_pool, hostinfo_holder, primary_hostname, host, hostType, NULL, NULL);
}
/* Return the source registered under KEY in CONFIG, or NULL if absent. */
mapcache_source *mapcache_configuration_get_source(mapcache_cfg *config, const char *key)
{
  mapcache_source *source;

  source = apr_hash_get(config->sources, key, APR_HASH_KEY_STRING);
  return source;
}
/* Return the grid registered under KEY in CONFIG, or NULL if absent. */
mapcache_grid *mapcache_configuration_get_grid(mapcache_cfg *config, const char *key)
{
  mapcache_grid *grid;

  grid = apr_hash_get(config->grids, key, APR_HASH_KEY_STRING);
  return grid;
}
/* This is a helper for svn_client__update_internal(), which see for an explanation of most of these parameters. Some stuff that's unique is as follows: ANCHOR_ABSPATH is the local absolute path of the update anchor. This is typically either the same as LOCAL_ABSPATH, or the immediate parent of LOCAL_ABSPATH. If NOTIFY_SUMMARY is set (and there's a notification handler in CTX), transmit the final update summary upon successful completion of the update. */ static svn_error_t * update_internal(svn_revnum_t *result_rev, const char *local_abspath, const char *anchor_abspath, const svn_opt_revision_t *revision, svn_depth_t depth, svn_boolean_t depth_is_sticky, svn_boolean_t ignore_externals, svn_boolean_t allow_unver_obstructions, svn_boolean_t adds_as_modification, svn_boolean_t *timestamp_sleep, svn_boolean_t notify_summary, svn_client_ctx_t *ctx, apr_pool_t *pool) { const svn_delta_editor_t *update_editor; void *update_edit_baton; const svn_ra_reporter3_t *reporter; void *report_baton; const char *anchor_url; const char *corrected_url; const char *target; const char *repos_root; svn_error_t *err; svn_revnum_t revnum; svn_boolean_t use_commit_times; svn_boolean_t sleep_here = FALSE; svn_boolean_t *use_sleep = timestamp_sleep ? timestamp_sleep : &sleep_here; svn_boolean_t clean_checkout = FALSE; const char *diff3_cmd; svn_ra_session_t *ra_session; const char *preserved_exts_str; apr_array_header_t *preserved_exts; struct svn_client__dirent_fetcher_baton_t dfb; svn_boolean_t server_supports_depth; svn_boolean_t tree_conflicted; svn_config_t *cfg = ctx->config ? apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG, APR_HASH_KEY_STRING) : NULL; /* An unknown depth can't be sticky. */ if (depth == svn_depth_unknown) depth_is_sticky = FALSE; if (strcmp(local_abspath, anchor_abspath)) target = svn_dirent_basename(local_abspath, pool); else target = ""; /* Get full URL from the ANCHOR. 
*/ SVN_ERR(svn_wc__node_get_url(&anchor_url, ctx->wc_ctx, anchor_abspath, pool, pool)); if (! anchor_url) return svn_error_createf(SVN_ERR_ENTRY_MISSING_URL, NULL, _("'%s' has no URL"), svn_dirent_local_style(anchor_abspath, pool)); /* Check if our anchor exists in BASE. If it doesn't we can't update. ### For performance reasons this should be handled with the same query ### as retrieving the anchor url. */ SVN_ERR(svn_wc__node_get_base_rev(&revnum, ctx->wc_ctx, anchor_abspath, pool)); /* It does not make sense to update tree-conflict victims. */ err = svn_wc_conflicted_p3(NULL, NULL, &tree_conflicted, ctx->wc_ctx, local_abspath, pool); if (err && err->apr_err == SVN_ERR_WC_PATH_NOT_FOUND) { svn_error_clear(err); tree_conflicted = FALSE; } else SVN_ERR(err); if (!SVN_IS_VALID_REVNUM(revnum) || tree_conflicted) { if (ctx->notify_func2) { svn_wc_notify_t *nt; nt = svn_wc_create_notify(local_abspath, tree_conflicted ? svn_wc_notify_skip_conflicted : svn_wc_notify_update_skip_working_only, pool); ctx->notify_func2(ctx->notify_baton2, nt, pool); } return SVN_NO_ERROR; } /* We may need to crop the tree if the depth is sticky */ if (depth_is_sticky && depth < svn_depth_infinity) { svn_node_kind_t target_kind; if (depth == svn_depth_exclude) { SVN_ERR(svn_wc_exclude(ctx->wc_ctx, local_abspath, ctx->cancel_func, ctx->cancel_baton, ctx->notify_func2, ctx->notify_baton2, pool)); /* Target excluded, we are done now */ return SVN_NO_ERROR; } SVN_ERR(svn_wc_read_kind(&target_kind, ctx->wc_ctx, local_abspath, TRUE, pool)); if (target_kind == svn_node_dir) { SVN_ERR(svn_wc_crop_tree2(ctx->wc_ctx, local_abspath, depth, ctx->cancel_func, ctx->cancel_baton, ctx->notify_func2, ctx->notify_baton2, pool)); } } /* check whether the "clean c/o" optimization is applicable */ SVN_ERR(is_empty_wc(&clean_checkout, local_abspath, anchor_abspath, pool)); /* Get the external diff3, if any. 
*/ svn_config_get(cfg, &diff3_cmd, SVN_CONFIG_SECTION_HELPERS, SVN_CONFIG_OPTION_DIFF3_CMD, NULL); if (diff3_cmd != NULL) SVN_ERR(svn_path_cstring_to_utf8(&diff3_cmd, diff3_cmd, pool)); /* See if the user wants last-commit timestamps instead of current ones. */ SVN_ERR(svn_config_get_bool(cfg, &use_commit_times, SVN_CONFIG_SECTION_MISCELLANY, SVN_CONFIG_OPTION_USE_COMMIT_TIMES, FALSE)); /* See which files the user wants to preserve the extension of when conflict files are made. */ svn_config_get(cfg, &preserved_exts_str, SVN_CONFIG_SECTION_MISCELLANY, SVN_CONFIG_OPTION_PRESERVED_CF_EXTS, ""); preserved_exts = *preserved_exts_str ? svn_cstring_split(preserved_exts_str, "\n\r\t\v ", FALSE, pool) : NULL; /* Let everyone know we're starting a real update (unless we're asked not to). */ if (ctx->notify_func2 && notify_summary) { svn_wc_notify_t *notify = svn_wc_create_notify(local_abspath, svn_wc_notify_update_started, pool); notify->kind = svn_node_none; notify->content_state = notify->prop_state = svn_wc_notify_state_inapplicable; notify->lock_state = svn_wc_notify_lock_state_inapplicable; (*ctx->notify_func2)(ctx->notify_baton2, notify, pool); } /* Open an RA session for the URL */ SVN_ERR(svn_client__open_ra_session_internal(&ra_session, &corrected_url, anchor_url, anchor_abspath, NULL, TRUE, TRUE, ctx, pool)); SVN_ERR(svn_ra_get_repos_root2(ra_session, &repos_root, pool)); /* If we got a corrected URL from the RA subsystem, we'll need to relocate our working copy first. */ if (corrected_url) { const char *current_repos_root; const char *current_uuid; /* To relocate everything inside our repository we need the old and new repos root. ### And we should only perform relocates on the wcroot */ SVN_ERR(svn_wc__node_get_repos_info(¤t_repos_root, ¤t_uuid, ctx->wc_ctx, anchor_abspath, pool, pool)); /* ### Check uuid here before calling relocate? 
*/ SVN_ERR(svn_client_relocate2(anchor_abspath, current_repos_root, repos_root, ignore_externals, ctx, pool)); anchor_url = corrected_url; } /* ### todo: shouldn't svn_client__get_revision_number be able to take a URL as easily as a local path? */ SVN_ERR(svn_client__get_revision_number(&revnum, NULL, ctx->wc_ctx, local_abspath, ra_session, revision, pool)); SVN_ERR(svn_ra_has_capability(ra_session, &server_supports_depth, SVN_RA_CAPABILITY_DEPTH, pool)); dfb.ra_session = ra_session; dfb.target_revision = revnum; dfb.anchor_url = anchor_url; /* Fetch the update editor. If REVISION is invalid, that's okay; the RA driver will call editor->set_target_revision later on. */ SVN_ERR(svn_wc_get_update_editor4(&update_editor, &update_edit_baton, &revnum, ctx->wc_ctx, anchor_abspath, target, use_commit_times, depth, depth_is_sticky, allow_unver_obstructions, adds_as_modification, server_supports_depth, clean_checkout, diff3_cmd, preserved_exts, svn_client__dirent_fetcher, &dfb, ctx->conflict_func2, ctx->conflict_baton2, NULL, NULL, ctx->cancel_func, ctx->cancel_baton, ctx->notify_func2, ctx->notify_baton2, pool, pool)); /* Tell RA to do an update of URL+TARGET to REVISION; if we pass an invalid revnum, that means RA will use the latest revision. */ SVN_ERR(svn_ra_do_update2(ra_session, &reporter, &report_baton, revnum, target, (!server_supports_depth || depth_is_sticky ? depth : svn_depth_unknown), FALSE, update_editor, update_edit_baton, pool)); /* Drive the reporter structure, describing the revisions within PATH. When we call reporter->finish_report, the update_editor will be driven by svn_repos_dir_delta2. */ err = svn_wc_crawl_revisions5(ctx->wc_ctx, local_abspath, reporter, report_baton, TRUE, depth, (! depth_is_sticky), (! 
server_supports_depth), use_commit_times, ctx->cancel_func, ctx->cancel_baton, ctx->notify_func2, ctx->notify_baton2, pool); if (err) { /* Don't rely on the error handling to handle the sleep later, do it now */ svn_io_sleep_for_timestamps(local_abspath, pool); return svn_error_trace(err); } *use_sleep = TRUE; /* We handle externals after the update is complete, so that handling external items (and any errors therefrom) doesn't delay the primary operation. */ if (SVN_DEPTH_IS_RECURSIVE(depth) && (! ignore_externals)) { apr_hash_t *new_externals; apr_hash_t *new_depths; SVN_ERR(svn_wc__externals_gather_definitions(&new_externals, &new_depths, ctx->wc_ctx, local_abspath, depth, pool, pool)); SVN_ERR(svn_client__handle_externals(new_externals, new_depths, repos_root, local_abspath, depth, use_sleep, ctx, pool)); } if (sleep_here) svn_io_sleep_for_timestamps(local_abspath, pool); /* Let everyone know we're finished here (unless we're asked not to). */ if (ctx->notify_func2 && notify_summary) { svn_wc_notify_t *notify = svn_wc_create_notify(local_abspath, svn_wc_notify_update_completed, pool); notify->kind = svn_node_none; notify->content_state = notify->prop_state = svn_wc_notify_state_inapplicable; notify->lock_state = svn_wc_notify_lock_state_inapplicable; notify->revision = revnum; (*ctx->notify_func2)(ctx->notify_baton2, notify, pool); } /* If the caller wants the result revision, give it to them. */ if (result_rev) *result_rev = revnum; return SVN_NO_ERROR; }
/* This implements the `svn_opt_subcommand_t' interface.
 *
 * Run `svn commit': turn the command-line targets into a condensed
 * local-path list, read the no-unlock config option, set up the log
 * message baton (temp file kept under the commit base directory), warn
 * on depth-limited commits of copies, then call svn_client_commit5().
 * The log-message temp file is cleaned up whether the commit succeeded
 * or failed. */
svn_error_t *
svn_cl__commit(apr_getopt_t *os,
               void *baton,
               apr_pool_t *pool)
{
  svn_error_t *err;
  svn_cl__opt_state_t *opt_state = ((svn_cl__cmd_baton_t *) baton)->opt_state;
  svn_client_ctx_t *ctx = ((svn_cl__cmd_baton_t *) baton)->ctx;
  apr_array_header_t *targets;
  apr_array_header_t *condensed_targets;
  const char *base_dir;
  svn_config_t *cfg;
  svn_boolean_t no_unlock = FALSE;
  struct copy_warning_notify_baton cwnb;

  SVN_ERR(svn_cl__args_to_target_array_print_reserved(&targets, os,
                                                      opt_state->targets,
                                                      ctx, FALSE, pool));

  SVN_ERR_W(svn_cl__check_targets_are_local_paths(targets),
            _("Commit targets must be local paths"));

  /* Add "." if user passed 0 arguments. */
  svn_opt_push_implicit_dot_target(targets, pool);

  SVN_ERR(svn_cl__eat_peg_revisions(&targets, targets, pool));

  /* Condense the targets (like commit does)... */
  SVN_ERR(svn_dirent_condense_targets(&base_dir, &condensed_targets, targets,
                                      TRUE, pool, pool));

  /* All targets condensed to one path: if that path is a file, its
     parent directory becomes the base for the log-message temp file. */
  if ((! condensed_targets) || (! condensed_targets->nelts))
    {
      const char *parent_dir, *base_name;

      SVN_ERR(svn_wc_get_actual_target2(&parent_dir, &base_name, ctx->wc_ctx,
                                        base_dir, pool, pool));
      if (*base_name)
        base_dir = apr_pstrdup(pool, parent_dir);
    }

  if (opt_state->depth == svn_depth_unknown)
    opt_state->depth = svn_depth_infinity;

  cfg = apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG,
                     APR_HASH_KEY_STRING);
  if (cfg)
    SVN_ERR(svn_config_get_bool(cfg, &no_unlock,
                                SVN_CONFIG_SECTION_MISCELLANY,
                                SVN_CONFIG_OPTION_NO_UNLOCK, FALSE));

  /* We're creating a new log message baton because we can use our base_dir
     to store the temp file, instead of the current working directory.  The
     client might not have write access to their working directory, but they
     better have write access to the directory they're committing.  */
  SVN_ERR(svn_cl__make_log_msg_baton(&(ctx->log_msg_baton3), opt_state,
                                     base_dir, ctx->config, pool));

  /* Copies are done server-side, and cheaply, which means they're
   * effectively always done with infinite depth.
   * This is a potential cause of confusion for users trying to commit
   * copied subtrees in part by restricting the commit's depth.
   * See issues #3699 and #3752. */
  if (opt_state->depth < svn_depth_infinity)
    {
      /* Wrap the notification handler so a one-time warning is emitted
         if a copy shows up in a depth-limited commit. */
      cwnb.wrapped_func = ctx->notify_func2;
      cwnb.wrapped_baton = ctx->notify_baton2;
      cwnb.depth = opt_state->depth;
      cwnb.warned = FALSE;
      ctx->notify_func2 = copy_warning_notify_func;
      ctx->notify_baton2 = &cwnb;
    }

  /* Commit. */
  err = svn_client_commit5(targets,
                           opt_state->depth,
                           no_unlock,
                           opt_state->keep_changelists,
                           TRUE /* commit_as_operations */,
                           opt_state->changelists,
                           opt_state->revprop_table,
                           (opt_state->quiet
                            ? NULL
                            : svn_cl__print_commit_info),
                           NULL, ctx, pool);
  /* Clean up the log-message temp file; ERR (possibly NULL) is threaded
     through so a commit failure is still reported to the caller. */
  SVN_ERR(svn_cl__cleanup_log_msg(ctx->log_msg_baton3, err, pool));

  return SVN_NO_ERROR;
}
/* Entry point for the 'svnput' example: upload the file at argv[1] to the
 * repository URL argv[2], creating or overwriting a single file via a raw
 * RA commit-editor drive.  Returns EXIT_SUCCESS/EXIT_FAILURE; all errors
 * funnel through the hit_error label. */
int
main (int argc, const char **argv)
{
  apr_pool_t *pool;
  svn_error_t *err;
  apr_hash_t *dirents;
  const char *upload_file, *URL;
  const char *parent_URL, *basename;
  svn_ra_plugin_t *ra_lib;
  void *session, *ra_baton;
  svn_revnum_t rev;
  const svn_delta_editor_t *editor;
  void *edit_baton;
  svn_dirent_t *dirent;
  svn_ra_callbacks_t *cbtable;
  apr_hash_t *cfg_hash;
  svn_auth_baton_t *auth_baton;

  if (argc <= 2)
    {
      printf ("Usage: %s PATH URL\n", argv[0]);
      printf (" Uploads file at PATH to Subversion repository URL.\n");
      return EXIT_FAILURE;
    }
  upload_file = argv[1];
  URL = argv[2];

  /* Initialize the app. Send all error messages to 'stderr'. */
  if (svn_cmdline_init ("minimal_client", stderr) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  /* Create top-level memory pool. Be sure to read the HACKING file to
     understand how to properly use/free subpools. */
  pool = svn_pool_create (NULL);

  /* Initialize the FS library. */
  err = svn_fs_initialize (pool);
  if (err)
    goto hit_error;

  /* Make sure the ~/.subversion run-time config files exist, and load. */
  err = svn_config_ensure (NULL, pool);
  if (err)
    goto hit_error;

  err = svn_config_get_config (&cfg_hash, NULL, pool);
  if (err)
    goto hit_error;

  /* Build an authentication baton. */
  {
    /* There are many different kinds of authentication back-end
       "providers".  See svn_auth.h for a full overview. */
    svn_auth_provider_object_t *provider;
    apr_array_header_t *providers
      = apr_array_make (pool, 4, sizeof (svn_auth_provider_object_t *));

    svn_client_get_simple_prompt_provider (&provider,
                                           my_simple_prompt_callback,
                                           NULL, /* baton */
                                           2, /* retry limit */ pool);
    APR_ARRAY_PUSH (providers, svn_auth_provider_object_t *) = provider;

    svn_client_get_username_prompt_provider (&provider,
                                             my_username_prompt_callback,
                                             NULL, /* baton */
                                             2, /* retry limit */ pool);
    APR_ARRAY_PUSH (providers, svn_auth_provider_object_t *) = provider;

    /* Register the auth-providers into the context's auth_baton. */
    svn_auth_open (&auth_baton, providers, pool);
  }

  /* Create a table of callbacks for the RA session, mostly nonexistent. */
  cbtable = apr_pcalloc (pool, sizeof(*cbtable));
  cbtable->auth_baton = auth_baton;
  cbtable->open_tmp_file = open_tmp_file;

  /* Now do the real work. */

  /* Open an RA session to the parent URL, fetch current HEAD rev and "lock"
     onto that revnum for the remainder of the session. */
  svn_path_split (URL, &parent_URL, &basename, pool);

  err = svn_ra_init_ra_libs (&ra_baton, pool);
  if (err)
    goto hit_error;

  err = svn_ra_get_ra_library (&ra_lib, ra_baton, parent_URL, pool);
  if (err)
    goto hit_error;

  err = ra_lib->open (&session, parent_URL, cbtable, NULL, cfg_hash, pool);
  if (err)
    goto hit_error;

  err = ra_lib->get_latest_revnum (session, &rev, pool);
  if (err)
    goto hit_error;

  /* Examine contents of parent dir in the rev. */
  err = ra_lib->get_dir (session, "", rev, &dirents, NULL, NULL, pool);
  if (err)
    goto hit_error;

  /* Sanity checks.  Don't let the user shoot himself *too* much. */
  dirent = apr_hash_get (dirents, basename, APR_HASH_KEY_STRING);
  if (dirent && dirent->kind == svn_node_dir)
    {
      printf ("Sorry, a directory already exists at that URL.\n");
      return EXIT_FAILURE;
    }
  if (dirent && dirent->kind == svn_node_file)
    {
      char answer[5];

      printf ("\n*** WARNING ***\n\n");
      printf ("You're about to overwrite r%ld of this file.\n", rev);
      printf ("It was last changed by user '%s',\n",
              dirent->last_author ? dirent->last_author : "?");
      printf ("on %s.\n", svn_time_to_human_cstring (dirent->time, pool));
      printf ("\nSomebody *might* have just changed the file seconds ago,\n"
              "and your upload would be overwriting their changes!\n\n");

      err = prompt_and_read_line("Are you SURE you want to upload? [y/n]",
                                 answer, sizeof(answer));
      if (err)
        goto hit_error;

      /* apr_strnatcasecmp returns non-zero for any answer but "y". */
      if (apr_strnatcasecmp (answer, "y"))
        {
          printf ("Operation aborted.\n");
          return EXIT_SUCCESS;
        }
    }

  /* Fetch a commit editor (it's anchored on the parent URL, because the
     session is too.) */
  /* ### someday add an option for a user-written commit message?  */
  err = ra_lib->get_commit_editor (session, &editor, &edit_baton,
                                   "File upload from 'svnput' program.",
                                   my_commit_callback, NULL, pool);
  if (err)
    goto hit_error;

  /* Drive the editor */
  {
    void *root_baton, *file_baton, *handler_baton;
    svn_txdelta_window_handler_t handler;
    svn_stream_t *contents;
    apr_file_t *f = NULL;

    err = editor->open_root (edit_baton, rev, pool, &root_baton);
    if (err)
      goto hit_error;

    /* Add a new file, or open the existing one, depending on what the
       parent-dir listing told us above. */
    if (! dirent)
      {
        err = editor->add_file (basename, root_baton, NULL, SVN_INVALID_REVNUM,
                                pool, &file_baton);
      }
    else
      {
        err = editor->open_file (basename, root_baton, rev, pool,
                                 &file_baton);
      }
    if (err)
      goto hit_error;

    err = editor->apply_textdelta (file_baton, NULL, pool,
                                   &handler, &handler_baton);
    if (err)
      goto hit_error;

    err = svn_io_file_open (&f, upload_file, APR_READ, APR_OS_DEFAULT, pool);
    if (err)
      goto hit_error;

    /* Stream the local file's contents through the delta handler. */
    contents = svn_stream_from_aprfile (f, pool);
    err = svn_txdelta_send_stream (contents, handler, handler_baton,
                                   NULL, pool);
    if (err)
      goto hit_error;

    err = svn_io_file_close (f, pool);
    if (err)
      goto hit_error;

    err = editor->close_file (file_baton, NULL, pool);
    if (err)
      goto hit_error;

    /* close_edit commits the transaction. */
    err = editor->close_edit (edit_baton, pool);
    if (err)
      goto hit_error;
  }

  return EXIT_SUCCESS;

 hit_error:
  svn_handle_error2 (err, stderr, FALSE, "svnput: ");
  return EXIT_FAILURE;
}
svn_error_t * svn_client__switch_internal(svn_revnum_t *result_rev, const char *path, const char *switch_url, const svn_opt_revision_t *peg_revision, const svn_opt_revision_t *revision, svn_wc_adm_access_t *adm_access, svn_depth_t depth, svn_boolean_t depth_is_sticky, svn_boolean_t *timestamp_sleep, svn_boolean_t ignore_externals, svn_boolean_t allow_unver_obstructions, svn_client_ctx_t *ctx, apr_pool_t *pool) { const svn_ra_reporter3_t *reporter; void *report_baton; const svn_wc_entry_t *entry; const char *URL, *anchor, *target, *source_root, *switch_rev_url; svn_ra_session_t *ra_session; svn_revnum_t revnum; svn_error_t *err = SVN_NO_ERROR; svn_wc_adm_access_t *dir_access; const svn_boolean_t close_adm_access = ! adm_access; const char *diff3_cmd; svn_boolean_t use_commit_times; svn_boolean_t sleep_here; svn_boolean_t *use_sleep = timestamp_sleep ? timestamp_sleep : &sleep_here; const svn_delta_editor_t *switch_editor; void *switch_edit_baton; svn_wc_traversal_info_t *traversal_info = svn_wc_init_traversal_info(pool); const char *preserved_exts_str; apr_array_header_t *preserved_exts; svn_boolean_t server_supports_depth; svn_config_t *cfg = ctx->config ? apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG, APR_HASH_KEY_STRING) : NULL; /* An unknown depth can't be sticky. */ if (depth == svn_depth_unknown) depth_is_sticky = FALSE; /* Do not support the situation of both exclude and switch a target. */ if (depth_is_sticky && depth == svn_depth_exclude) return svn_error_createf(SVN_ERR_UNSUPPORTED_FEATURE, NULL, _("Cannot both exclude and switch a path")); /* Get the external diff3, if any. */ svn_config_get(cfg, &diff3_cmd, SVN_CONFIG_SECTION_HELPERS, SVN_CONFIG_OPTION_DIFF3_CMD, NULL); /* See if the user wants last-commit timestamps instead of current ones. 
*/ SVN_ERR(svn_config_get_bool(cfg, &use_commit_times, SVN_CONFIG_SECTION_MISCELLANY, SVN_CONFIG_OPTION_USE_COMMIT_TIMES, FALSE)); /* See which files the user wants to preserve the extension of when conflict files are made. */ svn_config_get(cfg, &preserved_exts_str, SVN_CONFIG_SECTION_MISCELLANY, SVN_CONFIG_OPTION_PRESERVED_CF_EXTS, ""); preserved_exts = *preserved_exts_str ? svn_cstring_split(preserved_exts_str, "\n\r\t\v ", FALSE, pool) : NULL; /* Sanity check. Without these, the switch is meaningless. */ SVN_ERR_ASSERT(path); SVN_ERR_ASSERT(switch_url && (switch_url[0] != '\0')); /* ### Need to lock the whole target tree to invalidate wcprops. Does non-recursive switch really need to invalidate the whole tree? */ if (adm_access) { svn_wc_adm_access_t *a = adm_access; const char *dir_access_path; /* This is a little hacky, but open two new read-only access baton's to get the anchor and target access batons that would be used if a locked access baton was not available. */ SVN_ERR(svn_wc_adm_open_anchor(&adm_access, &dir_access, &target, path, FALSE, -1, ctx->cancel_func, ctx->cancel_baton, pool)); anchor = svn_wc_adm_access_path(adm_access); dir_access_path = svn_wc_adm_access_path(dir_access); SVN_ERR(svn_wc_adm_close2(adm_access, pool)); SVN_ERR(svn_wc_adm_retrieve(&adm_access, a, anchor, pool)); SVN_ERR(svn_wc_adm_retrieve(&dir_access, a, dir_access_path, pool)); } else { SVN_ERR(svn_wc_adm_open_anchor(&adm_access, &dir_access, &target, path, TRUE, -1, ctx->cancel_func, ctx->cancel_baton, pool)); anchor = svn_wc_adm_access_path(adm_access); } SVN_ERR(svn_wc__entry_versioned(&entry, anchor, adm_access, FALSE, pool)); if (! 
entry->url) return svn_error_createf(SVN_ERR_ENTRY_MISSING_URL, NULL, _("Directory '%s' has no URL"), svn_path_local_style(anchor, pool)); URL = apr_pstrdup(pool, entry->url); /* Open an RA session to 'source' URL */ SVN_ERR(svn_client__ra_session_from_path(&ra_session, &revnum, &switch_rev_url, switch_url, adm_access, peg_revision, revision, ctx, pool)); SVN_ERR(svn_ra_get_repos_root2(ra_session, &source_root, pool)); /* Disallow a switch operation to change the repository root of the target. */ if (! svn_path_is_ancestor(source_root, URL)) return svn_error_createf (SVN_ERR_WC_INVALID_SWITCH, NULL, _("'%s'\n" "is not the same repository as\n" "'%s'"), URL, source_root); /* We may need to crop the tree if the depth is sticky */ if (depth_is_sticky && depth < svn_depth_infinity) { const svn_wc_entry_t *target_entry; SVN_ERR(svn_wc_entry( &target_entry, svn_dirent_join(svn_wc_adm_access_path(adm_access), target, pool), adm_access, TRUE, pool)); if (target_entry && target_entry->kind == svn_node_dir) { SVN_ERR(svn_wc_crop_tree(adm_access, target, depth, ctx->notify_func2, ctx->notify_baton2, ctx->cancel_func, ctx->cancel_baton, pool)); } } SVN_ERR(svn_ra_reparent(ra_session, URL, pool)); /* Fetch the switch (update) editor. If REVISION is invalid, that's okay; the RA driver will call editor->set_target_revision() later on. */ SVN_ERR(svn_wc_get_switch_editor3(&revnum, adm_access, target, switch_rev_url, use_commit_times, depth, depth_is_sticky, allow_unver_obstructions, ctx->notify_func2, ctx->notify_baton2, ctx->cancel_func, ctx->cancel_baton, ctx->conflict_func, ctx->conflict_baton, diff3_cmd, preserved_exts, &switch_editor, &switch_edit_baton, traversal_info, pool)); /* Tell RA to do an update of URL+TARGET to REVISION; if we pass an invalid revnum, that means RA will use the latest revision. 
*/ SVN_ERR(svn_ra_do_switch2(ra_session, &reporter, &report_baton, revnum, target, depth, switch_rev_url, switch_editor, switch_edit_baton, pool)); SVN_ERR(svn_ra_has_capability(ra_session, &server_supports_depth, SVN_RA_CAPABILITY_DEPTH, pool)); /* Drive the reporter structure, describing the revisions within PATH. When we call reporter->finish_report, the update_editor will be driven by svn_repos_dir_delta2. We pass in a traversal_info for recording all externals. It shouldn't be needed for a switch if it wasn't for the relative externals of type '../path'. All of those must be resolved to the new location. */ err = svn_wc_crawl_revisions4(path, dir_access, reporter, report_baton, TRUE, depth, (! depth_is_sticky), (! server_supports_depth), use_commit_times, ctx->notify_func2, ctx->notify_baton2, traversal_info, pool); if (err) { /* Don't rely on the error handling to handle the sleep later, do it now */ svn_io_sleep_for_timestamps(path, pool); return err; } *use_sleep = TRUE; /* We handle externals after the switch is complete, so that handling external items (and any errors therefrom) doesn't delay the primary operation. */ if (SVN_DEPTH_IS_RECURSIVE(depth) && (! ignore_externals)) err = svn_client__handle_externals(adm_access, traversal_info, switch_url, path, source_root, depth, use_sleep, ctx, pool); /* Sleep to ensure timestamp integrity (we do this regardless of errors in the actual switch operation(s)). */ if (sleep_here) svn_io_sleep_for_timestamps(path, pool); /* Return errors we might have sustained. */ if (err) return err; if (close_adm_access) SVN_ERR(svn_wc_adm_close2(adm_access, pool)); /* Let everyone know we're finished here. 
*/ if (ctx->notify_func2) { svn_wc_notify_t *notify = svn_wc_create_notify(anchor, svn_wc_notify_update_completed, pool); notify->kind = svn_node_none; notify->content_state = notify->prop_state = svn_wc_notify_state_inapplicable; notify->lock_state = svn_wc_notify_lock_state_inapplicable; notify->revision = revnum; (*ctx->notify_func2)(ctx->notify_baton2, notify, pool); } /* If the caller wants the result revision, give it to them. */ if (result_rev) *result_rev = revnum; return SVN_NO_ERROR; }
/* Set *HAS to whether the server/repository behind SESSION supports
 * CAPABILITY.  Answers come from the session's capabilities cache; a miss
 * triggers one OPTIONS exchange to populate it.  The mergeinfo capability
 * additionally requires a repository probe, since the server-level answer
 * alone is insufficient. */
svn_error_t *
svn_ra_neon__has_capability(svn_ra_session_t *session,
                            svn_boolean_t *has,
                            const char *capability,
                            apr_pool_t *pool)
{
  svn_ra_neon__session_t *ras = session->priv;
  const char *cap_result;

  /* This capability doesn't rely on anything server side. */
  if (strcmp(capability, SVN_RA_CAPABILITY_COMMIT_REVPROPS) == 0)
    {
      *has = TRUE;
      return SVN_NO_ERROR;
    }

  cap_result = apr_hash_get(ras->capabilities,
                            capability,
                            APR_HASH_KEY_STRING);

  /* If any capability is unknown, they're all unknown, so ask. */
  if (cap_result == NULL)
    SVN_ERR(svn_ra_neon__exchange_capabilities(ras, pool));

  /* Try again, now that we've fetched the capabilities. */
  cap_result = apr_hash_get(ras->capabilities,
                            capability, APR_HASH_KEY_STRING);

  /* Some capabilities depend on the repository as well as the server.
     NOTE: ../libsvn_ra_serf/serf.c:svn_ra_serf__has_capability()
     has a very similar code block.  If you change something here,
     check there as well. */
  if (cap_result == capability_server_yes)
    {
      if (strcmp(capability, SVN_RA_CAPABILITY_MERGEINFO) == 0)
        {
          /* Handle mergeinfo specially.  Mergeinfo depends on the
             repository as well as the server, but the server routine
             that answered our svn_ra_neon__exchange_capabilities() call
             above didn't even know which repository we were interested in
             -- it just told us whether the server supports mergeinfo.
             If the answer was 'no', there's no point checking the
             particular repository; but if it was 'yes, we still must
             change it to 'no' iff the repository itself doesn't
             support mergeinfo. */
          svn_mergeinfo_catalog_t ignored;
          svn_error_t *err;
          apr_array_header_t *paths = apr_array_make(pool, 1,
                                                     sizeof(char *));
          APR_ARRAY_PUSH(paths, const char *) = "";

          /* Probe the repository with a trivial mergeinfo request. */
          err = svn_ra_neon__get_mergeinfo(session, &ignored, paths, 0,
                                           FALSE, FALSE, pool);

          if (err)
            {
              if (err->apr_err == SVN_ERR_UNSUPPORTED_FEATURE)
                {
                  svn_error_clear(err);
                  cap_result = capability_no;
                }
              else if (err->apr_err == SVN_ERR_FS_NOT_FOUND)
                {
                  /* Mergeinfo requests use relative paths, and anyway
                     we're in r0, so this is a likely error, but it
                     means the repository supports mergeinfo! */
                  svn_error_clear(err);
                  cap_result = capability_yes;
                }
              else
                return err;
            }
          else
            cap_result = capability_yes;

          /* Cache the repository-level answer for future calls. */
          apr_hash_set(ras->capabilities,
                       SVN_RA_CAPABILITY_MERGEINFO, APR_HASH_KEY_STRING,
                       cap_result);
        }
      else
        {
          return svn_error_createf
            (SVN_ERR_UNKNOWN_CAPABILITY, NULL,
             _("Don't know how to handle '%s' for capability '%s'"),
             capability_server_yes, capability);
        }
    }

  /* CAP_RESULT is compared by pointer identity against the interned
     capability_* sentinel strings, not with strcmp. */
  if (cap_result == capability_yes)
    {
      *has = TRUE;
    }
  else if (cap_result == capability_no)
    {
      *has = FALSE;
    }
  else if (cap_result == NULL)
    {
      return svn_error_createf
        (SVN_ERR_UNKNOWN_CAPABILITY, NULL,
         _("Don't know anything about capability '%s'"), capability);
    }
  else  /* "can't happen" */
    {
      /* Well, let's hope it's a string. */
      return svn_error_createf
        (SVN_ERR_RA_DAV_OPTIONS_REQ_FAILED, NULL,
         _("Attempt to fetch capability '%s' resulted in '%s'"),
         capability, cap_result);
    }

  return SVN_NO_ERROR;
}
/** Look up a server profile by its NUL-terminated name; NULL if absent. */
MRCP_DECLARE(mrcp_profile_t*) mrcp_server_profile_get(const mrcp_server_t *server, const char *name)
{
	void *profile = apr_hash_get(server->profile_table, name, APR_HASH_KEY_STRING);
	return (mrcp_profile_t*) profile;
}
/* Return the image format registered under KEY, or NULL if not configured. */
mapcache_image_format *mapcache_configuration_get_image_format(mapcache_cfg *config, const char *key)
{
  void *format = apr_hash_get(config->image_formats, (void*)key, APR_HASH_KEY_STRING);
  return (mapcache_image_format*) format;
}
/* Find an active session by id; the hash is keyed on the id's byte buffer
 * using its explicit length (the id need not be NUL-terminated). */
static APR_INLINE mrcp_server_session_t* mrcp_server_session_find(mrcp_server_t *server, const apt_str_t *session_id)
{
	void *session = apr_hash_get(server->session_table, session_id->buf, session_id->length);
	return (mrcp_server_session_t*) session;
}
/* Handler for the FilterProtocol directive: parse PROTO's flag tokens and
 * store the resulting flag set either on provider PNAME of filter FNAME,
 * or on the filter itself when no provider is named.  Returns NULL on
 * success or an error string for the config parser. */
static const char *filter_protocol(cmd_parms *cmd, void *CFG, const char *fname,
                                   const char *pname, const char *proto)
{
    static const char *sep = ";, \t";
    char *arg;
    char *tok = 0;
    unsigned int flags = 0;
    mod_filter_cfg *cfg = CFG;
    ap_filter_provider_t *provider = NULL;
    ap_filter_rec_t *filter = apr_hash_get(cfg->live_filters, fname,
                                           APR_HASH_KEY_STRING);

    if (!filter) {
        return "FilterProtocol: No such filter";
    }

    /* Fixup the args: it's really pname that's optional */
    if (proto == NULL) {
        proto = pname;
        pname = NULL;
    }
    else {
        /* Find provider.  If PNAME was given, PROVIDER is guaranteed
         * non-NULL past this block (we return otherwise), which is what
         * makes the "if (pname)" dereference at the bottom safe. */
        for (provider = filter->providers; provider; provider = provider->next){
            if (!strcasecmp(provider->frec->name, pname)) {
                break;
            }
        }
        if (!provider) {
            return "FilterProtocol: No such provider for this filter";
        }
    }

    /* Now set flags from our args.  PROTO is duplicated because
     * apr_strtok mutates its input. */
    for (arg = apr_strtok(apr_pstrdup(cmd->pool, proto), sep, &tok);
         arg; arg = apr_strtok(NULL, sep, &tok)) {
        if (!strcasecmp(arg, "change=yes")) {
            flags |= AP_FILTER_PROTO_CHANGE | AP_FILTER_PROTO_CHANGE_LENGTH;
        }
        else if (!strcasecmp(arg, "change=1:1")) {
            flags |= AP_FILTER_PROTO_CHANGE;
        }
        else if (!strcasecmp(arg, "byteranges=no")) {
            flags |= AP_FILTER_PROTO_NO_BYTERANGE;
        }
        else if (!strcasecmp(arg, "proxy=no")) {
            flags |= AP_FILTER_PROTO_NO_PROXY;
        }
        else if (!strcasecmp(arg, "proxy=transform")) {
            flags |= AP_FILTER_PROTO_TRANSFORM;
        }
        else if (!strcasecmp(arg, "cache=no")) {
            flags |= AP_FILTER_PROTO_NO_CACHE;
        }
        /* NOTE(review): unrecognized tokens are silently ignored here --
         * presumably deliberate lenience; confirm before tightening. */
    }

    /* Attach the flags at provider scope if a provider was named,
     * otherwise at filter scope. */
    if (pname) {
        provider->frec->proto_flags = flags;
    }
    else {
        filter->proto_flags = flags;
    }
    return NULL;
}
/* Handler for the ExtFilterDefine directive: parse the filter name and its
 * keyword arguments (mode=, ftype=, cmd=, ...), record the definition in
 * the server config hash, and register the input or output filter with
 * the core.  Returns NULL on success or an error string for the config
 * parser. */
static const char *define_filter(cmd_parms *cmd, void *dummy, const char *args)
{
    ef_server_t *conf = ap_get_module_config(cmd->server->module_config,
                                             &ext_filter_module);
    const char *token;
    const char *name;
    char *normalized_name;
    ef_filter_t *filter;

    name = ap_getword_white(cmd->pool, &args);
    /* NOTE(review): ap_getword_white() is believed to return an empty
     * string rather than NULL when no word remains -- this NULL check may
     * never fire; confirm against the httpd util source. */
    if (!name) {
        return "Filter name not found";
    }

    /* During request processing, we find information about the filter
     * by looking up the filter name provided by core server in our
     * hash table.  But the core server has normalized the filter
     * name by converting it to lower case.  Thus, when adding the
     * filter to our hash table we have to use lower case as well. */
    normalized_name = apr_pstrdup(cmd->pool, name);
    ap_str_tolower(normalized_name);

    if (apr_hash_get(conf->h, normalized_name, APR_HASH_KEY_STRING)) {
        return apr_psprintf(cmd->pool, "ExtFilter %s is already defined",
                            name);
    }

    /* Allocate from the long-lived server pool (conf->p), not the
     * directive pool, and install defaults before parsing options. */
    filter = (ef_filter_t *)apr_pcalloc(conf->p, sizeof(ef_filter_t));
    filter->name = name;
    filter->mode = OUTPUT_FILTER;
    filter->ftype = AP_FTYPE_RESOURCE;
    apr_hash_set(conf->h, normalized_name, APR_HASH_KEY_STRING, filter);

    while (*args) {
        while (apr_isspace(*args)) {
            ++args;
        }

        /* Nasty parsing...  I wish I could simply use ap_getword_white()
         * here and then look at the token, but ap_getword_white() doesn't
         * do the right thing when we have cmd="word word word" */
        if (!strncasecmp(args, "preservescontentlength", 22)) {
            token = ap_getword_white(cmd->pool, &args);
            if (!strcasecmp(token, "preservescontentlength")) {
                filter->preserves_content_length = 1;
            }
            else {
                return apr_psprintf(cmd->pool,
                                    "mangled argument `%s'", token);
            }
            continue;
        }

        if (!strncasecmp(args, "mode=", 5)) {
            args += 5;
            token = ap_getword_white(cmd->pool, &args);
            if (!strcasecmp(token, "output")) {
                filter->mode = OUTPUT_FILTER;
            }
            else if (!strcasecmp(token, "input")) {
                filter->mode = INPUT_FILTER;
            }
            else {
                return apr_psprintf(cmd->pool, "Invalid mode: `%s'", token);
            }
            continue;
        }

        if (!strncasecmp(args, "ftype=", 6)) {
            args += 6;
            token = ap_getword_white(cmd->pool, &args);
            /* NOTE(review): atoi() gives 0 on malformed input with no
             * error report -- an invalid ftype= value is accepted
             * silently. */
            filter->ftype = atoi(token);
            continue;
        }

        if (!strncasecmp(args, "enableenv=", 10)) {
            args += 10;
            token = ap_getword_white(cmd->pool, &args);
            filter->enable_env = token;
            continue;
        }

        if (!strncasecmp(args, "disableenv=", 11)) {
            args += 11;
            token = ap_getword_white(cmd->pool, &args);
            filter->disable_env = token;
            continue;
        }

        if (!strncasecmp(args, "intype=", 7)) {
            args += 7;
            filter->intype = ap_getword_white(cmd->pool, &args);
            continue;
        }

        if (!strncasecmp(args, "outtype=", 8)) {
            args += 8;
            filter->outtype = ap_getword_white(cmd->pool, &args);
            continue;
        }

        if (!strncasecmp(args, "cmd=", 4)) {
            args += 4;
            /* parse_cmd() returns an error string or NULL on success. */
            if ((token = parse_cmd(cmd->pool, &args, filter))) {
                return token;
            }
            continue;
        }

        return apr_psprintf(cmd->pool, "Unexpected parameter: `%s'", args);
    }

    /* parsing is done...  register the filter */
    if (filter->mode == OUTPUT_FILTER) {
        /* XXX need a way to ensure uniqueness among all filters */
        ap_register_output_filter(filter->name, ef_output_filter, NULL,
                                  filter->ftype);
    }
    else if (filter->mode == INPUT_FILTER) {
        /* XXX need a way to ensure uniqueness among all filters */
        ap_register_input_filter(filter->name, ef_input_filter, NULL,
                                 filter->ftype);
    }
    else {
        ap_assert(1 != 1); /* we set the field wrong somehow */
    }

    return NULL;
}
/** Look up a media engine by its NUL-terminated name; NULL if absent. */
MRCP_DECLARE(mpf_engine_t*) mrcp_server_media_engine_get(const mrcp_server_t *server, const char *name)
{
	void *engine = apr_hash_get(server->media_engine_table, name, APR_HASH_KEY_STRING);
	return (mpf_engine_t*) engine;
}
/* Parse one LINE of the hadoop cluster info file and, if it describes a
 * monitored hadoop host type (hdm/hdw/hdc), add a hostinfo_holder_t entry
 * to HTAB keyed by the primary hostname.  LINE is mutated in place
 * (comments and separators are cut with NUL bytes).  Entries allocated
 * from TMP_POOL. */
void process_line_in_hadoop_cluster_info(apr_pool_t* tmp_pool, apr_hash_t* htab, char* line, char* smon_bin_location, char* smon_log_location)
{
	if (!line)
	{
		gpmon_warningx(FLINE, 0, "Line in hadoop cluster info file is null, skipping");
		return;
	}

	char* host;
	char* category;
	char primary_hostname[64];

	char* location = strchr(line, '#');
	if (location)
	{
		*location = 0; // remove comments from the line
	}

	/* NOTE(review): LINE cannot have become NULL above (only a byte was
	 * overwritten), so this second check looks unreachable; it appears to
	 * be copied from the devices-file parser -- confirm before removing. */
	if (!line)
	{
		gpmon_warningx(FLINE, 0, "Line in devices file is null after removing comments, skipping");
		return;
	}

	// we do these in reverse order so inserting null chars does not prevent finding other tokens
	if (find_token_in_config_string(line, &category, "Categories"))
	{
		return;
	}

	location = strchr(category, ','); //remove the comma and extra categories
	if (location)
	{
		*location = 0;
	}

	if (find_token_in_config_string(line, &host, "Hostname"))
	{
		return;
	}

	TR1(("Found hadoop host %s\n",host ));

	// look for the 3 hadoop host types
	int monitored_device = 0;
	int hostType = 0;

	if (strcmp(category, "hdm") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDM;
	}
	if (strcmp(category, "hdw") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDW;
	}
	if (strcmp(category, "hdc") == 0)
	{
		monitored_device = 1;
		hostType = GPMON_HOSTTTYPE_HDC;
	}

	// The below code is the same as the devices file parsing code
	// segment host, switch, etc ... we are only adding additional hosts
	// required for performance monitoring
	if (!monitored_device)
	{
		return;
	}

	/* Copy into the fixed buffer and force NUL-termination (strncpy does
	 * not terminate when the source fills the buffer), then keep only the
	 * first comma-separated hostname as the primary. */
	strncpy(primary_hostname, host, sizeof(primary_hostname));
	primary_hostname[sizeof(primary_hostname) - 1] = 0;
	location = strchr(primary_hostname, ',');
	if (location)
	{
		*location = 0;
	}

	struct hostinfo_holder_t* hostinfo_holder = apr_hash_get(htab, primary_hostname, APR_HASH_KEY_STRING);
	if (hostinfo_holder)
	{
		gpmon_warningx(FLINE, 0, "Host '%s' is duplicated in clusterinfo.txt", primary_hostname);
		return;
	}

	// OK Lets add this record at this point
	hostinfo_holder = apr_pcalloc(tmp_pool, sizeof(struct hostinfo_holder_t));
	CHECKMEM(hostinfo_holder);
	apr_hash_set(htab, primary_hostname, APR_HASH_KEY_STRING, hostinfo_holder);

	initializeHostInfoDataFromFileEntry(tmp_pool, hostinfo_holder, primary_hostname, host, hostType, smon_bin_location, smon_log_location);
}
/** Look up an RTP termination factory by its NUL-terminated name; NULL if absent. */
MRCP_DECLARE(mpf_termination_factory_t*) mrcp_server_rtp_factory_get(const mrcp_server_t *server, const char *name)
{
	void *factory = apr_hash_get(server->rtp_factory_table, name, APR_HASH_KEY_STRING);
	return (mpf_termination_factory_t*) factory;
}
/* Set *PREV_PATH_P to the path that PATH (of node kind KIND) occupied in
 * the revision preceding REVISION, based on that revision's CHANGED_PATHS
 * hash (path -> svn_log_changed_path_t *).  Optionally report the change
 * action in *ACTION_P ('M' by default) and the copy source revision in
 * *COPYFROM_REV_P (SVN_INVALID_REVNUM by default).  Results allocated in
 * POOL. */
svn_error_t *
svn_client__prev_log_path (const char **prev_path_p,
                           char *action_p,
                           svn_revnum_t *copyfrom_rev_p,
                           apr_hash_t *changed_paths,
                           const char *path,
                           svn_node_kind_t kind,
                           svn_revnum_t revision,
                           apr_pool_t *pool)
{
  svn_log_changed_path_t *change;
  const char *prev_path = NULL;

  /* It's impossible to find the predecessor path of a NULL path. */
  assert(path);

  /* Initialize our return values for the action and copyfrom_rev in
     case we have an unhandled case later on. */
  if (action_p)
    *action_p = 'M';
  if (copyfrom_rev_p)
    *copyfrom_rev_p = SVN_INVALID_REVNUM;

  /* See if PATH was explicitly changed in this revision. */
  change = apr_hash_get (changed_paths, path, APR_HASH_KEY_STRING);
  if (change)
    {
      /* If PATH was not newly added in this revision, then it may or may
         not have also been part of a moved subtree.  In this case, set a
         default previous path, but still look through the parents of this
         path for a possible copy event. */
      if (change->action != 'A' && change->action != 'R')
        {
          prev_path = path;
        }
      else
        {
          /* PATH is new in this revision.  This means it cannot have been
             part of a copied subtree. */
          if (change->copyfrom_path)
            prev_path = apr_pstrdup (pool, change->copyfrom_path);
          else
            prev_path = NULL;

          *prev_path_p = prev_path;
          if (action_p)
            *action_p = change->action;
          if (copyfrom_rev_p)
            *copyfrom_rev_p = change->copyfrom_rev;
          return SVN_NO_ERROR;
        }
    }

  if (apr_hash_count (changed_paths))
    {
      /* The path was not explicitly changed in this revision.  The
         fact that we're hearing about this revision implies, then, that
         the path was a child of some copied directory.  We need to find
         that directory, and effectively "re-base" our path on that
         directory's copyfrom_path. */
      int i;
      apr_array_header_t *paths;

      /* Build a sorted list of the changed paths. */
      paths = svn_sort__hash (changed_paths,
                              svn_sort_compare_items_as_paths, pool);

      /* Now, walk the list of paths backwards, looking a parent of
         our path that has copyfrom information. */
      for (i = paths->nelts; i > 0; i--)
        {
          svn_sort__item_t item = APR_ARRAY_IDX (paths,
                                                 i - 1, svn_sort__item_t);
          const char *ch_path = item.key;
          int len = strlen (ch_path);

          /* See if our path is the child of this change path.  If not,
             keep looking.  */
          if (! ((strncmp (ch_path, path, len) == 0) && (path[len] == '/')))
            continue;

          /* Okay, our path *is* a child of this change path.  If this
             change was copied, we just need to apply the portion of our
             path that is relative to this change's path, to the change's
             copyfrom path.  Otherwise, this change isn't really
             interesting to us, and our search continues.
             CH_PATH came from CHANGED_PATHS' own key set, so this lookup
             (keyed by explicit length LEN) cannot miss. */
          change = apr_hash_get (changed_paths, ch_path, len);
          if (change->copyfrom_path)
            {
              if (action_p)
                *action_p = change->action;
              if (copyfrom_rev_p)
                *copyfrom_rev_p = change->copyfrom_rev;
              prev_path = svn_path_join (change->copyfrom_path,
                                         path + len + 1, pool);
              break;
            }
        }
    }

  /* If we didn't find what we expected to find, return an error.
     (Because directories bubble-up, we get a bunch of logs we might
     not want.  Be forgiving in that case.)  */
  if (! prev_path)
    {
      if (kind == svn_node_dir)
        prev_path = apr_pstrdup (pool, path);
      else
        /* NOTE(review): this message is not wrapped in _() like sibling
           error strings elsewhere in the client library, so it is not
           localized -- worth fixing where the _ macro is in scope. */
        return svn_error_createf (SVN_ERR_CLIENT_UNRELATED_RESOURCES, NULL,
                                  ("Missing changed-path information for "
                                   "'%s' in revision %ld"),
                                  path, revision);
    }

  *prev_path_p = prev_path;
  return SVN_NO_ERROR;
}
/** Look up an RTP settings entry by its NUL-terminated name; NULL if absent. */
MRCP_DECLARE(mpf_rtp_settings_t*) mrcp_server_rtp_settings_get(const mrcp_server_t *server, const char *name)
{
	void *settings = apr_hash_get(server->rtp_settings_table, name, APR_HASH_KEY_STRING);
	return (mpf_rtp_settings_t*) settings;
}
op_generic_t *rs_simple_request(resource_service_fn_t *arg, data_attr_t *da, rs_query_t *rsq, data_cap_set_t **caps, rs_request_t *req, int req_size, rs_hints_t *hints_list, int fixed_size, int n_rid, int ignore_fixed_err, int timeout) { rs_simple_priv_t *rss = (rs_simple_priv_t *)arg->priv; rsq_base_t *query_global = (rsq_base_t *)rsq; rsq_base_t *query_local; kvq_table_t kvq_global, kvq_local, *kvq; apr_hash_t *pick_from; rid_change_entry_t *rid_change; ex_off_t change; op_status_t status; opque_t *que; rss_rid_entry_t *rse; rsq_base_ele_t *q; int slot, rnd_off, i, j, k, i_unique, i_pickone, found, err_cnt, loop, loop_end; int state, *a, *b, *op_state, unique_size; tbx_stack_t *stack; log_printf(15, "rs_simple_request: START rss->n_rids=%d n_rid=%d req_size=%d fixed_size=%d\n", rss->n_rids, n_rid, req_size, fixed_size); for (i=0; i<req_size; i++) req[i].rid_key = NULL; //** Clear the result in case of an error apr_thread_mutex_lock(rss->lock); i = _rs_simple_refresh(arg); //** Check if we need to refresh the data if (i != 0) { apr_thread_mutex_unlock(rss->lock); return(gop_dummy(op_failure_status)); } //** Determine the query sizes and make the processing arrays memset(&kvq, 0, sizeof(kvq)); rs_query_count(arg, rsq, &i, &(kvq_global.n_unique), &(kvq_global.n_pickone)); log_printf(15, "rs_simple_request: n_unique=%d n_pickone=%d\n", kvq_global.n_unique, kvq_global.n_pickone); tbx_log_flush(); //** Make space the for the uniq and pickone fields. //** Make sure we have space for at least 1 more than we need of each to pass to the routines even though they aren't used j = (kvq_global.n_pickone == 0) ? 
1 : kvq_global.n_pickone + 1; tbx_type_malloc_clear(kvq_global.pickone, kvq_ele_t, j); unique_size = kvq_global.n_unique + 1; tbx_type_malloc_clear(kvq_global.unique, kvq_ele_t *, unique_size); log_printf(15, "MALLOC j=%d\n", unique_size); for (i=0; i<unique_size; i++) { tbx_type_malloc_clear(kvq_global.unique[i], kvq_ele_t, n_rid); } //** We don't allow these on the local but make a temp space anyway kvq_local.n_pickone = 0; tbx_type_malloc_clear(kvq_local.pickone, kvq_ele_t, 1); kvq_global.n_unique = 0; tbx_type_malloc_clear(kvq_local.unique, kvq_ele_t *, 1); tbx_type_malloc_clear(kvq_local.unique[0], kvq_ele_t, n_rid); status = op_success_status; que = new_opque(); stack = tbx_stack_new(); err_cnt = 0; found = 0; // max_size = (req_size > fixed_size) ? req_size : fixed_size; for (i=0; i < n_rid; i++) { found = 0; loop_end = 1; query_local = NULL; rnd_off = tbx_random_get_int64(0, rss->n_rids-1); //rnd_off = 0; //FIXME if (hints_list != NULL) { query_local = (rsq_base_t *)hints_list[i].local_rsq; if (query_local != NULL) { loop_end = 2; rs_query_count(arg, query_local, &j, &(kvq_local.n_unique), &(kvq_local.n_pickone)); if ((kvq_local.n_unique != 0) && (kvq_local.n_pickone != 0)) { log_printf(0, "Unsupported use of pickone/unique in local RSQ hints_list[%d]=%s!\n", i, hints_list[i].fixed_rid_key); status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_FIXED_NOT_FOUND; hints_list[i].status = RS_ERROR_HINTS_INVALID_LOCAL; err_cnt++; continue; } } if (i<fixed_size) { //** Use the fixed list for assignment rse = tbx_list_search(rss->rid_table, hints_list[i].fixed_rid_key); if (rse == NULL) { log_printf(0, "Missing element in hints list[%d]=%s! Ignoring check.\n", i, hints_list[i].fixed_rid_key); hints_list[i].status = RS_ERROR_FIXED_NOT_FOUND; continue; //** Skip the check } rnd_off = rse->slot; } } //** See if we use a restrictive list. Ususally used when rebalancing space pick_from = (hints_list != NULL) ? 
hints_list[i].pick_from : NULL; rid_change = NULL; change = 0; for (k=0; k<req_size; k++) { if (req[k].rid_index == i) { change += req[k].size; } } for (j=0; j<rss->n_rids; j++) { slot = (rnd_off+j) % rss->n_rids; rse = rss->random_array[slot]; if (pick_from != NULL) { rid_change = apr_hash_get(pick_from, rse->rid_key, APR_HASH_KEY_STRING); log_printf(15, "PICK_FROM != NULL i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d rid_change=%p\n", i, j, slot, rse->rid_key, rse->status, rid_change); if (rid_change == NULL) continue; //** Not in our list so skip to the next ex_off_t delta = rid_change->delta - change; log_printf(15, "PICK_FROM != NULL i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d rc->state=%d (" XOT ") > " XOT "????\n", i, j, slot, rse->rid_key, rse->status, rid_change->state, delta, rid_change->tolerance); //** Make sure we don't overshoot the target if (rid_change->state == 1) continue; //** Already converged RID if (rid_change->delta <= 0) continue; //** Need to move data OFF this RID if ((change - rid_change->delta) > rid_change->tolerance) continue; //**delta>0 if we made it here } log_printf(15, "i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d\n", i, j, slot, rse->rid_key, rse->status); if ((rse->status != RS_STATUS_UP) && (i>=fixed_size)) continue; //** Skip this if disabled and not in the fixed list tbx_stack_empty(stack, 1); q = query_global->head; kvq = &kvq_global; for (loop=0; loop<loop_end; loop++) { i_unique = 0; i_pickone = 0; while (q != NULL) { state = -1; switch (q->op) { case RSQ_BASE_OP_KV: state = rss_test(q, rse, i, kvq->unique[i_unique], &(kvq->pickone[i_pickone])); log_printf(0, "KV: key=%s val=%s i_unique=%d i_pickone=%d loop=%d rss_test=%d rse->rid_key=%s\n", q->key, q->val, i_unique, i_pickone, loop, state, rse->rid_key); tbx_log_flush(); if ((q->key_op & RSQ_BASE_KV_UNIQUE) || (q->val_op & RSQ_BASE_KV_UNIQUE)) i_unique++; if ((q->key_op & RSQ_BASE_KV_PICKONE) || (q->val_op & RSQ_BASE_KV_PICKONE)) i_pickone++; break; case 
RSQ_BASE_OP_NOT: a = (int *)tbx_stack_pop(stack); state = (*a == 0) ? 1 : 0; //log_printf(0, "NOT(%d)=%d\n", *a, state); free(a); break; case RSQ_BASE_OP_AND: a = (int *)tbx_stack_pop(stack); b = (int *)tbx_stack_pop(stack); state = (*a) && (*b); //log_printf(0, "%d AND %d = %d\n", *a, *b, state); free(a); free(b); break; case RSQ_BASE_OP_OR: a = (int *)tbx_stack_pop(stack); b = (int *)tbx_stack_pop(stack); state = a || b; //log_printf(0, "%d OR %d = %d\n", *a, *b, state); free(a); free(b); break; } tbx_type_malloc(op_state, int, 1); *op_state = state; tbx_stack_push(stack, (void *)op_state); log_printf(15, " stack_size=%d loop=%d push state=%d\n",tbx_stack_count(stack), loop, state); tbx_log_flush(); q = q->next; } if (query_local != NULL) { q = query_local->head; kvq = &kvq_local; } } op_state = (int *)tbx_stack_pop(stack); state = -1; if (op_state != NULL) { state = *op_state; free(op_state); } if (op_state == NULL) { log_printf(1, "rs_simple_request: ERROR processing i=%d EMPTY STACK\n", i); found = 0; status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_EMPTY_STACK; } else if (state == 1) { //** Got one log_printf(15, "rs_simple_request: processing i=%d ds_key=%s\n", i, rse->ds_key); found = 1; if ((i<fixed_size) && hints_list) hints_list[i].status = RS_ERROR_OK; for (k=0; k<req_size; k++) { if (req[k].rid_index == i) { log_printf(15, "rs_simple_request: i=%d ds_key=%s, rid_key=%s size=" XOT "\n", i, rse->ds_key, rse->rid_key, req[k].size); req[k].rid_key = strdup(rse->rid_key); req[k].gop = ds_allocate(rss->ds, rse->ds_key, da, req[k].size, caps[k], timeout); opque_add(que, req[k].gop); } } if (rid_change != NULL) { //** Flag that I'm tweaking things. The caller does the source pending/delta half rid_change->delta -= change; rid_change->state = ((llabs(rid_change->delta) <= rid_change->tolerance) || (rid_change->tolerance == 0)) ? 
1 : 0; } break; //** Got one so exit the RID scan and start the next one } else if (i<fixed_size) { //** This should have worked so flag an error if (hints_list) { log_printf(1, "Match fail in fixed list[%d]=%s!\n", i, hints_list[i].fixed_rid_key); hints_list[i].status = RS_ERROR_FIXED_MATCH_FAIL; } else { log_printf(1, "Match fail in fixed list and no hints are provided!\n"); } status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_FIXED_MATCH_FAIL; if (ignore_fixed_err == 0) err_cnt++; break; //** Skip to the next in the list } else { found = 0; } } if ((found == 0) && (i>=fixed_size)) break; } //** Clean up log_printf(15, "FREE j=%d\n", unique_size); for (i=0; i<unique_size; i++) { free(kvq_global.unique[i]); } free(kvq_global.unique); free(kvq_global.pickone); free(kvq_local.unique[0]); free(kvq_local.unique); free(kvq_local.pickone); tbx_stack_free(stack, 1); log_printf(15, "rs_simple_request: END n_rid=%d\n", n_rid); //callback_t *cb = (callback_t *)que->qd.list->top->data; //op_generic_t *gop = (op_generic_t *)cb->priv; //log_printf(15, "top gid=%d reg=%d\n", gop_id(gop), gop_id(req[0].gop)); apr_thread_mutex_unlock(rss->lock); if ((found == 0) || (err_cnt>0)) { opque_free(que, OP_DESTROY); if (status.error_code == 0) { log_printf(1, "rs_simple_request: Can't find enough RIDs! requested=%d found=%d err_cnt=%d\n", n_rid, found, err_cnt); status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_NOT_ENOUGH_RIDS; } return(gop_dummy(status)); } return(opque_get_gop(que)); }
/** Look up a signaling agent previously registered with the server under NAME.
 *  Returns NULL when no agent with that name exists. */
MRCP_DECLARE(mrcp_sig_agent_t*) mrcp_server_signaling_agent_get(const mrcp_server_t *server, const char *name)
{
	mrcp_sig_agent_t *sig_agent;
	sig_agent = apr_hash_get(server->sig_agent_table, name, APR_HASH_KEY_STRING);
	return sig_agent;
}
/**
 * DEPRECATED
 *
 * Return the node-id hash table associated with a group.
 * NOTE(review): the caller-supplied gid is currently ignored -- the original
 * hard-wires group 1 and marks the override as temporary.
 */
apr_hash_t* view_group_table(groupid_t gid)
{
    gid = 1; /* temporary: force lookup of group 1 regardless of argument */
    return apr_hash_get(gid_nid_ht_ht_, &gid, sizeof(gid));
}
/** Look up a connection agent previously registered with the server under NAME.
 *  Returns NULL when no agent with that name exists. */
MRCP_DECLARE(mrcp_connection_agent_t*) mrcp_server_connection_agent_get(const mrcp_server_t *server, const char *name)
{
	mrcp_connection_agent_t *cnt_agent;
	cnt_agent = apr_hash_get(server->cnt_agent_table, name, APR_HASH_KEY_STRING);
	return cnt_agent;
}
/* Determine the properties that should be set automatically on PATH when
 * it is added or imported.
 *
 * Sets *PROPERTIES to a hash (allocated in POOL) mapping property names to
 * svn_string_t * values, built from three sources in order:
 *   1. matching entries in the [auto-props] config section, when the
 *      enable-auto-props option is on;
 *   2. a detected svn:mime-type, when none was supplied by auto-props;
 *   3. an svn:executable property derived from the file's permission bits,
 *      when none was supplied by auto-props (skipped on OS400, see below).
 *
 * Sets *MIMETYPE to the MIME type that was chosen, or NULL if none.
 * Configuration is read from CTX->config; returns an svn_error_t on any
 * underlying config or I/O failure.
 */
svn_error_t *
svn_client__get_auto_props(apr_hash_t **properties,
                           const char **mimetype,
                           const char *path,
                           svn_client_ctx_t *ctx,
                           apr_pool_t *pool)
{
  svn_config_t *cfg;
  svn_boolean_t use_autoprops;
  auto_props_baton_t autoprops;

  /* initialisation */
  autoprops.properties = apr_hash_make(pool);
  autoprops.filename = svn_path_basename(path, pool);
  autoprops.pool = pool;
  autoprops.mimetype = NULL;
  autoprops.have_executable = FALSE;
  *properties = autoprops.properties;

  /* NULL cfg is tolerated by the svn_config_* calls below and yields the
   * supplied defaults. */
  cfg = ctx->config ? apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG,
                                   APR_HASH_KEY_STRING) : NULL;

  /* check that auto props is enabled */
  SVN_ERR(svn_config_get_bool(cfg, &use_autoprops,
                              SVN_CONFIG_SECTION_MISCELLANY,
                              SVN_CONFIG_OPTION_ENABLE_AUTO_PROPS, FALSE));

  /* search for auto props; the enumerator fills autoprops.properties and
   * may set autoprops.mimetype / autoprops.have_executable */
  if (use_autoprops)
    svn_config_enumerate2(cfg, SVN_CONFIG_SECTION_AUTO_PROPS,
                          auto_props_enumerator, &autoprops, pool);

  /* if mimetype has not been set check the file */
  if (! autoprops.mimetype)
    {
      SVN_ERR(svn_io_detect_mimetype2(&autoprops.mimetype, path,
                                      ctx->mimetypes_map, pool));
      if (autoprops.mimetype)
        apr_hash_set(autoprops.properties, SVN_PROP_MIME_TYPE,
                     strlen(SVN_PROP_MIME_TYPE),
                     svn_string_create(autoprops.mimetype, pool));
    }

  /* Don't automatically set the svn:executable property on added items
   * on OS400.  While OS400 supports the executable permission its use is
   * inconsistent at best. */
#ifndef AS400
  /* if executable has not been set check the file */
  if (! autoprops.have_executable)
    {
      svn_boolean_t executable = FALSE;
      SVN_ERR(svn_io_is_file_executable(&executable, path, pool));
      if (executable)
        apr_hash_set(autoprops.properties, SVN_PROP_EXECUTABLE,
                     strlen(SVN_PROP_EXECUTABLE),
                     svn_string_create("", pool));
    }
#endif

  *mimetype = autoprops.mimetype;
  return SVN_NO_ERROR;
}
/* --- MRCP CLIENT --- */

/* Build and register one MRCP client profile for every entry in
 * globals.profiles.  Each profile bundles a signaling agent (MRCPv1/RTSP or
 * MRCPv2/SIP depending on the profile's "version" attribute), an RTP
 * termination factory, the shared media engine and -- for MRCPv2 only --
 * the shared connection agent.
 *
 * Returns 0 on success, -1 on any configuration error (the first failing
 * profile aborts the whole load).
 */
static int load_profiles(mrcp_client_t *client, mrcp_connection_agent_t *shared_connection_agent, mpf_engine_t *shared_media_engine, apr_pool_t *pool)
{
	apr_hash_index_t *hi;

	for (hi = apr_hash_first(NULL, globals.profiles); hi; hi = apr_hash_next(hi)) {
		const char *k;
		ast_mrcp_profile_t *v;
		const void *key;
		void *val;

		apr_hash_this(hi, &key, NULL, &val);

		k = (const char *)key;
		v = (ast_mrcp_profile_t *)val;

		/* Skip hash slots without a parsed profile. */
		if (v == NULL)
			continue;

		ast_log(LOG_DEBUG, "Processing profile %s:%s\n", k, v->version);

		/* A profile is a signaling agent + termination factory + media engine + connection agent (MRCPv2 only). */
		mrcp_sig_agent_t *agent = NULL;
		mpf_termination_factory_t *termination_factory = NULL;
		mrcp_profile_t * mprofile = NULL;
		mpf_rtp_config_t *rtp_config = NULL;
		mpf_rtp_settings_t *rtp_settings = mpf_rtp_settings_alloc(pool);
		mrcp_sig_settings_t *sig_settings = mrcp_signaling_settings_alloc(pool);
		ast_mrcp_profile_t *mod_profile = NULL;
		mrcp_connection_agent_t *connection_agent = NULL;
		mpf_engine_t *media_engine = shared_media_engine;

		/* Get profile attributes.  Both are duplicated into POOL so they
		 * outlive the source hash entries. */
		const char *name = apr_pstrdup(pool, k);
		const char *version = apr_pstrdup(pool, v->version);

		if ((name == NULL) || (strlen(name) == 0) || (version == NULL) || (strlen(version) == 0)) {
			ast_log(LOG_ERROR, "Profile %s missing name or version attribute\n", k);
			return -1;
		}

		// i6net: Get the profile set before from the configuration file.
		mod_profile = (ast_mrcp_profile_t *)apr_hash_get(globals.profiles, name, APR_HASH_KEY_STRING);

		/* Create RTP config, common to MRCPv1 and MRCPv2. */
		if ((rtp_config = mpf_rtp_config_alloc(pool)) == NULL) {
			ast_log(LOG_ERROR, "Unable to create RTP configuration\n");
			return -1;
		}

		rtp_config->rtp_port_min = DEFAULT_RTP_PORT_MIN;
		rtp_config->rtp_port_max = DEFAULT_RTP_PORT_MAX;
		apt_string_set(&rtp_config->ip, DEFAULT_LOCAL_IP_ADDRESS);

		if (strcmp("1", version) == 0) {
			/* MRCPv1 configuration: RTSP-based signaling agent. */
			rtsp_client_config_t *config = mrcp_unirtsp_client_config_alloc(pool);

			if (config == NULL) {
				ast_log(LOG_ERROR, "Unable to create RTSP configuration\n");
				return -1;
			}

			config->origin = DEFAULT_SDP_ORIGIN;

			/* NOTE(review): atol() reports no parse errors; a malformed
			 * timeout string silently becomes 0. */
			if (globals.unimrcp_request_timeout != NULL) {
				config->request_timeout = (apr_size_t)atol(globals.unimrcp_request_timeout);
			}
			sig_settings->resource_location = DEFAULT_RESOURCE_LOCATION;

			ast_log(LOG_DEBUG, "Loading MRCPv1 profile: %s\n", name);

			/* Walk the per-profile key/value pairs; each parameter is offered
			 * to the MRCPv1, RTP and module-profile handlers in turn. */
			apr_hash_index_t *hicfg;

			for (hicfg = apr_hash_first(NULL, v->cfg); hicfg; hicfg = apr_hash_next(hicfg)) {
				const char *param_name;
				const char *param_value;
				const void *keyc;
				void *valc;

				apr_hash_this(hicfg, &keyc, NULL, &valc);

				param_name = (const char *)keyc;
				param_value = (const char *)valc;

				if ((param_name != NULL) && (param_value != NULL)) {
					if (strlen(param_name) == 0) {
						ast_log(LOG_ERROR, "Missing parameter name\n");
						return -1;
					}

					ast_log(LOG_DEBUG, "Loading parameter %s:%s\n", param_name, param_value);

					if ((!process_mrcpv1_config(config, sig_settings, param_name, param_value, pool)) &&
						(!process_rtp_config(client, rtp_config, rtp_settings, param_name, param_value, pool)) &&
						(!process_profile_config(mod_profile, param_name, param_value, pool))) {
						ast_log(LOG_WARNING, "Unknown parameter %s\n", param_name);
					}
				}
			}

			agent = mrcp_unirtsp_client_agent_create(name, config, pool);
		} else if (strcmp("2", version) == 0) {
			/* MRCPv2 configuration: SIP (SofiaSIP) signaling agent plus the
			 * shared MRCPv2 connection agent. */
			mrcp_sofia_client_config_t *config = mrcp_sofiasip_client_config_alloc(pool);

			if (config == NULL) {
				ast_log(LOG_ERROR, "Unable to create SIP configuration\n");
				return -1;
			}

			config->local_ip = DEFAULT_LOCAL_IP_ADDRESS;
			config->local_port = DEFAULT_SIP_LOCAL_PORT;
			sig_settings->server_ip = DEFAULT_REMOTE_IP_ADDRESS;
			sig_settings->server_port = DEFAULT_SIP_REMOTE_PORT;
			config->ext_ip = NULL;
			config->user_agent_name = DEFAULT_SOFIASIP_UA_NAME;
			config->origin = DEFAULT_SDP_ORIGIN;

			ast_log(LOG_DEBUG, "Loading MRCPv2 profile: %s\n", name);

			/* Same parameter dispatch as the MRCPv1 branch, but with the
			 * MRCPv2-specific handler first. */
			apr_hash_index_t *hicfg;

			for (hicfg = apr_hash_first(NULL, v->cfg); hicfg; hicfg = apr_hash_next(hicfg)) {
				const char *param_name;
				const char *param_value;
				const void *keyc;
				void *valc;

				apr_hash_this(hicfg, &keyc, NULL, &valc);

				param_name = (const char *)keyc;
				param_value = (const char *)valc;

				if ((param_name != NULL) && (param_value != NULL)) {
					if (strlen(param_name) == 0) {
						ast_log(LOG_ERROR, "Missing parameter name\n");
						return -1;
					}

					ast_log(LOG_DEBUG, "Loading parameter %s:%s\n", param_name, param_value);

					if ((!process_mrcpv2_config(config, sig_settings, param_name, param_value, pool)) &&
						(!process_rtp_config(client, rtp_config, rtp_settings, param_name, param_value, pool)) &&
						(!process_profile_config(mod_profile, param_name, param_value, pool))) {
						ast_log(LOG_WARNING, "Unknown parameter %s\n", param_name);
					}
				}
			}

			agent = mrcp_sofiasip_client_agent_create(name, config, pool);
			connection_agent = shared_connection_agent;
		} else {
			ast_log(LOG_ERROR, "Version must be either \"1\" or \"2\"\n");
			return -1;
		}

		/* Register the per-profile factories/settings with the client.
		 * NOTE(review): RTP/signaling settings are registered under fixed
		 * names ("RTP-Settings"/"Signalling-Settings") for every profile --
		 * presumably later registrations override earlier ones; verify
		 * against mrcp_client_*_register semantics. */
		if ((termination_factory = mpf_rtp_termination_factory_create(rtp_config, pool)) != NULL)
			mrcp_client_rtp_factory_register(client, termination_factory, name);

		mrcp_client_rtp_settings_register(client, rtp_settings, "RTP-Settings");

		mrcp_client_signaling_settings_register(client, sig_settings, "Signalling-Settings");

		if (agent != NULL)
			mrcp_client_signaling_agent_register(client, agent);

		/* Create the profile and register it. */
		if ((mprofile = mrcp_client_profile_create(NULL, agent, connection_agent, media_engine, termination_factory, rtp_settings, sig_settings, pool)) != NULL) {
			if (!mrcp_client_profile_register(client, mprofile, name))
				ast_log(LOG_WARNING, "Unable to register MRCP client profile\n");
		}
	}

	return 0;
}