static char *test_jwt_array_has_string(apr_pool_t *pool) {
    apr_array_header_t *haystack = apr_array_make(pool, 3, sizeof(const char*));
    *(const char**) apr_array_push(haystack) = "a";
    *(const char**) apr_array_push(haystack) = "b";
    *(const char**) apr_array_push(haystack) = "c";
    TST_ASSERT("jwt_array_has_string (1)", apr_jwt_array_has_string(haystack, "a"));
    TST_ASSERT("jwt_array_has_string (2)",
               apr_jwt_array_has_string(haystack, "d") == FALSE);
    return 0;
}
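/* The test above exercises a membership check over an apr_array_header_t of
 * const char * entries.  As a point of reference, a minimal sketch of such a
 * helper (hypothetical name array_has_string; not mod_auth_openidc's actual
 * apr_jwt_array_has_string implementation) could do a plain linear scan: */
#include <string.h>
#include <apr_tables.h>

static int array_has_string(const apr_array_header_t *haystack, const char *needle) {
    int i;
    for (i = 0; i < haystack->nelts; i++) {
        const char *s = APR_ARRAY_IDX(haystack, i, const char *);
        if (s != NULL && strcmp(s, needle) == 0)
            return 1;  /* found */
    }
    return 0;  /* not present */
}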
serf_context_t *serf_context_create_ex(
    void *user_baton,
    serf_socket_add_t addf,
    serf_socket_remove_t rmf,
    apr_pool_t *pool)
{
    serf_context_t *ctx = apr_pcalloc(pool, sizeof(*ctx));

    ctx->pool = pool;

    if (user_baton != NULL) {
        ctx->pollset_baton = user_baton;
        ctx->pollset_add = addf;
        ctx->pollset_rm = rmf;
    }
    else {
        /* build the pollset with a (default) number of connections */
        serf_pollset_t *ps = apr_pcalloc(pool, sizeof(*ps));

        /* ### TODO: As of APR 1.4.x apr_pollset_create_ex can return a status
           ### other than APR_SUCCESS, so we should handle it.
           ### Probably move creation of the pollset to later when we have
           ### the possibility of returning status to the caller.
         */
#ifdef BROKEN_WSAPOLL
        /* APR 1.4.x switched to using WSAPoll() on Win32, but it does not
         * properly handle errors on a non-blocking sockets (such as
         * connecting to a server where no listener is active).
         *
         * So, sadly, we must force using select() on Win32.
         *
         * http://mail-archives.apache.org/mod_mbox/apr-dev/201105.mbox/%[email protected]%3E
         */
        (void) apr_pollset_create_ex(&ps->pollset, MAX_CONN, pool, 0,
                                     APR_POLLSET_SELECT);
#else
        (void) apr_pollset_create(&ps->pollset, MAX_CONN, pool, 0);
#endif
        ctx->pollset_baton = ps;
        ctx->pollset_add = pollset_add;
        ctx->pollset_rm = pollset_rm;
    }

    /* default to a single connection since that is the typical case */
    ctx->conns = apr_array_make(pool, 1, sizeof(serf_connection_t *));

    /* Initialize progress status */
    ctx->progress_read = 0;
    ctx->progress_written = 0;

    ctx->authn_types = SERF_AUTHN_ALL;

    return ctx;
}
apr_status_t kahanaIOReadDir( apr_pool_t *p, kDir_t **newDirObj, const char *path, apr_int32_t wanted, ... )
{
    apr_dir_t *dir = NULL;
    kDir_t *dirObj = NULL;
    apr_pool_t *sp = NULL;
    apr_status_t rc;

    /* open the directory by the supplied path (dirObj is not allocated yet,
       so it must not be dereferenced here) */
    if( ( rc = apr_dir_open( &dir, path, p ) ) ){
        kahanaLogPut( NULL, NULL, "failed to apr_dir_open(): %s", STRERROR_APR( rc ) );
    }
    else if( ( rc = kahanaMalloc( p, sizeof( kDir_t ), (void**)&dirObj, &sp ) ) ){
        kahanaLogPut( NULL, NULL, "failed to kahanaMalloc(): %s", STRERROR_APR( rc ) );
    }
    else
    {
        apr_filetype_e type;
        va_list types;

        dirObj->p = sp;
        dirObj->files = apr_array_make( sp, 0, sizeof( apr_finfo_t ) );
        dirObj->path = (const char*)apr_pstrdup( sp, path );
        *newDirObj = dirObj;

        do
        {
            apr_finfo_t finfo = { 0 };

            if( ( rc = apr_dir_read( &finfo, wanted, dir ) ) ){
                kahanaLogPut( NULL, NULL, "failed to apr_dir_read(): %s", STRERROR_APR( rc ) );
                break;
            }
            else
            {
                /* keep only the entries whose file type matches one of the
                   variadic arguments (terminated by a zero value) */
                va_start( types, wanted );
                while( ( type = va_arg( types, apr_filetype_e ) ) )
                {
                    if( finfo.filetype == type ){
                        APR_ARRAY_PUSH( dirObj->files, apr_finfo_t ) = finfo;
                        break;
                    }
                }
                va_end( types );
            }
        } while( rc == APR_SUCCESS );

        /* APR_ENOENT just means the directory has been read to the end */
        rc = ( rc == APR_ENOENT ) ? APR_SUCCESS : rc;
        if( ( rc = apr_dir_close( dir ) ) ){
            kahanaLogPut( NULL, NULL, "failed to apr_dir_close(): %s", STRERROR_APR( rc ) );
        }
    }

    return rc;
}
mrcp_client_session_t* mrcp_client_session_create(mrcp_application_t *application, void *obj)
{
    apr_pool_t *pool;
    mrcp_client_session_t *session = (mrcp_client_session_t*) mrcp_session_create(
                                         sizeof(mrcp_client_session_t) - sizeof(mrcp_session_t));
    pool = session->base.pool;
    session->application = application;
    session->app_obj = obj;
    session->profile = NULL;
    session->context = NULL;
    session->terminations = apr_array_make(pool, 2, sizeof(rtp_termination_slot_t));
    session->channels = apr_array_make(pool, 2, sizeof(mrcp_channel_t*));
    session->offer = NULL;
    session->answer = NULL;
    session->active_request = NULL;
    session->request_queue = apt_list_create(pool);
    session->offer_flag_count = 0;
    session->answer_flag_count = 0;
    session->terminate_flag_count = 0;
    return session;
}
static void SoapSharedLibraries_init(SoapSharedLibraries *This, apr_pool_t *p)
{
    This->m_pSOAPLibrary = NULL;
    This->m_pPool = p;
    This->m_pSOAPLibrary = NULL;
    This->m_pLibraries = apr_array_make(p, 0, sizeof(SoapSharedLibrary **));
    This->m_bAllLibrariesLoaded = FALSE;
    This->m_pfnEntryPoint = NULL;
    This->m_pIntf = (struct apache_soap_interface *)apr_pcalloc(p,
                        sizeof(struct apache_soap_interface));
}
mapcache_dimension* mapcache_dimension_values_create(mapcache_context *ctx, apr_pool_t *pool)
{
  mapcache_dimension_values *dimension = apr_pcalloc(pool, sizeof(mapcache_dimension_values));
  dimension->dimension.type = MAPCACHE_DIMENSION_VALUES;
  dimension->values = apr_array_make(pool, 1, sizeof(char*));
  dimension->dimension.get_entries_for_value = _mapcache_dimension_values_get_entries_for_value;
  dimension->dimension.configuration_parse_xml = _mapcache_dimension_values_parse_xml;
  dimension->dimension.get_all_entries = _mapcache_dimension_values_get_all_entries;
  dimension->dimension.get_all_ogc_formatted_entries = _mapcache_dimension_values_get_all_entries;
  return (mapcache_dimension*)dimension;
}
static const char *proxies_set(cmd_parms *cmd, void *internal, const char *arg)
{
    cloudflare_config_t *config = ap_get_module_config(cmd->server->module_config,
                                                       &cloudflare_module);
    cloudflare_proxymatch_t *match;
    apr_status_t rv;
    char *ip = apr_pstrdup(cmd->temp_pool, arg);
    char *s = ap_strchr(ip, '/');
    if (s)
        *s++ = '\0';

    if (!config->proxymatch_ip)
        config->proxymatch_ip = apr_array_make(cmd->pool, 1, sizeof(*match));
    match = (cloudflare_proxymatch_t *) apr_array_push(config->proxymatch_ip);
    match->internal = internal;

    if (looks_like_ip(ip)) {
        /* Note s may be null, that's fine (explicit host) */
        rv = apr_ipsubnet_create(&match->ip, ip, s, cmd->pool);
    }
    else {
        apr_sockaddr_t *temp_sa;

        if (s) {
            return apr_pstrcat(cmd->pool, "RemoteIP: Error parsing IP ", arg,
                               " the subnet /", s, " is invalid for ",
                               cmd->cmd->name, NULL);
        }

        rv = apr_sockaddr_info_get(&temp_sa, ip, APR_UNSPEC, 0,
                                   APR_IPV4_ADDR_OK, cmd->temp_pool);
        while (rv == APR_SUCCESS) {
            apr_sockaddr_ip_get(&ip, temp_sa);
            rv = apr_ipsubnet_create(&match->ip, ip, NULL, cmd->pool);
            if (!(temp_sa = temp_sa->next))
                break;
            match = (cloudflare_proxymatch_t *) apr_array_push(config->proxymatch_ip);
            match->internal = internal;
        }
    }

    if (rv != APR_SUCCESS) {
        char msgbuf[128];
        apr_strerror(rv, msgbuf, sizeof(msgbuf));
        return apr_pstrcat(cmd->pool, "RemoteIP: Error parsing IP ", arg,
                           " (", msgbuf, " error) for ", cmd->cmd->name, NULL);
    }

    return NULL;
}
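/* At request time the proxymatch_ip array built by proxies_set() is typically
 * scanned against the peer address.  A minimal sketch of such a lookup,
 * assuming only the cloudflare_proxymatch_t fields used above and the standard
 * APR call apr_ipsubnet_test(); the helper name is hypothetical and this is
 * not mod_cloudflare's actual request-time code: */
#include <apr_network_io.h>
#include <apr_tables.h>

static int matches_trusted_proxy(const apr_array_header_t *proxymatch_ip,
                                 apr_sockaddr_t *peer_addr) {
    int i;
    if (proxymatch_ip == NULL)
        return 0;
    for (i = 0; i < proxymatch_ip->nelts; i++) {
        cloudflare_proxymatch_t *match =
            &APR_ARRAY_IDX(proxymatch_ip, i, cloudflare_proxymatch_t);
        /* apr_ipsubnet_test() returns non-zero when the address is inside
         * the subnet stored by apr_ipsubnet_create() in proxies_set(). */
        if (apr_ipsubnet_test(match->ip, peer_addr))
            return 1;
    }
    return 0;
}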
h2_io_set *h2_io_set_create(apr_pool_t *pool)
{
    h2_io_set *sp = apr_pcalloc(pool, sizeof(h2_io_set));
    if (sp) {
        sp->list = apr_array_make(pool, 100, sizeof(h2_io*));
        if (!sp->list) {
            return NULL;
        }
    }
    return sp;
}
/* This function is included for Subversion 1.0.x compatibility.  It
   has no effect for fsfs backed Subversion filesystems.  It conforms
   to the fs_library_vtable_t.bdb_logfiles() API. */
static svn_error_t *
fs_logfiles(apr_array_header_t **logfiles,
            const char *path,
            svn_boolean_t only_unused,
            apr_pool_t *pool)
{
  /* A no-op for FSFS. */
  *logfiles = apr_array_make(pool, 0, sizeof(const char *));

  return SVN_NO_ERROR;
}
static void *rpaf_create_server_cfg(apr_pool_t *p, server_rec *s) {
    rpaf_server_cfg *cfg = (rpaf_server_cfg *)apr_pcalloc(p, sizeof(rpaf_server_cfg));
    if (!cfg)
        return NULL;

    cfg->proxy_ips = apr_array_make(p, 0, sizeof(char *));
    cfg->enable = 0;
    cfg->sethostname = 0;

    return (void *)cfg;
}
void mapcache_locker_fallback_parse_xml(mapcache_context *ctx, mapcache_locker *self, ezxml_t doc) {
  mapcache_locker_fallback *lm = (mapcache_locker_fallback*)self;
  ezxml_t node;
  lm->lockers = apr_array_make(ctx->pool, 2, sizeof(mapcache_locker*));
  for(node = ezxml_child(doc, "locker"); node; node = node->next) {
    mapcache_locker *child_locker;
    mapcache_config_parse_locker(ctx, node, &child_locker);
    GC_CHECK_ERROR(ctx);
    APR_ARRAY_PUSH(lm->lockers, mapcache_locker*) = child_locker;
  }
}
static void *create_substitute_dcfg(apr_pool_t *p, char *d)
{
    subst_dir_conf *dcfg =
        (subst_dir_conf *) apr_palloc(p, sizeof(subst_dir_conf));

    dcfg->patterns = apr_array_make(p, 10, sizeof(subst_pattern_t));
    dcfg->max_line_length = AP_SUBST_MAX_LINE_LENGTH;
    dcfg->max_line_length_set = 0;
    dcfg->inherit_before = -1;
    return dcfg;
}
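/* Per-directory configs like the one above are usually paired with a merge
 * function.  The following is an illustrative sketch (not mod_substitute's
 * actual merge routine) of how parent and child pattern arrays could be
 * combined with apr_array_append(), honouring the inherit_before and
 * max_line_length_set fields initialized in create_substitute_dcfg(): */
static void *merge_substitute_dcfg_sketch(apr_pool_t *p, void *basev, void *overridesv)
{
    subst_dir_conf *base = (subst_dir_conf *) basev;
    subst_dir_conf *over = (subst_dir_conf *) overridesv;
    subst_dir_conf *merged = (subst_dir_conf *) apr_palloc(p, sizeof(subst_dir_conf));

    /* inherit_before == 1: run the parent's patterns before the child's */
    if (over->inherit_before == 1)
        merged->patterns = apr_array_append(p, base->patterns, over->patterns);
    else
        merged->patterns = apr_array_append(p, over->patterns, base->patterns);

    merged->inherit_before = over->inherit_before;
    merged->max_line_length = over->max_line_length_set
                              ? over->max_line_length : base->max_line_length;
    merged->max_line_length_set = over->max_line_length_set || base->max_line_length_set;
    return merged;
}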
h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool)
{
    h2_from_h1 *from_h1 = apr_pcalloc(pool, sizeof(h2_from_h1));
    if (from_h1) {
        from_h1->stream_id = stream_id;
        from_h1->pool = pool;
        from_h1->state = H2_RESP_ST_STATUS_LINE;
        from_h1->hlines = apr_array_make(pool, 10, sizeof(char *));
    }
    return from_h1;
}
static void pm_norepeat_config(nx_module_t *module)
{
    const nx_directive_t *curr;
    nx_pm_norepeat_conf_t *modconf;
    const char *start, *ptr, *end;
    char *field = NULL;
    apr_size_t len;

    modconf = apr_pcalloc(module->pool, sizeof(nx_pm_norepeat_conf_t));
    module->config = modconf;

    curr = module->directives;
    while ( curr != NULL )
    {
        if ( nx_module_common_keyword(curr->directive) == TRUE )
        {
        }
        else if ( strcasecmp(curr->directive, "CheckFields") == 0 )
        {
            if ( modconf->fields != NULL )
            {
                nx_conf_error(curr, "CheckFields already defined");
            }

            ptr = curr->args;
            for ( ; (ptr != NULL) && apr_isspace(*ptr); (ptr)++ );
            if ( (curr->args == NULL) || (strlen(ptr) == 0) )
            {
                nx_conf_error(curr, "value missing for CheckFields");
            }
            end = ptr + strlen(ptr);
            modconf->fields = apr_array_make(module->pool, 5, sizeof(const char *));

            while ( *ptr != '\0' )
            {
                start = ptr;
                for ( ; !((*ptr == '\0') || apr_isspace(*ptr) || (*ptr == ',')); ptr++ );
                if ( ptr > start )
                {
                    len = (apr_size_t) (ptr - start + 1);
                    field = apr_palloc(module->pool, len);
                    apr_cpystrn(field, start, len);
                }
                *((const char **) apr_array_push(modconf->fields)) = field;
                for ( ; apr_isspace(*ptr) || (*ptr == ','); ptr++ );
            }
        }
        else
        {
            nx_conf_error(curr, "invalid pm_norepeat keyword: %s", curr->directive);
        }
        curr = curr->next;
    }
    modconf->pid = (int) getpid();
}
static int l_commit (lua_State *L) {
    const char *path = (lua_gettop (L) < 1 || lua_isnil (L, 1)) ? "" : luaL_checkstring (L, 1);
    const char *message = (lua_gettop (L) < 2 || lua_isnil (L, 2)) ? "" : luaL_checkstring (L, 2);

    svn_boolean_t recursive = TRUE;
    svn_boolean_t keep_locks = FALSE;

    int itable = 3;
    if (lua_gettop (L) >= itable && lua_istable (L, itable)) {
        lua_getfield (L, itable, "recursive");
        if (lua_isboolean (L, -1)) {
            recursive = lua_toboolean (L, -1);
        }

        lua_getfield (L, itable, "keep_locks");
        if (lua_isboolean (L, -1)) {
            keep_locks = lua_toboolean (L, -1);
        }
    }

    apr_pool_t *pool;
    svn_error_t *err;
    svn_client_ctx_t *ctx;

    init_function (&ctx, &pool, L);

    path = svn_path_canonicalize (path, pool);

    apr_array_header_t *array;
    svn_commit_info_t *commit_info = NULL;

    array = apr_array_make (pool, 1, sizeof (const char *));
    (*((const char **) apr_array_push (array))) = path;

    make_log_msg_baton (&(ctx->log_msg_baton2), message, path, ctx->config, pool, L);
    ctx->log_msg_func2 = log_msg_func2;

    err = svn_client_commit3 (&commit_info, array, recursive, keep_locks, ctx, pool);
    IF_ERROR_RETURN (err, pool, L);

    if (commit_info == NULL) {
        lua_pushnil (L);
    } else {
        lua_pushinteger (L, commit_info->revision);
    }

    svn_pool_destroy (pool);

    return 1;
}
static apr_array_header_t* _mapcache_dimension_values_get_all_entries(mapcache_context *ctx, mapcache_dimension *dim,
    mapcache_tileset *tileset, mapcache_extent *extent, mapcache_grid *grid) {
  mapcache_dimension_values *dimension = (mapcache_dimension_values*)dim;
  apr_array_header_t *ret = apr_array_make(ctx->pool, dimension->values->nelts, sizeof(char*));
  int i;
  for(i = 0; i < dimension->values->nelts; i++) {
    APR_ARRAY_PUSH(ret, char*) = apr_pstrdup(ctx->pool, APR_ARRAY_IDX(dimension->values, i, char*));
  }
  return ret;
}
/* Setup the client context, ready to connect and send requests to a
   server. */
static apr_status_t setup(test_baton_t **tb_p,
                          serf_connection_setup_t conn_setup,
                          const char *serv_url,
                          int use_proxy,
                          apr_size_t message_count,
                          apr_pool_t *pool)
{
    test_baton_t *tb;
    apr_status_t status;

    tb = apr_pcalloc(pool, sizeof(*tb));
    *tb_p = tb;

    tb->pool = pool;
    tb->context = serf_context_create(pool);
    tb->bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    tb->accepted_requests = apr_array_make(pool, message_count, sizeof(int));
    tb->sent_requests = apr_array_make(pool, message_count, sizeof(int));
    tb->handled_requests = apr_array_make(pool, message_count, sizeof(int));

    tb->serv_url = serv_url;
    tb->conn_setup = conn_setup;

    status = default_server_address(&tb->serv_addr, pool);
    if (status != APR_SUCCESS)
        return status;

    if (use_proxy) {
        status = default_proxy_address(&tb->proxy_addr, pool);
        if (status != APR_SUCCESS)
            return status;

        /* Configure serf to use the proxy server */
        serf_config_proxy(tb->context, tb->proxy_addr);
    }

    status = use_new_connection(tb, pool);

    return status;
}
int scan_mounts(apr_pool_t *p)
{
    FILE *mounts;
    char procline[256];
    char mount[128], device[128], type[32], mode[128];
    int rc;
    fs_info_t *fs;

    filesystems = apr_array_make(p, 2, sizeof(fs_info_t));
    metric_info = apr_array_make(p, 2, sizeof(Ganglia_25metric));

    mounts = fopen(MOUNTS, "r");
    if (!mounts) {
        debug_msg("Df Error: could not open mounts file %s. Are we on the right OS?\n", MOUNTS);
        return -1;
    }

    while ( fgets(procline, sizeof(procline), mounts) ) {
        rc = sscanf(procline, "%s %s %s %s ", device, mount, type, mode);
        if (!rc) continue;
        //if (!strncmp(mode, "ro", 2)) continue;
        if (remote_mount(device, type)) continue;
        if (strncmp(device, "/dev/", 5) != 0 &&
            strncmp(device, "/dev2/", 6) != 0) continue;

        fs = apr_array_push(filesystems);
        bzero(fs, sizeof(fs_info_t));
        fs->device = apr_pstrdup(p, device);
        fs->mount_point = apr_pstrdup(p, mount);
        fs->fs_type = apr_pstrdup(p, type);
        set_ganglia_name(p, fs);
        create_metrics_for_device(p, metric_info, fs);

        //thispct = device_space(mount, device, total_size, total_free);
        debug_msg("Found device %s (%s)", device, type);
    }
    fclose(mounts);

    return 0;
}
apr_array_header_t *source_files_names(term_t info, apr_pool_t *pool)
{
    apr_array_header_t *files = apr_array_make(pool, 1, sizeof(const char *));
    term_t cons = info;
    while (is_cons(cons))
    {
        term_box_t *cb = peel(cons);
        APR_ARRAY_PUSH(files, const char *) = ltoz(cb->cons.head, pool);
        cons = cb->cons.tail;
    }
    return files;
}
/* Parse hunks from APR_FILE and store them in PATCH->HUNKS.
 * Parsing stops if no valid next hunk can be found.
 * If IGNORE_WHITESPACE is TRUE, lines without
 * leading spaces will be treated as context lines.
 * Allocate results in RESULT_POOL.
 * Use SCRATCH_POOL for temporary allocations. */
static svn_error_t *
parse_hunks(svn_patch_t *patch, apr_file_t *apr_file,
            svn_boolean_t ignore_whitespace,
            apr_pool_t *result_pool, apr_pool_t *scratch_pool)
{
  svn_diff_hunk_t *hunk;
  svn_boolean_t is_property;
  const char *last_prop_name;
  const char *prop_name;
  svn_diff_operation_kind_t prop_operation;
  apr_pool_t *iterpool;

  last_prop_name = NULL;

  patch->hunks = apr_array_make(result_pool, 10, sizeof(svn_diff_hunk_t *));
  patch->prop_patches = apr_hash_make(result_pool);
  iterpool = svn_pool_create(scratch_pool);
  do
    {
      svn_pool_clear(iterpool);

      SVN_ERR(parse_next_hunk(&hunk, &is_property, &prop_name, &prop_operation,
                              patch, apr_file, ignore_whitespace, result_pool,
                              iterpool));

      if (hunk && is_property)
        {
          if (! prop_name)
            prop_name = last_prop_name;
          else
            last_prop_name = prop_name;

          /* Skip svn:mergeinfo properties.
           * Mergeinfo data cannot be represented as a hunk and
           * is therefore stored in PATCH itself. */
          if (strcmp(prop_name, SVN_PROP_MERGEINFO) == 0)
            continue;

          SVN_ERR(add_property_hunk(patch, prop_name, hunk, prop_operation,
                                    result_pool));
        }
      else if (hunk)
        {
          APR_ARRAY_PUSH(patch->hunks, svn_diff_hunk_t *) = hunk;
          last_prop_name = NULL;
        }
    }
  while (hunk);

  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
apr_status_t exec_self( int argc, const char *argv[] )
{
    apr_status_t rv = APR_SUCCESS;
    apr_array_header_t *dpaths = get_property_array( "hashdot.vm.libpath" );

    if( dpaths == NULL ) return rv;

    char * ldpenv = NULL;
    apr_env_get( &ldpenv, LIB_PATH_VAR, _mp );

    apr_array_header_t *newpaths =
        apr_array_make( _mp, 8, sizeof( const char* ) );

    int i;
    for( i = 0; i < dpaths->nelts; i++ ) {
        const char *path = ((const char **) dpaths->elts )[i];
        if( !ldpenv || !strstr( ldpenv, path ) ) {
            *( (const char **) apr_array_push( newpaths ) ) = path;
            DEBUG( "New path to add: %s", path );
        }
    }

    // Need to set LD_LIBRARY_PATH, new paths found.
    if( newpaths->nelts > 0 ) {
        if( ldpenv ) {
            *( (const char **) apr_array_push( newpaths ) ) = ldpenv;
        }
        ldpenv = apr_array_pstrcat( _mp, newpaths, ':' );
        DEBUG( "New %s = [%s]", LIB_PATH_VAR, ldpenv );
        rv = apr_env_set( LIB_PATH_VAR, ldpenv, _mp );

        const char *exe_name = NULL;
        if( rv == APR_SUCCESS ) {
            rv = find_self_exe( &exe_name );
        }

        // Exec using the linux(-only?) /proc/self/exe link to self,
        // instead of argv[0], since the latter will not specify a path
        // when the initial call is made via PATH, and since execve won't
        // itself look at PATH.
        // Note: Can't use apr_proc_create for this, since it always
        // forks first.
        if( rv == APR_SUCCESS ) {
            DEBUG( "Exec'ing self as %s", argv[0] );
            execv( exe_name, (char * const *) argv );
            rv = APR_FROM_OS_ERROR( errno ); // shouldn't return from execv call
        }
    }

    return rv;
}
static void *create_authnz_external_dir_config(apr_pool_t *p, char *d)
{
    authnz_external_dir_config_rec *dir = (authnz_external_dir_config_rec *)
        apr_palloc(p, sizeof(authnz_external_dir_config_rec));

    dir->auth_name = apr_array_make(p, 2, sizeof(const char *)); /* no default */
    dir->group_name = NULL;           /* no default */
    dir->context = NULL;              /* no default */
    dir->groupsatonce = 1;            /* default to on */
    dir->providecache = 0;            /* default to off */

    return dir;
}
/*
 * return all supported encryption algorithms
 */
apr_array_header_t *oidc_jose_jwe_supported_encryptions(apr_pool_t *pool) {
    apr_array_header_t *result = apr_array_make(pool, 5, sizeof(const char*));
    *(const char**) apr_array_push(result) = "A128CBC-HS256";
    *(const char**) apr_array_push(result) = "A192CBC-HS384";
    *(const char**) apr_array_push(result) = "A256CBC-HS512";
#if (OIDC_JOSE_GCM_SUPPORT)
    *(const char**) apr_array_push(result) = "A128GCM";
    *(const char**) apr_array_push(result) = "A192GCM";
    *(const char**) apr_array_push(result) = "A256GCM";
#endif
    return result;
}
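/* A common way to consume the list above is to flatten it for logging or
 * metadata output.  A small usage sketch relying only on the standard APR
 * call apr_array_pstrcat(); the wrapper name is illustrative, not part of
 * the library: */
#include <apr_strings.h>

static const char *oidc_join_supported_encryptions(apr_pool_t *pool) {
    apr_array_header_t *encs = oidc_jose_jwe_supported_encryptions(pool);
    /* join all (const char *) elements into one comma-separated string */
    return apr_array_pstrcat(pool, encs, ',');
}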
static int ex_metric_init (apr_pool_t *p)
{
    int i;
    Ganglia_25metric *gmi;

    init_cpu_info ();

    /* Allocate a pool that will be used by this module */
    apr_pool_create(&pool, p);

    metric_info = apr_array_make(pool, 2, sizeof(Ganglia_25metric));

    /* Initialize each metric */
    cpu_user = init_metric (pool, metric_info, cpu_count, "multicpu_user",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the user level");
    cpu_nice = init_metric (pool, metric_info, cpu_count, "multicpu_nice",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the nice level");
    cpu_system = init_metric (pool, metric_info, cpu_count, "multicpu_system",
                              "Percentage of CPU utilization that occurred while "
                              "executing at the system level");
    cpu_idle = init_metric (pool, metric_info, cpu_count, "multicpu_idle",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the idle level");
    cpu_wio = init_metric (pool, metric_info, cpu_count, "multicpu_wio",
                           "Percentage of CPU utilization that occurred while "
                           "executing at the wio level");
    cpu_intr = init_metric (pool, metric_info, cpu_count, "multicpu_intr",
                            "Percentage of CPU utilization that occurred while "
                            "executing at the intr level");
    cpu_sintr = init_metric (pool, metric_info, cpu_count, "multicpu_sintr",
                             "Percentage of CPU utilization that occurred while "
                             "executing at the sintr level");

    /* Add a terminator to the array and replace the empty static metric
       definition array with the dynamic array that we just created */
    gmi = apr_array_push(metric_info);
    memset (gmi, 0, sizeof(*gmi));

    multicpu_module.metrics_info = (Ganglia_25metric *)metric_info->elts;

    for (i = 0; multicpu_module.metrics_info[i].name != NULL; i++) {
        /* Initialize the metadata storage for each of the metrics and then
         * store one or more key/value pairs. The define MGROUPS defines
         * the key for the grouping attribute. */
        MMETRIC_INIT_METADATA(&(multicpu_module.metrics_info[i]), p);
        MMETRIC_ADD_METADATA(&(multicpu_module.metrics_info[i]), MGROUP, "cpu");
    }

    return 0;
}
/* Conforms to svn_ra_serf__xml_opened_t  */
static svn_error_t *
blame_opened(svn_ra_serf__xml_estate_t *xes,
             void *baton,
             int entered_state,
             const svn_ra_serf__dav_props_t *tag,
             apr_pool_t *scratch_pool)
{
  blame_context_t *blame_ctx = baton;

  if (entered_state == FILE_REV)
    {
      apr_pool_t *state_pool = svn_ra_serf__xml_state_pool(xes);

      /* Child elements will store properties in these structures.  */
      blame_ctx->rev_props = apr_hash_make(state_pool);
      blame_ctx->prop_diffs = apr_array_make(state_pool, 5, sizeof(svn_prop_t));
      blame_ctx->state_pool = state_pool;

      /* Clear this, so we can detect the absence of a TXDELTA.  */
      blame_ctx->stream = NULL;
    }
  else if (entered_state == TXDELTA)
    {
      apr_pool_t *state_pool = svn_ra_serf__xml_state_pool(xes);
      apr_hash_t *gathered = svn_ra_serf__xml_gather_since(xes, FILE_REV);
      const char *path;
      const char *rev;
      const char *merged_revision;
      svn_txdelta_window_handler_t txdelta;
      void *txdelta_baton;

      path = svn_hash_gets(gathered, "path");
      rev = svn_hash_gets(gathered, "rev");
      merged_revision = svn_hash_gets(gathered, "merged-revision");

      SVN_ERR(blame_ctx->file_rev(blame_ctx->file_rev_baton,
                                  path, SVN_STR_TO_REV(rev),
                                  blame_ctx->rev_props,
                                  merged_revision != NULL,
                                  &txdelta, &txdelta_baton,
                                  blame_ctx->prop_diffs,
                                  state_pool));

      blame_ctx->stream = svn_base64_decode(
                            svn_txdelta_parse_svndiff(txdelta, txdelta_baton,
                                                      TRUE /* error_on_early_close */,
                                                      state_pool),
                            state_pool);
    }

  return SVN_NO_ERROR;
}
apr_array_header_t * CTSVNPathList::MakePathArray (apr_pool_t *pool) const
{
    apr_array_header_t *targets = apr_array_make (pool, GetCount(), sizeof(const char *));

    for (int nItem = 0; nItem < GetCount(); nItem++)
    {
        const char * target = m_paths[nItem].GetSVNApiPath(pool);
        (*((const char **) apr_array_push (targets))) = target;
    }
    return targets;
}
JNIEXPORT void JNICALL Java_Subversion_openSession(JNIEnv * env, jobject obj,
    jstring repoURL)
{
    svn_error_t * err;

    const char * repoURLC = (*env)->GetStringUTFChars(env, repoURL, 0);

    apr_initialize();

    pool = svn_pool_create(0);
    if (!pool) abort();

    err = svn_config_ensure(0, pool);
    if (err) barf(err);

    apr_hash_t * config;
    err = svn_config_get_config(&config, 0, pool);
    if (err) barf(err);

    /* Set up authentication info from the config files (copied from
       subversion/clients/cmdline/main.c). */
    apr_array_header_t * providers =
        apr_array_make(pool, 10, sizeof (svn_auth_provider_object_t *));

    svn_auth_provider_object_t * provider;

    svn_client_get_simple_provider(&provider, pool);
    APR_ARRAY_PUSH(providers, svn_auth_provider_object_t *) = provider;
    svn_client_get_username_provider(&provider, pool);
    APR_ARRAY_PUSH(providers, svn_auth_provider_object_t *) = provider;

    svn_client_get_ssl_server_trust_file_provider(&provider, pool);
    APR_ARRAY_PUSH(providers, svn_auth_provider_object_t *) = provider;
    svn_client_get_ssl_client_cert_file_provider(&provider, pool);
    APR_ARRAY_PUSH(providers, svn_auth_provider_object_t *) = provider;
    svn_client_get_ssl_client_cert_pw_file_provider(&provider, pool);
    APR_ARRAY_PUSH(providers, svn_auth_provider_object_t *) = provider;

    /* Get the plugin that handles the protocol for `repoURL'. */
    err = svn_ra_init_ra_libs(&ra_baton, pool);
    if (err) barf(err);

    err = svn_ra_get_ra_library(&ra_lib, ra_baton, repoURLC, pool);
    if (err) barf(err);

    memset(&callbacks, 0, sizeof callbacks);
    svn_auth_open(&callbacks.auth_baton, providers, pool);

    /* Open a session to `repoURL'. */
    err = ra_lib->open(&ra_session, repoURLC, &callbacks, 0, config, pool);
    if (err) barf(err);

    (*env)->ReleaseStringUTFChars(env, repoURL, repoURLC);
}
apr_array_header_t *svn_support_blame_call(char *file_path, int revision, apr_pool_t *subpool)
{
    svn_error_t *err;
    svn_opt_revision_t peg_rev, start, end;
    svn_diff_file_options_t *diff_options;

    /* -- Set up the revisions -- */
    if (revision != -1) {
        peg_rev.kind = end.kind = svn_opt_revision_number;
        peg_rev.value.number = end.value.number = revision;
    }
    else {
        peg_rev.kind = svn_opt_revision_unspecified;
        end.kind = svn_opt_revision_head;
    }
    start.kind = svn_opt_revision_number;
    start.value.number = 1;

    /* -- Set up the diff options -- */
    diff_options = svn_diff_file_options_create(pool);

    /* -- Set up the result array and result buffer -- */
    apr_array_header_t *list_result = apr_array_make(pool, 1, sizeof (const char *));
    svn_stringbuf_t *res = svn_stringbuf_create("", pool);

    err = svn_client_blame4(file_path,
                            &peg_rev,
                            &start,
                            &end,
                            diff_options,
                            FALSE,   /* ignore_mime_type */
                            FALSE,   /* include_merged_revisions */
                            blame_callback,
                            res,
                            ctx,
                            pool);
    if (err) {
        svn_handle_error2(err, stderr, FALSE, "svn_support_blame: ");
        svn_pool_destroy(subpool);
        return NULL;
    }

    svn_cstring_split_append(list_result, res->data, "\n", FALSE, pool);
    svn_pool_destroy(subpool);
    return list_result;
}
svn_error_t *
svn_repos_fs_lock(svn_lock_t **lock,
                  svn_repos_t *repos,
                  const char *path,
                  const char *token,
                  const char *comment,
                  svn_boolean_t is_dav_comment,
                  apr_time_t expiration_date,
                  svn_revnum_t current_rev,
                  svn_boolean_t steal_lock,
                  apr_pool_t *pool)
{
  svn_error_t *err;
  svn_fs_access_t *access_ctx = NULL;
  const char *username = NULL;
  const char *new_token;
  apr_array_header_t *paths;

  /* Setup an array of paths in anticipation of the ra layers handling
     multiple locks in one request (1.3 most likely).  This is only
     used by svn_repos__hooks_post_lock. */
  paths = apr_array_make(pool, 1, sizeof(const char *));
  APR_ARRAY_PUSH(paths, const char *) = path;

  SVN_ERR(svn_fs_get_access(&access_ctx, repos->fs));
  if (access_ctx)
    SVN_ERR(svn_fs_access_get_username(&username, access_ctx));

  if (! username)
    return svn_error_createf
      (SVN_ERR_FS_NO_USER, NULL,
       "Cannot lock path '%s', no authenticated username available.", path);

  /* Run pre-lock hook.  This could throw error, preventing
     svn_fs_lock() from happening. */
  SVN_ERR(svn_repos__hooks_pre_lock(repos, &new_token, path, username,
                                    comment, steal_lock, pool));
  if (*new_token)
    token = new_token;

  /* Lock. */
  SVN_ERR(svn_fs_lock(lock, repos->fs, path, token, comment, is_dav_comment,
                      expiration_date, current_rev, steal_lock, pool));

  /* Run post-lock hook. */
  if ((err = svn_repos__hooks_post_lock(repos, paths, username, pool)))
    return svn_error_create
      (SVN_ERR_REPOS_POST_LOCK_HOOK_FAILED, err,
       "Lock succeeded, but post-lock hook failed");

  return SVN_NO_ERROR;
}
static int change_remote_ip(request_rec *r) {
    const char *fwdvalue;
    char *val;
    rpaf_server_cfg *cfg = (rpaf_server_cfg *)ap_get_module_config(r->server->module_config,
                                                                   &rpaf_module);

    if (!cfg->enable)
        return DECLINED;

    if (is_in_array(r->connection->remote_ip, cfg->proxy_ips) == 1) {
        /* check if cfg->headername is set and if it is use
           that instead of X-Forwarded-For by default */
        if (cfg->headername && (fwdvalue = apr_table_get(r->headers_in, cfg->headername))) {
            /* got the configured header */
        } else if ((fwdvalue = apr_table_get(r->headers_in, "X-Forwarded-For"))) {
            /* fall back to X-Forwarded-For */
        } else {
            return DECLINED;
        }

        if (fwdvalue) {
            rpaf_cleanup_rec *rcr = (rpaf_cleanup_rec *)apr_pcalloc(r->pool, sizeof(rpaf_cleanup_rec));
            apr_array_header_t *arr = apr_array_make(r->pool, 0, sizeof(char*));

            while (*fwdvalue && (val = ap_get_token(r->pool, &fwdvalue, 1))) {
                *(char **)apr_array_push(arr) = apr_pstrdup(r->pool, val);
                if (*fwdvalue != '\0')
                    ++fwdvalue;
            }

            rcr->old_ip = apr_pstrdup(r->connection->pool, r->connection->remote_ip);
            rcr->r = r;
            apr_pool_cleanup_register(r->pool, (void *)rcr, rpaf_cleanup, apr_pool_cleanup_null);

            /* use the last entry of the forwarded-for list as the client address */
            r->connection->remote_ip = apr_pstrdup(r->connection->pool,
                                                   ((char **)arr->elts)[((arr->nelts)-1)]);
            r->connection->remote_addr->sa.sin.sin_addr.s_addr =
                apr_inet_addr(r->connection->remote_ip);

            if (cfg->sethostname) {
                const char *hostvalue;
                if ((hostvalue = apr_table_get(r->headers_in, "X-Forwarded-Host"))) {
                    /* 2.0 proxy frontend or 1.3 => 1.3.25 proxy frontend */
                    apr_table_set(r->headers_in, "Host", apr_pstrdup(r->pool, hostvalue));
                    r->hostname = apr_pstrdup(r->pool, hostvalue);
                    ap_update_vhost_from_headers(r);
                } else if ((hostvalue = apr_table_get(r->headers_in, "X-Host"))) {
                    /* 1.3 proxy frontend with mod_proxy_add_forward */
                    apr_table_set(r->headers_in, "Host", apr_pstrdup(r->pool, hostvalue));
                    r->hostname = apr_pstrdup(r->pool, hostvalue);
                    ap_update_vhost_from_headers(r);
                }
            }
        }
    }

    return DECLINED;
}