/*
 * Stub command handler for the LoadFile directive on platforms that
 * lack DSO support: log a startup warning and accept the directive
 * as a no-op so configurations remain parseable.
 */
static const char *load_file(cmd_parms *cmd, void *dummy,
                             const char *filename)
{
    ap_log_perror(APLOG_MARK, APLOG_STARTUP, 0, cmd->pool,
                  "WARNING: LoadFile not supported on this platform");
    return NULL;
}
/*
 * Pre-config hook: expand Define variables inside a single directive.
 *
 * Scans current->args for occurrences of
 *     $[a-zA-Z][:_a-zA-Z0-9]*   and   ${[a-zA-Z][:_a-zA-Z0-9]*}
 * (located by DefineIndex), looks each variable up with DefineFetch,
 * and splices the value in place with DefineExpand.  The directive's
 * argument string is only copied into a fresh MAX_STRING_LEN buffer
 * on the first match, so directives without variables are untouched.
 *
 * On an undefined variable or expansion error the error is logged and
 * the directive's args are left unmodified (cpBuf is discarded).
 * Always returns OK so config processing continues.
 */
static int DefineRewriteHook(apr_pool_t *pconf, apr_pool_t *plog,
                             ap_directive_t *current)
{
    char *cpBuf;
    char *cpLine;
    int pos;
    int len;
    char *cpError;
    char *cpVar;
    char *cpVal;
    const char *pFilename = current ? current->filename : NULL;

    /*
     * Search for:
     * ....\$[a-zA-Z][:_a-zA-Z0-9]*....
     * ....\${[a-zA-Z][:_a-zA-Z0-9]*}....
     */
    cpBuf = NULL;
    cpLine = (char *)current->args;
    pos = 0;
    while (DefineIndex(pconf, pFilename, cpLine, &pos, &len, &cpVar)) {
#ifdef DEFINE_DEBUG
        /* Debug aid: print the line with a caret marker under the match. */
        {
            char prefix[1024];
            char marker[1024];
            int i;
            for (i = 0; i < pos; i++)
                prefix[i] = ' ';
            prefix[i] = '\0';
            for (i = 0; i < len; i++)
                marker[i] = '^';
            marker[i] = '\0';
            fprintf(stderr,
                    "Found variable `%s' (pos: %d, len: %d)\n"
                    " %s\n"
                    " %s%s\n", cpVar, pos, len, cpLine, prefix, marker);
        }
#endif
        /* Copy-on-first-write: only duplicate the args once we know
         * at least one variable needs expanding. */
        if (cpBuf == NULL) {
            cpBuf = apr_palloc(pconf, MAX_STRING_LEN);
            apr_cpystrn(cpBuf, current->args, MAX_STRING_LEN);
            cpLine = cpBuf;
        }
        if ((cpVal = DefineFetch(pconf, pFilename, cpVar)) == NULL) {
            ap_log_perror(APLOG_MARK, APLOG_ERR, APR_SUCCESS, plog,
                          "mod_define: Variable '%s' not defined: file %s, line %d",
                          cpVar, current->filename, current->line_num);
            /* abandon the partially expanded buffer */
            cpBuf = NULL;
            break;
        }
        if ((cpError = DefineExpand(pconf, cpLine + pos, len, cpVal)) != NULL) {
            ap_log_perror(APLOG_MARK, APLOG_ERR, APR_SUCCESS, plog,
                          "mod_define: %s: file %s, line %d",
                          cpError, current->filename, current->line_num);
            cpBuf = NULL;
            break;
        }
    }
    /* Install the expanded copy only if every expansion succeeded. */
    if (cpBuf) {
        current->args = cpBuf;
    }
    return OK;
}
/*
 * Copy up to maxlen bytes worth of buckets from brigade 'from' into
 * brigade 'to' without consuming 'from'.  'msg' is only used by the
 * optional LOG_BUCKETS tracing.
 *
 * If both brigades share a bucket allocator, buckets are duplicated
 * via apr_bucket_copy().  Otherwise EOS/FLUSH metadata buckets are
 * recreated on 'to''s allocator, other metadata buckets are dropped,
 * and data buckets are read (blocking) and rewritten into 'to'.
 *
 * Returns APR_SUCCESS, or the first error from copy/read/write.
 */
apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from,
                          apr_size_t maxlen, const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;

    (void)msg;
    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end, *cpy;

        /* 'end' is the first bucket NOT to include (maxlen cutoff). */
        status = last_not_included(from, maxlen, 0, 0, &end);
        if (status != APR_SUCCESS) {
            return status;
        }
        for (b = APR_BRIGADE_FIRST(from);
             b != APR_BRIGADE_SENTINEL(from) && b != end;
             b = APR_BUCKET_NEXT(b)) {
            if (same_alloc) {
                /* same allocator: shallow bucket copy is safe */
                status = apr_bucket_copy(b, &cpy);
                if (status != APR_SUCCESS) {
                    break;
                }
                APR_BRIGADE_INSERT_TAIL(to, cpy);
            }
            else {
                if (APR_BUCKET_IS_METADATA(b)) {
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else {
                    /* different allocators: deep-copy the payload */
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_copy: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length,
                                      (long)from, (long)from->p,
                                      (long)to, (long)to->p);
#endif
                    }
                }
            }
        }
    }
    return status;
}
/*
 * Command handler for LoadModule: load a module DSO and hook it into
 * the server.
 *
 * Steps: resolve the path relative to ServerRoot, skip if a module of
 * the same name was already dynamically loaded, scan the preloaded
 * (statically linked) module list (in apache2nginx a match does NOT
 * abort the load), dlopen the file, resolve the module structure
 * symbol, verify its magic cookie, register it with the core, arrange
 * unloading via a pool cleanup, and run its per-module configuration.
 *
 * Returns NULL on success or an error string allocated from cmd->pool.
 */
static const char *load_module(cmd_parms *cmd, void *dummy,
                               const char *modname, const char *filename)
{
    apr_dso_handle_t *modhandle;
    apr_dso_handle_sym_t modsym;
    module *modp;
    const char *szModuleFile = ap_server_root_relative(cmd->pool, filename);
    so_server_conf *sconf;
    ap_module_symbol_t *modi;
    ap_module_symbol_t *modie;
    int i;
    const char *error;

    /* we need to setup this value for dummy to make sure that we don't try
     * to add a non-existant tree into the build when we return to
     * execute_now.
     */
    *(ap_directive_t **)dummy = NULL;

    if (!szModuleFile) {
        return apr_pstrcat(cmd->pool, "Invalid LoadModule path ",
                           filename, NULL);
    }

    /*
     * check for already existing module
     * If it already exists, we have nothing to do
     * Check both dynamically-loaded modules and statically-linked modules.
     */
    sconf = (so_server_conf *)ap_get_module_config(cmd->server->module_config,
                                                   &so_module);
    modie = (ap_module_symbol_t *)sconf->loaded_modules->elts;
    for (i = 0; i < sconf->loaded_modules->nelts; i++) {
        modi = &modie[i];
        if (modi->name != NULL && strcmp(modi->name, modname) == 0) {
            ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool,
                          "module %s is already loaded, skipping", modname);
            return NULL;
        }
    }
    for (i = 0; ap_preloaded_modules[i]; i++) {
        const char *preload_name;
        apr_size_t preload_len;
        apr_size_t thismod_len;

        modp = ap_preloaded_modules[i];

        /* make sure we're comparing apples with apples
         * make sure name of preloaded module is mod_FOO.c
         * make sure name of structure being loaded is FOO_module
         */
        if (memcmp(modp->name, "mod_", 4)) {
            continue;
        }
        preload_name = modp->name + strlen("mod_");
        /* -2 strips the trailing ".c" from the preloaded source name */
        preload_len = strlen(preload_name) - 2;

        if (strlen(modname) <= strlen("_module")) {
            continue;
        }
        thismod_len = strlen(modname) - strlen("_module");
        if (strcmp(modname + thismod_len, "_module")) {
            continue;
        }
        if (thismod_len != preload_len) {
            continue;
        }
        if (!memcmp(modname, preload_name, preload_len)) {
            /* in apache2nginx, we don't return an error for a module
             * that is also built in -- just keep going. */
            /*return apr_pstrcat(cmd->pool, "module ", modname, " is built-in and can't be loaded", NULL);*/
            continue;
        }
    }

    modi = apr_array_push(sconf->loaded_modules);
    modi->name = modname;

    /*
     * Load the file into the Apache address space
     */
    if (apr_dso_load(&modhandle, szModuleFile, cmd->pool) != APR_SUCCESS) {
        char my_error[256];
        return apr_pstrcat(cmd->pool, "Cannot load ", szModuleFile,
                           " into server: ",
                           apr_dso_error(modhandle, my_error, sizeof(my_error)),
                           NULL);
    }
    ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, cmd->pool,
                  "loaded module %s", modname);

    /*
     * Retrieve the pointer to the module structure through the module name:
     * First with the hidden variant (prefix `AP_') and then with the plain
     * symbol name.
     */
    if (apr_dso_sym(&modsym, modhandle, modname) != APR_SUCCESS) {
        char my_error[256];
        return apr_pstrcat(cmd->pool, "Can't locate API module structure `",
                           modname, "' in file ", szModuleFile, ": ",
                           apr_dso_error(modhandle, my_error, sizeof(my_error)),
                           NULL);
    }
    modp = (module *)modsym;
    modp->dynamic_load_handle = (apr_dso_handle_t *)modhandle;
    modi->modp = modp;

    /*
     * Make sure the found module structure is really a module structure
     */
    if (modp->magic != MODULE_MAGIC_COOKIE) {
        return apr_psprintf(cmd->pool, "API module structure '%s' in file %s "
                            "is garbled - expected signature %08lx but saw "
                            "%08lx - perhaps this is not an Apache module DSO, "
                            "or was compiled for a different Apache version?",
                            modname, szModuleFile,
                            MODULE_MAGIC_COOKIE, modp->magic);
    }

    /*
     * Add this module to the Apache core structures
     */
    error = ap_add_loaded_module(modp, cmd->pool);
    if (error) {
        return error;
    }

    /*
     * Register a cleanup in the config apr_pool_t (normally pconf). When
     * we do a restart (or shutdown) this cleanup will cause the
     * shared object to be unloaded.
     */
    apr_pool_cleanup_register(cmd->pool, modi, unload_module,
                              apr_pool_cleanup_null);

    /*
     * Finally we need to run the configuration process for the module
     */
    ap_single_module_configure(cmd->pool, cmd->server, modp);

    return NULL;
}
/*
 * Initialize one jaxer worker: create its private pools and mutex,
 * size its connection pool from the MPM thread count, resolve the
 * remote socket address and set up the initial connections.
 *
 * min_size < 0 selects a default of 10% of the MPM thread pool
 * (at least 1); both min and max are clamped to [1, mpm_threads]
 * with max >= min.  Returns APR_SUCCESS or the first failing status
 * (pool/mutex creation, address resolution, or jxr_conn_setup).
 */
apr_status_t jxr_init_worker(jaxer_worker *aworker, apr_pool_t *p,
                             int min_size, int max_size,
                             int acquire_conn_timeout)
{
    apr_status_t rv;
    int is_threaded;
    int mpm_threads = 1;    /* fallback when the MPM is not threaded */

    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: initializing worker %s", aworker->name);

    rv = apr_pool_create(&aworker->pool, p);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, p,
                      "mod_jaxer: Failed to create subpool for jaxerworker %s",
                      aworker->name);
        return rv;
    }
    rv = apr_pool_create(&aworker->res_pool, p);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, p,
                      "mod_jaxer: Failed to create res pool for jaxerworker %s",
                      aworker->name);
        return rv;
    }
    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: created pools for worker %s", aworker->name);

#if APR_HAS_THREADS
    rv = apr_thread_mutex_create (&aworker->mutex, APR_THREAD_MUTEX_DEFAULT, p);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, p,
                      "mod_jaxer: Failed to create mutex for jaxerworker %s",
                      aworker->name);
        return rv;
    }
    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: mutex created worker %s", aworker->name);
#endif

#ifndef APACHE1_3
    /* Set default connection cache size for multi-threaded MPMs */
    if (ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded) == APR_SUCCESS
        && is_threaded != AP_MPMQ_NOT_SUPPORTED) {
        if (ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads) != APR_SUCCESS)
            mpm_threads = 1;
    }
#endif
    // ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, p, "mod_jaxer: number of threads=%d for %s", mpm_threads, aworker->name);

    /*
     * cache_size >= min_connections && cache_size <= mpm_threads
     * min_connections > mpm_threads/10
     */
    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: connection pool config for %s: (min=%d max=%d)",
                  aworker->name, min_size, max_size);
    if (min_size < 0) {
        /* Default is 10% of the thread pool size; min=1. */
        min_size = mpm_threads/10;
        if (min_size <= 0)
            min_size = 1;
    }else if (min_size > mpm_threads)
        min_size = mpm_threads;

    if (max_size <= 0 || max_size > mpm_threads)
        max_size = mpm_threads;
    if (max_size < min_size)
        max_size = min_size;

    aworker->acquire_timeout = acquire_conn_timeout;
    aworker->nmin = min_size;
    aworker->nkeep = max_size;   /* keep == max: pool never shrinks below max */
    aworker->nmax = max_size;
    aworker->exptime = 0;        /* connections never expire by age */

    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: using connection pool config for %s: (min=%d max=%d acquire-timeout=%d)",
                  aworker->name, aworker->nmin, aworker->nmax,
                  aworker->acquire_timeout);

    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: getting socket address for worker %s (host=%s port=%d)",
                  aworker->name, aworker->hostname, aworker->port_number);
    /* IPv4 only (APR_INET); the IPv6-capable variant is kept below. */
    rv = apr_sockaddr_info_get(&aworker->remote_sa, aworker->hostname,
                               APR_INET, aworker->port_number, 0, p);
    //rv = apr_sockaddr_info_get(&aworker->remote_sa, aworker->hostname, APR_UNSPEC, aworker->port_number, APR_IPV4_ADDR_OK, p);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, p,
                      "mod_jaxer: apr_sockaddr_info_get failed with remote address %s and ip %d: return code=%d",
                      aworker->hostname, aworker->port_number, rv);
        return rv;
    }

    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: call jxr_conn_setup for worker %s", aworker->name);
    if ((rv = jxr_conn_setup(aworker)) != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, p,
                      "mod_jaxer: jxr_conn_setup failed for worker %s",
                      aworker->name);
        return rv;
    }

    ap_log_perror(APLOG_MARK, APLOG_ZDEBUG, 0, p,
                  "mod_jaxer: initialized worker %s", aworker->name);
    return rv;
}
/*
 * Move up to maxlen bytes worth of buckets from brigade 'from' to
 * brigade 'to', consuming them from 'from'.
 *
 * pfile_handles_allowed (may be NULL) budgets how many FILE buckets
 * may be passed by handle instead of being read; it is decremented
 * per file moved and is forced to NULL when FILE_MOVE is disabled.
 * 'msg' is only used for LOG_BUCKETS tracing.
 *
 * Per bucket: same allocator -> relink directly; else if DEEP_COPY,
 * recreate EOS/FLUSH on 'to''s allocator, set aside + insert file
 * handles when budget allows, or read+rewrite the data, then delete
 * the source bucket; else set the bucket aside into to->p and relink.
 *
 * Returns APR_SUCCESS or the first error encountered.
 */
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
                          apr_size_t maxlen, int *pfile_handles_allowed,
                          const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;

    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!FILE_MOVE) {
        pfile_handles_allowed = NULL;
    }

    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end;

        /* 'end' is the first bucket NOT to move (maxlen cutoff) */
        status = last_not_included(from, maxlen, same_alloc,
                                   pfile_handles_allowed, &end);
        if (status != APR_SUCCESS) {
            return status;
        }

        while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
            b = APR_BRIGADE_FIRST(from);
            if (b == end) {
                break;
            }

            if (same_alloc || (b->list == to->bucket_alloc)) {
                /* both brigades use the same bucket_alloc and auto-cleanups
                 * have the same life time. It's therefore safe to just move
                 * directly. */
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed bucket(same bucket_alloc) "
                              "%ld-%ld, type=%s",
                              msg, (long)b->start, (long)b->length,
                              APR_BUCKET_IS_METADATA(b)?
                              (APR_BUCKET_IS_EOS(b)? "EOS":
                               (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) :
                              (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
#endif
            }
            else if (DEEP_COPY) {
                /* we have not managed the magic of passing buckets from
                 * one thread to another. Any attempts result in
                 * cleanup of pools scrambling memory. */
                if (APR_BUCKET_IS_METADATA(b)) {
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else if (pfile_handles_allowed
                         && *pfile_handles_allowed > 0
                         && APR_BUCKET_IS_FILE(b)) {
                    /* We do not want to read files when passing buckets, if
                     * we can avoid it. However, what we've come up so far
                     * is not working corrently, resulting either in crashes or
                     * too many open file descriptors.
                     */
                    apr_bucket_file *f = (apr_bucket_file *)b->data;
                    apr_file_t *fd = f->fd;
                    /* setaside needed when the file lives in another pool */
                    int setaside = (f->readpool != to->p);
#if LOG_BUCKETS
                    ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                  "h2_util_move: %s, moving FILE bucket %ld-%ld "
                                  "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
                                  msg, (long)b->start, (long)b->length,
                                  (long)from, (long)from->p,
                                  (long)to, (long)to->p, setaside);
#endif
                    if (setaside) {
                        status = apr_file_setaside(&fd, fd, to->p);
                        if (status != APR_SUCCESS) {
                            ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
                                          APLOGNO(02947)
                                          "h2_util: %s, setaside FILE", msg);
                            return status;
                        }
                    }
                    apr_brigade_insert_file(to, fd, b->start, b->length, to->p);
                    --(*pfile_handles_allowed);
                }
                else {
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_move: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length,
                                      (long)from, (long)from->p,
                                      (long)to, (long)to->p);
#endif
                    }
                }
                /* source bucket consumed in all DEEP_COPY branches */
                apr_bucket_delete(b);
            }
            else {
                /* NOTE(review): setaside status is not checked here —
                 * presumably intentional, but verify. */
                apr_bucket_setaside(b, to->p);
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed setaside bucket %ld-%ld "
                              "from=%lx(p=%lx) to=%lx(p=%lx)",
                              msg, (long)b->start, (long)b->length,
                              (long)from, (long)from->p,
                              (long)to, (long)to->p);
#endif
            }
        }
    }
    return status;
}
/*
 * Callback invoked when the last green-side proxy bucket for a red
 * bucket is destroyed: release the corresponding red bucket from the
 * beam's hold list into its purge list, and wake any writer waiting
 * for buffer space.
 *
 * Runs under the beam lock (enter_yellow/leave_yellow); does nothing
 * if the lock cannot be taken.
 */
static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
{
    h2_beam_lock bl;
    apr_bucket *b, *next;

    if (enter_yellow(beam, &bl) == APR_SUCCESS) {
        /* even when beam buckets are split, only the one where
         * refcount drops to 0 will call us */
        H2_BPROXY_REMOVE(proxy);
        /* invoked from green thread, the last beam bucket for the red
         * bucket bred is about to be destroyed.
         * remove it from the hold, where it should be now */
        if (proxy->bred) {
            /* first pass: confirm the red bucket is actually in hold */
            for (b = H2_BLIST_FIRST(&beam->hold);
                 b != H2_BLIST_SENTINEL(&beam->hold);
                 b = APR_BUCKET_NEXT(b)) {
                if (b == proxy->bred) {
                    break;
                }
            }
            if (b != H2_BLIST_SENTINEL(&beam->hold)) {
                /* bucket is in hold as it should be, mark this one
                 * and all before it for purging. We might have placed meta
                 * buckets without a green proxy into the hold before it
                 * and schedule them for purging now */
                for (b = H2_BLIST_FIRST(&beam->hold);
                     b != H2_BLIST_SENTINEL(&beam->hold);
                     b = next) {
                    next = APR_BUCKET_NEXT(b);
                    if (b == proxy->bred) {
                        APR_BUCKET_REMOVE(b);
                        H2_BLIST_INSERT_TAIL(&beam->purge, b);
                        break;
                    }
                    else if (APR_BUCKET_IS_METADATA(b)) {
                        APR_BUCKET_REMOVE(b);
                        H2_BLIST_INSERT_TAIL(&beam->purge, b);
                    }
                    else {
                        /* another data bucket before this one in hold. this
                         * is normal since DATA buckets need not be destroyed
                         * in order */
                    }
                }
                proxy->bred = NULL;
            }
            else {
                /* it should be there unless we screwed up */
                ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->red_pool,
                              APLOGNO(03384)
                              "h2_beam(%d-%s): emitted bucket not "
                              "in hold, n=%d",
                              beam->id, beam->tag, (int)proxy->n);
                AP_DEBUG_ASSERT(!proxy->bred);
            }
        }
        /* notify anyone waiting on space to become available */
        if (!bl.mutex) {
            /* unlocked beam: purge synchronously */
            r_purge_reds(beam);
        }
        else if (beam->m_cond) {
            apr_thread_cond_broadcast(beam->m_cond);
        }
        leave_yellow(beam, &bl);
    }
}
/*
 * Child-init hook: build a per-child pool of redis connections.
 *
 * Creates a subpool and mutex for the module config, creates the
 * (file-global) redis context queue sized redis_init_count, and —
 * when redis_queue_enabled == 1 — pre-opens redis_init_count
 * connections and pushes them onto the queue.  All failures are
 * logged and abort initialization early (the hook returns void, so
 * errors are non-fatal to the child process).
 */
static void setup_redisclient_child_init(apr_pool_t *pchild, server_rec *s)
{
    apr_status_t rv;
    redisContext *ctx;
    redisReply *reply;  /* NOTE(review): unused in this function */
    int i;

    ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pchild,
                  "redis client initializaion.(apache child. PID:[%d])",
                  getpid());

    /*********************/
    secuip_svr_config *svr_config =
        (secuip_svr_config *)ap_get_module_config(s->module_config,
                                                  &secuip_module);
    //TODO: per virtualServer(maybe next version)
    //secuip_svr_config *svr_config = (secuip_svr_config *)ap_get_module_config(s->next->module_config, &secuip_module);
    /*********************/

    rv = apr_pool_create(&svr_config->pool, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, pchild,
                      "Failed to create subpool for secuip");
        return;
    }

#if APR_HAS_THREADS
    rv = apr_thread_mutex_create(&svr_config->mutex,
                                 APR_THREAD_MUTEX_DEFAULT, svr_config->pool);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, svr_config->pool,
                      "[0]Failed to create mutex for sucuip");
        return;
    }
#endif

    /* queue lives in a file-scope global, not the per-server config */
    // rv = apr_queue_create(&svr_config->redis_context_queue, svr_config->redis_init_count, svr_config->pool);
    rv = apr_queue_create(&redis_context_queue, svr_config->redis_init_count,
                          svr_config->pool);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, svr_config->pool,
                      "[1]Failed to create queue for secuip");
        return;
    }

    // not using redis connection pool
    if (svr_config->redis_queue_enabled != 1) {
        ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pchild,
                      "svr_config->redis_queue_enabled value[%d].(apache child. PID:[%d], init count[%d])",
                      svr_config->redis_queue_enabled, getpid(),
                      svr_config->redis_init_count);
        return;
    }

    for (i = 0; i < svr_config->redis_init_count; i++) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "init redis for secuip:[%d], PID:[%d]", i, getpid());
        ctx = init_redisclient(s, svr_config->redis_ip,
                               svr_config->redis_port,
                               svr_config->redis_password);
        if ( ctx == NULL) {
            ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
                         "init redisclient error.");
            return;
        }
        // reg. cleanup
        //apr_pool_cleanup_register(svr_config->pool, ctx, redisFree, apr_pool_cleanup_null) ;

        // add ctx to queue.
        rv = apr_queue_trypush(redis_context_queue, ctx);
        if (rv != APR_SUCCESS) {
            // queue full: free the connection we could not store
            free_redis_ctx(ctx, s);
            if (rv == APR_EAGAIN) {
                //redisCommand(ctx, "GET trypush_queue_full(%X)", ctx);
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                             "redis connection pool is full.(if this occures, there is a error of queue management.");
            }
            //redisCommand(ctx, "GET trypush_error(%X)", ctx);
            ap_log_perror(APLOG_MARK, APLOG_ERR, rv, svr_config->pool,
                          "[2]Failed to push queue for secuip.");
            return;
        }
        // log (current queue size)
        //redisCommand(ctx, "GET trypush_success(%X)(pid%d)(size%d)", ctx, getpid(), apr_queue_size(redis_context_queue));
        //ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "redis context pushed[%d].", apr_queue_size(redis_context_queue));
    }
    //redisFree(ctx); // not necessary in here.
    return;
}
/*
 * Post-config hook: initialize libdbi and build one connection
 * reslist per configured database.
 *
 * Uses the process-pool userdata trick to skip the first (config
 * test) pass.  On the real pass it initializes libdbi from the
 * configured driver directory, logs the loaded drivers at DEBUG
 * level, then walks ftpd_dbi_config_hash creating an apr_reslist for
 * each entry.  Returns APR_SUCCESS, or !APR_SUCCESS when libdbi
 * found no drivers / failed to initialize.
 */
static apr_status_t init_ftpd_dbi(apr_pool_t * p, apr_pool_t * plog,
                                  apr_pool_t * ptemp, server_rec * s)
{
    apr_status_t rv = APR_SUCCESS;
    int rval;
    dbi_driver dbi_driver;   /* NOTE(review): shadows the libdbi type name */
    void *data;
    apr_hash_index_t *idx;
    char *key;
    ftpd_dbi_config *val;
    apr_ssize_t len;
    const char *userdata_key = "mod_ftpd_dbi_init";
    /* dbi_config *conf = ap_get_module_config(s->module_config,
     * &ftpd_dbi_module); */
    apr_pool_userdata_get(&data, userdata_key, s->process->pool);
    ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, plog, "[mod_ftpd_dbi.c] init.");
    if (!data) {
        /* first invocation: just mark that we ran and wait for the
         * second pass of post_config */
        apr_pool_userdata_set((const void *) 1, userdata_key,
                              apr_pool_cleanup_null, s->process->pool);
        return OK;
    }
    ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, p,
                  "[mod_ftpd_dbi.c] Running DBI init Code");
    if ((rval = dbi_initialize(dbi_global_config.driverdir)) > 0) {
        if (dbi_global_config.driverdir == NULL) {
            ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, plog,
                          "[mod_ftpd_dbi.c] Initialization of libdbi found %d drivers in default driver directory",
                          rval);
        }
        else {
            ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, plog,
                          "[mod_ftpd_dbi.c] Initialization of libdbi found %d drivers in directory %s",
                          rval, dbi_global_config.driverdir);
        }
        if (s->loglevel >= APLOG_DEBUG) {
            /* enumerate the loaded drivers for debugging */
            dbi_driver = NULL;
            while ((dbi_driver = dbi_driver_list(dbi_driver)) != NULL) {
                ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, plog,
                              "[mod_ftpd_dbi.c] Driver '%s' was loaded.",
                              dbi_driver_get_name(dbi_driver));
            }
        }
    }
    else {
        /* An error was returned or libdbi found 0 drivers */
        if (dbi_global_config.driverdir == NULL) {
            ap_log_perror(APLOG_MARK, APLOG_EMERG, 0, plog,
                          "[mod_ftpd_dbi.c] - Initlialization of libdbi with default driver directory failed");
        }
        else {
            ap_log_perror(APLOG_MARK, APLOG_EMERG, 0, plog,
                          "[mod_ftpd_dbi.c] - Initlialization of libdbi with FtpDbiDriverDir %s failed",
                          dbi_global_config.driverdir);
        }
        return !APR_SUCCESS;
    }
    /* loop the hashed config stuff: one reslist per configured DB */
    for (idx = apr_hash_first(p, ftpd_dbi_config_hash); idx;
         idx = apr_hash_next(idx)) {
        apr_hash_this(idx, (void *) &key, &len, (void *) &val);
        apr_reslist_create(&val->pool,
                           val->rec.conn_min,  /* hard minimum */
                           val->rec.conn_soft, /* soft maximum */
                           val->rec.conn_max,  /* hard maximum */
                           val->rec.conn_ttl,  /* Time to live -- dbi server might override/disconnect! */
                           safe_dbi_new_conn,  /* Make a New Connection */
                           safe_dbi_kill_conn, /* Kill Old Connection */
                           (void *) &val->rec, p);
        apr_hash_set(ftpd_dbi_config_hash, key, APR_HASH_KEY_STRING, val);
    }
    apr_pool_cleanup_register(p, p, kill_dbi, apr_pool_cleanup_null);
    ap_add_version_component(p, "mod_ftpd_dbi/" MOD_FTPD_DBI_VERSION);
    return rv;
}
/* with a little help from ap_resolve_env() ;) */
/*
 * Expand %{Variable} placeholders inside a query string template.
 *
 * Value variables (GivenUsername, RequestHostname, Name,
 * ConfigHostname) are escaped through the libdbi driver's quoting;
 * identifier variables (UsernameField, ChrootField, IsActiveField,
 * Table) are inserted verbatim, since SQL identifiers must not be
 * quoted as data.
 *
 * Returns the populated string allocated from r->pool, the original
 * querystring when it contains no magic character, or NULL on error
 * (unknown variable, malformed template, out of memory, or a result
 * that would exceed MAX_STRING_LEN).
 *
 * BUGFIX: the trailing literal after the last placeholder is now
 * length-checked before being appended; previously the final strcat()
 * could overflow tmp[].  strdup() results are also checked.
 */
static const char *populate_querystring(const request_rec * r,
                                        const char *querystring,
                                        ftpd_dbi_config * conf,
                                        ftpd_dbi_dconfig * dconf,
                                        ftpd_dbi_rest * dbi_res,
                                        const char *user)
{
    char tmp[MAX_STRING_LEN];   /* 8 KByte should be enough for everyone :) */
    const char *s, *e;
    char *p;
    int written = 0;

    tmp[0] = '\0';
    if (!(s = ap_strchr_c(querystring, QUERYSTRING_MAGIC_CHAR)))
        return querystring;

    do {
        /* copy the literal text preceding the magic character */
        written += (s - querystring);
        if (written >= MAX_STRING_LEN) {
            ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r->pool,
                          "[mod_ftpd_dbi.c] Populated string would exceed %d bytes",
                          MAX_STRING_LEN);
            return NULL;
        }
        strncat(tmp, querystring, s - querystring);

        if ((s[1] == QUERYSTRING_LEFT_DELIM_CHAR)
            && (e = ap_strchr_c(s, QUERYSTRING_RIGHT_DELIM_CHAR))) {
            const char *e2 = e;
            char *var;

            p = NULL;
            querystring = e + 1;
            e = NULL;
            var = apr_pstrndup(r->pool, s + 2, e2 - (s + 2));

            /* value variables: the expansion gets driver-escaped below */
            if (!strcasecmp(var, "GivenUsername")) {
                e = (user ? user : EMPTY_VAR);
            }
            else if (!strcasecmp(var, "RequestHostname")) {
                e = (r->hostname ? r->hostname : EMPTY_VAR);
            }
            else if (!strcasecmp(var, "Name")) {
                e = (conf->rec.dbi_name ? conf->rec.dbi_name : EMPTY_VAR);
            }
            else if (!strcasecmp(var, "ConfigHostname")) {
                e = (r->server->server_hostname
                     ? r->server->server_hostname : EMPTY_VAR);
            }
            /* Everything but the variable values representing fieldnames and
             * tables gets escaped according to the selected driver */
            if (e != NULL) {
                p = strdup(e);
                if (p == NULL) {
                    ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r->pool,
                                  "[mod_ftpd_dbi.c] Out of memory populating querystring");
                    return NULL;
                }
                dbi_driver_quote_string(dbi_conn_get_driver(dbi_res->conn), &p);
            }

            /* identifier variables: inserted verbatim, never escaped */
            if (!strcasecmp(var, "UsernameField")) {
                e = (conf->rec.username_field
                     ? conf->rec.username_field : EMPTY_VAR);
            }
            else if (!strcasecmp(var, "ChrootField")) {
                e = (conf->rec.chroot_field
                     ? conf->rec.chroot_field : EMPTY_VAR);
            }
            else if (!strcasecmp(var, "IsActiveField")) {
                e = (conf->rec.isactive_field
                     ? conf->rec.isactive_field : EMPTY_VAR);
            }
            else if (!strcasecmp(var, "Table")) {
                e = (conf->rec.dbi_table ? conf->rec.dbi_table : EMPTY_VAR);
            }

            if (e == NULL) {
                ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, r->pool,
                              "[mod_ftpd_dbi.c] Unknown variable: %s", var);
                return NULL;
            }
            if (p == NULL) {
                p = strdup(e);
                if (p == NULL) {
                    ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r->pool,
                                  "[mod_ftpd_dbi.c] Out of memory populating querystring");
                    return NULL;
                }
            }
            written += strlen(p);
            if (written >= MAX_STRING_LEN) {
                ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r->pool,
                              "[mod_ftpd_dbi.c] Populated string would exceed %d bytes",
                              MAX_STRING_LEN);
                free(p);
                return NULL;
            }
            strcat(tmp, p);
            free(p);
        }
        else {
            ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, r->pool,
                          "[mod_ftpd_dbi.c] Invalid querystring");
            return NULL;
        };
    } while ((s = ap_strchr_c(querystring, QUERYSTRING_MAGIC_CHAR)));

    /* bound the trailing literal as well (this check was missing and
     * allowed strcat() to overflow tmp) */
    written += strlen(querystring);
    if (written >= MAX_STRING_LEN) {
        ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r->pool,
                      "[mod_ftpd_dbi.c] Populated string would exceed %d bytes",
                      MAX_STRING_LEN);
        return NULL;
    }
    strcat(tmp, querystring);

    ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, r->pool,
                  "[mod_ftpd_dbi.c] Populated result: \"%s\" / %d chars written",
                  apr_pstrdup(r->pool, tmp), written);
    return apr_pstrdup(r->pool, tmp);
}
/*
 * apr_reslist constructor: open a new libdbi connection.
 *
 * params is the ftpd_dbi_config_rec holding host/driver/db/user/pass.
 * Enforces a global cap of DBI_HARD_MAX_CONNS via the dbi_conn_count
 * counter (NOTE(review): counter is not obviously synchronized —
 * verify thread-safety under threaded MPMs).  On success *resource
 * receives a ftpd_dbi_rest allocated from pool r.  Returns
 * APR_SUCCESS, or !APR_SUCCESS on connect failure / cap reached.
 * If dbi_conn_new() itself returns NULL the process exits — the
 * original author cites ssl_engine_log.c precedent for this.
 */
static apr_status_t safe_dbi_new_conn(void **resource, void *params,
                                      apr_pool_t * r)
{
    apr_status_t rv = APR_SUCCESS;
    ftpd_dbi_config_rec *conf = params;
    int err_num = 0;
    const char *err_str;
    const char *host = conf->dbi_host;
    const char *driver = conf->dbi_driver;
    const char *name = conf->dbi_name;
    const char *user = conf->dbi_user;
    const char *pwd = conf->dbi_pass;
    ftpd_dbi_rest *myres;

    dbi_conn_count++;

    if (DBI_HARD_MAX_CONNS > dbi_conn_count) {
        ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, r,
                      "[mod_ftpd_dbi.c] Creating New DBI Server Connection");
        myres = apr_palloc(r, sizeof(*myres));
        myres->conn = dbi_conn_new(driver);
        if (myres->conn == NULL) {
            ap_log_perror(APLOG_MARK, APLOG_EMERG, 0, r,
                          "[mod_ftpd_dbi.c] DBI Connection Failed. dbi_conn_new returned NULL.");
            rv = !APR_SUCCESS;
            /*
             * modules/ssl/ssl_engine_log.c:103
             * said this was okay. so i do it.
             */
            exit(1);
        }
        else {
            /* libdbi takes option values as non-const char* */
            dbi_conn_set_option(myres->conn, "host", (char *)host);
            dbi_conn_set_option(myres->conn, "username", (char *)user);
            dbi_conn_set_option(myres->conn, "password", (char *)pwd);
            dbi_conn_set_option(myres->conn, "dbname", (char *)name);
            if (dbi_conn_connect(myres->conn) != 0) {
                err_num = dbi_conn_error(myres->conn, (const char **)&err_str);
                /* Connetion Failed */
                ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r,
                              "[mod_ftpd_dbi.c] DBI Connection to %s://%s@%s/%s Failed. Error: (%d) %s",
                              driver, user, host, name, err_num, err_str);
                rv = !APR_SUCCESS;
            }
            else {
                ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, r,
                              "[mod_ftpd_dbi.c] Connection was created sucessfully");
            }
        }
        /* NOTE(review): *resource is set even when connect failed —
         * the reslist will hold a non-connected handle; confirm the
         * destructor copes with that. */
        *resource = myres;
    }
    else {
        /* Error -- we have too many TOTAL DBI Connections. Maybe a Evil User
           trying to hurt our system? */
        ap_log_perror(APLOG_MARK, APLOG_ERR, 0, r,
                      "[mod_ftpd_dbi.c] DBI Connection Failed. Hard Max Limit of %d Connections has been reached",
                      DBI_HARD_MAX_CONNS);
        /* we didn't create a new connection! */
        dbi_conn_count--;
        rv = !APR_SUCCESS;
    }
    return rv;
}