/* Lua binding: file:inherit_unset() - clear the inherit flag on the
 * underlying APR file handle so that child processes spawned later do
 * not inherit this descriptor. Returns the usual file-status result. */
static int file_inherit_unset(lua_State *L)
{
  lua_apr_file *object;
  apr_status_t rc;

  object = file_check(L, 1, 1);
  rc = apr_file_inherit_unset(object->handle);
  return push_file_status(L, object, rc);
}
/* Create the pipe-of-death used to signal MPM child processes.
 * Allocates the pod from P, creates its pipe, and marks both ends
 * close-on-exec so exec'd children do not inherit them.
 * Returns APR_SUCCESS or the pipe-creation error. */
AP_DECLARE(apr_status_t) ap_mpm_pod_open(apr_pool_t *p, ap_pod_t **pod)
{
    apr_status_t rv;
    ap_pod_t *new_pod = apr_palloc(p, sizeof(*new_pod));

    *pod = new_pod;
    rv = apr_file_pipe_create(&new_pod->pod_in, &new_pod->pod_out, p);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    /* Zero timeout: reads on the pod return immediately instead of
     * blocking when no byte is available. */
    apr_file_pipe_timeout_set(new_pod->pod_in, 0);
    new_pod->p = p;

    /* close these before exec. */
    apr_file_inherit_unset(new_pod->pod_in);
    apr_file_inherit_unset(new_pod->pod_out);

    return APR_SUCCESS;
}
/* Redirect the ZooKeeper client library's log output to FILENAME,
 * opened under a subpool of P (or a fresh root subpool when P is NULL).
 * Passing FILENAME == NULL disables file logging: the previous log pool
 * (if any) is destroyed, otherwise the log stream is reset to default.
 * Returns APR_SUCCESS or the APR error from opening/wrapping the file.
 * On failure the partially-opened apr_file_t is closed before return. */
zktool_set_logfile(const char *filename, apr_pool_t *p)
{
  apr_status_t st;
  apr_file_t *f = NULL;
  apr_pool_t *pool;
  FILE *fp;
  apr_os_file_t fd;

  if (filename == NULL) {
    /* Destroying log_pool runs the registered cleanup, which is
     * presumably what restores the default log stream -- otherwise we
     * reset it explicitly here.
     * NOTE(review): log_pool is not set to NULL after the destroy;
     * looks safe only if the cleanup handles that -- confirm. */
    if (log_pool != NULL)
      apr_pool_destroy(log_pool);
    else
      zoo_set_log_stream(NULL);
    return APR_SUCCESS;
  }

  if (p == NULL) {
    pool = zeke_root_subpool_create();
  }
  else {
    /* BUG FIX: the pool creation used to live inside assert(), so with
     * NDEBUG defined the call was compiled out and `pool` was used
     * uninitialized. Perform the call unconditionally and assert only
     * on the result. */
    st = apr_pool_create(&pool, p);
    assert(st == APR_SUCCESS);
  }

  st = apr_file_open(&f, filename, ZOO_LOG_FLAGS, APR_OS_DEFAULT, pool);
  if (st == APR_SUCCESS) {
    /* Keep the log file out of exec'd children. */
    apr_file_inherit_unset(f);
    if ((st = apr_os_file_get(&fd, f)) == APR_SUCCESS) {
      /* zoo_set_log_stream() wants a stdio FILE*, so wrap the native
       * descriptor in append mode. */
      fp = fdopen(fd, "a");
      if (fp == NULL) {
        st = apr_get_os_error();
      }
      else {
        zoo_set_log_stream(fp);
        log_pool = pool;
        apr_pool_cleanup_register(pool, f, close_zookeeper_logfile,
                                  close_zookeeper_logfile);
      }
    }
  }

  if (st != APR_SUCCESS && f != NULL)
    apr_file_close(f);
  return st;
}
/* Print a usage listing for OPTIONS (an apr_getopt table terminated by
 * a NULL name and zero optch) to F, one line per option.  When F is
 * NULL, fall back to zstdout, then zstderr, then a freshly opened
 * stderr/stdout stream.  When POOL is NULL a temporary root subpool is
 * created and destroyed before returning. */
zktool_display_options(const apr_getopt_option_t *options, apr_file_t *f, apr_pool_t *pool)
{
  int own_pool = 0;
  const apr_getopt_option_t *o;

  if (pool == NULL) {
    pool = zeke_root_subpool_create();
    own_pool = 1;
  }

  if (f == NULL) {
    apr_status_t st;
    if ((f = zstdout) == NULL && (f = zstderr) == NULL) {
      st = apr_file_open_stderr(&f, pool);
      if (st != APR_SUCCESS) {
        st = apr_file_open_stdout(&f, pool);
        if (st != APR_SUCCESS)
          return;
      }
      /* Don't leak the freshly opened stream to child processes. */
      apr_file_inherit_unset(f);
    }
  }

  for (o = options; o->name != NULL || o->optch != 0; o++) {
    const char *fmt = zktool_format_opt(o, NULL, pool);
    assert(fmt != NULL);
    /* fmt is a printf-style template produced by zktool_format_opt(). */
    apr_file_printf(f, fmt, o->description);
    apr_file_putc('\n', f);
  }

  if (own_pool)
    apr_pool_destroy(pool);
}
/* Store the response body brigade B for the cached object held by H.
 * Two storage strategies:
 *   - CACHE_TYPE_FILE: if the brigade is exactly one file bucket plus
 *     EOS, cache a fresh XTHREAD file descriptor to the same file and
 *     mark the object complete.  Otherwise fall back to heap caching.
 *   - CACHE_TYPE_HEAP: copy bucket data into a malloc'd buffer of
 *     mobj->m_len bytes; on EOS, validate Content-Length, shrink the
 *     buffer to the actual size and re-register the object in the
 *     cache under sconf->lock.
 * Returns APR_SUCCESS, APR_ENOMEM on allocation/overflow, APR_EGENERAL
 * on an incomplete response, or the error from reading/opening. */
static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
{
    apr_status_t rv;
    cache_object_t *obj = h->cache_obj;
    cache_object_t *tobj = NULL;
    mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
    apr_read_type_e eblock = APR_BLOCK_READ;
    apr_bucket *e;
    char *cur;
    int eos = 0;

    if (mobj->type == CACHE_TYPE_FILE) {
        apr_file_t *file = NULL;
        int fd = 0;
        int other = 0;

        /* We can cache an open file descriptor if:
         * - the brigade contains one and only one file_bucket &&
         * - the brigade is complete &&
         * - the file_bucket is the last data bucket in the brigade
         */
        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e))
        {
            if (APR_BUCKET_IS_EOS(e)) {
                eos = 1;
            }
            else if (APR_BUCKET_IS_FILE(e)) {
                apr_bucket_file *a = e->data;
                fd++;
                file = a->fd;
            }
            else {
                other++;
            }
        }

        if (fd == 1 && !other && eos) {
            apr_file_t *tmpfile;
            const char *name;

            /* Open a new XTHREAD handle to the file */
            apr_file_name_get(&name, file);
            mobj->flags = ((APR_SENDFILE_ENABLED & apr_file_flags_get(file))
                           | APR_READ | APR_BINARY | APR_XTHREAD
                           | APR_FILE_NOCLEANUP);
            rv = apr_file_open(&tmpfile, name, mobj->flags,
                               APR_OS_DEFAULT, r->pool);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            apr_file_inherit_unset(tmpfile);
            apr_os_file_get(&(mobj->fd), tmpfile);

            /* Open for business */
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                         "mem_cache: Cached file: %s with key: %s",
                         name, obj->key);
            obj->complete = 1;
            return APR_SUCCESS;
        }
        /* Content not suitable for fd caching. Cache in-memory instead. */
        mobj->type = CACHE_TYPE_HEAP;
    }

    /*
     * FD cacheing is not enabled or the content was not
     * suitable for fd caching.
     */
    if (mobj->m == NULL) {
        mobj->m = malloc(mobj->m_len);
        if (mobj->m == NULL) {
            return APR_ENOMEM;
        }
        obj->count = 0;
    }
    cur = (char*) mobj->m + obj->count;

    /* Iterate accross the brigade and populate the cache storage */
    for (e = APR_BRIGADE_FIRST(b);
         e != APR_BRIGADE_SENTINEL(b);
         e = APR_BUCKET_NEXT(e))
    {
        const char *s;
        apr_size_t len;

        if (APR_BUCKET_IS_EOS(e)) {
            const char *cl_header = apr_table_get(r->headers_out,
                                                  "Content-Length");
            if (cl_header) {
                apr_int64_t cl;
                /* BUG FIX: clear errno before apr_atoi64() - a stale
                 * non-zero errno used to silently disable the
                 * completeness check below. */
                errno = 0;
                cl = apr_atoi64(cl_header);
                if ((errno == 0) && (obj->count != cl)) {
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "mem_cache: URL %s didn't receive complete response, not caching",
                                 h->cache_obj->key);
                    return APR_EGENERAL;
                }
            }

            if (mobj->m_len > obj->count) {
                /* Caching a streamed response. Reallocate a buffer of the
                 * correct size and copy the streamed response into that
                 * buffer */
                /* BUG FIX: don't assign realloc()'s result straight back
                 * into mobj->m - on failure that leaked the old buffer
                 * and left the object with m == NULL but a stale m_len.
                 * Keep the old (still valid) buffer on failure so the
                 * object cleanup can free it. */
                void *shrunk = realloc(mobj->m, obj->count);
                if (!shrunk) {
                    return APR_ENOMEM;
                }
                mobj->m = shrunk;

                /* Now comes the crufty part... there is no way to tell the
                 * cache that the size of the object has changed. We need
                 * to remove the object, update the size and re-add the
                 * object, all under protection of the lock.
                 */
                if (sconf->lock) {
                    apr_thread_mutex_lock(sconf->lock);
                }
                /* Has the object been ejected from the cache? */
                tobj = (cache_object_t *) cache_find(sconf->cache_cache,
                                                     obj->key);
                if (tobj == obj) {
                    /* Object is still in the cache, remove it, update the
                     * len field then replace it under protection of
                     * sconf->lock. */
                    cache_remove(sconf->cache_cache, obj);
                    /* For illustration, cache no longer has reference to
                     * the object so decrement the refcount
                     * apr_atomic_dec32(&obj->refcount);
                     */
                    mobj->m_len = obj->count;
                    cache_insert(sconf->cache_cache, obj);
                    /* For illustration, cache now has reference to the
                     * object, so increment the refcount
                     * apr_atomic_inc32(&obj->refcount);
                     */
                }
                else if (tobj) {
                    /* Different object with the same key found in the
                     * cache. Doing nothing here will cause the object
                     * refcount to drop to 0 in decrement_refcount and the
                     * object will be cleaned up.
                     */
                }
                else {
                    /* Object has been ejected from the cache, add it back
                     * to the cache */
                    mobj->m_len = obj->count;
                    cache_insert(sconf->cache_cache, obj);
                    apr_atomic_inc32(&obj->refcount);
                }
                if (sconf->lock) {
                    apr_thread_mutex_unlock(sconf->lock);
                }
            }

            /* Open for business */
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                         "mem_cache: Cached url: %s", obj->key);
            obj->complete = 1;
            break;
        }

        rv = apr_bucket_read(e, &s, &len, eblock);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        if (len) {
            /* Check for buffer (max_streaming_buffer_size) overflow */
            if ((obj->count + len) > mobj->m_len) {
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                              "mem_cache: URL %s exceeds the MCacheMaxStreamingBuffer (%" APR_SIZE_T_FMT ") limit and will not be cached.",
                              obj->key, mobj->m_len);
                return APR_ENOMEM;
            }
            else {
                memcpy(cur, s, len);
                cur += len;
                obj->count += len;
            }
        }
        /* This should not fail, but if it does, we are in BIG trouble
         * cause we just stomped all over the heap.
         */
        AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
    }
    return APR_SUCCESS;
}
/* NAME, CMD and ARGS are the name, path to and arguments for the hook
   program that is to be run.  The hook's exit status will be checked,
   and if an error occurred the hook's stderr output will be added to
   the returned error.

   If STDIN_HANDLE is non-null, pass it as the hook's stdin, else pass
   no stdin to the hook. */
static svn_error_t *
run_hook_cmd(const char *name,
             const char *cmd,
             const char **args,
             apr_file_t *stdin_handle,
             apr_pool_t *pool)
#ifndef AS400
{
  apr_file_t *read_errhandle, *write_errhandle, *null_handle;
  apr_status_t apr_err;
  svn_error_t *err;
  apr_proc_t cmd_proc;

  /* Create a pipe to access stderr of the child. */
  apr_err = apr_file_pipe_create(&read_errhandle, &write_errhandle, pool);
  if (apr_err)
    return svn_error_wrap_apr
      (apr_err, _("Can't create pipe for hook '%s'"), cmd);

  /* Pipes are inherited by default, but we don't want that, since APR
     will duplicate the write end of the pipe for the child process.
     Not closing the read end is harmless, but if the write end is
     inherited, it will be inherited by grandchildren as well.  This
     causes problems if a hook script puts long-running jobs in the
     background.  Even if they redirect stderr to something else, the
     write end of our pipe will still be open, causing us to block. */
  apr_err = apr_file_inherit_unset(read_errhandle);
  if (apr_err)
    return svn_error_wrap_apr
      (apr_err,
       _("Can't make pipe read handle non-inherited for hook '%s'"), cmd);

  apr_err = apr_file_inherit_unset(write_errhandle);
  if (apr_err)
    return svn_error_wrap_apr
      (apr_err,
       _("Can't make pipe write handle non-inherited for hook '%s'"), cmd);

  /* Redirect stdout to the null device */
  /* NOTE(review): on the early error returns above and below, the pipe
     handles opened so far are not closed explicitly; presumably POOL's
     cleanup closes them -- confirm against APR file cleanup behavior. */
  apr_err = apr_file_open(&null_handle, SVN_NULL_DEVICE_NAME, APR_WRITE,
                          APR_OS_DEFAULT, pool);
  if (apr_err)
    return svn_error_wrap_apr
      (apr_err, _("Can't create null stdout for hook '%s'"), cmd);

  /* Run the hook: no stdout, stderr captured through our pipe. */
  err = svn_io_start_cmd(&cmd_proc, ".", cmd, args, FALSE,
                         stdin_handle, null_handle, write_errhandle, pool);

  /* This seems to be done automatically if we pass the third parameter
     of apr_procattr_child_in/out_set(), but svn_io_run_cmd()'s interface
     does not support those parameters. We need to close the write end of
     the pipe so we don't hang on the read end later, if we need to read
     it. */
  apr_err = apr_file_close(write_errhandle);
  if (!err && apr_err)
    return svn_error_wrap_apr
      (apr_err, _("Error closing write end of stderr pipe"));

  if (err)
    {
      err = svn_error_createf
        (SVN_ERR_REPOS_HOOK_FAILURE, err, _("Failed to start '%s' hook"),
         cmd);
    }
  else
    {
      /* Wait for the hook and turn a non-zero exit (plus captured
         stderr) into an svn_error_t. */
      err = check_hook_result(name, cmd, &cmd_proc, read_errhandle, pool);
    }

  /* Hooks are fallible, and so hook failure is "expected" to occur at
     times.  When such a failure happens we still want to close the pipe
     and null file */
  apr_err = apr_file_close(read_errhandle);
  if (!err && apr_err)
    return svn_error_wrap_apr
      (apr_err, _("Error closing read end of stderr pipe"));

  apr_err = apr_file_close(null_handle);
  if (!err && apr_err)
    return svn_error_wrap_apr(apr_err, _("Error closing null file"));

  return err;
}
static svn_error_t * open_tunnel(svn_stream_t **request, svn_stream_t **response, svn_ra_close_tunnel_func_t *close_func, void **close_baton, void *tunnel_baton, const char *tunnel_name, const char *user, const char *hostname, int port, svn_cancel_func_t cancel_func, void *cancel_baton, apr_pool_t *pool) { svn_node_kind_t kind; apr_proc_t *proc; apr_procattr_t *attr; apr_status_t status; const char *args[] = { "svnserve", "-t", "-r", ".", NULL }; const char *svnserve; tunnel_baton_t *b = tunnel_baton; close_baton_t *cb; SVN_TEST_ASSERT(b->magic == TUNNEL_MAGIC); SVN_ERR(svn_dirent_get_absolute(&svnserve, "../../svnserve/svnserve", pool)); #ifdef WIN32 svnserve = apr_pstrcat(pool, svnserve, ".exe", SVN_VA_NULL); #endif SVN_ERR(svn_io_check_path(svnserve, &kind, pool)); if (kind != svn_node_file) return svn_error_createf(SVN_ERR_TEST_FAILED, NULL, "Could not find svnserve at %s", svn_dirent_local_style(svnserve, pool)); status = apr_procattr_create(&attr, pool); if (status == APR_SUCCESS) status = apr_procattr_io_set(attr, 1, 1, 0); if (status == APR_SUCCESS) status = apr_procattr_cmdtype_set(attr, APR_PROGRAM); proc = apr_palloc(pool, sizeof(*proc)); if (status == APR_SUCCESS) status = apr_proc_create(proc, svn_dirent_local_style(svnserve, pool), args, NULL, attr, pool); if (status != APR_SUCCESS) return svn_error_wrap_apr(status, "Could not run svnserve"); apr_pool_note_subprocess(pool, proc, APR_KILL_NEVER); /* APR pipe objects inherit by default. But we don't want the * tunnel agent's pipes held open by future child processes * (such as other ra_svn sessions), so turn that off. */ apr_file_inherit_unset(proc->in); apr_file_inherit_unset(proc->out); cb = apr_pcalloc(pool, sizeof(*cb)); cb->magic = CLOSE_MAGIC; cb->tb = b; cb->proc = proc; *request = svn_stream_from_aprfile2(proc->in, FALSE, pool); *response = svn_stream_from_aprfile2(proc->out, FALSE, pool); *close_func = close_tunnel; *close_baton = cb; ++b->open_count; return SVN_NO_ERROR; }