Example #1
/** Register MRCP profile */
MRCP_DECLARE(apt_bool_t) mrcp_server_profile_register(
    mrcp_server_t *server,
    mrcp_profile_t *profile,
    apr_table_t *plugin_map,
    const char *name)
{
    if(!profile || !name) {
        apt_log(APT_PRIO_WARNING,"Failed to Register Profile: invalid profile or name");
        return FALSE;
    }
    if(!profile->resource_factory) {
        profile->resource_factory = server->resource_factory;
    }
    mrcp_server_engine_table_make(server,profile,plugin_map);

    if(!profile->signaling_agent) {
        apt_log(APT_PRIO_WARNING,"Failed to Register Profile [%s]: missing signaling agent",name);
        return FALSE;
    }
    if(profile->signaling_agent->mrcp_version == MRCP_VERSION_2 &&
            !profile->connection_agent) {
        apt_log(APT_PRIO_WARNING,"Failed to Register Profile [%s]: missing connection agent",name);
        return FALSE;
    }
    if(!profile->media_engine) {
        apt_log(APT_PRIO_WARNING,"Failed to Register Profile [%s]: missing media engine",name);
        return FALSE;
    }
    if(!profile->rtp_termination_factory) {
        apt_log(APT_PRIO_WARNING,"Failed to Register Profile [%s]: missing RTP factory",name);
        return FALSE;
    }

    apt_log(APT_PRIO_INFO,"Register Profile [%s]",name);
    apr_hash_set(server->profile_table,name,APR_HASH_KEY_STRING,profile);
    return TRUE;
}
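Every example on this page is built from the same small APR hash API. A minimal, self-contained sketch of the basic pattern used above (string keys passed with APR_HASH_KEY_STRING, everything allocated from a pool); the names below are illustrative, not taken from UniMRCP:

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>
#include <apr_hash.h>

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *profiles;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    profiles = apr_hash_make(pool);

    /* APR_HASH_KEY_STRING means the key is a NUL-terminated string whose
     * length is computed internally. The key pointer is stored, not copied,
     * so it must live at least as long as the hash (pool allocation). */
    apr_hash_set(profiles, apr_pstrdup(pool, "uni2"), APR_HASH_KEY_STRING,
                 "example profile");

    printf("uni2 -> %s\n",
           (const char *)apr_hash_get(profiles, "uni2", APR_HASH_KEY_STRING));

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}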
Example #2
int submit_hp_que_op(portal_context_t *hpc, op_generic_t *op)
{
    command_op_t *hop = &(op->op->cmd);

    apr_thread_mutex_lock(hpc->lock);

    //** Check if we should do a garbage run **
    if (hpc->next_check < time(NULL)) {
        hpc->next_check = time(NULL) + hpc->compact_interval;

        apr_thread_mutex_unlock(hpc->lock);
        log_printf(15, "submit_hp_op: Calling compact_hportals\n");
        compact_hportals(hpc);
        apr_thread_mutex_lock(hpc->lock);
    }

//log_printf(1, "submit_hp_op: hpc=%p hpc->table=%p\n",hpc, hpc->table);
    host_portal_t *hp = _lookup_hportal(hpc, hop->hostport);
    if (hp == NULL) {
        log_printf(15, "submit_hp_que_op: New host: %s\n", hop->hostport);
        hp = create_hportal(hpc, hop->connect_context, hop->hostport, hpc->min_threads, hpc->max_threads, hpc->dt_connect);
        if (hp == NULL) {
            log_printf(15, "submit_hp_que_op: create_hportal failed!\n");
            apr_thread_mutex_unlock(hpc->lock);  //** Don't hold the lock on the error path
            return(1);
        }
        log_printf(15, "submit_op: New host.. hp->skey=%s\n", hp->skey);
        apr_hash_set(hpc->table, hp->skey, APR_HASH_KEY_STRING, (const void *)hp);
        host_portal_t *hp2 = _lookup_hportal(hpc, hop->hostport);
        log_printf(15, "submit_hp_que_op: after lookup hp2=%p\n", hp2);
    }

    //** This is done in the submit_hportal() since we have release_master=1
    //** This protects against accidental compaction removal
    //** apr_thread_mutex_unlock(hpc->lock);

    return(submit_hportal(hp, op, 0, 1));
}
Example #3
// command associated with the <LMMPUser> container
static const char *lmmpuser_cmd(cmd_parms *cmd, void *dummy, const char *args) {
	
	moon_svr_cfg * cfg = ap_get_module_config(cmd->server->module_config, &moon_module);

	char *user_key;
	char *error;
	error = get_container_args(cmd, args, &user_key, LMMPUSER_NUM_ARGS);
	if(error != NULL) {
		return error;
	}

	apr_hash_t * user_to_css = cfg->user_to_css;
	char * curr_css = apr_hash_get(user_to_css, user_key, APR_HASH_KEY_STRING);
	if(curr_css == NULL) {
		curr_css = apr_palloc(cmd->pool,1);
		curr_css[0] = '\0';
	}
	error = get_container_properties(cmd, cfg, &curr_css, "</LMMPUser>");
	if(error != NULL) {
		return error;
	}
	apr_hash_set(user_to_css, user_key, APR_HASH_KEY_STRING, curr_css);
	return NULL;
}
Example #4
static char *test_plaintext_decrypt_symmetric(apr_pool_t *pool) {
	apr_jwt_error_t err;
	apr_hash_t *keys = apr_hash_make(pool);
	apr_jwk_t *jwk;

	// http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-40#appendix-A.3
	// A.3.  Example JWE using AES Key Wrap and AES_128_CBC_HMAC_SHA_256
	const char * k = "{\"kty\":\"oct\", \"k\":\"GawgguFyGrWKav7AX4VKUg\"}";
	jwk = NULL;
	TST_ASSERT_ERR("apr_jwk_parse_json", _jwk_parse(pool, k, &jwk, &err) == 0,
			pool, err);
	apr_hash_set(keys, "dummy", APR_HASH_KEY_STRING, jwk);

	const char *s = "eyJhbGciOiJBMTI4S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0."
			"6KB707dM9YTIgHtLvtgWQ8mKwboJW3of9locizkDTHzBC2IlrT1oOQ."
			"AxY8DCtDaGlsbGljb3RoZQ."
			"KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY."
			"U0m_YmjN04DJvceFICbCVQ";

	apr_array_header_t *unpacked = NULL;
	apr_jwt_header_t header;
	memset(&header, 0, sizeof(header));
	TST_ASSERT_ERR("apr_jwt_header_parse",
			apr_jwt_header_parse(pool, s, &unpacked, &header, &err), pool, err);

	char *decrypted = NULL;
	TST_ASSERT_ERR("apr_jwe_decrypt_jwt",
			apr_jwe_decrypt_jwt(pool, &header, unpacked, keys, &decrypted,
					&err), pool, err);

	TST_ASSERT_STR("decrypted", decrypted, "Live long and prosper.");

	json_decref(header.value.json);

	return 0;
}
Example #5
File: opt.c Project: DJEX93/dsploit
svn_error_t *
svn_opt_parse_revprop(apr_hash_t **revprop_table_p, const char *revprop_spec,
                      apr_pool_t *pool)
{
  const char *sep, *propname;
  svn_string_t *propval;

  if (! *revprop_spec)
    return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
                            _("Revision property pair is empty"));

  if (! *revprop_table_p)
    *revprop_table_p = apr_hash_make(pool);

  sep = strchr(revprop_spec, '=');
  if (sep)
    {
      propname = apr_pstrndup(pool, revprop_spec, sep - revprop_spec);
      SVN_ERR(svn_utf_cstring_to_utf8(&propname, propname, pool));
      propval = svn_string_create(sep + 1, pool);
    }
  else
    {
      SVN_ERR(svn_utf_cstring_to_utf8(&propname, revprop_spec, pool));
      propval = svn_string_create("", pool);
    }

  if (!svn_prop_name_is_valid(propname))
    return svn_error_createf(SVN_ERR_CLIENT_PROPERTY_NAME, NULL,
                             _("'%s' is not a valid Subversion property name"),
                             propname);

  apr_hash_set(*revprop_table_p, propname, APR_HASH_KEY_STRING, propval);

  return SVN_NO_ERROR;
}
Example #6
void _rss_make_check_table(resource_service_fn_t *rs)
{
    rs_simple_priv_t *rss = (rs_simple_priv_t *)rs->priv;
    rss_check_entry_t *ce, *ce2;
    rss_rid_entry_t *re;
    int i;

    //** Clear out the old one
    _rss_clear_check_table(rss->ds, rss->rid_mapping, rss->mpool);

    //** Now make the new one
    rss->unique_rids = 1;
    for (i=0; i<rss->n_rids; i++) {
        re = rss->random_array[i];
        tbx_type_malloc(ce, rss_check_entry_t, 1);
        ce->ds_key = strdup(re->ds_key);
        ce->rid_key = strdup(re->rid_key);
        ce->space = ds_inquire_create(rss->ds);
        ce->re = re;

        //** Check for dups.  If so we only keep the 1st entry and spew a log message
        ce2 = apr_hash_get(rss->rid_mapping, ce->rid_key, APR_HASH_KEY_STRING);
        if (ce2 == NULL) {  //** Unique so add it
            apr_hash_set(rss->rid_mapping, ce->rid_key, APR_HASH_KEY_STRING, ce);
        } else {  //** Dup so disable dynamic mapping by unsetting unique_rids
            log_printf(0, "WARNING duplicate RID found.  Dropping dynamic mapping.  res=%s ---  new res=%s\n", ce2->ds_key, ce->ds_key);
            rss->unique_rids = 0;
            ds_inquire_destroy(rss->ds, ce->space);
            free(ce->rid_key);
            free(ce->ds_key);
            free(ce);
        }
    }

    return;
}
Example #7
static dav_error *dav_acl_patch_exec(const dav_resource * resource,
                                     const apr_xml_elem * elem,
                                     int operation,
                                     void *context,
                                     dav_liveprop_rollback **rollback_ctx)
{
    dav_repos_resource *db_r = resource->info->db_r;
    const dav_repos_db *db = resource->info->db;
    dav_elem_private *priv = elem->priv;
    dav_error *err = NULL;

    if (priv->propid == DAV_PROPID_group_member_set) {
        apr_hash_t *new_members = (apr_hash_t*)context;
        apr_array_header_t *to_remove = (apr_array_header_t *)apr_hash_get
          (new_members, "-to-remove-", APR_HASH_KEY_STRING);
        apr_hash_set(new_members, "-to-remove-", APR_HASH_KEY_STRING, NULL);
        dav_repos_resource *prin = NULL;
        int i = 0;
        for (i = 0; to_remove && i < to_remove->nelts; i++) {
            prin = &APR_ARRAY_IDX(to_remove, i, dav_repos_resource);
            err = sabridge_rem_prin_frm_grp
              (db_r->p, db, db_r->serialno, prin->serialno);
            if (err) return err;
        }
        
        apr_hash_index_t *iter = apr_hash_first(db_r->p, new_members);
        while (iter) {
            apr_hash_this(iter, NULL, NULL, (void *)&prin);
            err = sabridge_add_prin_to_grp
              (db_r->p, db, db_r->serialno, prin->serialno);
            if (err) return err;
            iter = apr_hash_next(iter);
        }
    }
    return NULL;
}
Example #8
static void clear_cached_recipient(const char* thread) {
    apr_pool_t *pool = NULL;                                                
    request_rec *r = trans->server->request(trans->server);

    if (apr_hash_get(trans->stateful_session_cache, thread, APR_HASH_KEY_STRING)) {

        osrfLogDebug(OSRF_LOG_MARK, "WS removing cached recipient on disconnect");

        // remove it from the hash
        apr_hash_set(trans->stateful_session_cache, thread, APR_HASH_KEY_STRING, NULL);

        if (apr_hash_count(trans->stateful_session_cache) == 0) {
            osrfLogDebug(OSRF_LOG_MARK, "WS re-setting stateful_session_pool");

            // memory accumulates in the stateful_session_pool as
            // sessions are cached then un-cached.  Un-caching removes
            // strings from the hash, but not from the pool.  Clear the
            // pool here. note: apr_pool_clear does not free memory, it
            // reclaims it for use again within the pool.  This is more
            // effecient than freeing and allocating every time.
            apr_pool_clear(trans->stateful_session_pool);
        }
    }
}
Example #9
/* A status callback function for printing STATUS for PATH. */
static svn_error_t *
print_status(void *baton,
             const char *path,
             svn_wc_status2_t *status,
             apr_pool_t *pool)
{
    struct status_baton *sb = baton;

    /* If there's a changelist attached to the entry, then we don't print
       the item, but instead dup & cache the status structure for later. */
    if (status->entry && status->entry->changelist)
    {
        /* The hash maps a changelist name to an array of status_cache
           structures. */
        apr_array_header_t *path_array;
        const char *cl_key = apr_pstrdup(sb->cl_pool, status->entry->changelist);
        struct status_cache *scache = apr_pcalloc(sb->cl_pool, sizeof(*scache));
        scache->path = apr_pstrdup(sb->cl_pool, path);
        scache->status = svn_wc_dup_status2(status, sb->cl_pool);

        path_array = (apr_array_header_t *)
                     apr_hash_get(sb->cached_changelists, cl_key, APR_HASH_KEY_STRING);
        if (path_array == NULL)
        {
            path_array = apr_array_make(sb->cl_pool, 1,
                                        sizeof(struct status_cache *));
            apr_hash_set(sb->cached_changelists, cl_key,
                         APR_HASH_KEY_STRING, path_array);
        }

        APR_ARRAY_PUSH(path_array, struct status_cache *) = scache;
        return SVN_NO_ERROR;
    }

    return print_status_normal_or_xml(baton, path, status, pool);
}
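The hash-of-arrays layout above (one apr_array_header_t per changelist, created on first use) is a common way to get multimap behaviour from apr_hash_t. A hedged sketch of that pattern in isolation, with illustrative names and the usual assumption that one pool owns the hash, the keys, and the arrays:

#include <apr_hash.h>
#include <apr_tables.h>
#include <apr_strings.h>

/* Append `path` to the array stored under `changelist`, creating the
 * array the first time the key is seen. */
static void group_by_changelist(apr_hash_t *groups, apr_pool_t *pool,
                                const char *changelist, const char *path)
{
    apr_array_header_t *paths =
        apr_hash_get(groups, changelist, APR_HASH_KEY_STRING);

    if (paths == NULL) {
        paths = apr_array_make(pool, 4, sizeof(const char *));
        apr_hash_set(groups, apr_pstrdup(pool, changelist),
                     APR_HASH_KEY_STRING, paths);
    }
    APR_ARRAY_PUSH(paths, const char *) = apr_pstrdup(pool, path);
}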
Example #10
void compact_hportals(portal_context_t *hpc)
{
    apr_hash_index_t *hi;
    host_portal_t *hp;
    void *val;

    apr_thread_mutex_lock(hpc->lock);

    for (hi=apr_hash_first(hpc->pool, hpc->table); hi != NULL; hi = apr_hash_next(hi)) {
        apr_hash_this(hi, NULL, NULL, &val);
        hp = (host_portal_t *)val;

        hportal_lock(hp);

        _reap_hportal(hp, 1);  //** Clean up any closed connections

        compact_hportal_direct(hp);

        if ((hp->n_conn == 0) && (hp->closing_conn == 0) && (tbx_stack_count(hp->que) == 0) &&
                (tbx_stack_count(hp->direct_list) == 0) && (tbx_stack_count(hp->closed_que) == 0)) { //** Not in use, so remove it
            if (tbx_stack_count(hp->conn_list) != 0) {
                log_printf(0, "ERROR! DANGER WILL ROBINSON! tbx_stack_count(hp->conn_list)=%d hp=%s\n", tbx_stack_count(hp->conn_list), hp->skey);
                tbx_log_flush();
                assert(tbx_stack_count(hp->conn_list) == 0);
            } else {
                hportal_unlock(hp);
                apr_hash_set(hpc->table, hp->skey, APR_HASH_KEY_STRING, NULL);  //** This removes the key
                destroy_hportal(hp);
            }
        } else {
            hportal_unlock(hp);
        }
    }

    apr_thread_mutex_unlock(hpc->lock);
}
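Removing the current entry while iterating, as done above with apr_hash_set(..., NULL), is the documented way to delete from an apr_hash_t inside an apr_hash_first()/apr_hash_next() loop; modifying any other entry during the walk gives unpredictable results. A stripped-down sketch of that prune-while-iterating idiom with an illustrative predicate:

#include <apr_hash.h>

/* Drop every entry whose value fails keep(). Only the entry the iterator
 * currently points at may safely be deleted during the walk. */
static void prune_table(apr_pool_t *pool, apr_hash_t *table,
                        int (*keep)(void *val))
{
    apr_hash_index_t *hi;

    for (hi = apr_hash_first(pool, table); hi; hi = apr_hash_next(hi)) {
        const void *key;
        apr_ssize_t klen;
        void *val;

        apr_hash_this(hi, &key, &klen, &val);
        if (!keep(val))
            apr_hash_set(table, key, klen, NULL);   /* NULL value deletes */
    }
}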
Example #11
// 卡牌合成
apr_status_t card_compose_parser(
	apr_size_t nargs,
	char ** argv,
	apr_hash_t * res_subhash,
	apr_pool_t * res_pool)
{
	card_compose_t * clu = (card_compose_t *)
		apr_palloc(res_pool, sizeof(card_compose_t));
	clu->level = (level_t)apr_atoi64(argv[0]);
	clu->compose_exp[0] = (apr_int32_t)apr_atoi64(argv[1]);
	clu->compose_exp[1] = (apr_int32_t)apr_atoi64(argv[2]);
	clu->compose_exp[2] = (apr_int32_t)apr_atoi64(argv[3]);
	clu->compose_exp[3] = (apr_int32_t)apr_atoi64(argv[4]);

	level_t key = clu->level;
	if (apr_hash_get(res_subhash, &key, sizeof(key)) == NULL) {
		/* Key on the field inside the pool-allocated record: apr_hash_set()
		 * stores the key pointer, so it must outlive this stack frame. */
		apr_hash_set(res_subhash, &clu->level, sizeof(clu->level), clu);
	} else {
		fprintf(stderr,
			"[RES] FAULT: duplicate key = %d\n",
			key);
	}
	return APR_SUCCESS;
}
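Unlike the string-keyed examples, this one uses a binary key with an explicit length (sizeof the level field). Because apr_hash_set() stores the key pointer rather than copying the key bytes, the key must point at storage that lives as long as the hash; keying on a field of the pool-allocated record itself is the usual way to guarantee that. A sketch of the same idea with illustrative types:

#include <apr_hash.h>

typedef struct {
    apr_int32_t id;       /* key field; lives as long as the record */
    const char *name;
} record_t;

/* Index a pool-allocated record under its own id field. */
static void index_record(apr_hash_t *by_id, apr_pool_t *pool,
                         apr_int32_t id, const char *name)
{
    record_t *rec = apr_palloc(pool, sizeof(*rec));
    rec->id = id;
    rec->name = name;
    /* The key points into the record, so key, value and hash all share
     * the same lifetime (the pool). */
    apr_hash_set(by_id, &rec->id, sizeof(rec->id), rec);
}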
Example #12
File: log.c Project: vocho/openqnx
/*
 * This implements the `svn_ra_neon__xml_endelm_cb' prototype.
 */
static svn_error_t *
log_end_element(void *baton, int state,
                const char *nspace, const char *name)
{
  struct log_baton *lb = baton;

  switch (state)
    {
    case ELEM_version_name:
      lb->log_entry->revision = SVN_STR_TO_REV(lb->cdata->data);
      break;
    case ELEM_creator_displayname:
      if (lb->want_author)
        {
          if (! lb->log_entry->revprops)
            lb->log_entry->revprops = apr_hash_make(lb->subpool);
          apr_hash_set(lb->log_entry->revprops, SVN_PROP_REVISION_AUTHOR,
                       APR_HASH_KEY_STRING,
                       svn_string_create_from_buf(lb->cdata, lb->subpool));
        }
      break;
    case ELEM_log_date:
      if (lb->want_date)
        {
          if (! lb->log_entry->revprops)
            lb->log_entry->revprops = apr_hash_make(lb->subpool);
          apr_hash_set(lb->log_entry->revprops, SVN_PROP_REVISION_DATE,
                       APR_HASH_KEY_STRING,
                       svn_string_create_from_buf(lb->cdata, lb->subpool));
        }
      break;
    case ELEM_added_path:
    case ELEM_replaced_path:
    case ELEM_deleted_path:
    case ELEM_modified_path:
      {
        char *path = apr_pstrdup(lb->subpool, lb->cdata->data);
        if (! lb->log_entry->changed_paths)
          lb->log_entry->changed_paths = apr_hash_make(lb->subpool);
        apr_hash_set(lb->log_entry->changed_paths, path, APR_HASH_KEY_STRING,
                     lb->this_path_item);
        break;
      }
    case ELEM_revprop:
      if (! lb->log_entry->revprops)
        lb->log_entry->revprops = apr_hash_make(lb->subpool);
      apr_hash_set(lb->log_entry->revprops, lb->revprop_name,
                   APR_HASH_KEY_STRING,
                   svn_string_create_from_buf(lb->cdata, lb->subpool));
      break;
    case ELEM_comment:
      if (lb->want_message)
        {
          if (! lb->log_entry->revprops)
            lb->log_entry->revprops = apr_hash_make(lb->subpool);
          apr_hash_set(lb->log_entry->revprops, SVN_PROP_REVISION_LOG,
                       APR_HASH_KEY_STRING,
                       svn_string_create_from_buf(lb->cdata, lb->subpool));
        }
      break;
    case ELEM_log_item:
      {
        /* Compatibility cruft so that we can provide limit functionality
           even if the server doesn't support it.

           If we've seen as many log entries as we're going to show just
           error out of the XML parser so we can avoid having to parse the
           remaining XML, but set lb->err to SVN_NO_ERROR so no error will
           end up being shown to the user. */
        if (lb->limit && (lb->nest_level == 0) && (++lb->count > lb->limit))
          {
            lb->limit_compat_bailout = TRUE;
            return svn_error_create(APR_EGENERAL, NULL, NULL);
          }
        SVN_ERR((*(lb->receiver))(lb->receiver_baton,
                                  lb->log_entry,
                                  lb->subpool));
        if (lb->log_entry->has_children)
          {
            lb->nest_level++;
          }
        if (! SVN_IS_VALID_REVNUM(lb->log_entry->revision))
          {
            assert(lb->nest_level);
            lb->nest_level--;
          }
        reset_log_item(lb);
      }
      break;
    }

  /* Stop collecting cdata */
  lb->want_cdata = NULL;
  return SVN_NO_ERROR;
}
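The repeated `if (! lb->log_entry->revprops) lb->log_entry->revprops = apr_hash_make(lb->subpool);` guard above is the create-on-first-use idiom for optional hashes. A tiny helper (a sketch, not part of the Subversion code) makes the pattern explicit:

#include <apr_hash.h>

/* Return *hashp, creating it in `pool` the first time it is needed. */
static apr_hash_t *ensure_hash(apr_hash_t **hashp, apr_pool_t *pool)
{
    if (*hashp == NULL)
        *hashp = apr_hash_make(pool);
    return *hashp;
}

/* e.g.  apr_hash_set(ensure_hash(&lb->log_entry->revprops, lb->subpool),
 *                    SVN_PROP_REVISION_AUTHOR, APR_HASH_KEY_STRING, val); */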
Example #13
    char *load_cpp_module( apr_pool_t *pool, server_rec *server, cpp_server_rec *server_rec, apr_hash_index_t *next)
    {
        apr_dso_handle_t *sohandle;
        apr_dso_handle_sym_t sosymbol;
        ApacheServerRec *pServer = new ApacheServerRec(server);
        cpp_factory_t *cur_handler;
        apr_ssize_t n_len;
        const void *n;
        void *p;
        char *name;
        char *path;
        apr_hash_this(next, &n, &n_len, &p);
        name = apr_pstrndup(pool, (char *)n, n_len);
        path = apr_pstrdup(pool, (char *)p);

        if ( apr_dso_load(&sohandle, path, pool) != APR_SUCCESS ) {
            char my_error[256];
            return apr_pstrcat( 
                pool, "Error Loading CPP SO ", path, " into server: ",
                apr_dso_error( sohandle, my_error, sizeof(my_error)),
                NULL);
        }

        /* Log the event */
        ap_log_perror( APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 
                       0, pool, 
                       "loaded CPP so: %s", name);

        if ( apr_dso_sym ( &sosymbol, sohandle, name) != APR_SUCCESS) {
            char my_error[256];
            return apr_pstrcat(
                pool, "Can't locate cpp_factory_t `", 
                name, "' in file ", path, ": ", 
                apr_dso_error(sohandle, my_error, sizeof(my_error)),
                NULL);
        }

        /* symbol should now be ref the factory structure with
         * the handler, input, output and protocol filters
         */
        cur_handler = (cpp_factory_t *) sosymbol;

        ApacheHandler *handler = cur_handler->handler_func ? 
            cur_handler->handler_func(pServer) : NULL;
        ApacheInputFilter *input_filter = cur_handler->input_filter_func ?
            cur_handler->input_filter_func(pServer) : NULL;
        ApacheOutputFilter *output_filter = cur_handler->output_filter_func ?
            cur_handler->output_filter_func(pServer) : NULL;
        ApacheProtocol *protocol = cur_handler->protocol_func ?
            cur_handler->protocol_func(pServer) : NULL;
        
        if( handler != NULL ) {
            apr_hash_set(server_rec->handler_hash, 
                         name, strlen(name), handler);
        }
        if( input_filter != NULL ) {
            apr_hash_set(server_rec->input_filter_hash, 
                         name, strlen(name), input_filter);
            apr_pool_cleanup_register(pool, (void *)input_filter, delete_input_filter,
                                      apr_pool_cleanup_null);
        }
        if( output_filter != NULL ) {
            apr_hash_set(server_rec->output_filter_hash, 
                         name, strlen(name), output_filter);
            apr_pool_cleanup_register(pool, (void *)output_filter, delete_output_filter,
                                      apr_pool_cleanup_null);
        }
        if( protocol != NULL ) {
            apr_hash_set(server_rec->protocol_hash, 
                         name, strlen(name), protocol);
            apr_pool_cleanup_register(pool, (void *)protocol, delete_protocol_handler,
                                      apr_pool_cleanup_null);
        }
        return NULL;
    }
Example #14
File: apu_dso.c Project: cmjonze/apr
apr_status_t apu_dso_load(apr_dso_handle_t **dlhandleptr,
                          apr_dso_handle_sym_t *dsoptr,
                          const char *module,
                          const char *modsym,
                          apr_pool_t *pool)
{
    apr_dso_handle_t *dlhandle = NULL;
    char *pathlist;
    char path[APR_PATH_MAX + 1];
    apr_array_header_t *paths;
    apr_pool_t *global;
    apr_status_t rv = APR_EDSOOPEN;
    char *eos = NULL;
    int i;

    *dsoptr = apr_hash_get(dsos, module, APR_HASH_KEY_STRING);
    if (*dsoptr) {
        return APR_EINIT;
    }

    /* The driver DSO must have exactly the same lifetime as the
     * drivers hash table; ignore the passed-in pool */
    global = apr_hash_pool_get(dsos);

    /* Retrieve our path search list or prepare for a single search */
    if ((apr_env_get(&pathlist, APR_DSOPATH, pool) != APR_SUCCESS)
          || (apr_filepath_list_split(&paths, pathlist, pool) != APR_SUCCESS))
        paths = apr_array_make(pool, 1, sizeof(char*));

#if defined(APR_DSO_LIBDIR)
    /* Always search our prefix path, but on some platforms such as
     * win32 this may be left undefined
     */
    (*((char **)apr_array_push(paths))) = APR_DSO_LIBDIR;
#endif

    for (i = 0; i < paths->nelts; ++i)
    {
#if defined(WIN32)
        /* Use win32 dso search semantics and attempt to
         * load the relative lib on the first pass.
         */
        if (!eos) {
            eos = path;
            --i;
        }
        else
#endif
        {
            eos = apr_cpystrn(path, ((char**)paths->elts)[i], sizeof(path));
            if ((eos > path) && (eos - path < sizeof(path) - 1))
                *(eos++) = '/';
        }
        apr_cpystrn(eos, module, sizeof(path) - (eos - path));

        rv = apr_dso_load(&dlhandle, path, global);
        if (dlhandleptr) {
            *dlhandleptr = dlhandle;
        }
        if (rv == APR_SUCCESS) { /* APR_EDSOOPEN */
            break;
        }
#if defined(APR_DSO_LIBDIR)
        else if (i < paths->nelts - 1) {
#else
        else {   /* No APR_DSO_LIBDIR to skip */
#endif
             /* try with apr-APR_MAJOR_VERSION appended */
            eos = apr_cpystrn(eos,
                              "apr-" APR_STRINGIFY(APR_MAJOR_VERSION) "/",
                              sizeof(path) - (eos - path));

            apr_cpystrn(eos, module, sizeof(path) - (eos - path));

            rv = apr_dso_load(&dlhandle, path, global);
            if (dlhandleptr) {
                *dlhandleptr = dlhandle;
            }
            if (rv == APR_SUCCESS) { /* APR_EDSOOPEN */
                break;
            }
        }
    }

    if (rv != APR_SUCCESS) /* APR_ESYMNOTFOUND */
        return rv;

    rv = apr_dso_sym(dsoptr, dlhandle, modsym);
    if (rv != APR_SUCCESS) { /* APR_ESYMNOTFOUND */
        apr_dso_unload(dlhandle);
    }
    else {
        module = apr_pstrdup(global, module);
        apr_hash_set(dsos, module, APR_HASH_KEY_STRING, *dsoptr);
    }
    return rv;
}
Example #15
APR_DECLARE(apr_status_t) stomp_read(stomp_connection *connection, stomp_frame **frame, apr_pool_t *pool) {
   
   apr_status_t rc;
   stomp_frame *f;
      
   f = apr_pcalloc(pool, sizeof(stomp_frame));
   if( f == NULL )
      return APR_ENOMEM;
   
   f->headers = apr_hash_make(pool);
   if( f->headers == NULL )
      return APR_ENOMEM;
         
#define CHECK_SUCCESS if( rc!=APR_SUCCESS ) { return rc; }
   
   // Parse the frame out.
   {
      char *p;
	  int length;
      
      // Parse the command.
	  rc = stomp_read_line(connection, &p, &length, pool);
	  CHECK_SUCCESS;

      f->command = p;
      
      // Start parsing the headers.
      while( 1 ) {
         rc = stomp_read_line(connection, &p, &length, pool);
		 CHECK_SUCCESS;
		 
		 // Done with headers
		 if(length == 0)
			break;

         {
            // Parse the header line.
            char *p2; 
            void *key;
            void *value;
            
            p2 = strstr(p,":");
            if( p2 == NULL ) {
               // Expected at least one ':' to delimit the key from the value.
               return APR_EGENERAL;
            }
            
            // Null terminate the key
            *p2=0;            
            key = p;
            
            // The rest is the value.
            value = p2+1;
            
            // Insert key/value into hash table.
            apr_hash_set(f->headers, key, APR_HASH_KEY_STRING, value);            
         }
      }
      
      // Check for content length
	  {
		  char* content_length = apr_hash_get(f->headers, "content-length", APR_HASH_KEY_STRING);
		  if(content_length) {
			  char endbuffer[2];
			  apr_size_t length = 2;

			  f->body_length = atoi(content_length);
			  f->body = apr_pcalloc(pool, f->body_length);
			  rc = apr_socket_recv(connection->socket, f->body, &f->body_length);
			  CHECK_SUCCESS;

			  // Expect a \n after the end
			  rc = apr_socket_recv(connection->socket, endbuffer, &length);
			  CHECK_SUCCESS;
			  if(length != 2 || endbuffer[0] != '\0' || endbuffer[1] != '\n')
				  return APR_EGENERAL;
		  }
		  else
		  {
			  // The remainder of the buffer (including the \n at the end) is the body
			  rc = stomp_read_buffer(connection, &f->body, pool);
			  CHECK_SUCCESS;
		  }
	  }
   }
   
#undef CHECK_SUCCESS
   *frame = f;
	return APR_SUCCESS;
}
Example #16
File: apr_dbd.c Project: cookrn/openamq
APU_DECLARE(apr_status_t) apr_dbd_get_driver(apr_pool_t *pool, const char *name,
                                             const apr_dbd_driver_t **driver)
{
#if APU_DSO_BUILD
    char modname[32];
    char symname[34];
    apr_dso_handle_sym_t symbol;
#endif
    apr_status_t rv;

#if APU_DSO_BUILD
    rv = apu_dso_mutex_lock();
    if (rv) {
        return rv;
    }
#endif
    *driver = apr_hash_get(drivers, name, APR_HASH_KEY_STRING);
    if (*driver) {
#if APU_DSO_BUILD
        apu_dso_mutex_unlock();
#endif
        return APR_SUCCESS;
    }

#if APU_DSO_BUILD
    /* The driver DSO must have exactly the same lifetime as the
     * drivers hash table; ignore the passed-in pool */
    pool = apr_hash_pool_get(drivers);

#if defined(NETWARE)
    apr_snprintf(modname, sizeof(modname), "dbd%s.nlm", name);
#elif defined(WIN32)
    apr_snprintf(modname, sizeof(modname), 
                 "apr_dbd_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".dll", name);
#else
    apr_snprintf(modname, sizeof(modname), 
                 "apr_dbd_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".so", name);
#endif
    apr_snprintf(symname, sizeof(symname), "apr_dbd_%s_driver", name);
    rv = apu_dso_load(&symbol, modname, symname, pool);
    if (rv != APR_SUCCESS) { /* APR_EDSOOPEN or APR_ESYMNOTFOUND? */
        if (rv == APR_EINIT) { /* previously loaded?!? */
            name = apr_pstrdup(pool, name);
            apr_hash_set(drivers, name, APR_HASH_KEY_STRING, *driver);
            rv = APR_SUCCESS;
        }
        goto unlock;
    }
    *driver = symbol;
    if ((*driver)->init) {
        (*driver)->init(pool);
    }
    name = apr_pstrdup(pool, name);
    apr_hash_set(drivers, name, APR_HASH_KEY_STRING, *driver);

unlock:
    apu_dso_mutex_unlock();

#else /* not builtin and !APR_HAS_DSO => not implemented */
    rv = APR_ENOTIMPL;
#endif

    return rv;
}
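Note the `name = apr_pstrdup(pool, name);` immediately before each apr_hash_set() call above: the hash keeps only the key pointer, and the drivers table outlives the caller's `name`, so the key is first copied into the pool that owns the hash. A minimal sketch of that rule in isolation (illustrative names):

#include <apr_hash.h>
#include <apr_strings.h>

/* Register `driver` under `name`. The caller's string may be transient,
 * so duplicate it into the pool the hash itself lives in. */
static void register_driver(apr_hash_t *drivers, const char *name,
                            const void *driver)
{
    apr_pool_t *pool = apr_hash_pool_get(drivers);  /* hash's own pool */
    apr_hash_set(drivers, apr_pstrdup(pool, name),
                 APR_HASH_KEY_STRING, driver);
}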
Example #17
File: gpsmon.c Project: phan-pivotal/gpdb
static void gx_gettcpcmd(SOCKET sock, short event, void* arg)
{
	char dump;
	int n, e;
	apr_pool_t* oldpool;
	apr_hash_t* qetab;
	apr_hash_t* qdtab;
	apr_hash_t* pidtab;
	apr_hash_t* segtab;
	if (event & EV_TIMEOUT) // didn't get command from gpmmon, quit
	{
		if(gx.tcp_sock)
		{
			close(gx.tcp_sock);
			gx.tcp_sock=0;
		}
		return;
	}
	apr_hash_t* querysegtab;
	n = recv(sock, &dump, 1, 0);
	if (n == 0)
		gx_exit("peer closed");

	if (n == -1)
		gx_exit("socket error");

	if (dump != 'D')
		gx_exit("bad data");

	TR1(("start dump %c\n", dump));

	qetab = gx.qexectab;
	qdtab = gx.qlogtab;
	pidtab = gx.pidtab;
	segtab = gx.segmenttab;
	querysegtab = gx.querysegtab;

	oldpool = apr_hash_pool_get(qetab);

	/* make new  hashtabs for next cycle */
	{
		apr_pool_t* newpool;
		if (0 != (e = apr_pool_create_alloc(&newpool, gx.pool)))
		{
			gpsmon_fatalx(FLINE, e, "apr_pool_create_alloc failed");
		}
		/* qexec hash table */
		gx.qexectab = apr_hash_make(newpool);
		CHECKMEM(gx.qexectab);

		/* qlog hash table */
		gx.qlogtab = apr_hash_make(newpool);
		CHECKMEM(gx.qlogtab);

		/* segment hash table */
		gx.segmenttab = apr_hash_make(newpool);
		CHECKMEM(gx.segmenttab);

		/* queryseg hash table */
		gx.querysegtab = apr_hash_make(newpool);
		CHECKMEM(gx.querysegtab);

		/* pidtab hash table */
		gx.pidtab = apr_hash_make(newpool);
		CHECKMEM(gx.pidtab);
	}

	/* push out a metric of the machine */
	send_machine_metrics(sock);
	send_fsinfo(sock);

	/* push out records */
	{
		apr_hash_index_t* hi;
		gp_smon_to_mmon_packet_t* ppkt = 0;
		gp_smon_to_mmon_packet_t localPacketObject;
		pidrec_t* pidrec;
		int count = 0;
		apr_hash_t* query_cpu_table = NULL;

		for (hi = apr_hash_first(0, querysegtab); hi; hi = apr_hash_next(hi))
		{
 			void* vptr;
			apr_hash_this(hi, 0, 0, &vptr);
			ppkt = vptr;
			if (ppkt->header.pkttype != GPMON_PKTTYPE_QUERYSEG)
				continue;

			TR2(("sending magic %x, pkttype %d\n", ppkt->header.magic, ppkt->header.pkttype));
			send_smon_to_mon_pkt(sock, ppkt);
			count++;
		}

		for (hi = apr_hash_first(0, segtab); hi; hi = apr_hash_next(hi))
		{
 			void* vptr;
			apr_hash_this(hi, 0, 0, &vptr);
			ppkt = vptr;
			if (ppkt->header.pkttype != GPMON_PKTTYPE_SEGINFO)
				continue;

			/* fill in hostname */
			strncpy(ppkt->u.seginfo.hostname, gx.hostname, sizeof(ppkt->u.seginfo.hostname) - 1);
			ppkt->u.seginfo.hostname[sizeof(ppkt->u.seginfo.hostname) - 1] = 0;

			TR2(("sending magic %x, pkttype %d\n", ppkt->header.magic, ppkt->header.pkttype));
			send_smon_to_mon_pkt(sock, ppkt);
			count++;
		}


		for (hi = apr_hash_first(0, qdtab); hi; hi = apr_hash_next(hi))
		{
 			void* vptr;
			apr_hash_this(hi, 0, 0, &vptr);
			ppkt = vptr;
			if (ppkt->header.pkttype != GPMON_PKTTYPE_QLOG)
				continue;
			TR2(("sending magic %x, pkttype %d\n", ppkt->header.magic, ppkt->header.pkttype));
			send_smon_to_mon_pkt(sock, ppkt);
			count++;
		}

		for (hi = apr_hash_first(0, qetab); hi; hi = apr_hash_next(hi))
		{
			gpmon_qexec_t* qexec;
			void *vptr;

			apr_hash_this(hi, 0, 0, &vptr);
			qexec = vptr;
			/* fill in _p_metrics */
			pidrec = apr_hash_get(pidtab, &qexec->key.hash_key.pid, sizeof(qexec->key.hash_key.pid));
			if (pidrec) {
				qexec->_p_metrics = pidrec->p_metrics;
				qexec->_cpu_elapsed = pidrec->cpu_elapsed;
			} else {
				memset(&qexec->_p_metrics, 0, sizeof(qexec->_p_metrics));
			}

			/* fill in _hname */
			strncpy(qexec->_hname, gx.hostname, sizeof(qexec->_hname) - 1);
			qexec->_hname[sizeof(qexec->_hname) - 1] = 0;

			if (0 == create_qexec_packet(qexec, &localPacketObject)) {
				break;
			}

			TR2(("sending qexec, pkttype %d\n", localPacketObject.header.pkttype));
			send_smon_to_mon_pkt(sock, &localPacketObject);
			count++;
		}

		// calculate CPU utilization per query for this machine
		query_cpu_table = apr_hash_make(oldpool);
		CHECKMEM(query_cpu_table);

		// loop through PID's and add to Query CPU Hash Table
		for (hi = apr_hash_first(0, pidtab); hi; hi = apr_hash_next(hi))
		{
			void* vptr;
			pidrec_t* lookup;

			apr_hash_this(hi, 0, 0, &vptr);
			pidrec = vptr;

			TR2(("tmid %d ssid %d ccnt %d pid %d (CPU elapsed %d CPU Percent %.2f)\n",
				pidrec->query_key.tmid, pidrec->query_key.ssid, pidrec->query_key.ccnt, pidrec->pid,
				pidrec->cpu_elapsed, pidrec->p_metrics.cpu_pct));

			// table is keyed on query key
			lookup = apr_hash_get(query_cpu_table, &pidrec->query_key, sizeof(pidrec->query_key));

			if (lookup)
			{
				// found other pids with same query key so add the metrics to that

				lookup->cpu_elapsed += pidrec->cpu_elapsed;
				lookup->p_metrics.cpu_pct += pidrec->p_metrics.cpu_pct;
			}
			else
			{
				// insert existing pid record into table keyed by query key
				apr_hash_set(query_cpu_table, &pidrec->query_key, sizeof(pidrec->query_key), pidrec);
			}

		}

		// reset packet to 0
		ppkt = &localPacketObject;
		memset(ppkt, 0, sizeof(gp_smon_to_mmon_packet_t));
		gp_smon_to_mmon_set_header(ppkt,GPMON_PKTTYPE_QUERY_HOST_METRICS);

		// add the hostname into the packet for DEBUGGING purposes only.  This is not used
		strncpy(ppkt->u.qlog.user, gx.hostname, sizeof(ppkt->u.qlog.user) - 1);
		ppkt->u.qlog.user[sizeof(ppkt->u.qlog.user) - 1] = 0;

		// loop through the query per cpu table and send the metrics
		for (hi = apr_hash_first(0, query_cpu_table); hi; hi = apr_hash_next(hi))
		{
			void* vptr;
			apr_hash_this(hi, 0, 0, &vptr);
			pidrec = vptr;

			ppkt->u.qlog.key.tmid = pidrec->query_key.tmid;
			ppkt->u.qlog.key.ssid = pidrec->query_key.ssid;
			ppkt->u.qlog.key.ccnt = pidrec->query_key.ccnt;
			ppkt->u.qlog.cpu_elapsed = pidrec->cpu_elapsed;
			ppkt->u.qlog.p_metrics.cpu_pct = pidrec->p_metrics.cpu_pct;

			TR2(("SEND tmid %d ssid %d ccnt %d (CPU elapsed %d CPU Percent %.2f)\n",
				ppkt->u.qlog.key.tmid, ppkt->u.qlog.key.ssid, ppkt->u.qlog.key.ccnt,
				ppkt->u.qlog.cpu_elapsed, ppkt->u.qlog.p_metrics.cpu_pct));

			send_smon_to_mon_pkt(sock, ppkt);
			count++;
		}

		TR1(("end dump ... sent %d entries\n", count));
	}

	/* get rid of the old pool */
	{
		apr_pool_destroy(oldpool);
	}
	struct timeval tv;
	tv.tv_sec = opt.terminate_timeout;
	tv.tv_usec = 0;
	if (event_add(&gx.tcp_event, &tv)) //reset timeout
	{
		gpmon_warningx(FLINE, APR_FROM_OS_ERROR(errno), "event_add failed");
	}
	return;
}
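The per-query CPU roll-up above is the classic look-up-then-insert-or-update use of apr_hash_t: probe with apr_hash_get(), allocate and insert on a miss, accumulate on a hit. A stripped-down sketch with hypothetical types:

#include <apr_hash.h>

typedef struct { int tmid, ssid, ccnt; } query_key_t;
typedef struct { query_key_t key; double cpu_pct; } query_total_t;

/* Add one process sample to the running total for its query key. */
static void add_cpu_sample(apr_hash_t *totals, apr_pool_t *pool,
                           const query_key_t *key, double cpu_pct)
{
    query_total_t *tot = apr_hash_get(totals, key, sizeof(*key));

    if (tot == NULL) {
        tot = apr_pcalloc(pool, sizeof(*tot));
        tot->key = *key;        /* key bytes owned by the pool-allocated record */
        apr_hash_set(totals, &tot->key, sizeof(tot->key), tot);
    }
    tot->cpu_pct += cpu_pct;
}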
Example #18
static svn_error_t *
test_stream_seek_translated(apr_pool_t *pool)
{
  svn_stream_t *stream, *translated_stream;
  svn_stringbuf_t *stringbuf;
  char buf[44]; /* strlen("One$MyKeyword: my keyword was expanded $Two") + \0 */
  apr_size_t len;
  svn_stream_mark_t *mark;
  apr_hash_t *keywords;
  svn_string_t *keyword_val;

  keywords = apr_hash_make(pool);
  keyword_val = svn_string_create("my keyword was expanded", pool);
  apr_hash_set(keywords, "MyKeyword", APR_HASH_KEY_STRING, keyword_val);
  stringbuf = svn_stringbuf_create("One$MyKeyword$Two", pool);
  stream = svn_stream_from_stringbuf(stringbuf, pool);
  translated_stream = svn_subst_stream_translated(stream, APR_EOL_STR,
                                                  FALSE, keywords, TRUE, pool);
  /* Seek from outside of keyword to inside of keyword. */
  len = 25;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 25);
  buf[25] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "One$MyKeyword: my keyword");
  SVN_ERR(svn_stream_mark(translated_stream, &mark, pool));
  SVN_ERR(svn_stream_reset(translated_stream));
  SVN_ERR(svn_stream_seek(translated_stream, mark));
  len = 4;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 4);
  buf[4] = '\0';
  SVN_TEST_STRING_ASSERT(buf, " was");

  SVN_ERR(svn_stream_seek(translated_stream, mark));
  SVN_ERR(svn_stream_skip(translated_stream, 2));
  len = 2;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 2);
  buf[len] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "as");

  /* Seek from inside of keyword to inside of keyword. */
  SVN_ERR(svn_stream_mark(translated_stream, &mark, pool));
  len = 9;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 9);
  buf[9] = '\0';
  SVN_TEST_STRING_ASSERT(buf, " expanded");
  SVN_ERR(svn_stream_seek(translated_stream, mark));
  len = 9;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 9);
  buf[9] = '\0';
  SVN_TEST_STRING_ASSERT(buf, " expanded");

  SVN_ERR(svn_stream_seek(translated_stream, mark));
  SVN_ERR(svn_stream_skip(translated_stream, 6));
  len = 3;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 3);
  buf[len] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "ded");

  /* Seek from inside of keyword to outside of keyword. */
  SVN_ERR(svn_stream_mark(translated_stream, &mark, pool));
  len = 4;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 4);
  buf[4] = '\0';
  SVN_TEST_STRING_ASSERT(buf, " $Tw");
  SVN_ERR(svn_stream_seek(translated_stream, mark));
  len = 4;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 4);
  buf[4] = '\0';
  SVN_TEST_STRING_ASSERT(buf, " $Tw");

  SVN_ERR(svn_stream_seek(translated_stream, mark));
  SVN_ERR(svn_stream_skip(translated_stream, 2));
  len = 2;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 2);
  buf[len] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "Tw");

  /* Seek from outside of keyword to outside of keyword. */
  SVN_ERR(svn_stream_mark(translated_stream, &mark, pool));
  len = 1;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 1);
  buf[1] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "o");
  SVN_ERR(svn_stream_seek(translated_stream, mark));
  len = 1;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 1);
  buf[1] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "o");

  SVN_ERR(svn_stream_seek(translated_stream, mark));
  SVN_ERR(svn_stream_skip(translated_stream, 2));
  len = 1;
  SVN_ERR(svn_stream_read_full(translated_stream, buf, &len));
  SVN_TEST_ASSERT(len == 0);
  buf[len] = '\0';
  SVN_TEST_STRING_ASSERT(buf, "");

  SVN_ERR(svn_stream_close(stream));

  return SVN_NO_ERROR;
}
Example #19
File: gpsmon.c Project: phan-pivotal/gpdb
static void get_pid_metrics(apr_int32_t pid, apr_int32_t tmid, apr_int32_t ssid, apr_int32_t ccnt)
{
	apr_int32_t status;
	sigar_proc_cpu_t cpu;
	sigar_proc_mem_t mem;
	sigar_proc_fd_t fd;
	pidrec_t* rec;
	apr_pool_t* pool = apr_hash_pool_get(gx.pidtab);

	rec = apr_hash_get(gx.pidtab, &pid, sizeof(pid));
	if (rec && rec->updated_tick == gx.tick)
		return; /* updated in current cycle */

	memset(&cpu, 0, sizeof(cpu));
	memset(&mem, 0, sizeof(mem));
	memset(&fd, 0, sizeof(fd));

	TR2(("--------------------- starting %d\n", pid));

	if (!rec)
	{
		sigar_proc_exe_t exe;

		/* There might be cases where the pid no longer exist, so we'll just
		 * zero out the memory first before doing anything */
		rec = apr_pcalloc(pool, sizeof(*rec));
		CHECKMEM(rec);

		rec->pid = pid;
		rec->query_key.tmid = tmid;
		rec->query_key.ssid = ssid;
		rec->query_key.ccnt = ccnt;

		rec->pname = rec->cwd = 0;
		if (0 == sigar_proc_exe_get(gx.sigar, pid, &exe))
		{
			rec->pname = apr_pstrdup(pool, exe.name);
			rec->cwd = apr_pstrdup(pool, exe.root);
		}
		if (!rec->pname)
			rec->pname = "unknown";
		if (!rec->cwd)
			rec->cwd = "unknown";

		apr_hash_set(gx.pidtab, &rec->pid, sizeof(rec->pid), rec);
	}

	status = sigar_proc_mem_get(gx.sigar, pid, &mem);
	/* ESRCH is error 3: (No such process) */
	if (status != SIGAR_OK)
	{
		if (status != ESRCH) {
			TR2(("[WARNING] %s. PID: %d\n", sigar_strerror(gx.sigar, status), pid));
		}
		return;
	}

	status = sigar_proc_cpu_get(gx.sigar, pid, &cpu);
	if (status != SIGAR_OK)
	{
		if (status != ESRCH) {
			TR2(("[WARNING] %s. PID: %d\n", sigar_strerror(gx.sigar, status), pid));
		}
		return;
	}

	status = sigar_proc_fd_get(gx.sigar, pid, &fd);
	if (status != SIGAR_OK)
	{
		if (status != ESRCH) {
			TR2(("[WARNING] %s. PID: %d\n", sigar_strerror(gx.sigar, status), pid));
		}
		return;
	}

	rec->updated_tick = gx.tick;
	rec->p_metrics.fd_cnt = (apr_uint32_t) fd.total;
	rec->p_metrics.cpu_pct = (float) (cpu.percent * cpu_cores_utilization_multiplier);
	rec->p_metrics.mem.size = mem.size;
	rec->p_metrics.mem.resident = mem.resident;

#ifdef __linux__
	rec->p_metrics.mem.share = mem.share;
#else
	rec->p_metrics.mem.share = 0;
#endif

	rec->cpu_elapsed = cpu.total;
}
Example #20
/* ### FIXME:  Consider somehow sharing code with
   ### libsvn_repos/load-fs-vtable.c:renumber_mergeinfo_revs() */
static svn_error_t *
renumber_mergeinfo_revs(svn_string_t **final_val,
                        const svn_string_t *initial_val,
                        struct revision_baton *rb,
                        apr_pool_t *pool)
{
  apr_pool_t *subpool = svn_pool_create(pool);
  svn_mergeinfo_t mergeinfo, predates_stream_mergeinfo;
  svn_mergeinfo_t final_mergeinfo = apr_hash_make(subpool);
  apr_hash_index_t *hi;

  SVN_ERR(svn_mergeinfo_parse(&mergeinfo, initial_val->data, subpool));

  /* Issue #3020
     http://subversion.tigris.org/issues/show_bug.cgi?id=3020#desc16
     Remove mergeinfo older than the oldest revision in the dump stream
     and adjust its revisions by the difference between the head rev of
     the target repository and the current dump stream rev. */
  if (rb->pb->oldest_dumpstream_rev > 1)
    {
      SVN_ERR(svn_mergeinfo__filter_mergeinfo_by_ranges(
                  &predates_stream_mergeinfo, mergeinfo,
                  rb->pb->oldest_dumpstream_rev - 1, 0,
                  TRUE, subpool, subpool));
      SVN_ERR(svn_mergeinfo__filter_mergeinfo_by_ranges(
                  &mergeinfo, mergeinfo,
                  rb->pb->oldest_dumpstream_rev - 1, 0,
                  FALSE, subpool, subpool));
      SVN_ERR(svn_mergeinfo__adjust_mergeinfo_rangelists(
                  &predates_stream_mergeinfo,
                  predates_stream_mergeinfo,
                  -rb->rev_offset, subpool, subpool));
    }
  else
    {
      predates_stream_mergeinfo = NULL;
    }

  for (hi = apr_hash_first(subpool, mergeinfo); hi; hi = apr_hash_next(hi))
    {
      svn_rangelist_t *rangelist;
      struct parse_baton *pb = rb->pb;
      int i;
      const void *path;
      apr_ssize_t pathlen;
      void *val;

      apr_hash_this(hi, &path, &pathlen, &val);
      rangelist = val;

      /* Possibly renumber revisions in merge source's rangelist. */
      for (i = 0; i < rangelist->nelts; i++)
        {
          svn_revnum_t rev_from_map;
          svn_merge_range_t *range = APR_ARRAY_IDX(rangelist, i,
                                                   svn_merge_range_t *);
          rev_from_map = get_revision_mapping(pb->rev_map, range->start);
          if (SVN_IS_VALID_REVNUM(rev_from_map))
            {
              range->start = rev_from_map;
            }
          else if (range->start == pb->oldest_dumpstream_rev - 1)
            {
              /* Since the start revision of svn_merge_range_t are not
                 inclusive there is one possible valid start revision that
                 won't be found in the PB->REV_MAP mapping of load stream
                 revisions to loaded revisions: The revision immediately
                 preceding the oldest revision from the load stream.
                 This is a valid revision for mergeinfo, but not a valid
                 copy from revision (which PB->REV_MAP also maps for) so it
                 will never be in the mapping.

                 If that is what we have here, then find the mapping for the
                 oldest rev from the load stream and subtract 1 to get the
                 renumbered, non-inclusive, start revision. */
              rev_from_map = get_revision_mapping(pb->rev_map,
                                                  pb->oldest_dumpstream_rev);
              if (SVN_IS_VALID_REVNUM(rev_from_map))
                range->start = rev_from_map - 1;
            }
          else
            {
              /* If we can't remap the start revision then don't even bother
                 trying to remap the end revision.  It's possible we might
                 actually succeed at the latter, which can result in invalid
                 mergeinfo with a start rev > end rev.  If that gets into the
                 repository then a world of bustage breaks loose anytime that
                 bogus mergeinfo is parsed.  See
                 http://subversion.tigris.org/issues/show_bug.cgi?id=3020#desc16.
                 */
              continue;
            }

          rev_from_map = get_revision_mapping(pb->rev_map, range->end);
          if (SVN_IS_VALID_REVNUM(rev_from_map))
            range->end = rev_from_map;
        }
      apr_hash_set(final_mergeinfo, path, pathlen, rangelist);
    }

  if (predates_stream_mergeinfo)
    {
      SVN_ERR(svn_mergeinfo_merge2(final_mergeinfo, predates_stream_mergeinfo,
                                   subpool, subpool));
    }

  SVN_ERR(svn_mergeinfo__canonicalize_ranges(final_mergeinfo, subpool));

  SVN_ERR(svn_mergeinfo_to_string(final_val, final_mergeinfo, pool));
  svn_pool_destroy(subpool);

  return SVN_NO_ERROR;
}
Example #21
/**
 * Processes one transaction phase. The phase number does not
 * need to be explicitly provided since it's already available
 * in the modsec_rec structure.
 */
apr_status_t modsecurity_process_phase(modsec_rec *msr, unsigned int phase) {
    /* Check if we should run. */
    if ((msr->was_intercepted)&&(phase != PHASE_LOGGING)) {
        if (msr->txcfg->debuglog_level >= 4) {
            msr_log(msr, 4, "Skipping phase %d as request was already intercepted.", phase);
        }
        
        return 0;
    }

    /* Do not process the same phase twice. */
    if (msr->phase >= phase) {
        if (msr->txcfg->debuglog_level >= 4) {
            msr_log(msr, 4, "Skipping phase %d because it was previously run (at %d now).",
                phase, msr->phase);
        }
        
        return 0;
    }

    msr->phase = phase;

    /* Clear out the transformation cache at the start of each phase */
    if (msr->txcfg->cache_trans == MODSEC_CACHE_ENABLED) {
        if (msr->tcache) {
            apr_hash_index_t *hi;
            void *dummy;
            apr_table_t *tab;
            const void *key;
            apr_ssize_t klen;
            #ifdef CACHE_DEBUG
            apr_pool_t *mp = msr->msc_rule_mptmp;
            const apr_array_header_t *ctarr;
            const apr_table_entry_t *ctelts;
            msre_cache_rec *rec;
            int cn = 0;
            int ri;
            #else
            apr_pool_t *mp = msr->mp;
            #endif

            for (hi = apr_hash_first(mp, msr->tcache); hi; hi = apr_hash_next(hi)) {
                apr_hash_this(hi, &key, &klen, &dummy);
                tab = (apr_table_t *)dummy;

                if (tab == NULL) continue;

                #ifdef CACHE_DEBUG
                /* Dump the cache out as we clear */
                ctarr = apr_table_elts(tab);
                ctelts = (const apr_table_entry_t*)ctarr->elts;
                for (ri = 0; ri < ctarr->nelts; ri++) {
                    cn++;
                    rec = (msre_cache_rec *)ctelts[ri].val;
                    if (rec->changed) {
                        if (msr->txcfg->debuglog_level >= 9) {
                            msr_log(msr, 9, "CACHE: %5d) hits=%d key=%pp %x;%s=\"%s\" (%pp - %pp)",
                                cn, rec->hits, key, rec->num, rec->path, log_escape_nq_ex(mp, rec->val, rec->val_len),
                                rec->val, rec->val + rec->val_len);
                        }
                    }
                    else {
                        if (msr->txcfg->debuglog_level >= 9) {
                            msr_log(msr, 9, "CACHE: %5d) hits=%d key=%pp %x;%s=<no change>",
                                cn, rec->hits, key, rec->num, rec->path);
                        }
                    }
                }
                #endif

                apr_table_clear(tab);
                apr_hash_set(msr->tcache, key, klen, NULL);
            }

            if (msr->txcfg->debuglog_level >= 9) {
                msr_log(msr, 9, "Cleared transformation cache for phase %d", msr->phase);
            }
        }

        msr->tcache_items = 0;
        msr->tcache = apr_hash_make(msr->mp);
        if (msr->tcache == NULL) return -1;
    }

    switch(phase) {
        case 1 :
            return modsecurity_process_phase_request_headers(msr);
        case 2 :
            return modsecurity_process_phase_request_body(msr);
        case 3 :
            return modsecurity_process_phase_response_headers(msr);
        case 4 :
            return modsecurity_process_phase_response_body(msr);
        case 5 :
            return modsecurity_process_phase_logging(msr);
        default :
            msr_log(msr, 1, "Invalid processing phase: %d", msr->phase);
            break;
    }

    return -1;
}
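The cache reset above deletes each entry inside the loop (NULL value) and then rebuilds the table with apr_hash_make() for the next phase. On APR 1.3 and later, apr_hash_clear() empties a table in place; a sketch of both variants, assuming only the public apr_hash API:

#include <apr_version.h>
#include <apr_hash.h>

static void reset_cache(apr_pool_t *pool, apr_hash_t *tcache)
{
#if APR_VERSION_AT_LEAST(1, 3, 0)
    apr_hash_clear(tcache);                 /* drop every entry, keep the table */
#else
    apr_hash_index_t *hi;
    for (hi = apr_hash_first(pool, tcache); hi; hi = apr_hash_next(hi)) {
        const void *key;
        apr_ssize_t klen;
        apr_hash_this(hi, &key, &klen, NULL);
        apr_hash_set(tcache, key, klen, NULL);
    }
#endif
    /* Either way the entries' memory stays in the pool until the pool is
     * cleared or destroyed. */
}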
Example #22
/* This implements serf_bucket_headers_do_callback_fn_t.
 */
static int
capabilities_headers_iterator_callback(void *baton,
                                       const char *key,
                                       const char *val)
{
  options_context_t *opt_ctx = baton;
  svn_ra_serf__session_t *session = opt_ctx->session;

  if (svn_cstring_casecmp(key, "dav") == 0)
    {
      /* Each header may contain multiple values, separated by commas, e.g.:
           DAV: version-control,checkout,working-resource
           DAV: merge,baseline,activity,version-controlled-collection
           DAV: http://subversion.tigris.org/xmlns/dav/svn/depth */
      apr_array_header_t *vals = svn_cstring_split(val, ",", TRUE,
                                                   opt_ctx->pool);

      opt_ctx->received_dav_header = TRUE;

      /* Right now we only have a few capabilities to detect, so just
         seek for them directly.  This could be written slightly more
         efficiently, but that wouldn't be worth it until we have many
         more capabilities. */

      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_DEPTH, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_DEPTH, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_MERGEINFO, vals))
        {
          /* The server doesn't know what repository we're referring
             to, so it can't just say capability_yes. */
          if (!svn_hash_gets(session->capabilities,
                             SVN_RA_CAPABILITY_MERGEINFO))
            {
              svn_hash_sets(session->capabilities, SVN_RA_CAPABILITY_MERGEINFO,
                            capability_server_yes);
            }
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_LOG_REVPROPS, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_LOG_REVPROPS, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_ATOMIC_REVPROPS, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_ATOMIC_REVPROPS, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_PARTIAL_REPLAY, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_PARTIAL_REPLAY, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_INHERITED_PROPS, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_INHERITED_PROPS, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_REVERSE_FILE_REVS,
                                 vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_GET_FILE_REVS_REVERSE,
                        capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_EPHEMERAL_TXNPROPS, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_EPHEMERAL_TXNPROPS, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_INLINE_PROPS, vals))
        {
          session->supports_inline_props = TRUE;
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_REPLAY_REV_RESOURCE, vals))
        {
          session->supports_rev_rsrc_replay = TRUE;
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_SVNDIFF1, vals))
        {
          /* Use compressed svndiff1 format for servers that properly
             advertise this capability (Subversion 1.10 and greater). */
          session->supports_svndiff1 = TRUE;
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_LIST, vals))
        {
          svn_hash_sets(session->capabilities,
                        SVN_RA_CAPABILITY_LIST, capability_yes);
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_SVNDIFF2, vals))
        {
          /* Same for svndiff2. */
          session->supports_svndiff2 = TRUE;
        }
      if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_PUT_RESULT_CHECKSUM, vals))
        {
          session->supports_put_result_checksum = TRUE;
        }
    }

  /* SVN-specific headers -- if present, server supports HTTP protocol v2 */
  else if (!svn_ctype_casecmp(key[0], 'S')
           && !svn_ctype_casecmp(key[1], 'V')
           && !svn_ctype_casecmp(key[2], 'N'))
    {
      /* If we've not yet seen any information about supported POST
         requests, we'll initialize the list/hash with "create-txn"
         (which we know is supported by virtue of the server speaking
         HTTPv2 at all). */
      if (! session->supported_posts)
        {
          session->supported_posts = apr_hash_make(session->pool);
          apr_hash_set(session->supported_posts, "create-txn", 10, (void *)1);
        }

      if (svn_cstring_casecmp(key, SVN_DAV_ROOT_URI_HEADER) == 0)
        {
          session->repos_root = session->session_url;
          session->repos_root.path =
            (char *)svn_fspath__canonicalize(val, session->pool);
          session->repos_root_str =
            svn_urlpath__canonicalize(
                apr_uri_unparse(session->pool, &session->repos_root, 0),
                session->pool);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_ME_RESOURCE_HEADER) == 0)
        {
#ifdef SVN_DEBUG
          char *ignore_v2_env_var = getenv(SVN_IGNORE_V2_ENV_VAR);

          if (!(ignore_v2_env_var
                && apr_strnatcasecmp(ignore_v2_env_var, "yes") == 0))
            session->me_resource = apr_pstrdup(session->pool, val);
#else
          session->me_resource = apr_pstrdup(session->pool, val);
#endif
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_REV_STUB_HEADER) == 0)
        {
          session->rev_stub = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_REV_ROOT_STUB_HEADER) == 0)
        {
          session->rev_root_stub = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_TXN_STUB_HEADER) == 0)
        {
          session->txn_stub = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_TXN_ROOT_STUB_HEADER) == 0)
        {
          session->txn_root_stub = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_VTXN_STUB_HEADER) == 0)
        {
          session->vtxn_stub = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_VTXN_ROOT_STUB_HEADER) == 0)
        {
          session->vtxn_root_stub = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_REPOS_UUID_HEADER) == 0)
        {
          session->uuid = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_YOUNGEST_REV_HEADER) == 0)
        {
          opt_ctx->youngest_rev = SVN_STR_TO_REV(val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_ALLOW_BULK_UPDATES) == 0)
        {
          session->server_allows_bulk = apr_pstrdup(session->pool, val);
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_SUPPORTED_POSTS_HEADER) == 0)
        {
          /* May contain multiple values, separated by commas. */
          int i;
          apr_array_header_t *vals = svn_cstring_split(val, ",", TRUE,
                                                       session->pool);

          for (i = 0; i < vals->nelts; i++)
            {
              const char *post_val = APR_ARRAY_IDX(vals, i, const char *);

              svn_hash_sets(session->supported_posts, post_val, (void *)1);
            }
        }
      else if (svn_cstring_casecmp(key, SVN_DAV_REPOSITORY_MERGEINFO) == 0)
Example #23
static svn_error_t *
svn_fs_bdb__open_internal(bdb_env_baton_t **bdb_batonp,
                          const char *path,
                          u_int32_t flags, int mode,
                          apr_pool_t *pool)
{
  bdb_env_key_t key;
  bdb_env_t *bdb;
  svn_boolean_t panic;

  /* We can safely discard the open DB_CONFIG file handle.  If the
     environment descriptor is in the cache, the key's immutability is
     guaranteed.  If it's not, we don't care if the key changes
     between here and the actual insertion of the newly-created
     environment into the cache, because no other thread can touch the
     cache in the meantime. */
  SVN_ERR(bdb_cache_key(&key, NULL, path, pool));

  bdb = bdb_cache_get(&key, &panic);
  if (panic)
    return svn_error_create(SVN_ERR_FS_BERKELEY_DB, NULL,
                            db_strerror(DB_RUNRECOVERY));

  /* Make sure that the environment's open flags haven't changed. */
  if (bdb && bdb->flags != flags)
    {
      /* Handle changes to the DB_PRIVATE flag specially */
      if ((flags ^ bdb->flags) & DB_PRIVATE)
        {
          if (flags & DB_PRIVATE)
            return svn_error_create(SVN_ERR_FS_BERKELEY_DB, NULL,
                                    "Reopening a public Berkeley DB"
                                    " environment with private attributes");
          else
            return svn_error_create(SVN_ERR_FS_BERKELEY_DB, NULL,
                                    "Reopening a private Berkeley DB"
                                    " environment with public attributes");
        }

      /* Otherwise return a generic "flags-mismatch" error. */
      return svn_error_create(SVN_ERR_FS_BERKELEY_DB, NULL,
                              "Reopening a Berkeley DB environment"
                              " with different attributes");
    }

  if (!bdb)
    {
      svn_error_t *err;

      SVN_ERR(create_env(&bdb, path, svn_pool_create(bdb_cache_pool)));
      err = bdb_open(bdb, flags, mode);
      if (err)
        {
          /* Clean up, and we can't do anything about returned errors. */
          svn_error_clear(bdb_close(bdb));
          return svn_error_trace(err);
        }

      apr_hash_set(bdb_cache, &bdb->key, sizeof bdb->key, bdb);
      bdb->flags = flags;
      bdb->refcount = 1;
    }
  else
    {
      ++bdb->refcount;
    }

  *bdb_batonp = apr_palloc(pool, sizeof **bdb_batonp);
  (*bdb_batonp)->env = bdb->env;
  (*bdb_batonp)->bdb = bdb;
  (*bdb_batonp)->error_info = get_error_info(bdb);
  ++(*bdb_batonp)->error_info->refcount;
  apr_pool_cleanup_register(pool, *bdb_batonp, cleanup_env_baton,
                            apr_pool_cleanup_null);

  return SVN_NO_ERROR;
}
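
The cache insertion above keys the hash by a whole struct (&bdb->key with sizeof bdb->key) rather than a NUL-terminated string, and apr_hash_set() stores the key pointer without copying it, so the key must live at least as long as the entry. Below is a minimal, self-contained sketch of that binary-key pattern; the cache_key_t type, its fields, and the stored value are hypothetical and not taken from the example above.

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <string.h>
#include <stdio.h>

typedef struct cache_key_t {
    long device;
    long inode;
} cache_key_t;

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *cache;
    cache_key_t *stored;
    cache_key_t probe;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    cache = apr_hash_make(pool);

    /* apr_hash_set() keeps the key pointer, it does not copy the key, so
     * allocate it from a pool that outlives the entry.  memset() zeroes any
     * padding bytes, which matters because lookups compare raw key bytes. */
    stored = apr_palloc(pool, sizeof(*stored));
    memset(stored, 0, sizeof(*stored));
    stored->device = 1;
    stored->inode = 42;
    apr_hash_set(cache, stored, sizeof(*stored), "cached environment");

    /* Any key with the same bytes finds the entry again. */
    memset(&probe, 0, sizeof(probe));
    probe.device = 1;
    probe.inode = 42;
    printf("lookup: %s\n",
           (const char *)apr_hash_get(cache, &probe, sizeof(probe)));

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}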
Example #24
static struct sqlite_conn* _sqlite_get_conn(mapcache_context *ctx, mapcache_tile* tile, int readonly) {
  apr_status_t rv;
  mapcache_cache_sqlite *cache = (mapcache_cache_sqlite*) tile->tileset->cache;
  struct sqlite_conn *conn = NULL;
  apr_reslist_t *pool = NULL;
  apr_hash_t *pool_container;
  if (readonly) {
    pool_container = ro_connection_pools;
  } else {
    pool_container = rw_connection_pools;
  }
  if(!pool_container || NULL == (pool = apr_hash_get(pool_container,cache->cache.name, APR_HASH_KEY_STRING)) ) {
#ifdef APR_HAS_THREADS
    if(ctx->threadlock)
      apr_thread_mutex_lock((apr_thread_mutex_t*)ctx->threadlock);
#endif
    if(!ro_connection_pools) {
      ro_connection_pools = apr_hash_make(ctx->process_pool);
      rw_connection_pools = apr_hash_make(ctx->process_pool);
    }

    /* the pool probably doesn't exist yet, unless another thread created it while we held the mutex, so check again */
    pool = apr_hash_get(ro_connection_pools,cache->cache.name, APR_HASH_KEY_STRING);
    if(!pool) {
      /* there were no existing connection pools, so create them */
      rv = apr_reslist_create(&pool,
                              0 /* min */,
                              10 /* soft max */,
                              200 /* hard max */,
                              60*1000000 /*60 seconds, ttl*/,
                              _sqlite_reslist_get_ro_connection, /* resource constructor */
                              _sqlite_reslist_free_connection, /* resource destructor */
                              cache, ctx->process_pool);
      if(rv != APR_SUCCESS) {
        ctx->set_error(ctx,500,"failed to create bdb ro connection pool");
#ifdef APR_HAS_THREADS
        if(ctx->threadlock)
          apr_thread_mutex_unlock((apr_thread_mutex_t*)ctx->threadlock);
#endif
        return NULL;
      }
      apr_hash_set(ro_connection_pools,cache->cache.name,APR_HASH_KEY_STRING,pool);
      rv = apr_reslist_create(&pool,
                              0 /* min */,
                              1 /* soft max */,
                              1 /* hard max */,
                              60*1000000 /*60 seconds, ttl*/,
                              _sqlite_reslist_get_rw_connection, /* resource constructor */
                              _sqlite_reslist_free_connection, /* resource destructor */
                              cache, ctx->process_pool);
      if(rv != APR_SUCCESS) {
        ctx->set_error(ctx,500,"failed to create bdb rw connection pool");
#ifdef APR_HAS_THREADS
        if(ctx->threadlock)
          apr_thread_mutex_unlock((apr_thread_mutex_t*)ctx->threadlock);
#endif
        return NULL;
      }
      apr_hash_set(rw_connection_pools,cache->cache.name,APR_HASH_KEY_STRING,pool);
    }
#ifdef APR_HAS_THREADS
    if(ctx->threadlock)
      apr_thread_mutex_unlock((apr_thread_mutex_t*)ctx->threadlock);
#endif
    if(readonly)
      pool = apr_hash_get(ro_connection_pools,cache->cache.name, APR_HASH_KEY_STRING);
    else
      pool = apr_hash_get(rw_connection_pools,cache->cache.name, APR_HASH_KEY_STRING);
    assert(pool);
  }
  rv = apr_reslist_acquire(pool, (void **) &conn);
  if (rv != APR_SUCCESS) {
    ctx->set_error(ctx, 500, "failed to aquire connection to sqlite backend: %s", (conn && conn->errmsg)?conn->errmsg:"unknown error");
    return NULL;
  }
  return conn;
}
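
The function above keeps one apr_reslist_t per cache, stored in ro/rw apr_hash_t tables keyed by cache name, and borrows connections with apr_reslist_acquire(). The following is a minimal single-threaded sketch of that create-once/look-up-later pattern under stated assumptions: fake_conn_t, conn_make, conn_free and the "tiles" name are invented for illustration, and apr_reslist needs apr-util built with thread support (APR_HAS_THREADS).

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <apr_strings.h>
#include <apr_reslist.h>
#include <stdio.h>

typedef struct fake_conn_t {
    const char *name;
} fake_conn_t;

/* Resource constructor: called by the reslist when it needs a new connection. */
static apr_status_t conn_make(void **resource, void *params, apr_pool_t *pool)
{
    fake_conn_t *c = apr_palloc(pool, sizeof(*c));
    c->name = apr_pstrdup(pool, (const char *)params);
    *resource = c;
    return APR_SUCCESS;
}

/* Resource destructor: nothing to tear down for this fake connection. */
static apr_status_t conn_free(void *resource, void *params, apr_pool_t *pool)
{
    (void)resource; (void)params; (void)pool;
    return APR_SUCCESS;
}

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *pools_by_name;   /* cache name -> apr_reslist_t* */
    apr_reslist_t *reslist;
    fake_conn_t *conn;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    pools_by_name = apr_hash_make(pool);

    /* Create the connection pool once and register it under the cache name. */
    apr_reslist_create(&reslist, 0 /* min */, 2 /* soft max */, 4 /* hard max */,
                       60 * 1000000 /* ttl: 60 seconds */,
                       conn_make, conn_free, (void *)"tiles", pool);
    apr_hash_set(pools_by_name, "tiles", APR_HASH_KEY_STRING, reslist);

    /* Later: look the pool up by name and borrow a connection from it. */
    reslist = apr_hash_get(pools_by_name, "tiles", APR_HASH_KEY_STRING);
    if (apr_reslist_acquire(reslist, (void **)&conn) == APR_SUCCESS) {
        printf("acquired connection for '%s'\n", conn->name);
        apr_reslist_release(reslist, conn);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}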
Example #25
/** Register resource engine plugin */
MRCP_DECLARE(apt_bool_t) mrcp_server_plugin_register(mrcp_server_t *server, const char *path, const char *name)
{
	apr_dso_handle_t *plugin = NULL;
	apr_dso_handle_sym_t func_handle = NULL;
	apt_bool_t dso_err = FALSE;
	mrcp_plugin_creator_f plugin_creator = NULL;
	mrcp_resource_engine_t *engine;
	if(!path || !name) {
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Register Plugin: no name");
		return FALSE;
	}

	apt_log(APT_LOG_MARK,APT_PRIO_INFO,"Register Plugin [%s] [%s]",path,name);
	if(apr_dso_load(&plugin,path,server->pool) == APR_SUCCESS) {
		if(apr_dso_sym(&func_handle,plugin,MRCP_PLUGIN_SYM_NAME) == APR_SUCCESS) {
			if(func_handle) {
				plugin_creator = (mrcp_plugin_creator_f)(intptr_t)func_handle;
			}
		}
		else {
			dso_err = TRUE;
		}
	}
	else {
		dso_err = TRUE;
	}

	if(dso_err == TRUE) {
		char derr[512] = "";
		apr_dso_error(plugin,derr,sizeof(derr));
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Load DSO Symbol: %s", derr);
		apr_dso_unload(plugin);
		return FALSE;
	}

	if(!plugin_creator) {
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"No Entry Point Found for Plugin");
		apr_dso_unload(plugin);
		return FALSE;
	}

	engine = plugin_creator(server->pool);
	if(!engine) {
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Null Resource Engine");
		apr_dso_unload(plugin);
		return FALSE;
	}

	if(!mrcp_plugin_version_check(&engine->plugin_version)) {
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Incompatible Plugin Version [%d.%d.%d] < ["PLUGIN_VERSION_STRING"]",
			engine->plugin_version.major,
			engine->plugin_version.minor,
			engine->plugin_version.patch);
		apr_dso_unload(plugin);
		return FALSE;
	}

	mrcp_server_resource_engine_register(server,engine,name);
	apr_hash_set(server->plugin_table,name,APR_HASH_KEY_STRING,plugin);
	return TRUE;
}
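
The registration above boils down to: apr_dso_load() the shared object, apr_dso_sym() the entry point, call it, and remember the DSO handle in a hash keyed by plugin name so it can be unloaded later. A minimal sketch of that sequence follows; the plugin path "./libdemoplugin.so", the symbol name "demo_plugin_create", and the plugin_create_fn signature are placeholders, and the sketch assumes APR was built with DSO support.

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <apr_dso.h>
#include <stdint.h>
#include <stdio.h>

typedef void *(*plugin_create_fn)(apr_pool_t *pool);

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *plugin_table;          /* plugin name -> apr_dso_handle_t* */
    apr_dso_handle_t *dso = NULL;
    apr_dso_handle_sym_t sym = NULL;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    plugin_table = apr_hash_make(pool);

    if (apr_dso_load(&dso, "./libdemoplugin.so", pool) != APR_SUCCESS
        || apr_dso_sym(&sym, dso, "demo_plugin_create") != APR_SUCCESS) {
        char err[256] = "";
        if (dso)
            apr_dso_error(dso, err, sizeof(err));
        fprintf(stderr, "plugin load failed: %s\n", err);
    }
    else {
        /* Cast the resolved symbol to the expected entry-point type and call it. */
        plugin_create_fn create = (plugin_create_fn)(intptr_t)sym;
        void *engine = create(pool);
        printf("created engine %p\n", engine);

        /* Remember the handle by plugin name so it can be unloaded later. */
        apr_hash_set(plugin_table, "demo", APR_HASH_KEY_STRING, dso);
    }

    apr_pool_destroy(pool);   /* pool cleanup unloads any loaded DSOs */
    apr_terminate();
    return 0;
}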
Example #26
File: gpsmon.c  Project: phan-pivotal/gpdb
static void send_machine_metrics(SOCKET sock)
{
	sigar_mem_t mem;
	sigar_swap_t swap;
	sigar_cpu_t cpu;
	sigar_loadavg_t loadavg;
	sigar_disk_usage_t tdisk;
	sigar_net_interface_stat_t tnet;
	static int first = 1;
	static sigar_cpu_t pcpu = { 0 };
	static sigar_swap_t pswap = { 0 };
	gp_smon_to_mmon_packet_t pkt;
	struct timeval currenttime = { 0 };
	double seconds_duration = 0.0;
	sigar_file_system_usage_t fsusage;
	const char** fsdir;
	const char** netname;
	sigar_net_interface_stat_t netstat;
	int cpu_total_diff;

	/* NIC metrics */
	apr_uint64_t rx_packets = 0;
	apr_uint64_t tx_packets = 0;
	apr_uint64_t rx_bytes = 0;
	apr_uint64_t tx_bytes = 0;

	/* Disk metrics */
	apr_uint64_t reads = 0;
	apr_uint64_t writes = 0;
	apr_uint64_t read_bytes = 0;
	apr_uint64_t write_bytes = 0;

	memset(&mem, 0, sizeof(mem));
	sigar_mem_get(gx.sigar, &mem);
	TR2(("mem ram: %" FMT64 " total: %" FMT64 " used: %" FMT64 " free: %" FMT64 "\n",
		 mem.ram, mem.total, mem.used, mem.free));

	memset(&swap, 0, sizeof(swap));
	sigar_swap_get(gx.sigar, &swap);
	TR2(("swap total: %" FMT64 " used: %" FMT64 "page_in: %" FMT64 " page_out: %" FMT64 "\n",
		 swap.total, swap.used, swap.page_in, swap.page_out));

	memset(&cpu, 0, sizeof(cpu));
	sigar_cpu_get(gx.sigar, &cpu);
	TR2(("cpu user: %" FMT64 " sys: %" FMT64 " idle: %" FMT64 " wait: %" FMT64 " nice: %" FMT64 " total: %" FMT64 "\n",
			cpu.user, cpu.sys, cpu.idle, cpu.wait, cpu.nice, cpu.total));

	memset(&loadavg, 0, sizeof(loadavg));
	sigar_loadavg_get(gx.sigar, &loadavg);
	TR2(("load_avg: %e %e %e\n", loadavg.loadavg[0], loadavg.loadavg[1], loadavg.loadavg[2]));
	memset(&tdisk, 0, sizeof(tdisk));
	memset(&tnet, 0, sizeof(tnet));

	for (fsdir = gx.fslist; *fsdir; fsdir++)
	{
		int e = sigar_file_system_usage_get(gx.sigar, *fsdir, &fsusage);

		if (0 == e)
		{
			disk_device_t* disk = (disk_device_t*)apr_hash_get(disk_devices, *fsdir, APR_HASH_KEY_STRING);
			/* Check if this is a new device */
			if (!disk)
			{
				disk = (disk_device_t*)apr_palloc(gx.pool, sizeof(disk_device_t));
				disk->name = apr_pstrdup(gx.pool, *fsdir);
				disk->read_bytes = disk->write_bytes = disk->reads = disk->writes = 0;
				apr_hash_set(disk_devices, disk->name, APR_HASH_KEY_STRING, disk);
			}
			reads = disk->reads;
			writes = disk->writes;
			read_bytes = disk->read_bytes;
			write_bytes = disk->write_bytes;

			// DISK READS
			reads = metric_diff_calc(fsusage.disk.reads, disk->reads, disk->name, "disk reads");
			disk->reads = fsusage.disk.reads; // old = new

			// DISK WRITES
			writes = metric_diff_calc(fsusage.disk.writes, disk->writes, disk->name, "disk writes");
			disk->writes = fsusage.disk.writes; // old = new

			// WRITE BYTES
			write_bytes = metric_diff_calc(fsusage.disk.write_bytes, disk->write_bytes, disk->name, "disk write bytes");
			disk->write_bytes = fsusage.disk.write_bytes; // old = new

			// READ BYTES
			read_bytes = metric_diff_calc(fsusage.disk.read_bytes, disk->read_bytes, disk->name, "disk read bytes");
			disk->read_bytes = fsusage.disk.read_bytes; // old = new

			tdisk.reads += reads;
			tdisk.writes += writes;
			tdisk.write_bytes += write_bytes;
			tdisk.read_bytes += read_bytes;
		}
	}
	TR2(("disk reads: %" APR_UINT64_T_FMT " writes: %" APR_UINT64_T_FMT
		 " rbytes: %" APR_UINT64_T_FMT " wbytes: %" APR_UINT64_T_FMT "\n",
		 tdisk.reads, tdisk.writes, tdisk.read_bytes, tdisk.write_bytes));

	for (netname = gx.netlist; *netname; netname++)
	{
		int e = sigar_net_interface_stat_get(gx.sigar, *netname, &netstat);

		if (0 == e)
		{
			net_device_t* nic = (net_device_t*)apr_hash_get(net_devices, *netname, APR_HASH_KEY_STRING);

			/* Check if this is a new device */
			if (!nic)
			{
				nic = (net_device_t*)apr_palloc(gx.pool, sizeof(net_device_t));
				nic->name = apr_pstrdup(gx.pool, *netname);
				nic->tx_bytes = nic->rx_bytes = nic->tx_packets = nic->rx_packets = 0;
				apr_hash_set(net_devices, nic->name, APR_HASH_KEY_STRING, nic);
			}

			//////// RECEIVE PACKETS
			rx_packets = metric_diff_calc(netstat.rx_packets, nic->rx_packets, nic->name, "rx packets");
			nic->rx_packets = netstat.rx_packets; // old = new

			//////// RECEIVE BYTES
			rx_bytes = metric_diff_calc(netstat.rx_bytes, nic->rx_bytes, nic->name, "rx bytes");
			nic->rx_bytes = netstat.rx_bytes; // old = new

			//////// SEND PACKETS
			tx_packets = metric_diff_calc(netstat.tx_packets, nic->tx_packets, nic->name, "tx packets");
			nic->tx_packets = netstat.tx_packets; // old = new

			//////// SEND BYTES
			tx_bytes = metric_diff_calc(netstat.tx_bytes, nic->tx_bytes, nic->name, "tx bytes");
			nic->tx_bytes = netstat.tx_bytes; // old = new

			tnet.rx_packets += rx_packets;
			tnet.rx_bytes += rx_bytes;
			tnet.tx_packets += tx_packets;
			tnet.tx_bytes += tx_bytes;
		}
	}

	TR2(("rx: %" APR_UINT64_T_FMT " rx_bytes: %" APR_UINT64_T_FMT "\n",
					tnet.rx_packets, tnet.rx_bytes));
	TR2(("tx: %" APR_UINT64_T_FMT " tx_bytes: %" APR_UINT64_T_FMT "\n",
					tnet.tx_packets, tnet.tx_bytes));

	if (first)
	{
		pswap = swap, pcpu = cpu;

		/* We want 0s for these metrics on first pass rather
		 * than some possibly huge number that will throw off
		 * the UI graphs.
		 */
		memset(&tdisk, 0, sizeof(tdisk));
		memset(&tnet, 0, sizeof(tnet));
	}
	first = 0;

	gp_smon_to_mmon_set_header(&pkt,GPMON_PKTTYPE_METRICS);

	pkt.u.metrics.mem.total = mem.total;
	pkt.u.metrics.mem.used = mem.used;
	pkt.u.metrics.mem.actual_used = mem.actual_used;
	pkt.u.metrics.mem.actual_free = mem.actual_free;
	pkt.u.metrics.swap.total = swap.total;
	pkt.u.metrics.swap.used = swap.used;
	pkt.u.metrics.swap.page_in = swap.page_in - pswap.page_in;
	pkt.u.metrics.swap.page_out = swap.page_out - pswap.page_out;
	cpu_total_diff = cpu.total - pcpu.total;
	if (cpu_total_diff)
	{
		float cpu_user = calc_diff_percentage(cpu.user, pcpu.user, cpu_total_diff, "cpu.user") + calc_diff_percentage(cpu.nice, pcpu.nice, cpu_total_diff, "cpu.nice");
		float cpu_sys  = calc_diff_percentage(cpu.sys,  pcpu.sys,  cpu_total_diff, "cpu.sys")  + calc_diff_percentage(cpu.wait, pcpu.wait, cpu_total_diff, "cpu.wait");
		float cpu_idle = calc_diff_percentage(cpu.idle, pcpu.idle, cpu_total_diff, "cpu.idle");


		pkt.u.metrics.cpu.user_pct = cpu_user;
		pkt.u.metrics.cpu.sys_pct = cpu_sys;
		pkt.u.metrics.cpu.idle_pct = cpu_idle;
	}
	else
	{
		pkt.u.metrics.cpu.user_pct = 0;
		pkt.u.metrics.cpu.sys_pct = 0;
		pkt.u.metrics.cpu.idle_pct = 0;
	}
	pkt.u.metrics.load_avg.value[0] = (float) loadavg.loadavg[0];
	pkt.u.metrics.load_avg.value[1] = (float) loadavg.loadavg[1];
	pkt.u.metrics.load_avg.value[2] = (float) loadavg.loadavg[2];

	gettimeofday(&currenttime, NULL);
	seconds_duration = subtractTimeOfDay(&g_time_last_reading, &currenttime);

	pkt.u.metrics.disk.ro_rate = (apr_uint64_t)ceil(tdisk.reads/seconds_duration);
	pkt.u.metrics.disk.wo_rate = (apr_uint64_t)ceil(tdisk.writes/seconds_duration);
	pkt.u.metrics.disk.rb_rate = (apr_uint64_t)ceil(tdisk.read_bytes/seconds_duration);
	pkt.u.metrics.disk.wb_rate = (apr_uint64_t)ceil(tdisk.write_bytes/seconds_duration);
	pkt.u.metrics.net.rp_rate = (apr_uint64_t)ceil(tnet.rx_packets/seconds_duration);
	pkt.u.metrics.net.wp_rate = (apr_uint64_t)ceil(tnet.tx_packets/seconds_duration);
	pkt.u.metrics.net.rb_rate = (apr_uint64_t)ceil(tnet.rx_bytes/seconds_duration);
	pkt.u.metrics.net.wb_rate = (apr_uint64_t)ceil(tnet.tx_bytes/seconds_duration);

	g_time_last_reading = currenttime;

	strncpy(pkt.u.metrics.hname, gx.hostname, sizeof(pkt.u.metrics.hname) - 1);
	pkt.u.metrics.hname[sizeof(pkt.u.metrics.hname) - 1] = 0;
	send_smon_to_mon_pkt(sock, &pkt);

	/* save for next time around */
	pswap = swap, pcpu = cpu;
}
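
Every device loop above follows the same bookkeeping pattern: look the device up in an apr_hash_t, allocate and insert it on first sight, compute the counter delta against the stored sample, then overwrite the stored sample with the new reading ("old = new"). A reduced sketch of that pattern with a single hypothetical NIC counter:

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <apr_strings.h>
#include <stdio.h>

typedef struct net_sample_t {
    const char *name;
    apr_uint64_t rx_bytes;   /* previous sample */
} net_sample_t;

/* Return the rx_bytes delta since the last call for this device, creating the
 * per-device entry on first sight and storing the new sample as the old one. */
static apr_uint64_t report_rx_delta(apr_hash_t *devices, apr_pool_t *pool,
                                    const char *name, apr_uint64_t rx_now)
{
    apr_uint64_t delta;
    net_sample_t *dev = apr_hash_get(devices, name, APR_HASH_KEY_STRING);

    if (!dev) {
        dev = apr_palloc(pool, sizeof(*dev));
        dev->name = apr_pstrdup(pool, name);
        dev->rx_bytes = 0;
        apr_hash_set(devices, dev->name, APR_HASH_KEY_STRING, dev);
    }

    /* Counters can reset or wrap; treat a decrease as "no change". */
    delta = (rx_now >= dev->rx_bytes) ? rx_now - dev->rx_bytes : 0;
    dev->rx_bytes = rx_now;   /* old = new */
    return delta;
}

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *devices;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    devices = apr_hash_make(pool);

    printf("delta #1: %" APR_UINT64_T_FMT "\n",
           report_rx_delta(devices, pool, "eth0", 1000));
    printf("delta #2: %" APR_UINT64_T_FMT "\n",
           report_rx_delta(devices, pool, "eth0", 1500));

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}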
Example #27
File: htcacheclean.c  Project: aptana/Jaxer
/*
 * walk the cache directory tree
 */
static int process_dir(char *path, apr_pool_t *pool)
{
    apr_dir_t *dir;
    apr_pool_t *p;
    apr_hash_t *h;
    apr_hash_index_t *i;
    apr_file_t *fd;
    apr_status_t status;
    apr_finfo_t info;
    apr_size_t len;
    apr_time_t current, deviation;
    char *nextpath, *base, *ext, *orig_basename;
    APR_RING_ENTRY(_direntry) anchor;
    DIRENTRY *d, *t, *n;
    ENTRY *e;
    int skip, retries;
    disk_cache_info_t disk_info;

    APR_RING_INIT(&anchor, _direntry, link);
    apr_pool_create(&p, pool);
    h = apr_hash_make(p);
    fd = NULL;
    skip = 0;
    deviation = MAXDEVIATION * APR_USEC_PER_SEC;

    if (apr_dir_open(&dir, path, p) != APR_SUCCESS) {
        return 1;
    }

    while (apr_dir_read(&info, 0, dir) == APR_SUCCESS && !interrupted) {
        if (!strcmp(info.name, ".") || !strcmp(info.name, "..")) {
            continue;
        }
        d = apr_pcalloc(p, sizeof(DIRENTRY));
        d->basename = apr_pstrcat(p, path, "/", info.name, NULL);
        APR_RING_INSERT_TAIL(&anchor, d, _direntry, link);
    }

    apr_dir_close(dir);

    if (interrupted) {
        return 1;
    }

    skip = baselen + 1;

    for (d = APR_RING_FIRST(&anchor);
         !interrupted && d != APR_RING_SENTINEL(&anchor, _direntry, link);
         d=n) {
        n = APR_RING_NEXT(d, link);
        base = strrchr(d->basename, '/');
        if (!base++) {
            base = d->basename;
        }
        ext = strchr(base, '.');

        /* there may be temporary files which may be gone before
         * processing; always skip these if not in realclean mode
         */
        if (!ext && !realclean) {
            if (!strncasecmp(base, AP_TEMPFILE_BASE, AP_TEMPFILE_BASELEN)
                && strlen(base) == AP_TEMPFILE_NAMELEN) {
                continue;
            }
        }

        /* this may look strange but apr_stat() may return errno which
         * is system dependent and there may be transient failures,
         * so just blindly retry for a short while
         */
        retries = STAT_ATTEMPTS;
        status = APR_SUCCESS;
        do {
            if (status != APR_SUCCESS) {
                apr_sleep(STAT_DELAY);
            }
            status = apr_stat(&info, d->basename, DIRINFO, p);
        } while (status != APR_SUCCESS && !interrupted && --retries);

        /* apache may have created a file that we detected but then
         * deleted it before we could stat it; if we cannot get any
         * file information, just ignore the file
         */
        if (status != APR_SUCCESS) {
            if (!realclean && !interrupted) {
                continue;
            }
            return 1;
        }

        if (info.filetype == APR_DIR) {
            /* Make a copy of the basename, as process_dir modifies it */
            orig_basename = apr_pstrdup(pool, d->basename);
            if (process_dir(d->basename, pool)) {
                return 1;
            }

            /* If asked to delete dirs, do so now. We don't care if it fails.
             * If it fails, it likely means there was something else there.
             */
            if (deldirs && !dryrun) {
                apr_dir_remove(orig_basename, pool);
            }
            continue;
        }

        if (info.filetype != APR_REG) {
            continue;
        }

        if (!ext) {
            if (!strncasecmp(base, AP_TEMPFILE_BASE, AP_TEMPFILE_BASELEN)
                && strlen(base) == AP_TEMPFILE_NAMELEN) {
                d->basename += skip;
                d->type = TEMP;
                d->dsize = info.size;
                apr_hash_set(h, d->basename, APR_HASH_KEY_STRING, d);
            }
            continue;
        }

        if (!strcasecmp(ext, CACHE_HEADER_SUFFIX)) {
            *ext = '\0';
            d->basename += skip;
            /* if a user manually creates a '.header' file */
            if (d->basename[0] == '\0') {
                continue;
            }
            t = apr_hash_get(h, d->basename, APR_HASH_KEY_STRING);
            if (t) {
                d = t;
            }
            d->type |= HEADER;
            d->htime = info.mtime;
            d->hsize = info.size;
            apr_hash_set(h, d->basename, APR_HASH_KEY_STRING, d);
            continue;
        }

        if (!strcasecmp(ext, CACHE_DATA_SUFFIX)) {
            *ext = '\0';
            d->basename += skip;
            /* if a user manually creates a '.data' file */
            if (d->basename[0] == '\0') {
                continue;
            }
            t = apr_hash_get(h, d->basename, APR_HASH_KEY_STRING);
            if (t) {
                d = t;
            }
            d->type |= DATA;
            d->dtime = info.mtime;
            d->dsize = info.size;
            apr_hash_set(h, d->basename, APR_HASH_KEY_STRING, d);
        }
    }

    if (interrupted) {
        return 1;
    }

    path[baselen] = '\0';

    for (i = apr_hash_first(p, h); i && !interrupted; i = apr_hash_next(i)) {
        void *hvalue;
        apr_uint32_t format;

        apr_hash_this(i, NULL, NULL, &hvalue);
        d = hvalue;

        switch(d->type) {
        case HEADERDATA:
            nextpath = apr_pstrcat(p, path, "/", d->basename,
                                   CACHE_HEADER_SUFFIX, NULL);
            if (apr_file_open(&fd, nextpath, APR_FOPEN_READ | APR_FOPEN_BINARY,
                              APR_OS_DEFAULT, p) == APR_SUCCESS) {
                len = sizeof(format);
                if (apr_file_read_full(fd, &format, len,
                                       &len) == APR_SUCCESS) {
                    if (format == DISK_FORMAT_VERSION) {
                        apr_off_t offset = 0;

                        apr_file_seek(fd, APR_SET, &offset);

                        len = sizeof(disk_cache_info_t);

                        if (apr_file_read_full(fd, &disk_info, len,
                                               &len) == APR_SUCCESS) {
                            apr_file_close(fd);
                            e = apr_palloc(pool, sizeof(ENTRY));
                            APR_RING_INSERT_TAIL(&root, e, _entry, link);
                            e->expire = disk_info.expire;
                            e->response_time = disk_info.response_time;
                            e->htime = d->htime;
                            e->dtime = d->dtime;
                            e->hsize = d->hsize;
                            e->dsize = d->dsize;
                            e->basename = apr_pstrdup(pool, d->basename);
                            break;
                        }
                        else {
                            apr_file_close(fd);
                        }
                    }
                    else if (format == VARY_FORMAT_VERSION) {
                        /* This must be a URL that added Vary headers later,
                         * so kill the orphaned .data file
                         */
                        apr_file_close(fd);
                        apr_file_remove(apr_pstrcat(p, path, "/", d->basename,
                                                    CACHE_DATA_SUFFIX, NULL),
                                        p);
                    }
                }
                else {
                    apr_file_close(fd);
                }

            }
            /* we have a somehow unreadable headers file which is associated
             * with a data file. this may be caused by apache currently
             * rewriting the headers file. thus we may delete the file set
             * either in realclean mode or if the headers file modification
             * timestamp is not within a specified positive or negative offset
             * to the current time.
             */
            current = apr_time_now();
            if (realclean || d->htime < current - deviation
                || d->htime > current + deviation) {
                delete_entry(path, d->basename, p);
                unsolicited += d->hsize;
                unsolicited += d->dsize;
            }
            break;

        /* single data and header files may be deleted either in realclean
         * mode or if their modification timestamp is not within a
         * specified positive or negative offset to the current time.
         * this handling is necessary due to possible race conditions
         * between apache and this process
         */
        case HEADER:
            current = apr_time_now();
            nextpath = apr_pstrcat(p, path, "/", d->basename,
                                   CACHE_HEADER_SUFFIX, NULL);
            if (apr_file_open(&fd, nextpath, APR_FOPEN_READ | APR_FOPEN_BINARY,
                              APR_OS_DEFAULT, p) == APR_SUCCESS) {
                len = sizeof(format);
                if (apr_file_read_full(fd, &format, len,
                                       &len) == APR_SUCCESS) {
                    if (format == VARY_FORMAT_VERSION) {
                        apr_time_t expires;

                        len = sizeof(expires);

                        apr_file_read_full(fd, &expires, len, &len);

                        apr_file_close(fd);

                        if (expires < current) {
                            delete_entry(path, d->basename, p);
                        }
                        break;
                    }
                }
                apr_file_close(fd);
            }

            if (realclean || d->htime < current - deviation
                || d->htime > current + deviation) {
                delete_entry(path, d->basename, p);
                unsolicited += d->hsize;
            }
            break;

        case DATA:
            current = apr_time_now();
            if (realclean || d->dtime < current - deviation
                || d->dtime > current + deviation) {
                delete_entry(path, d->basename, p);
                unsolicited += d->dsize;
            }
            break;

        /* temp files may only be deleted in realclean mode which
         * is asserted above if a tempfile is in the hash array
         */
        case TEMP:
            delete_file(path, d->basename, p);
            unsolicited += d->dsize;
            break;
        }
    }

    if (interrupted) {
        return 1;
    }

    apr_pool_destroy(p);

    if (benice) {
        apr_sleep(NICE_DELAY);
    }

    if (interrupted) {
        return 1;
    }

    return 0;
}
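
A detail worth noting in the walk above is how a ".header" and a ".data" file with the same basename are folded into one hash entry: look the basename up, reuse the entry if it exists, OR in a type flag, and store it back. A condensed sketch of just that merge step (the PART_* flags and the entry struct are invented for illustration):

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <apr_strings.h>
#include <stdio.h>

#define PART_HEADER 0x1
#define PART_DATA   0x2

typedef struct cache_entry_t {
    const char *basename;
    int type;
} cache_entry_t;

/* Record that one part (header or data) was seen for this basename,
 * creating the hash entry if this is the first part encountered. */
static void record_part(apr_hash_t *h, apr_pool_t *p,
                        const char *basename, int part)
{
    cache_entry_t *e = apr_hash_get(h, basename, APR_HASH_KEY_STRING);
    if (!e) {
        e = apr_pcalloc(p, sizeof(*e));
        e->basename = apr_pstrdup(p, basename);
        apr_hash_set(h, e->basename, APR_HASH_KEY_STRING, e);
    }
    e->type |= part;
}

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *h;
    apr_hash_index_t *hi;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    h = apr_hash_make(pool);

    record_part(h, pool, "aBcDeF123", PART_HEADER);
    record_part(h, pool, "aBcDeF123", PART_DATA);    /* merges into same entry */
    record_part(h, pool, "zYxWvU456", PART_DATA);

    for (hi = apr_hash_first(pool, h); hi; hi = apr_hash_next(hi)) {
        void *val;
        cache_entry_t *e;
        apr_hash_this(hi, NULL, NULL, &val);
        e = val;
        printf("%s: header=%d data=%d\n", e->basename,
               (e->type & PART_HEADER) != 0, (e->type & PART_DATA) != 0);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}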
Example #28
svn_error_t *
svn_client__get_auto_props(apr_hash_t **properties,
                           const char **mimetype,
                           const char *path,
                           svn_magic__cookie_t *magic_cookie,
                           svn_client_ctx_t *ctx,
                           apr_pool_t *pool)
{
  svn_config_t *cfg;
  svn_boolean_t use_autoprops;
  auto_props_baton_t autoprops;

  /* initialisation */
  autoprops.properties = apr_hash_make(pool);
  autoprops.filename = svn_dirent_basename(path, pool);
  autoprops.pool = pool;
  autoprops.mimetype = NULL;
  autoprops.have_executable = FALSE;
  *properties = autoprops.properties;

  cfg = ctx->config ? apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG,
                                   APR_HASH_KEY_STRING) : NULL;

  /* check that auto props is enabled */
  SVN_ERR(svn_config_get_bool(cfg, &use_autoprops,
                              SVN_CONFIG_SECTION_MISCELLANY,
                              SVN_CONFIG_OPTION_ENABLE_AUTO_PROPS, FALSE));

  /* search for auto props */
  if (use_autoprops)
    svn_config_enumerate2(cfg, SVN_CONFIG_SECTION_AUTO_PROPS,
                          auto_props_enumerator, &autoprops, pool);

  /* if mimetype has not been set check the file */
  if (! autoprops.mimetype)
    {
      SVN_ERR(svn_io_detect_mimetype2(&autoprops.mimetype, path,
                                      ctx->mimetypes_map, pool));

      /* If we got no mime-type, or if it is "application/octet-stream",
       * try to get the mime-type from libmagic. */
      if (magic_cookie &&
          (!autoprops.mimetype ||
           strcmp(autoprops.mimetype, "application/octet-stream") == 0))
        {
          const char *magic_mimetype;

         /* Since libmagic usually treats UTF-16 files as "text/plain",
          * svn_magic__detect_binary_mimetype() will return NULL for such
          * files. This is fine for now since we currently don't support
          * UTF-16-encoded text files (issue #2194).
          * Once we do support UTF-16 this code path will fail to detect
          * them as text unless the svn_io_detect_mimetype2() call above
          * returns "text/plain" for them. */
          SVN_ERR(svn_magic__detect_binary_mimetype(&magic_mimetype,
                                                    path, magic_cookie,
                                                    pool, pool));
          if (magic_mimetype)
            autoprops.mimetype = magic_mimetype;
        }

      if (autoprops.mimetype)
        apr_hash_set(autoprops.properties, SVN_PROP_MIME_TYPE,
                     strlen(SVN_PROP_MIME_TYPE),
                     svn_string_create(autoprops.mimetype, pool));
    }

  /* if executable has not been set check the file */
  if (! autoprops.have_executable)
    {
      svn_boolean_t executable = FALSE;
      SVN_ERR(svn_io_is_file_executable(&executable, path, pool));
      if (executable)
        apr_hash_set(autoprops.properties, SVN_PROP_EXECUTABLE,
                     strlen(SVN_PROP_EXECUTABLE),
                     svn_string_create_empty(pool));
    }

  *mimetype = autoprops.mimetype;
  return SVN_NO_ERROR;
}
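
This example passes an explicit strlen(SVN_PROP_MIME_TYPE) as the key length, while most of the other examples on this page pass APR_HASH_KEY_STRING; for NUL-terminated string keys the two are interchangeable, since APR computes the length itself when it sees APR_HASH_KEY_STRING. A tiny sketch demonstrating the equivalence (property name and value are arbitrary):

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    apr_pool_t *pool;
    apr_hash_t *props;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    props = apr_hash_make(pool);

    /* Stored with an explicit key length... */
    apr_hash_set(props, "svn:mime-type", strlen("svn:mime-type"), "text/plain");

    /* ...and found again with APR_HASH_KEY_STRING, which makes APR compute
     * the same length from the NUL terminator. */
    printf("%s\n", (const char *)apr_hash_get(props, "svn:mime-type",
                                              APR_HASH_KEY_STRING));

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}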
Example #29
/*
** Append the specified lock(s) to the set of locks on this resource.
**
** If "make_indirect" is true (non-zero), then the specified lock(s)
** should be converted to an indirect lock (if it is a direct lock)
** before appending. Note that the conversion to an indirect lock does
** not alter the passed-in lock -- the change is internal to the
** append_locks function.
**
** Multiple locks are specified using the lock->next links.
*/
static dav_error *
append_locks(dav_lockdb *lockdb,
             const dav_resource *resource,
             int make_indirect,
             const dav_lock *lock)
{
  dav_lockdb_private *info = lockdb->info;
  svn_lock_t *slock;
  svn_error_t *serr;
  dav_error *derr;

  /* If the resource's fs path is unreadable, we don't allow a lock to
     be created on it. */
  if (! dav_svn__allow_read_resource(resource, SVN_INVALID_REVNUM,
                                     resource->pool))
    return dav_svn__new_error(resource->pool, HTTP_FORBIDDEN,
                              DAV_ERR_LOCK_SAVE_LOCK,
                              "Path is not accessible.");

  if (lock->next)
    return dav_svn__new_error(resource->pool, HTTP_BAD_REQUEST,
                              DAV_ERR_LOCK_SAVE_LOCK,
                              "Tried to attach multiple locks to a resource.");

  /* RFC2518bis (section 7.4) doesn't require us to support
     'lock-null' resources at all.  Instead, it asks that we treat
     'LOCK nonexistentURL' as a PUT (followed by a LOCK) of a 0-byte file.  */
  if (! resource->exists)
    {
      svn_revnum_t rev, new_rev;
      svn_fs_txn_t *txn;
      svn_fs_root_t *txn_root;
      const char *conflict_msg;
      dav_svn_repos *repos = resource->info->repos;
      apr_hash_t *revprop_table = apr_hash_make(resource->pool);
      apr_hash_set(revprop_table, SVN_PROP_REVISION_AUTHOR,
                   APR_HASH_KEY_STRING, svn_string_create(repos->username,
                                                          resource->pool));

      if (resource->info->repos->is_svn_client)
        return dav_svn__new_error(resource->pool, HTTP_METHOD_NOT_ALLOWED,
                                  DAV_ERR_LOCK_SAVE_LOCK,
                                  "Subversion clients may not lock "
                                  "nonexistent paths.");

      else if (! resource->info->repos->autoversioning)
        return dav_svn__new_error(resource->pool, HTTP_METHOD_NOT_ALLOWED,
                                  DAV_ERR_LOCK_SAVE_LOCK,
                                  "Attempted to lock non-existent path; "
                                  "turn on autoversioning first.");

      /* Commit a 0-byte file: */

      if ((serr = svn_fs_youngest_rev(&rev, repos->fs, resource->pool)))
        return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                    "Could not determine youngest revision",
                                    resource->pool);

      if ((serr = svn_repos_fs_begin_txn_for_commit2(&txn, repos->repos, rev,
                                                     revprop_table,
                                                     resource->pool)))
        return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                    "Could not begin a transaction",
                                    resource->pool);

      if ((serr = svn_fs_txn_root(&txn_root, txn, resource->pool)))
        return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                    "Could not begin a transaction",
                                    resource->pool);

      if ((serr = svn_fs_make_file(txn_root, resource->info->repos_path,
                                   resource->pool)))
        return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                    "Could not create empty file.",
                                    resource->pool);

      if ((serr = dav_svn__attach_auto_revprops(txn,
                                                resource->info->repos_path,
                                                resource->pool)))
        return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                    "Could not create empty file.",
                                    resource->pool);

      serr = svn_repos_fs_commit_txn(&conflict_msg, repos->repos,
                                     &new_rev, txn, resource->pool);
      if (SVN_IS_VALID_REVNUM(new_rev))
        {
          /* ### Log an error in post commit FS processing? */
          svn_error_clear(serr);
        }
      else
        {
          svn_error_clear(svn_fs_abort_txn(txn, resource->pool));
          if (serr)
            return dav_svn__convert_err(serr, HTTP_CONFLICT,
                                        apr_psprintf(resource->pool,
                                                     "Conflict when "
                                                     "committing '%s'.",
                                                     conflict_msg),
                                        resource->pool);
          else
            return dav_svn__new_error(resource->pool,
                                      HTTP_INTERNAL_SERVER_ERROR,
                                      0,
                                      "Commit failed but there was no error "
                                      "provided.");
        }
    }

  /* Convert the dav_lock into an svn_lock_t. */
  derr = dav_lock_to_svn_lock(&slock, lock, resource->info->repos_path,
                              info, resource->info->repos->is_svn_client,
                              resource->pool);
  if (derr)
    return derr;

  /* Now use the svn_lock_t to actually perform the lock. */
  serr = svn_repos_fs_lock(&slock,
                           resource->info->repos->repos,
                           slock->path,
                           slock->token,
                           slock->comment,
                           slock->is_dav_comment,
                           slock->expiration_date,
                           info->working_revnum,
                           info->lock_steal,
                           resource->pool);

  if (serr && serr->apr_err == SVN_ERR_FS_NO_USER)
    {
      svn_error_clear(serr);
      return dav_svn__new_error(resource->pool, HTTP_UNAUTHORIZED,
                                DAV_ERR_LOCK_SAVE_LOCK,
                                "Anonymous lock creation is not allowed.");
    }
  else if (serr)
    return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                "Failed to create new lock.",
                                resource->pool);


  /* A standard webdav LOCK response doesn't include any information
     about the creation date.  We send it in a custom header, so that
     svn clients can fill in svn_lock_t->creation_date.  A generic DAV
     client should just ignore the header. */
  apr_table_setn(info->r->headers_out, SVN_DAV_CREATIONDATE_HEADER,
                 svn_time_to_cstring(slock->creation_date, resource->pool));

  /* A standard webdav LOCK response doesn't include any information
     about the owner of the lock.  ('DAV:owner' has nothing to do with
     authorization, it's just a comment that we map to
     svn_lock_t->comment.)  We send the owner in a custom header, so
     that svn clients can fill in svn_lock_t->owner.  A generic DAV
     client should just ignore the header. */
  apr_table_setn(info->r->headers_out, SVN_DAV_LOCK_OWNER_HEADER,
                 slock->owner);

  /* Log the locking as a 'high-level' action. */
  dav_svn__operational_log(resource->info,
                           svn_log__lock_one_path(slock->path, info->lock_steal,
                                                  resource->info->r->pool));

  return 0;
}
Example #30
/* Print the properties in PROPS to the stream OUT. PROPS is a hash mapping
 * (const char *) path to (svn_string_t *) property value.
 * If IS_URL is true, all paths are URLs, else all paths are local paths.
 * PNAME_UTF8 is the property name of all the properties.
 * If PRINT_FILENAMES is true, print the item's path before each property.
 * If OMIT_NEWLINE is true, don't add a newline at the end of each property.
 * If LIKE_PROPLIST is true, print everything in a more verbose format
 * like "svn proplist -v" does.
 */
static svn_error_t *
print_properties(svn_stream_t *out,
                 svn_boolean_t is_url,
                 const char *pname_utf8,
                 apr_hash_t *props,
                 svn_boolean_t print_filenames,
                 svn_boolean_t omit_newline,
                 svn_boolean_t like_proplist,
                 apr_pool_t *pool)
{
  apr_array_header_t *sorted_props;
  int i;
  apr_pool_t *iterpool = svn_pool_create(pool);
  const char *path_prefix;

  SVN_ERR(svn_dirent_get_absolute(&path_prefix, "", pool));

  sorted_props = svn_sort__hash(props, svn_sort_compare_items_as_paths, pool);
  for (i = 0; i < sorted_props->nelts; i++)
    {
      svn_sort__item_t item = APR_ARRAY_IDX(sorted_props, i, svn_sort__item_t);
      const char *filename = item.key;
      svn_string_t *propval = item.value;

      svn_pool_clear(iterpool);

      if (print_filenames)
        {
          const char *header;

          /* Print the file name. */

          if (! is_url)
            filename = svn_cl__local_style_skip_ancestor(path_prefix, filename,
                                                         iterpool);

          /* In verbose mode, print exactly same as "proplist" does;
           * otherwise, print a brief header. */
          header = apr_psprintf(iterpool, like_proplist
                                ? _("Properties on '%s':\n")
                                : "%s - ", filename);
          SVN_ERR(svn_cmdline_cstring_from_utf8(&header, header, iterpool));
          SVN_ERR(svn_subst_translate_cstring2(header, &header,
                                               APR_EOL_STR,  /* 'native' eol */
                                               FALSE, /* no repair */
                                               NULL,  /* no keywords */
                                               FALSE, /* no expansion */
                                               iterpool));
          SVN_ERR(stream_write(out, header, strlen(header)));
        }

      if (like_proplist)
        {
          /* Print the property name and value just as "proplist -v" does */
          apr_hash_t *hash = apr_hash_make(iterpool);

          apr_hash_set(hash, pname_utf8, APR_HASH_KEY_STRING, propval);
          SVN_ERR(svn_cl__print_prop_hash(out, hash, FALSE, iterpool));
        }
      else
        {
          /* If this is a special Subversion property, it is stored as
             UTF8, so convert to the native format. */
          if (svn_prop_needs_translation(pname_utf8))
            SVN_ERR(svn_subst_detranslate_string(&propval, propval,
                                                 TRUE, iterpool));

          SVN_ERR(stream_write(out, propval->data, propval->len));

          if (! omit_newline)
            SVN_ERR(stream_write(out, APR_EOL_STR,
                                 strlen(APR_EOL_STR)));
        }
    }

  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}