/* Initialize the thread sync. structures.
 * To be called by svn_atomic__init_once. */
static svn_error_t *
init_thread_mutex(void *baton, apr_pool_t *pool)
{
  /* The mutex must remain valid for the whole lifetime of the process,
   * so allocate it in an unmanaged root pool that lives as long as the
   * APR itself rather than in the caller-supplied POOL. */
  apr_pool_t *mutex_pool = svn_pool_create(NULL);

  return svn_mutex__init(&thread_mutex, USE_THREAD_MUTEX, mutex_pool);
}
/* One-time initializer for the global DSO cache state: create the pool
 * holding it, the mutex guarding it, and the (name -> DSO) hash.
 * Presumably invoked via svn_atomic__init_once (matching callback
 * signature); BATON and POOL are unused.
 *
 * NOTE(review): the pool is created with a NULL parent so the cached
 * DSOs outlive any request pool; it is never explicitly destroyed. */
static svn_error_t *
atomic_init_func(void *baton, apr_pool_t *pool)
{
  /* Global pool for all DSO cache data. */
  dso_pool = svn_pool_create(NULL);

  /* TRUE => real mutex: the cache may be hit from multiple threads. */
  SVN_ERR(svn_mutex__init(&dso_mutex, TRUE, dso_pool));

  dso_cache = apr_hash_make(dso_pool);

  return SVN_NO_ERROR;
}
/* One-time initializer for the Berkeley DB environment cache: create
 * its pool (as a subpool of POOL), the cache hash, the lock that
 * serializes access, and register CLEAR_CACHE to run when the pool is
 * destroyed.  BATON is unused. */
static svn_error_t *
bdb_init_cb(void *baton, apr_pool_t *pool)
{
  bdb_cache_pool = svn_pool_create(pool);
  bdb_cache = apr_hash_make(bdb_cache_pool);

  /* TRUE => real mutex: the BDB cache is shared between threads. */
  SVN_ERR(svn_mutex__init(&bdb_cache_lock, TRUE, bdb_cache_pool));

  /* Tear down cached environments before the pool itself goes away.
   * No child cleanup is needed, hence apr_pool_cleanup_null. */
  apr_pool_cleanup_register(bdb_cache_pool, NULL, clear_cache,
                            apr_pool_cleanup_null);

  return SVN_NO_ERROR;
}
/* Public entry point: set up the global DSO cache (pool, mutex, hash).
 * Safe to call more than once -- subsequent calls are no-ops.
 *
 * NOTE(review): if svn_mutex__init() fails, dso_pool is already
 * non-NULL, so a later retry would return SVN_NO_ERROR without a
 * usable mutex -- confirm this error path is acceptable to callers. */
svn_error_t *
svn_dso_initialize2(void)
{
  /* Already initialized?  Nothing to do. */
  if (dso_pool)
    return SVN_NO_ERROR;

  /* NULL parent: the cache must outlive every ordinary pool. */
  dso_pool = svn_pool_create(NULL);

  SVN_ERR(svn_mutex__init(&dso_mutex, TRUE, dso_pool));

  dso_cache = apr_hash_make(dso_pool);

  return SVN_NO_ERROR;
}
/* Construct a logger writing to stderr.  The logger struct, its stream
 * and its mutex are allocated in POOL; a private subpool of POOL is
 * stored in the logger for its own later allocations. */
svn_error_t *
logger__create_for_stderr(logger_t **logger, apr_pool_t *pool)
{
  logger_t *new_logger = apr_pcalloc(pool, sizeof(*new_logger));

  new_logger->pool = svn_pool_create(pool);
  SVN_ERR(svn_stream_for_stderr(&new_logger->stream, pool));

  /* TRUE => real mutex: log writes may come from multiple threads. */
  SVN_ERR(svn_mutex__init(&new_logger->mutex, TRUE, pool));

  *logger = new_logger;

  return SVN_NO_ERROR;
}
svn_error_t * svn_cache__create_inprocess(svn_cache__t **cache_p, svn_cache__serialize_func_t serialize, svn_cache__deserialize_func_t deserialize, apr_ssize_t klen, apr_int64_t pages, apr_int64_t items_per_page, svn_boolean_t thread_safe, const char *id, apr_pool_t *pool) { svn_cache__t *wrapper = apr_pcalloc(pool, sizeof(*wrapper)); inprocess_cache_t *cache = apr_pcalloc(pool, sizeof(*cache)); cache->id = apr_pstrdup(pool, id); SVN_ERR_ASSERT(klen == APR_HASH_KEY_STRING || klen >= 1); cache->hash = apr_hash_make(pool); cache->klen = klen; cache->serialize_func = serialize; cache->deserialize_func = deserialize; SVN_ERR_ASSERT(pages >= 1); cache->total_pages = pages; cache->unallocated_pages = pages; SVN_ERR_ASSERT(items_per_page >= 1); cache->items_per_page = items_per_page; cache->sentinel = apr_pcalloc(pool, sizeof(*(cache->sentinel))); cache->sentinel->prev = cache->sentinel; cache->sentinel->next = cache->sentinel; /* The sentinel doesn't need a pool. (We're happy to crash if we * accidentally try to treat it like a real page.) */ SVN_ERR(svn_mutex__init(&cache->mutex, thread_safe, pool)); cache->cache_pool = pool; wrapper->vtable = &inprocess_cache_vtable; wrapper->cache_internal = cache; wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"); *cache_p = wrapper; return SVN_NO_ERROR; }
/* Create a collection of reusable root pools.  On success *POOLS points
 * to the new collection. */
svn_error_t *
svn_root_pools__create(svn_root_pools__t **pools)
{
  /* The collection of root pools must be managed independently from
   * any other pool, so give it a dedicated allocator and allocate
   * everything in that allocator's owner pool. */
  apr_allocator_t *allocator = svn_pool_create_allocator(FALSE);
  apr_pool_t *pool = apr_allocator_owner_get(allocator);

  /* Build the result object. */
  svn_root_pools__t *result = apr_pcalloc(pool, sizeof(*result));
  SVN_ERR(svn_mutex__init(&result->mutex, TRUE, pool));
  result->unused_pools = apr_array_make(pool, 16, sizeof(apr_pool_t *));

  /* Hand the collection back. */
  *pools = result;

  return SVN_NO_ERROR;
}
/* Initialize the global FS state: the common pool shared by all
 * filesystem instances, its lock, and a cleanup that uninitializes
 * everything when the pool goes away.  Safe to call more than once --
 * subsequent calls are no-ops. */
svn_error_t *
svn_fs_initialize(apr_pool_t *pool)
{
  /* Protect against multiple calls. */
  if (common_pool)
    return SVN_NO_ERROR;

  common_pool = svn_pool_create(pool);
  SVN_ERR(svn_mutex__init(&common_pool_lock, TRUE, common_pool));

  /* ### This won't work if POOL is NULL and libsvn_fs is loaded as a DSO
     ### (via libsvn_ra_local say) since the global common_pool will live
     ### longer than the DSO, which gets unloaded when the pool used to
     ### load it is cleared, and so when the handler runs it will refer to
     ### a function that no longer exists.  libsvn_ra_local attempts to
     ### work around this by explicitly calling svn_fs_initialize. */
  apr_pool_cleanup_register(common_pool, NULL, uninit,
                            apr_pool_cleanup_null);

  return SVN_NO_ERROR;
}
/* Construct a logger appending to the file at FILENAME (created if it
 * does not exist).  The logger struct, stream and mutex live in POOL;
 * a private subpool of POOL is stored for the logger's own use.  On
 * success *LOGGER is set to the new instance. */
svn_error_t *
logger__create(logger_t **logger,
               const char *filename,
               apr_pool_t *pool)
{
  logger_t *new_logger = apr_pcalloc(pool, sizeof(*new_logger));
  apr_file_t *log_file;

  /* Open (or create) the log file in append mode. */
  SVN_ERR(svn_io_file_open(&log_file, filename,
                           APR_WRITE | APR_CREATE | APR_APPEND,
                           APR_OS_DEFAULT, pool));

  /* FALSE => the stream does not own / close the underlying file. */
  new_logger->stream = svn_stream_from_aprfile2(log_file, FALSE, pool);

  /* TRUE => real mutex: log writes may come from multiple threads. */
  SVN_ERR(svn_mutex__init(&new_logger->mutex, TRUE, pool));

  new_logger->pool = svn_pool_create(pool);

  *logger = new_logger;

  return SVN_NO_ERROR;
}
/* Initialize the part of FS that must be shared between all FS objects
 * opened on the same repository within this process.  The shared data
 * is stashed as userdata on COMMON_POOL, keyed by the filesystem UUID;
 * POOL is used only for temporary allocations.  The caller is
 * responsible for serializing calls to this function. */
static svn_error_t *
fs_serialized_init(svn_fs_t *fs, apr_pool_t *common_pool, apr_pool_t *pool)
{
  fs_fs_data_t *ffd = fs->fsap_data;
  const char *key;
  void *val;
  fs_fs_shared_data_t *ffsd;
  apr_status_t status;

  /* Note that we are allocating a small amount of long-lived data for
     each separate repository opened during the lifetime of the
     svn_fs_initialize pool.  It's unlikely that anyone will notice the
     modest expenditure; the alternative is to allocate each structure
     in a subpool, add a reference-count, and add a serialized
     destructor to the FS vtable.  That's more machinery than it's
     worth.

     Using the uuid to obtain the lock creates a corner case if a
     caller uses svn_fs_set_uuid on the repository in a process where
     other threads might be using the same repository through another
     FS object.  The only real-world consumer of svn_fs_set_uuid is
     "svnadmin load", so this is a low-priority problem, and we don't
     know of a better way of associating such data with the
     repository. */

  SVN_ERR_ASSERT(fs->uuid);
  key = apr_pstrcat(pool, SVN_FSFS_SHARED_USERDATA_PREFIX, fs->uuid,
                    (char *) NULL);
  /* Look up any shared data a previous open of this repository left
   * behind.  On first open VAL comes back NULL. */
  status = apr_pool_userdata_get(&val, key, common_pool);
  if (status)
    return svn_error_wrap_apr(status, _("Can't fetch FSFS shared data"));
  ffsd = val;

  if (!ffsd)
    {
      /* First open of this repository in this process: create and
       * register the shared structure. */
      ffsd = apr_pcalloc(common_pool, sizeof(*ffsd));
      ffsd->common_pool = common_pool;

      /* POSIX fcntl locks are per-process, so we need a mutex for
         intra-process synchronization when grabbing the repository write
         lock. */
      SVN_ERR(svn_mutex__init(&ffsd->fs_write_lock,
                              SVN_FS_FS__USE_LOCK_MUTEX, common_pool));

      /* ... not to mention locking the txn-current file. */
      SVN_ERR(svn_mutex__init(&ffsd->txn_current_lock,
                              SVN_FS_FS__USE_LOCK_MUTEX, common_pool));

      /* We also need a mutex for synchronizing access to the active
         transaction list and free transaction pointer.  This one is
         enabled unconditionally. */
      SVN_ERR(svn_mutex__init(&ffsd->txn_list_lock,
                              TRUE, common_pool));

      /* KEY was allocated in the temporary POOL above; re-copy it into
       * COMMON_POOL so the userdata key outlives this call. */
      key = apr_pstrdup(common_pool, key);
      status = apr_pool_userdata_set(ffsd, key, NULL, common_pool);
      if (status)
        return svn_error_wrap_apr(status, _("Can't store FSFS shared data"));
    }

  ffd->shared = ffsd;

  return SVN_NO_ERROR;
}
/* Initialize the part of FS that requires global serialization across all
   instances.  The caller is responsible of ensuring that serialization.
   Use COMMON_POOL for process-wide and POOL for temporary allocations. */
static svn_error_t *
fs_serialized_init(svn_fs_t *fs, apr_pool_t *common_pool, apr_pool_t *pool)
{
  fs_fs_data_t *ffd = fs->fsap_data;
  const char *key;
  void *val;
  fs_fs_shared_data_t *ffsd;
  apr_status_t status;

  /* Note that we are allocating a small amount of long-lived data for
     each separate repository opened during the lifetime of the
     svn_fs_initialize pool.  It's unlikely that anyone will notice the
     modest expenditure; the alternative is to allocate each structure
     in a subpool, add a reference-count, and add a serialized destructor
     to the FS vtable.  That's more machinery than it's worth.

     Picking an appropriate key for the shared data is tricky, because,
     unfortunately, a filesystem UUID is not really unique.  It is implicitly
     shared between hotcopied (1), dump / loaded (2) or naively copied (3)
     filesystems.  We tackle this problem by using a combination of the UUID
     and an instance ID as the key.  This allows us to avoid key clashing
     in (1) and (2) for formats >= SVN_FS_FS__MIN_INSTANCE_ID_FORMAT, which
     do support instance IDs.  For old formats the shared data (locks, shared
     transaction data, ...) will still clash.

     Speaking of (3), there is not so much we can do about it, except maybe
     provide a convenient way of fixing things.  Naively copied filesystems
     have identical filesystem UUIDs *and* instance IDs.  With the key being
     a combination of these two, clashes can be fixed by changing either
     of them (or both), e.g. with svn_fs_set_uuid(). */

  SVN_ERR_ASSERT(fs->uuid);
  SVN_ERR_ASSERT(ffd->instance_id);

  /* UUID alone is not unique (see above), so key on UUID + instance ID. */
  key = apr_pstrcat(pool, SVN_FSFS_SHARED_USERDATA_PREFIX,
                    fs->uuid, ":", ffd->instance_id, SVN_VA_NULL);
  /* Look up any shared data a previous open of this repository left
   * behind.  On first open VAL comes back NULL. */
  status = apr_pool_userdata_get(&val, key, common_pool);
  if (status)
    return svn_error_wrap_apr(status, _("Can't fetch FSFS shared data"));
  ffsd = val;

  if (!ffsd)
    {
      /* First open of this repository instance in this process: create
       * and register the shared structure. */
      ffsd = apr_pcalloc(common_pool, sizeof(*ffsd));
      ffsd->common_pool = common_pool;

      /* POSIX fcntl locks are per-process, so we need a mutex for
         intra-process synchronization when grabbing the repository write
         lock. */
      SVN_ERR(svn_mutex__init(&ffsd->fs_write_lock,
                              SVN_FS_FS__USE_LOCK_MUTEX, common_pool));

      /* ... the pack lock ... */
      SVN_ERR(svn_mutex__init(&ffsd->fs_pack_lock,
                              SVN_FS_FS__USE_LOCK_MUTEX, common_pool));

      /* ... not to mention locking the txn-current file. */
      SVN_ERR(svn_mutex__init(&ffsd->txn_current_lock,
                              SVN_FS_FS__USE_LOCK_MUTEX, common_pool));

      /* We also need a mutex for synchronizing access to the active
         transaction list and free transaction pointer. */
      SVN_ERR(svn_mutex__init(&ffsd->txn_list_lock,
                              TRUE, common_pool));

      /* KEY was allocated in the temporary POOL above; re-copy it into
       * COMMON_POOL so the userdata key outlives this call. */
      key = apr_pstrdup(common_pool, key);
      status = apr_pool_userdata_set(ffsd, key, NULL, common_pool);
      if (status)
        return svn_error_wrap_apr(status, _("Can't store FSFS shared data"));
    }

  ffd->shared = ffsd;

  return SVN_NO_ERROR;
}