Example no. 1
static dav_error *
_load_request_info(const dav_resource *resource, char **full_path, struct storage_policy_s **sp)
{
	dav_error *e = NULL;
	const request_rec *r = resource->info->request;

	/* configure full path */
	e = __build_chunk_full_path(resource, full_path);
	if (NULL != e)
		return e;

	DAV_DEBUG_REQ(r, 0, "Chunk path built from request: %s", *full_path);
	
	/* init loaded storage policy */
	const char *pol_name = apr_table_get(r->headers_in, "storage-policy");
	if (!pol_name) {
		return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, "No storage-policy specified");
	}
	DAV_DEBUG_REQ(r, 0, "Policy found in request: %s", pol_name);

	dav_rawx_server_conf *conf = resource_get_server_config(resource);

	*sp = storage_policy_init(conf->rawx_conf->ni, pol_name);
	apr_pool_cleanup_register(r->pool, *sp, apr_storage_policy_clean, apr_pool_cleanup_null);

	return NULL;
}
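The cleanup registered on r->pool above suggests a callback shaped like the sketch below. This is only a minimal guess that assumes the policy is released with storage_policy_clean(); the real apr_storage_policy_clean may differ.
static apr_status_t
apr_storage_policy_clean(void *data)
{
	/* Hypothetical pool cleanup: release the storage policy allocated
	 * by storage_policy_init() when the request pool is destroyed. */
	struct storage_policy_s *sp = data;
	if (sp != NULL)
		storage_policy_clean(sp);
	return APR_SUCCESS;
}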
Example no. 2
apr_status_t
chunk_bucket_split(apr_bucket *e, apr_size_t point)
{
	dav_resource_private *ctx = e->data;
	DAV_DEBUG_REQ(ctx->request, 0, "Calling bucket split, want to split bucket(start=%"APR_OFF_T_FMT",length=%"APR_SIZE_T_FMT") at %"APR_SIZE_T_FMT, e->start, e->length, point);
	apr_bucket *splitted = NULL;
	apr_bucket_copy(e, &splitted);
	splitted->start = e->start + point;
	splitted->length = e->length - point;
	e->length = point;
	APR_BUCKET_INSERT_AFTER(e, splitted);
	DAV_DEBUG_REQ(ctx->request, 0, "Split result : b1(start=%"APR_OFF_T_FMT",length=%"APR_SIZE_T_FMT"), b2(start=%"APR_OFF_T_FMT",length=%"APR_SIZE_T_FMT")", e->start, e->length, splitted->start, splitted->length);
	return APR_SUCCESS;
}
static dav_error *
dav_rawx_set_headers(request_rec *r, const dav_resource *resource)
{
	if (!resource->exists)
		return NULL;

	DAV_DEBUG_REQ(r, 0, "%s(%s)", __FUNCTION__, resource_get_pathname(resource));

	/* make sure the proper mtime is in the request record */
	ap_update_mtime(r, resource->info->finfo.mtime);
	ap_set_last_modified(r);
	ap_set_etag(r);

	/* we accept byte-ranges */
	apr_table_setn(r->headers_out, apr_pstrdup(r->pool, "Accept-Ranges"),
			apr_pstrdup(r->pool, "bytes"));

	/* set up the Content-Length header */
	ap_set_content_length(r, resource->info->finfo.size);

	request_fill_headers(r, &(resource->info->chunk));

	/* compute metadata_compress if compressed content */
	if (resource->info->compression) {
		char *buf = apr_pstrcat(r->pool, "compression=on;compression_algorithm=", resource->info->compress_algo,
				";compression_blocksize=", apr_psprintf(r->pool, "%d", resource->info->cp_chunk.block_size), ";", NULL);
		apr_table_setn(r->headers_out, apr_pstrdup(r->pool, "metadatacompress"), buf);
	}

	return NULL;
}
Example no. 4
static dav_error *
_update_chunk_storage(const dav_resource *resource, const char *path, const struct data_treatments_s *dt, GHashTable *comp_opt)
{
	GError *e = NULL;
	dav_error *de = NULL;
	const char *c = NULL;
	const request_rec *r = resource->info->request;
	c = g_hash_table_lookup(comp_opt, NS_COMPRESSION_OPTION);
	if(NULL != c && 0 == g_ascii_strcasecmp(c, NS_COMPRESSION_ON)) {
		DAV_DEBUG_REQ(r, 0, "In place chunk is compressed, uncompress it");
		if(1 != uncompress_chunk(path, TRUE, &e)) {
			de = server_create_and_stat_error(request_get_server_config(r),
					r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
					apr_pstrcat(r->pool, "Failed to uncompress chunk : ",
					((NULL != e)? e->message : "No error specified"), NULL));
			if(NULL != e)
				g_clear_error(&e);
			return de;
		}
		DAV_DEBUG_REQ(r, 0, "Chunk uncompressed");
	}

	if(COMPRESSION == data_treatments_get_type(dt)) {
		DAV_DEBUG_REQ(r, 0, "Re compressing chunk");
		const char *algo = data_treatments_get_param(dt, DT_KEY_ALGO);
		const char *bs = data_treatments_get_param(dt, DT_KEY_BLOCKSIZE);
		if(!algo || !bs) {
			return server_create_and_stat_error(request_get_server_config(r),
					r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
					apr_pstrcat(r->pool, "Cannot compress chunk, missing info: ",
						algo, "|", bs, NULL));
		}

		if(1 != compress_chunk(path, algo, g_ascii_strtoll(bs, NULL, 10), TRUE, &e)) {
			de = server_create_and_stat_error(request_get_server_config(r),
					r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
					apr_pstrcat(r->pool, "Failed to compress chunk : ",
						((NULL != e)? e->message : "No error specified"), NULL));
			if(NULL != e)
				g_clear_error(&e);
			return de;
		}
	}

	return NULL;
}
static dav_error *
dav_rawx_close_stream(dav_stream *stream, int commit)
{
	/* LAST STEP OF PUT REQUEST */

	dav_error *e = NULL;

	DAV_DEBUG_REQ(stream->r->info->request, 0,
			"Closing (%s) the stream to [%s]",
			(commit ? "commit" : "rollback"), stream->pathname);

	if (!commit) {
		e = rawx_repo_rollback_upload(stream);
	} else {
		e = rawx_repo_write_last_data_crumble(stream);
		if (e) {
			DAV_DEBUG_REQ(stream->r->info->request, 0,
					"Cannot commit, an error occurred while writing end of data");
			dav_error *e_tmp = NULL;
			e_tmp = rawx_repo_rollback_upload(stream);
			if (e_tmp) {
				DAV_ERROR_REQ(stream->r->info->request, 0,
						"Error while rolling back upload: %s", e_tmp->desc);
			}
		} else {
			e = rawx_repo_commit_upload(stream);
		}
	}

	/* stats update */
	if (stream->total_size > 0) {
		server_add_stat(resource_get_server_config(stream->r),
				RAWX_STATNAME_REP_BWRITTEN,
				stream->total_size, 0);
	}
	server_inc_request_stat(resource_get_server_config(stream->r),
			RAWX_STATNAME_REQ_CHUNKPUT,
			request_get_duration(stream->r->info->request));

	if (stream->md5) {
		g_checksum_free(stream->md5);
		stream->md5 = NULL;
	}
	return e;
}
static dav_error *
dav_rawx_open_stream(const dav_resource *resource, dav_stream_mode mode, dav_stream **stream)
{
	/* FIRST STEP OF PUT REQUEST */
	(void) mode;
	DAV_DEBUG_REQ(resource->info->request, 0, "%s(%s/%s)", __FUNCTION__, resource->info->dirname, resource->info->hex_chunkid);

	dav_stream *ds = NULL;
	dav_error *e = rawx_repo_stream_create(resource, &ds);
	if ( NULL != e ) {
		DAV_DEBUG_REQ(resource->info->request, 0, "Dav stream initialization failure");
		return e;
	}

	*stream = ds;

	DAV_DEBUG_REQ(resource->info->request, 0, "About to write in [%s]", ds->pathname);

	return NULL;
}
static dav_error *
_load_request_info(const dav_resource *resource, char **full_path)
{
	dav_error *e = NULL;
	const request_rec *r = resource->info->request;

	/* configure full path */
	e = __build_chunk_full_path(resource, full_path);
	if (NULL != e)
		return e;

	DAV_DEBUG_REQ(r, 0, "Chunk path built from request: %s", *full_path);

	return NULL;
}
Example no. 8
void send_chunk_event(const char *type, const dav_resource *resource) {
	int rc;
	dav_rawx_server_conf *conf = resource_get_server_config(resource);

	GString *json = g_string_sized_new(128);
	g_string_append_printf(json,
			"{"
			"\"volume_id\":\"%s\","
			"\"container_id\":\"%s\","
			"\"content_id\":\"%s\","
			"\"content_version\":\"%s\","
			"\"content_path\":\"%s\","
			"\"content_storage_policy\":\"%s\","
			"\"content_mime_type\":\"%s\","
			"\"content_chunk_method\":\"%s\","
			"\"chunk_id\":\"%s\","
			"\"chunk_hash\":\"%s\","
			"\"chunk_position\":\"%s\","
			"\"chunk_size\":\"%s\"",
			conf->rawx_id,
			resource->info->content.container_id,
			resource->info->content.content_id,
			resource->info->content.version,
			resource->info->content.path,
			resource->info->content.storage_policy,
			resource->info->content.mime_type,
			resource->info->content.chunk_method,
			resource->info->chunk.id,
			resource->info->chunk.hash,
			resource->info->chunk.position,
			resource->info->chunk.size);

	if (resource->info->content.size)
		g_string_append_printf(json,
				",\"content_size\":\"%s\"",
				resource->info->content.size);

	if (resource->info->content.chunk_nb)
		g_string_append_printf(json,
				",\"content_nbchunks\":\"%s\"",
				resource->info->content.chunk_nb);

	g_string_append_printf(json, "}");

	rc = rawx_event_send(type, json);
	DAV_DEBUG_REQ(resource->info->request, 0, "Event %s %s", type, rc ? "OK" : "KO");
}
Example no. 9
apr_status_t
chunk_bucket_copy(apr_bucket *e, apr_bucket **c)
{
	dav_resource_private *ctx = e->data;
	DAV_DEBUG_REQ(ctx->request, 0, "Calling bucket copy");

	apr_bucket *result = NULL;
	result = apr_palloc(ctx->pool, sizeof(struct apr_bucket));
	result->type = e->type;
	result->length = e->length;
	result->start = e->start;
	result->data = ctx;
	result->free = chunk_bucket_free_noop;
	result->list = e->list;

	*c = result;

	return APR_SUCCESS;
}
Example no. 10
apr_status_t
chunk_bucket_read(apr_bucket *b, const char **str, apr_size_t *len, apr_read_type_e block)
{
	apr_size_t written=0, length=0;
	apr_size_t offset = 0;
	apr_int64_t total64, remaining64, done64;
	apr_int64_t bl64, w64;
	ssize_t w;
	dav_resource_private *ctx;
	apr_status_t rc;
	rc = APR_SUCCESS;

	(void) block;

	*str = NULL;  /* in case we die prematurely */
	*len = 0;

	/*dummy bucket*/
	if (b->length == (apr_size_t)(-1) || b->start == (apr_off_t)(-1))
		return APR_SUCCESS;
	
	ctx = b->data;
	offset = b->start - ctx->cp_chunk.read;
	DAV_DEBUG_REQ(ctx->request, 0, "Reading data for this bucket start at current position + %"APR_SIZE_T_FMT, offset);
	DAV_DEBUG_REQ(ctx->request, 0, "Bucket length %"APR_SIZE_T_FMT, b->length);
	total64 = g_ascii_strtoll(ctx->cp_chunk.uncompressed_size, NULL, 10);
	done64 = b->start;
	bl64 = b->length;
	remaining64 = MIN(total64 - done64, bl64);

	DAV_DEBUG_REQ(ctx->request, 0, "Data already returned=%"APR_INT64_T_FMT", remaining=%"APR_INT64_T_FMT, done64, remaining64);

	if (remaining64 <= 0){
		DAV_DEBUG_REQ(ctx->request, 0, "No remaining data, end of resource delivering.");
		apr_bucket_heap_make(b, NULL, *len, apr_bucket_free);
		return APR_SUCCESS;
	}
	else { /* determine the size of THIS bucket */
		if (remaining64 > APR_BUCKET_BUFF_SIZE)
			length = APR_BUCKET_BUFF_SIZE;
		else
			length = remaining64;
	}

	*len = length;

	guint8 *buf = apr_bucket_alloc(length, b->list);
	for (written=0; written < length ;) {
		GError *gerror;
	
		DAV_DEBUG_REQ(ctx->request, 0, "Trying to read at most %"APR_SSIZE_T_FMT" bytes (%"APR_SSIZE_T_FMT" received)",
				length - written, written);
		
		gerror = NULL;
		w = ctx->comp_ctx.data_uncompressor(&ctx->cp_chunk, offset, buf+written, length-written, &gerror);
		offset = 0;
		DAV_DEBUG_REQ(ctx->request, 0 , "%"APR_SSIZE_T_FMT" bytes read from local resource", w);
		if (w < 0) {
			DAV_ERROR_REQ(ctx->request, 0, "Read from chunk failed : %s", gerror_get_message(gerror));
			if (gerror)
				g_error_free(gerror);
			apr_bucket_free(buf);
			return APR_INCOMPLETE;
		}
		if (gerror)
			g_error_free(gerror);
		if (w == 0) {
			DAV_DEBUG_REQ(ctx->request, 0, "No bytes read from local resource whereas we"
						" must read again, this should never happen");
			apr_bucket_free(buf);
			return APR_INCOMPLETE;
		}
		written += w;
	}
	*len = written;

	DAV_DEBUG_REQ(ctx->request, 0, "Bucket done (%"APR_SSIZE_T_FMT" bytes, rc=%d)", written, rc);

	w64 = written;

	DAV_DEBUG_REQ(ctx->request, 0, "Status info : %"APR_INT64_T_FMT" written , %"APR_INT64_T_FMT" length total", w64, bl64);

	apr_bucket_heap_make(b, (char*)buf, *len, apr_bucket_free);

	if(w64 < bl64) {
		apr_bucket *bkt;
		DAV_DEBUG_REQ(ctx->request, 0, "Creating bucket info: bkt->length = %"APR_INT64_T_FMT", bkt->start ="
					" %"APR_INT64_T_FMT", bkt->data = %p, bkt->list = %p\n", remaining64, done64 + w64,
					&(ctx->cp_chunk), b->list);
		bkt = apr_bucket_alloc(sizeof(*bkt), b->list);
		bkt->type = &chunk_bucket_type;
		bkt->length = remaining64 - w64;
		bkt->start = done64 + w64;
		bkt->data = ctx;
		bkt->free = chunk_bucket_free_noop;
		bkt->list = b->list;
		APR_BUCKET_INSERT_AFTER(b, bkt);
		DAV_DEBUG_REQ(ctx->request, 0, "Starting a new RAWX bucket (length=%"APR_SIZE_T_FMT" start=%"APR_INT64_T_FMT")", bkt->length, bkt->start);
	}

	*str = (char*)buf;
	return rc;
}
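Examples no. 2, 9 and 10 are the split, copy and read callbacks of a custom APR bucket type. The definition below is only a plausible wiring that follows the apr_bucket_type_t field order (name, num_func, is_metadata, destroy, read, setaside, split, copy); the destroy hook name chunk_bucket_destroy is assumed, and the actual chunk_bucket_type in the module may differ.
const apr_bucket_type_t chunk_bucket_type = {
	"CHUNK", 5, APR_BUCKET_DATA,
	chunk_bucket_destroy,      /* hypothetical destroy hook */
	chunk_bucket_read,         /* Example no. 10 */
	apr_bucket_setaside_noop,  /* chunk data already outlives the brigade */
	chunk_bucket_split,        /* Example no. 2 */
	chunk_bucket_copy          /* Example no. 9 */
};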
Example no. 11
apr_status_t
rainx_http_req(struct req_params_store* rps) {
	const dav_resource* resource = rps->resource;
	char* remote_uri = rps->service_address;
	char* req_type = rps->req_type;
	char* header = rps->header;
	char* data = rps->data_to_send;
	int data_length = rps->data_to_send_size;
	char** reply = &(rps->reply);
	apr_pool_t *local_pool = rps->pool;
	if (NULL == resource || NULL == remote_uri || NULL == req_type) {
		DAV_ERROR_POOL(local_pool, APR_EINVAL, "One of these params is wrong: "
				"resource=%p, remote_uri=%p, req_type=%p (%s:%d)",
				resource, remote_uri, req_type, __FILE__, __LINE__);
		return APR_EINVAL;
	}

	/* Only dereference the resource once it is known to be non-NULL */
	dav_rainx_server_conf *server_conf = resource_get_server_config(resource);
	if (NULL == server_conf) {
		DAV_ERROR_POOL(local_pool, APR_EINVAL,
				"Cannot get the server configuration (%s:%d)", __FILE__, __LINE__);
		return APR_EINVAL;
	}

	const gboolean is_get = (0 == g_strcmp0(req_type, "GET"));

	/* Isolating Rawx IP and port */
	char *temp_remote_uri = apr_pstrdup(local_pool, remote_uri);
	char* last;
	char* full_remote_url = apr_strtok(temp_remote_uri, "/", &last);
	char* content_hexid = apr_pstrdup(local_pool, remote_uri + strlen(full_remote_url));

	char* remote_ip = NULL;
	char* scope_id = NULL;
	apr_port_t remote_port;
	apr_parse_addr_port(&remote_ip, &scope_id, &remote_port, full_remote_url, local_pool);
	/* ------- */

	/* Preparing the socket */
	apr_socket_t* sock;
	apr_sockaddr_t* sockaddr;
	apr_status_t status;

	if ((status = apr_sockaddr_info_get(&sockaddr, remote_ip, APR_INET, remote_port, 0, local_pool)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0, "unable to connect to the rawx %s", full_remote_url);
		return status;
	}

	if ((status = apr_socket_create(&sock, sockaddr->family, SOCK_STREAM, APR_PROTO_TCP, local_pool)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0, "unable to create a socket to the rawx %s", full_remote_url);
		return status;
	}

	if ((status = apr_socket_timeout_set(sock, server_conf->socket_timeout)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0, "unable to set timeout for the socket to the rawx %s", full_remote_url);
		return status;
	}

	if ((status = apr_socket_connect(sock, sockaddr)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0, "unable to establish the connection to the rawx %s", full_remote_url);
		return status;
	}
	/* ------- */

	/* Forging the message */
	char* forged_header = apr_psprintf(local_pool, "%s %s HTTP/1.1\nHost: %s", req_type, content_hexid, full_remote_url);
	if (header)
		forged_header = apr_psprintf(local_pool, "%s\n%s", forged_header, header);
	if (data)
		forged_header = apr_psprintf(local_pool, "%s\nContent-Length: %d\n\n", forged_header, data_length);
	else
		forged_header = apr_psprintf(local_pool, "%s\n\n", forged_header);
	/* ------- */

	/* Sending the message */
	int remaining_to_send = strlen(forged_header);
	char* ptr_start = forged_header;
	apr_size_t send_buffer_size;
	while (remaining_to_send > 0) {
		if (remaining_to_send < REQUEST_BUFFER_SIZE)
			send_buffer_size = (apr_size_t)remaining_to_send;
		else
			send_buffer_size = REQUEST_BUFFER_SIZE;

		if ((status = apr_socket_send(sock, ptr_start, &send_buffer_size)) != APR_SUCCESS) {
			DAV_DEBUG_REQ(resource->info->request, 0, "failed to send the %s request to the rawx %s", req_type, full_remote_url);
			apr_status_t status_sav = status;
			apr_socket_close(sock);
			return status_sav;
		}

		remaining_to_send -= send_buffer_size;
		ptr_start = ptr_start + send_buffer_size;
	}
	if (NULL != data) {
		remaining_to_send = data_length;
		ptr_start = data;
		while (remaining_to_send > 0) {
			if (remaining_to_send < REQUEST_BUFFER_SIZE)
				send_buffer_size = (apr_size_t)remaining_to_send;
			else
				send_buffer_size = REQUEST_BUFFER_SIZE;

			if ((status = apr_socket_send(sock, ptr_start, &send_buffer_size)) != APR_SUCCESS) {
				DAV_DEBUG_REQ(resource->info->request, 0, "failed to send the %s request to the rawx %s", req_type, full_remote_url);
				apr_status_t status_sav = status;
				apr_socket_close(sock);
				return status_sav;
			}

			remaining_to_send -= send_buffer_size;
			ptr_start = ptr_start + send_buffer_size;
		}
	}

	if (is_get) {
		/* This avoids a ~5s delay in the communication */
		apr_socket_shutdown(sock, APR_SHUTDOWN_WRITE);
	}

	DAV_DEBUG_REQ(resource->info->request, 0, "%s request to the rawx %s sent for the content %s", req_type, full_remote_url, content_hexid);
	/* ------- */

	/* Getting the reply */
	char* reply_ptr = *reply;
	apr_size_t total_size;
	if (!is_get)
		total_size = REPLY_BUFFER_SIZE; // PUT or DELETE
	else
		total_size = MAX_REPLY_HEADER_SIZE + data_length; // GET
	apr_size_t reply_size = (apr_size_t)total_size;
	apr_size_t total_replied_size;
	do {
		status = apr_socket_recv(sock, reply_ptr, &reply_size);
		reply_ptr += reply_size;
		total_replied_size = reply_ptr - *reply;
		/* Leave when OK, or error != timeout, or buffer full */
		if (status == APR_EOF || (status == APR_SUCCESS && !is_get) ||
				(reply_size == 0) ||
				total_replied_size >= total_size) {
			break;
		}
		/* Take care of overflows! */
		reply_size = total_size - total_replied_size;
	} while (total_replied_size < total_size);
	/* ------- */

	apr_socket_close(sock);

	return status;
}
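A minimal caller sketch for rainx_http_req(), built only from the req_params_store fields read at the top of the function; the address and sizes are illustrative, the struct may carry more members, and rps.reply must point to a buffer large enough for the expected reply.
/* Hypothetical usage, assuming the struct fields shown above. */
int expected_size = 65536;               /* illustrative payload size */
struct req_params_store rps;
memset(&rps, 0, sizeof(rps));
rps.resource = resource;                 /* current dav_resource */
rps.pool = resource->pool;
rps.service_address = "127.0.0.1:6010/0123456789ABCDEF"; /* rawx address + chunk id */
rps.req_type = "GET";
rps.header = NULL;                       /* optional extra header line */
rps.data_to_send = NULL;                 /* no body for a GET */
rps.data_to_send_size = expected_size;   /* for a GET this also sizes the reply buffer */
rps.reply = apr_pcalloc(resource->pool, MAX_REPLY_HEADER_SIZE + expected_size);

apr_status_t rv = rainx_http_req(&rps);
if (rv != APR_SUCCESS && rv != APR_EOF)
	DAV_DEBUG_REQ(resource->info->request, 0, "remote rawx %s failed", rps.req_type);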
Example no. 12
static dav_error *
dav_rawx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
	(void) output;
	dav_error *e = NULL;
	struct storage_policy_s *sp = NULL;
	const struct data_treatments_s *dt = NULL;
	const request_rec *r = resource->info->request;
	GHashTable *comp_opt = NULL;
	struct content_textinfo_s *content = NULL;
	struct chunk_textinfo_s *chunk = NULL;
	char *path = NULL;
	apr_pool_t *p = resource->pool;
	
	/* Load request information */
	e = _load_request_info(resource, &path, &sp);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load request information: %s", e->desc);
		goto end_deliver;
	}

	if(!sp) {
		DAV_DEBUG_REQ(r, 0, "Storage policy not initialized with value found in header, don't do anything");
		goto end_deliver;
	}

	dt = storage_policy_get_data_treatments(sp);
	if(!dt)
		DAV_DEBUG_REQ(r, 0, "Data treatments not defined for this policy");

	comp_opt = g_hash_table_new_full( g_str_hash, g_str_equal, g_free, g_free);
	apr_pool_cleanup_register(p, comp_opt, apr_hash_table_clean, apr_pool_cleanup_null);
	chunk = apr_palloc(p, sizeof(struct chunk_textinfo_s));
	content = apr_palloc(p, sizeof(struct content_textinfo_s));

	/* Load in-place information (sys-metadata & metadatacompress) */
	e = _load_in_place_chunk_info(resource, path, content, chunk, &comp_opt);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load in place chunk information: %s", e->desc);
		goto end_deliver;
	}

	DAV_DEBUG_REQ(r, 0, "In place chunk info loaded, compression status : %s", (gchar*)g_hash_table_lookup(comp_opt, NS_COMPRESSION_OPTION));

	/* check chunk not in required state */
	if (APR_SUCCESS != _is_storage_policy_already_applied(dt, comp_opt)) {
		DAV_DEBUG_REQ(r, 0, "Storage policy not already applied, apply it!");
		/* operate the data treatments */
		e = _update_chunk_storage(resource, path, dt, comp_opt);
		if (NULL != e) {
			DAV_ERROR_REQ(r, 0, "Failed to update chunk storage: %s", e->desc);
			goto end_deliver;
		}
		DAV_DEBUG_REQ(r, 0, "Chunk storage updated");
	} else {
		DAV_DEBUG_REQ(r, 0, "Storage policy already applied, don't do anything!");
	}


	/* ensure sys-metadata header is valid */
	e = _ensure_sys_metadata(resource, path, storage_policy_get_name(sp), content);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to ensure sys-metadata, storage-policy possibly not correctly present in xattr: %s", e->desc);
		goto end_deliver;
	}

end_deliver:

	/* stats inc */

	return e;
}
Example no. 13
static dav_error *
dav_rawx_get_resource(request_rec *r, const char *root_dir, const char *label,
	int use_checked_in, dav_resource **result_resource)
{
	(void) use_checked_in;
	*result_resource = NULL;

	dav_rawx_server_conf *conf = request_get_server_config(r);

	/* Check if client allowed to work with us */
	if (conf->enabled_acl) {
#if MODULE_MAGIC_COOKIE == 0x41503234UL /* "AP24" */
		if (!authorized_personal_only(r->connection->client_ip, conf->rawx_conf->acl))
#else
		if (!authorized_personal_only(r->connection->remote_ip, conf->rawx_conf->acl))
#endif
		{
			return server_create_and_stat_error(conf, r->pool,
					HTTP_UNAUTHORIZED, 0, "Permission Denied (APO)");
		}
	}

	/* Create private resource context descriptor */
	dav_resource_private ctx = {0};
	ctx.pool = r->pool;
	ctx.request = r;

	dav_error *e = rawx_repo_check_request(r, root_dir, label, use_checked_in,
			&ctx, result_resource);
	/* Return in case we have an error or
	 * if result_resource != null because it was an info request */
	if (e || *result_resource) {
		return e;
	}

	/* Build the hashed path */
	if (conf->hash_width <= 0 || conf->hash_depth <= 0) {
		apr_snprintf(ctx.dirname, sizeof(ctx.dirname),
			"%.*s", (int)sizeof(conf->docroot), conf->docroot);
	} else {
		e = rawx_repo_configure_hash_dir(r, &ctx);
		if ( NULL != e) {
			return e;
		}
	}
	DAV_DEBUG_REQ(r, 0, "Hashed directory: %.*s", (int)sizeof(ctx.dirname), ctx.dirname);

	/* All the checks on the URL have passed, now build a resource */
	dav_resource *resource = apr_pcalloc(r->pool, sizeof(*resource));
	resource->type = DAV_RESOURCE_TYPE_REGULAR;
	resource->info = apr_pcalloc(r->pool, sizeof(ctx));
	memcpy(resource->info, &ctx, sizeof(ctx));
	resource->hooks = &dav_hooks_repository_rawx;
	resource->pool = r->pool;
	memset(&(resource->info->comp_ctx), 0, sizeof(struct compression_ctx_s));

	resource->info->fullpath = apr_pstrcat(resource->pool,
		resource->info->dirname, resource->info->hex_chunkid,
		resource->info->file_extension,
		NULL);

	/* init compression context structure if we are in get method */
	if (r->method_number == M_GET && !ctx.update_only) {
		resource_init_decompression(resource, conf);
	}

	/* Check the chunk's existence */
	int flags = (r->method_number == M_GET ||
			r->method_number == M_OPTIONS ||
			r->method_number == M_DELETE)?
				 RESOURCE_STAT_CHUNK_READ_ATTRS : 0;
	if (r->method_number == M_PUT || r->method_number == M_POST)
		flags |= RESOURCE_STAT_CHUNK_PENDING;

	resource_stat_chunk(resource, flags);

	if (r->method_number == M_PUT || r->method_number == M_POST ||
			r->method_number == M_MOVE ||
			(r->method_number == M_GET && ctx.update_only)) {
		request_load_chunk_info_from_headers(r, &(resource->info->chunk));
		const char *missing = check_chunk_info(&resource->info->chunk);
		if (missing != NULL) {
			return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, apr_pstrcat(r->pool, "missing or invalid header ", missing, NULL));
		}
	}

	if (r->method_number == M_POST || r->method_number == M_PUT) {
		if (resource->info->chunk.chunk_id) {
			if (0 != apr_strnatcasecmp(resource->info->chunk.chunk_id, resource->info->hex_chunkid))
				return server_create_and_stat_error(request_get_server_config(r), r->pool,
						HTTP_BAD_REQUEST, 0, "chunk-id mismatch");
		}
		if (resource->exists)
			return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_CONFLICT, 0, "Resource busy or already exists");
		request_parse_query(r, resource);
	}

	*result_resource = resource;
	return NULL;
}
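The dav_hooks_repository_rawx table referenced in Example no. 13 plugs these callbacks into mod_dav. The excerpt below is only a sketch using designated initializers; the real table fills in every mandatory dav_hooks_repository callback, and whether the deliver hook is dav_rawx_deliver_SPECIAL depends on the build.
static const dav_hooks_repository dav_hooks_repository_rawx = {
	.handle_get = 1,                        /* the module serves GET itself */
	.get_resource = dav_rawx_get_resource,  /* Example no. 13 */
	.open_stream = dav_rawx_open_stream,
	.close_stream = dav_rawx_close_stream,
	.set_headers = dav_rawx_set_headers,
	.deliver = dav_rawx_deliver_SPECIAL,    /* Example no. 12 */
	/* the remaining mandatory callbacks are omitted from this sketch */
};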