static dav_error *
dav_rawx_close_stream(dav_stream *stream, int commit)
{
	/* LAST STEP OF PUT REQUEST */

	dav_error *e = NULL;

	DAV_DEBUG_REQ(stream->r->info->request, 0,
			"Closing (%s) the stream to [%s]",
			(commit ? "commit" : "rollback"), stream->pathname);

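	/* Rollback discards the pending upload. Commit first flushes the last
	 * buffered block of data, then finalizes the chunk; if that flush
	 * fails, a best-effort rollback is attempted instead. */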
	if (!commit) {
		e = rawx_repo_rollback_upload(stream);
	} else {
		e = rawx_repo_write_last_data_crumble(stream);
		if (e) {
			DAV_DEBUG_REQ(stream->r->info->request, 0,
					"Cannot commit, an error occured while writing end of data");
			dav_error *e_tmp = NULL;
			e_tmp = rawx_repo_rollback_upload(stream);
			if (e_tmp) {
				DAV_ERROR_REQ(stream->r->info->request, 0,
						"Error while rolling back upload: %s", e_tmp->desc);
			}
		} else {
			e = rawx_repo_commit_upload(stream);
		}
	}

	/* stats update */
	if (stream->total_size > 0) {
		server_add_stat(resource_get_server_config(stream->r),
				RAWX_STATNAME_REP_BWRITTEN,
				stream->total_size, 0);
	}
	server_inc_request_stat(resource_get_server_config(stream->r),
			RAWX_STATNAME_REQ_CHUNKPUT,
			request_get_duration(stream->r->info->request));

	if (stream->md5) {
		g_checksum_free(stream->md5);
		stream->md5 = NULL;
	}
	return e;
}
apr_status_t
chunk_bucket_read(apr_bucket *b, const char **str, apr_size_t *len, apr_read_type_e block)
{
	apr_size_t written=0, length=0;
	apr_size_t offset = 0;
	apr_int64_t total64, remaining64, done64;
	apr_int64_t bl64, w64;
	ssize_t w;
	dav_resource_private *ctx;
	apr_status_t rc;
	rc = APR_SUCCESS;

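	/* The requested blocking mode is ignored: data is always read
	 * synchronously from the local resource. */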
	(void) block;

	*str = NULL;  /* in case we die prematurely */
	*len = 0;

	/* Dummy bucket: a length or start of -1 marks a sentinel carrying no
	 * data, so there is nothing to deliver. */
	if (b->length == (apr_size_t)(-1) || b->start == (apr_off_t)(-1))
		return APR_SUCCESS;

	ctx = b->data;
	offset = b->start - ctx->cp_chunk.read;
	DAV_DEBUG_REQ(ctx->request, 0, "Reading data for this bucket start at current position + %"APR_SIZE_T_FMT, offset);
	DAV_DEBUG_REQ(ctx->request, 0, "Bucket length %"APR_SIZE_T_FMT, b->length);
	total64 = g_ascii_strtoll(ctx->cp_chunk.uncompressed_size, NULL, 10);
	done64 = b->start;
	bl64 = b->length;
	remaining64 = MIN(total64 - done64, bl64);

	DAV_DEBUG_REQ(ctx->request, 0, "Data already returned=%"APR_INT64_T_FMT", remaining=%"APR_INT64_T_FMT, done64, remaining64);

	if (remaining64 <= 0) {
		DAV_DEBUG_REQ(ctx->request, 0, "No remaining data, end of resource delivery.");
		apr_bucket_heap_make(b, NULL, *len, apr_bucket_free);
		return APR_SUCCESS;
	}
	else { /* determine the size of THIS bucket */
		if (remaining64 > APR_BUCKET_BUFF_SIZE)
			length = APR_BUCKET_BUFF_SIZE;
		else
			length = remaining64;
	}

	*len = length;

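	/* Fill the bucket buffer in a loop: the uncompressor may return short
	 * reads, so keep reading until `length` bytes have been gathered or an
	 * error occurs. The extra offset only applies to the first read. */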
	guint8 *buf = apr_bucket_alloc(length, b->list);
	for (written = 0; written < length;) {
		GError *gerror;

		DAV_DEBUG_REQ(ctx->request, 0, "Trying to read at most %"APR_SSIZE_T_FMT" bytes (%"APR_SSIZE_T_FMT" received)",
				length - written, written);

		gerror = NULL;
		w = ctx->comp_ctx.data_uncompressor(&ctx->cp_chunk, offset, buf+written, length-written, &gerror);
		offset = 0;
		DAV_DEBUG_REQ(ctx->request, 0 , "%"APR_SSIZE_T_FMT" bytes read from local resource", w);
		if (w < 0) {
			DAV_ERROR_REQ(ctx->request, 0, "Read from chunk failed : %s", gerror_get_message(gerror));
			if (gerror)
				g_error_free(gerror);
			apr_bucket_free(buf);
			return APR_INCOMPLETE;
		}
		if (gerror)
			g_error_free(gerror);
		if (w == 0) {
			DAV_DEBUG_REQ(ctx->request, 0, "No bytes read from local resource whereas we"
						" must read again, this should never happened");
			apr_bucket_free(buf);
			return APR_INCOMPLETE;
		}
		written += w;
	}
	*len = written;

	DAV_DEBUG_REQ(ctx->request, 0, "Bucket done (%"APR_SSIZE_T_FMT" bytes, rc=%d)", written, rc);

	w64 = written;

	DAV_DEBUG_REQ(ctx->request, 0, "Status info : %"APR_INT64_T_FMT" written , %"APR_INT64_T_FMT" length total", w64, bl64);

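	/* Morph this bucket in place into a heap bucket that owns the buffer
	 * just filled, so the standard heap destructor frees it. */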
	apr_bucket_heap_make(b, (char*)buf, *len, apr_bucket_free);

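	/* Short delivery: fewer bytes than the bucket's declared length were
	 * produced. Queue a follow-up bucket of the same type right after this
	 * one to cover the remainder of the range. */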
	if (w64 < bl64) {
		apr_bucket *bkt;
		DAV_DEBUG_REQ(ctx->request, 0, "Creating bucket info: bkt->length = %"APR_INT64_T_FMT", bkt->start ="
					" %"APR_INT64_T_FMT", bkt->data = %p, bkt->list = %p\n", remaining64 - w64, done64 + w64,
					ctx, b->list);
		bkt = apr_bucket_alloc(sizeof(*bkt), b->list);
		bkt->type = &chunk_bucket_type;
		bkt->length = remaining64 - w64;
		bkt->start = done64 + w64;
		bkt->data = ctx;
		bkt->free = chunk_bucket_free_noop;
		bkt->list = b->list;
		APR_BUCKET_INSERT_AFTER(b, bkt);
		DAV_DEBUG_REQ(ctx->request, 0, "Starting a new RAWX bucket (length=%"APR_SIZE_T_FMT" start=%"APR_INT64_T_FMT")", bkt->length, bkt->start);
	}

	*str = (char*)buf;
	return rc;
}
static dav_error *
dav_rawx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
	(void) output;
	dav_error *e = NULL;
	struct storage_policy_s *sp = NULL;
	const struct data_treatments_s *dt = NULL;
	const request_rec *r = resource->info->request;
	GHashTable *comp_opt = NULL;
	struct content_textinfo_s *content = NULL;
	struct chunk_textinfo_s *chunk = NULL;
	char *path = NULL;
	apr_pool_t *p = resource->pool;
	
	/* Load request information */
	e = _load_request_info(resource, &path, &sp);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load request information: %s", e->desc);
		goto end_deliver;
	}

	if (!sp) {
		DAV_DEBUG_REQ(r, 0, "Storage policy not initialized from the header value, nothing to do");
		goto end_deliver;
	}

	dt = storage_policy_get_data_treatments(sp);
	if (!dt)
		DAV_DEBUG_REQ(r, 0, "Data treatments not defined for this policy");

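	/* comp_opt holds the compression options read alongside the chunk; its
	 * lifetime is tied to the request pool through the cleanup callback. */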
	comp_opt = g_hash_table_new_full( g_str_hash, g_str_equal, g_free, g_free);
	apr_pool_cleanup_register(p, comp_opt, apr_hash_table_clean, apr_pool_cleanup_null);
	chunk = apr_palloc(p, sizeof(struct chunk_textinfo_s));
	content = apr_palloc(p, sizeof(struct content_textinfo_s));

	/* Load in-place information (sys-metadata & metadatacompress) */
	e = _load_in_place_chunk_info(resource, path, content, chunk, &comp_opt);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load in-place chunk information: %s", e->desc);
		goto end_deliver;
	}

	DAV_DEBUG_REQ(r, 0, "In place chunk info loaded, compression status : %s", (gchar*)g_hash_table_lookup(comp_opt, NS_COMPRESSION_OPTION));

	/* Apply the policy only if the chunk is not already in the required state */
	if (APR_SUCCESS != _is_storage_policy_already_applied(dt, comp_opt)) {
		DAV_DEBUG_REQ(r, 0, "Storage policy not applied yet, applying it");
		/* operate the data treatments */
		e = _update_chunk_storage(resource, path, dt, comp_opt);
		if (NULL != e) {
			DAV_ERROR_REQ(r, 0, "Failed to update chunk storage: %s", e->desc);
			goto end_deliver;
		}
		DAV_DEBUG_REQ(r, 0, "Chunk storage updated");
	} else {
		DAV_DEBUG_REQ(r, 0, "Storage policy already applied, don't do anything!");
	}
	/* Ensure the sys-metadata header is valid */
	e = _ensure_sys_metadata(resource, path, storage_policy_get_name(sp), content);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to ensure sys-metadata (storage-policy possibly missing from xattr): %s", e->desc);
		goto end_deliver;
	}

end_deliver:

	/* stats inc */

	return e;
}