Code example #1
static dav_error *
_load_in_place_chunk_info(const dav_resource *r, const char *path, struct content_textinfo_s *content,
		struct chunk_textinfo_s *chunk, GHashTable **comp_opt)
{
	dav_error *e = NULL;
	GError *ge = NULL;
	apr_pool_t *p = r->pool;
	dav_rawx_server_conf *conf = resource_get_server_config(r);

	apr_finfo_t finfo;

	/* check chunk presence */

	if(APR_SUCCESS != apr_stat(&finfo, path, APR_FINFO_NORM, p)) {
		return server_create_and_stat_error(conf, r->pool, HTTP_NOT_FOUND,
				0, "Chunk file not found");
	}

	if(!get_rawx_info_in_attr(path, &ge,
				content, chunk)) {
		if(NULL != ge) {	
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT,
				0, apr_pstrcat(p, "Failed to get chunk attributes: ", ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT,
			                                0, "Failed to get chunk chunk attributes: No error specified");
		}
		return e;
	}

	str_replace_by_pooled_str(p, &(content->path));
	str_replace_by_pooled_str(p, &(content->size));
	str_replace_by_pooled_str(p, &(content->chunk_nb));
	str_replace_by_pooled_str(p, &(content->metadata));
	str_replace_by_pooled_str(p, &(content->system_metadata));
	str_replace_by_pooled_str(p, &(content->container_id));
	str_replace_by_pooled_str(p, &(chunk->id));
	str_replace_by_pooled_str(p, &(chunk->path));
	str_replace_by_pooled_str(p, &(chunk->size));
	str_replace_by_pooled_str(p, &(chunk->hash));
	str_replace_by_pooled_str(p, &(chunk->position));
	str_replace_by_pooled_str(p, &(chunk->metadata));
	str_replace_by_pooled_str(p, &(chunk->container_id));

	if(!get_compression_info_in_attr(path, &ge, comp_opt)){
		if(NULL != ge) {	
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT,
				0, apr_pstrcat(p, "Failed to get chunk compression attributes: ", ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT,
			                                0, "Failed to get chunk compression attributes: No error specified");
		}
		return e;
	}

	return NULL;
}
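
The helpers used above (get_rawx_info_in_attr, get_compression_info_in_attr) read the chunk and content fields straight from the chunk file's metadata; the error string in _ensure_sys_metadata further below calls that storage "chunk xattr". As a minimal standalone sketch of that lower layer, assuming a Linux host and extended attributes, and with a purely hypothetical attribute name supplied on the command line (the real attribute names and parsing live elsewhere in the module):

/* Minimal sketch: read one extended attribute of a chunk file as a string.
 * Linux-only (sys/xattr.h); the attribute name is whatever the caller passes,
 * the real rawx attribute names are defined elsewhere in the module. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

static char *read_xattr(const char *path, const char *name)
{
	/* First call asks for the size, second call fetches the value. */
	ssize_t len = getxattr(path, name, NULL, 0);
	if (len < 0)
		return NULL;
	char *value = malloc(len + 1);
	if (!value)
		return NULL;
	len = getxattr(path, name, value, (size_t) len);
	if (len < 0) {
		free(value);
		return NULL;
	}
	value[len] = '\0';
	return value;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s FILE ATTRIBUTE\n", argv[0]);
		return 1;
	}
	char *value = read_xattr(argv[1], argv[2]);
	printf("%s = %s\n", argv[2], value ? value : "(unset)");
	free(value);
	return 0;
}

The size-query-then-read pattern is the usual getxattr idiom; error handling here is deliberately minimal.
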
Code example #2
/* JFS : walks are not managed by this rawx */
static dav_error *
dav_rawx_walk(const dav_walk_params *params, int depth, dav_response **response)
{
	dav_walk_resource wres;
	dav_error *err;

	(void) depth;
	err = NULL;
	memset(&wres, 0x00, sizeof(wres));
	wres.walk_ctx = params->walk_ctx;
	wres.pool = params->pool;
	wres.resource = params->root;

	DAV_XDEBUG_RES(params->root, 0, "sanity checks for %s(%s)", __FUNCTION__, resource_get_pathname(wres.resource));

	if (wres.resource->type != DAV_RESOURCE_TYPE_REGULAR)
		return server_create_and_stat_error(resource_get_server_config(params->root), params->root->pool,
			HTTP_CONFLICT, 0, "Only regular resources can be deleted with RAWX");
	if (wres.resource->collection)
		return server_create_and_stat_error(resource_get_server_config(params->root), params->root->pool,
			HTTP_CONFLICT, 0, "Collection resources canot be deleted with RAWX");
	if (!wres.resource->exists)
		return server_create_and_stat_error(resource_get_server_config(params->root), params->root->pool,
			HTTP_NOT_FOUND, 0, "Resource not found (no chunk)");

	DAV_DEBUG_RES(params->root, 0, "ready for %s(%s)", __FUNCTION__, resource_get_pathname(wres.resource));
	err = (*params->func)(&wres, DAV_CALLTYPE_MEMBER);
	*response = wres.response;
	return err;
}
Code example #3
static dav_error *
dav_rainx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
	const char *result;
	int result_len;
	apr_status_t status;
	apr_pool_t *pool;
	apr_bucket_brigade *bb;
	apr_bucket *bkt;

	DAV_XDEBUG_RES(resource, 0, "%s()", __FUNCTION__);
	pool = resource->info->request->pool;

	/* Check resource type */
	if (resource->type != DAV_RESOURCE_TYPE_PRIVATE)
		return server_create_and_stat_error(resource_get_server_config(resource), pool,
			HTTP_CONFLICT, 0, apr_pstrdup(pool, "Cannot GET this type of resource."));
	if (resource->collection)
		return server_create_and_stat_error(resource_get_server_config(resource), pool,
			HTTP_CONFLICT, 0, apr_pstrdup(pool,"No GET on collections"));

	/* Generate the output */
	result = resource->info->generator(resource, pool);
	result_len = strlen(result);

	/* We must reply a buffer */
	bkt = apr_bucket_heap_create(result, result_len, NULL, output->c->bucket_alloc);
	bb = apr_brigade_create(pool, output->c->bucket_alloc);
	APR_BRIGADE_INSERT_TAIL(bb, bkt);

	/* Nothing more to reply */
	bkt = apr_bucket_eos_create(output->c->bucket_alloc);
	APR_BRIGADE_INSERT_TAIL(bb, bkt);

	DAV_XDEBUG_RES(resource, 0, "%s : ready to deliver", __FUNCTION__);

	if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS)
		return server_create_and_stat_error(resource_get_server_config(resource), pool,
			HTTP_FORBIDDEN, 0, apr_pstrdup(pool,"Could not write contents to filter."));

	server_inc_stat(resource_get_server_config(resource), RAWX_STATNAME_REP_2XX, 0);

	/* HERE ADD request counter */
	switch(resource->info->type) {
		case STAT:
			server_inc_request_stat(resource_get_server_config(resource), RAWX_STATNAME_REQ_STAT,
				request_get_duration(resource->info->request));
			break;
		case INFO:
			server_inc_request_stat(resource_get_server_config(resource), RAWX_STATNAME_REQ_INFO,
				request_get_duration(resource->info->request));
			break;
		default:
			break;
	}

	return NULL;
}
Code example #4
static dav_error *
_load_in_place_chunk_info(const dav_resource *r, const char *path, struct chunk_textinfo_s *chunk, GHashTable *comp_opt)
{
	dav_error *e = NULL;
	GError *ge = NULL;
	apr_pool_t *p = r->pool;
	dav_rawx_server_conf *conf = resource_get_server_config(r);

	/* No need to check for the chunk's presence, getting its attributes will
	 * fail if the chunk doesn't exist */
	if (!get_rawx_info_from_file(path, &ge, chunk)) {
		if (NULL != ge) {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					apr_pstrcat(p, "Failed to get chunk attributes: ", ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					"Failed to get chunk chunk attributes: No error specified");
		}
		return e;
	}

	str_replace_by_pooled_str(p, &(chunk->container_id));

	str_replace_by_pooled_str(p, &(chunk->content_id));
	str_replace_by_pooled_str(p, &(chunk->content_path));
	str_replace_by_pooled_str(p, &(chunk->content_version));
	str_replace_by_pooled_str(p, &(chunk->content_size));
	str_replace_by_pooled_str(p, &(chunk->content_chunk_nb));

	str_replace_by_pooled_str(p, &(chunk->content_storage_policy));
	str_replace_by_pooled_str(p, &(chunk->content_chunk_method));
	str_replace_by_pooled_str(p, &(chunk->content_mime_type));

	str_replace_by_pooled_str(p, &(chunk->chunk_id));
	str_replace_by_pooled_str(p, &(chunk->chunk_size));
	str_replace_by_pooled_str(p, &(chunk->chunk_position));
	str_replace_by_pooled_str(p, &(chunk->chunk_hash));

	if(!get_compression_info_in_attr(path, &ge, comp_opt)){
		if(NULL != ge) {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT,
				0, apr_pstrcat(p, "Failed to get chunk compression attributes: ", ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT,
			                                0, "Failed to get chunk compression attributes: No error specified");
		}
		return e;
	}

	return NULL;
}
Code example #5
static dav_error *
dav_rawx_move_resource(dav_resource *src_res, dav_resource *dst_res,
		dav_response **response)
{
	char buff[128];
	apr_pool_t *pool = dst_res->pool;
	apr_status_t status;
	dav_error *e = NULL;
	dav_rawx_server_conf *srv_conf = resource_get_server_config(src_res);

	*response = NULL;

	if (DAV_RESOURCE_TYPE_REGULAR != src_res->type)  {
		e = server_create_and_stat_error(srv_conf, pool,
			HTTP_CONFLICT, 0, "Cannot MOVE this type of resource.");
		goto end_move;
	}
	if (src_res->collection) {
		e = server_create_and_stat_error(srv_conf, pool,
			HTTP_CONFLICT, 0, "No MOVE on collections");
		goto end_move;
	}
	if (apr_strnatcasecmp(src_res->info->hex_chunkid,
			dst_res->info->hex_chunkid)) {
		e = server_create_and_stat_error(srv_conf, pool,
				HTTP_FORBIDDEN, 0,
				"Source and destination chunk ids are not the same");
		goto end_move;
	}

	DAV_DEBUG_RES(src_res, 0, "Moving %s to %s",
			resource_get_pathname(src_res), resource_get_pathname(dst_res));
	status = apr_file_rename(resource_get_pathname(src_res),
			resource_get_pathname(dst_res), pool);

	if (status != APR_SUCCESS) {
		e = server_create_and_stat_error(srv_conf,
				pool, HTTP_INTERNAL_SERVER_ERROR, status,
				apr_pstrcat(pool, "Failed to MOVE this chunk: ",
					apr_strerror(status, buff, sizeof(buff)), NULL));
		goto end_move;
	}

	server_inc_stat(srv_conf, RAWX_STATNAME_REP_2XX, 0);

end_move:
	server_inc_request_stat(srv_conf, RAWX_STATNAME_REQ_OTHER,
			request_get_duration(src_res->info->request));

	return e;
}
Code example #6
static dav_error *
dav_rawx_remove_resource(dav_resource *resource, dav_response **response)
{
	char buff[128];
	apr_pool_t *pool;
	apr_status_t status;
	dav_error *e = NULL;

	DAV_XDEBUG_RES(resource, 0, "%s(%s)", __FUNCTION__, resource_get_pathname(resource));
	pool = resource->pool;
	*response = NULL;

	if (DAV_RESOURCE_TYPE_REGULAR != resource->type)  {
		e = server_create_and_stat_error(resource_get_server_config(resource), pool,
			HTTP_CONFLICT, 0, "Cannot DELETE this type of resource.");
		goto end_remove;
	}
	if (resource->collection) {
		e = server_create_and_stat_error(resource_get_server_config(resource), pool,
			HTTP_CONFLICT, 0, "No DELETE on collections");
		goto end_remove;
	}

	status = apr_file_remove(resource_get_pathname(resource), pool);
	if (APR_SUCCESS != status) {
		e = server_create_and_stat_error(resource_get_server_config(resource), pool,
			HTTP_FORBIDDEN, 0, apr_pstrcat(pool,
					"Failed to DELETE this chunk : ",
					apr_strerror(status, buff, sizeof(buff)),
					NULL));
		goto end_remove;
	}

	send_chunk_event("storage.chunk.deleted", resource);

	resource->exists = 0;
	resource->collection = 0;

	server_inc_stat(resource_get_server_config(resource), RAWX_STATNAME_REP_2XX, 0);

end_remove:

	/* Now we pass here even if an error occurred, so the request duration is still accounted for */
	server_inc_request_stat(resource_get_server_config(resource), RAWX_STATNAME_REQ_CHUNKDEL,
				request_get_duration(resource->info->request));

	return e;
}
Code example #7
static dav_error *
_load_request_info(const dav_resource *resource, char **full_path, struct storage_policy_s **sp)
{
	dav_error *e = NULL;
	const request_rec *r = resource->info->request;

	/* configure full path */
	e = __build_chunk_full_path(resource, full_path);
	if (NULL != e)
		return e;

	DAV_DEBUG_REQ(r, 0, "Chunk path build from request: %s", *full_path);
	
	/* init loaded storage policy */
	const char *pol_name = apr_table_get(r->headers_in, "storage-policy");
	if (!pol_name) {
		return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, "No storage-policy specified");
	}
	DAV_DEBUG_REQ(r, 0, "Policy found in request: %s", pol_name);

	dav_rawx_server_conf *conf = resource_get_server_config(resource);

	*sp = storage_policy_init(conf->rawx_conf->ni, pol_name);
	apr_pool_cleanup_register(r->pool, *sp, apr_storage_policy_clean, apr_pool_cleanup_null);

	return NULL;
}
Code example #8
static dav_error *
__build_chunk_full_path(const dav_resource *resource, char **full_path)
{
	
	const request_rec *r = resource->info->request;
	dav_rawx_server_conf *conf = request_get_server_config(r);

	if(strlen(r->uri) < 65)
		return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, apr_pstrcat(r->pool, "Cannot parse request uri ", r->uri, NULL));
	uint i_p = 1;
	uint i_uri = 1;

	char *p = apr_palloc(r->pool, (65 + 1 + (conf->hash_depth * conf->hash_width) + conf->hash_depth));

	p[0] = '/';

	for (int i = 0; i < conf->hash_depth ; i++) {
		for (int j = 0; j < conf->hash_width ; j++)
			p[i_p++] = r->uri[i_uri++];
		p[i_p++] = '/';
	}
		
	memcpy(p + i_p, r->uri + 1, 64);
	i_p += 64;
	p[i_p] = '\0';

	*full_path = apr_pstrcat(r->pool, conf->docroot, p, NULL);

	return NULL;
}
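
The loop above fans the 64-hex-character chunk id out over hash_depth directory levels of hash_width characters each, then appends the full id, so e.g. with depth 2 and width 2 a chunk AABBCC… lands under <docroot>/AA/BB/AABBCC…. A standalone sketch of the same slicing, using an illustrative docroot and hash configuration rather than the module's real settings:

/* Minimal sketch of the hashed-path layout built by __build_chunk_full_path:
 * hash_depth directory levels made of hash_width leading characters of the
 * chunk id, then the full id. The docroot, depth and width in main() are
 * illustrative values, not the module's configuration. */
#include <stdio.h>
#include <string.h>

static void build_chunk_path(char *dst, size_t dst_size, const char *docroot,
		const char *chunk_id, int hash_depth, int hash_width)
{
	size_t off = (size_t) snprintf(dst, dst_size, "%s", docroot);

	for (int i = 0; i < hash_depth && off < dst_size; i++)
		off += (size_t) snprintf(dst + off, dst_size - off, "/%.*s",
				hash_width, chunk_id + i * hash_width);

	if (off < dst_size)
		snprintf(dst + off, dst_size - off, "/%s", chunk_id);
}

int main(void)
{
	char path[512];
	const char *chunk_id =
		"AABBCCDDEEFF00112233445566778899AABBCCDDEEFF00112233445566778899";

	build_chunk_path(path, sizeof(path), "/var/lib/rawx", chunk_id, 2, 2);
	printf("%s\n", path); /* -> /var/lib/rawx/AA/BB/AABBCCDD... */
	return 0;
}
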
Code example #9
static dav_error *
_update_chunk_storage(const dav_resource *resource, const char *path, const struct data_treatments_s *dt, GHashTable *comp_opt)
{
	GError *e = NULL;
	dav_error *de = NULL;
	const char *c = NULL;
	const request_rec *r = resource->info->request;
	c = g_hash_table_lookup(comp_opt, NS_COMPRESSION_OPTION);
	if(NULL != c && 0 == g_ascii_strcasecmp(c, NS_COMPRESSION_ON)) {
		DAV_DEBUG_REQ(r, 0, "In place chunk is compressed, uncompress it");
		if(1 != uncompress_chunk(path, TRUE, &e)) {
			de = server_create_and_stat_error(request_get_server_config(r),
					r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
					apr_pstrcat(r->pool, "Failed to uncompress chunk : ",
					((NULL != e)? e->message : "No error specified"), NULL));
			if(NULL != e)
				g_clear_error(&e);
			return de;
		}
		DAV_DEBUG_REQ(r, 0, "Chunk uncompressed");
	}

	if(COMPRESSION == data_treatments_get_type(dt)) {
		DAV_DEBUG_REQ(r, 0, "Re compressing chunk");
		const char *algo = data_treatments_get_param(dt, DT_KEY_ALGO);
		const char *bs = data_treatments_get_param(dt, DT_KEY_BLOCKSIZE);
		if(!algo || !bs) {
			return server_create_and_stat_error(request_get_server_config(r),
					r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
					apr_pstrcat(r->pool, "Cannot compress chunk, missing info: ",
						algo, "|", bs, NULL));
		}

		if(1 != compress_chunk(path, algo, g_ascii_strtoll(bs, NULL, 10), TRUE, &e)) {
			de = server_create_and_stat_error(request_get_server_config(r),
					r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
					apr_pstrcat(r->pool, "Failed to compress chunk : ",
						((NULL != e)? e->message : "No error specified"), NULL));
			if(NULL != e)
				g_clear_error(&e);
			return de;
		}
	}

	return NULL;
}
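
_update_chunk_storage only ever consults comp_opt through g_hash_table_lookup, so the table is a plain string-to-string map owning both keys and values. A minimal standalone GLib sketch of such a table follows; the key and value literals are illustrative stand-ins for the NS_COMPRESSION_OPTION / NS_COMPRESSION_ON constants defined elsewhere in the code base:

/* Minimal standalone GLib sketch of the comp_opt table consulted above.
 * Build with: gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 * The "compression"/"on" literals are hypothetical stand-ins for the real
 * NS_COMPRESSION_OPTION / NS_COMPRESSION_ON constants. */
#include <glib.h>
#include <stdio.h>

int main(void)
{
	/* The rawx code owns both keys and values, hence g_free / g_free. */
	GHashTable *comp_opt = g_hash_table_new_full(g_str_hash, g_str_equal,
			g_free, g_free);

	/* In the module these pairs come from the chunk's compression attributes. */
	g_hash_table_insert(comp_opt, g_strdup("compression"), g_strdup("on"));
	g_hash_table_insert(comp_opt, g_strdup("compression_algorithm"), g_strdup("zlib"));

	const char *c = g_hash_table_lookup(comp_opt, "compression");
	if (c != NULL && 0 == g_ascii_strcasecmp(c, "on"))
		printf("chunk is stored compressed (algorithm: %s)\n",
				(char *) g_hash_table_lookup(comp_opt, "compression_algorithm"));

	g_hash_table_destroy(comp_opt);
	return 0;
}
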
Code example #10
static dav_error *
_ensure_sys_metadata(const dav_resource *resource, const char *path, const char *sp, struct content_textinfo_s *content)
{

	if(!sp) {
		return NULL;
	}

	GError *ge = NULL;
	dav_error *e = NULL;
	int change = 1;

	if(!content->system_metadata) {
		content->system_metadata = apr_pstrcat(resource->pool, "storage-policy=", sp, ";", NULL);
	} else {
		const char *p = g_strrstr(content->system_metadata, "storage-policy=");
		if(NULL != p) {
			const char *end = NULL;
			p = p + strlen("storage-policy=");
			end = strchr(p, ';');
			if((strlen(sp) != (size_t)(end - p))
					|| 0 != g_ascii_strncasecmp(sp, p , strlen(sp))) {
				content->system_metadata = apr_pstrndup(resource->pool, content->system_metadata, p - content->system_metadata);
				content->system_metadata = apr_pstrcat(resource->pool, content->system_metadata, sp, end, NULL); 
			} else {
				change = 0;
			}
		} else {
			if(g_str_has_suffix(content->system_metadata, ";")) {
				content->system_metadata = apr_pstrcat(resource->pool, content->system_metadata, "storage-policy=", sp, NULL);
			} else {
				content->system_metadata = apr_pstrcat(resource->pool, content->system_metadata, ";storage-policy=", sp, NULL);
			}
		}
	}
	
	if(change && !set_content_info_in_attr(path, &ge, content)) {
		e = server_create_and_stat_error(resource_get_server_config(resource), resource->pool,
			HTTP_INTERNAL_SERVER_ERROR, 0, apr_pstrcat( resource->pool, "Failed to set chunk xattr : ",
			(NULL != ge) ? ge->message : "No error specified", NULL));
		if(NULL != ge)
			g_clear_error(&ge);
		return e;
	}

	return NULL;
}
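
_ensure_sys_metadata treats system_metadata as a "key=value;" list: it appends "storage-policy=<sp>;" when the key is missing and swaps the value in place when it differs. A standalone sketch of that rewrite, with plain C and malloc standing in for the APR pool helpers; the sample metadata strings are invented for illustration:

/* Minimal sketch of the rewrite performed by _ensure_sys_metadata:
 * append "storage-policy=<sp>;" when absent, replace the value otherwise. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *ensure_storage_policy(const char *sys_md, const char *sp)
{
	const char *key = "storage-policy=";
	char *out = malloc(strlen(sys_md ? sys_md : "") + strlen(key) + strlen(sp) + 3);
	if (!out)
		return NULL;

	if (!sys_md || !*sys_md) {
		/* No metadata yet: start a new "key=value;" list. */
		sprintf(out, "%s%s;", key, sp);
		return out;
	}

	const char *p = strstr(sys_md, key);
	if (!p) {
		/* Policy absent: append it, adding a separator if needed. */
		const char *sep = sys_md[strlen(sys_md) - 1] == ';' ? "" : ";";
		sprintf(out, "%s%s%s%s;", sys_md, sep, key, sp);
		return out;
	}

	/* Policy present: keep the prefix, swap the value, keep the suffix. */
	const char *val = p + strlen(key);
	const char *end = strchr(val, ';');
	if (!end)
		end = val + strlen(val);
	sprintf(out, "%.*s%s%s", (int) (val - sys_md), sys_md, sp, end);
	return out;
}

int main(void)
{
	const char *cases[] = { NULL, "mime-type=app/octet-stream;",
			"mime-type=app/octet-stream;storage-policy=TWOCOPIES;" };
	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		char *md = ensure_storage_policy(cases[i], "THREECOPIES");
		printf("%s\n", md ? md : "(error)");
		free(md);
	}
	return 0;
}
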
Code example #11
static dav_error *
dav_rawx_seek_stream(dav_stream *stream, apr_off_t abs_pos)
{
	DAV_XDEBUG_POOL(stream->p, 0, "%s(%s)", __FUNCTION__, stream->pathname);
	TRACE("Seek stream: START please contact CDR if you get this TRACE");

	if (fseek(stream->f, abs_pos, SEEK_SET) != 0) {
		/* ### should check whether the seek actually reached the
		 * requested position? */
		/* ### use something besides 500? */
		return server_create_and_stat_error(resource_get_server_config(stream->r), stream->p,
			HTTP_INTERNAL_SERVER_ERROR, 0,
				"Could not seek to specified position in the "
				"resource.");
	}
	return NULL;
}
Code example #12
dav_error *
dav_rainx_info_get_resource(request_rec *r, const char *root_dir, const char *label,
	int use_checked_in, dav_resource **result_resource)
{
	(void) root_dir;
	(void) label;
	(void) use_checked_in;

	DAV_XDEBUG_REQ(r, 0, "%s(...)", __FUNCTION__);
	*result_resource = NULL;

	if (r->method_number != M_GET)
		return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, apr_pstrdup(r->pool, "Invalid request method, only GET"));

	*result_resource = __build_req_resource(r, &dav_hooks_repository_rainxinfo, __gen_info);
	(*result_resource)->info->type = INFO;
	return NULL;
}
Code example #13
static dav_error *
dav_rawx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
	(void) output;
	dav_error *e = NULL;
	const request_rec *r = resource->info->request;
	GHashTable *comp_opt = NULL;
	struct chunk_textinfo_s *chunk = NULL;
	char *path = NULL;
	apr_pool_t *p = resource->pool;

	/* Load request information */
	e = _load_request_info(resource, &path);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load request informations: %s", e->desc);
		goto end_deliver;
	}

	comp_opt = g_hash_table_new_full( g_str_hash, g_str_equal, g_free, g_free);
	apr_pool_cleanup_register(p, comp_opt, apr_hash_table_clean, apr_pool_cleanup_null);
	chunk = apr_palloc(p, sizeof(struct chunk_textinfo_s));

	/* Load in-place information (sys-metadata & metadatacompress) */
	e = _load_in_place_chunk_info(resource, path, chunk, comp_opt);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load in place chunk information: %s", e->desc);
		goto end_deliver;
	}

	DAV_ERROR_REQ(r, 0, "Failed to update chunk storage: PAYMENT REQUIRED"
			" to compress data (we accept bitcoins)");
	dav_rawx_server_conf *conf = resource_get_server_config(resource);
	e = server_create_and_stat_error(conf, p, HTTP_PAYMENT_REQUIRED,
			0, "Pay more to manage compression");

end_deliver:
	/* stats inc */
	return e;
}
Code example #14
static dav_error *
dav_rawx_deliver(const dav_resource *resource, ap_filter_t *output)
{
	dav_rawx_server_conf *conf;
	apr_pool_t *pool;
	apr_bucket_brigade *bb = NULL;
	apr_status_t status;
	apr_bucket *bkt = NULL;
	dav_resource_private *ctx;
	dav_error *e = NULL;

	apr_finfo_t info;

	DAV_XDEBUG_RES(resource, 0, "%s(%s)", __FUNCTION__, resource_get_pathname(resource));

	pool = resource->pool;
	conf = resource_get_server_config(resource);

	/* Check resource type */
	if (DAV_RESOURCE_TYPE_REGULAR != resource->type) {
		e = server_create_and_stat_error(conf, pool, HTTP_CONFLICT, 0, "Cannot GET this type of resource.");
		goto end_deliver;
	}

	if (resource->collection) {
		e = server_create_and_stat_error(conf, pool, HTTP_CONFLICT, 0, "No GET on collections");
		goto end_deliver;
	}

	ctx = resource->info;

	if (ctx->update_only) {
		/* Check if it is not a busy file. We accept reads during
		 * compression but not attr updates. */
		char *pending_file = apr_pstrcat(pool,
				resource_get_pathname(resource), ".pending", NULL);
		status = apr_stat(&info, pending_file, APR_FINFO_ATIME, pool);
		if (status == APR_SUCCESS) {
			e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN,
					0, "File in pending mode.");
			goto end_deliver;
		}

		GError *error_local = NULL;
		/* UPDATE chunk attributes and go on */
		const char *path = resource_get_pathname(resource);
		/* Try to open the file, but do not create it */
		FILE *f = fopen(path, "r");
		if (!f) {
			e = server_create_and_stat_error(conf, pool,
					HTTP_FORBIDDEN, 0, "File permissions deny server access.");
			goto end_deliver;
		}
		if (!set_rawx_info_to_fd(fileno(f), &error_local, &(ctx->chunk))) {
			fclose(f);
			e = server_create_and_stat_error(conf, pool,
					HTTP_FORBIDDEN, 0, apr_pstrdup(pool, gerror_get_message(error_local)));
			g_clear_error(&error_local);
			goto end_deliver;
		}
		fclose(f);
	} else {
		bb = apr_brigade_create(pool, output->c->bucket_alloc);

		if (!ctx->compression){
			apr_file_t *fd = NULL;

			/* Try to open the file, but do not create it */
			status = apr_file_open(&fd, resource_get_pathname(resource),
					APR_READ|APR_BINARY|APR_BUFFERED, 0, pool);
			if (APR_SUCCESS != status) {
				e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN,
						0, "File permissions deny server access.");
				goto end_deliver;
			}

			/* FIXME this does not handle large files. but this is test code anyway */
			bkt = apr_bucket_file_create(fd, 0,
					(apr_size_t)resource->info->finfo.size,
					pool, output->c->bucket_alloc);
		}
		else {
			DAV_DEBUG_RES(resource, 0, "Building a compressed resource bucket");
			gint i64;

			i64 = g_ascii_strtoll(ctx->cp_chunk.uncompressed_size, NULL, 10);

			/* creation of compression specific bucket */
			bkt = apr_pcalloc(pool, sizeof(struct apr_bucket));
			bkt->type = &chunk_bucket_type;
			bkt->length = i64;
			bkt->start = 0;
			bkt->data = ctx;
			bkt->free = chunk_bucket_free_noop;
			bkt->list = output->c->bucket_alloc;
		}

		APR_BRIGADE_INSERT_TAIL(bb, bkt);

		/* as soon as the chunk has been sent, end of stream!*/
		bkt = apr_bucket_eos_create(output->c->bucket_alloc);
		APR_BRIGADE_INSERT_TAIL(bb, bkt);

		if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS){
			e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0, "Could not write contents to filter.");
			/* close file */
			if (ctx->cp_chunk.fd) {
				fclose(ctx->cp_chunk.fd);
			}
			goto end_deliver;
		}
		if (ctx->cp_chunk.buf){
			g_free(ctx->cp_chunk.buf);
			ctx->cp_chunk.buf = NULL;
		}
		if (ctx->cp_chunk.uncompressed_size){
			g_free(ctx->cp_chunk.uncompressed_size);
			ctx->cp_chunk.uncompressed_size = NULL;
		}

		/* close file */
		if (ctx->cp_chunk.fd) {
			fclose(ctx->cp_chunk.fd);
		}

		server_inc_stat(conf, RAWX_STATNAME_REP_2XX, 0);
		server_add_stat(conf, RAWX_STATNAME_REP_BWRITTEN, resource->info->finfo.size, 0);
	}

end_deliver:

	if (bb) {
		apr_brigade_destroy(bb);
		bb = NULL;
	}

	/* Now we pass here even if an error occurred, so the request duration is still accounted for */
	server_inc_request_stat(resource_get_server_config(resource), RAWX_STATNAME_REQ_CHUNKGET,
			request_get_duration(resource->info->request));

	return e;
}
Code example #15
static dav_error *
dav_rawx_write_stream(dav_stream *stream, const void *buf, apr_size_t bufsize)
{
	DAV_XDEBUG_POOL(stream->p, 0, "%s(%s)", __FUNCTION__, stream->pathname);

	guint written = 0;
	gulong checksum = stream->compress_checksum;

	while (written < bufsize) {
		guint tmp = MIN(bufsize - written, stream->blocksize - stream->bufsize);
		memcpy(stream->buffer + stream->bufsize, buf + written, tmp);
		written += tmp;
		stream->bufsize += tmp;

		/* If buffer full, compress if needed and write to distant file */
		if (stream->blocksize - stream->bufsize <=0){
			gsize nb_write = 0;
			if (!stream->compression) {
				nb_write = fwrite(stream->buffer, stream->bufsize, 1, stream->f);
				if (nb_write != 1) {
					/* ### use something besides 500? */
					return server_create_and_stat_error(resource_get_server_config(stream->r), stream->p,
							HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while writing to a "
							"resource.");
				}
			} else {
				GByteArray *gba = g_byte_array_new();
				if (stream->comp_ctx.data_compressor(stream->buffer, stream->bufsize, gba,
							&checksum)!=0) {
					if (gba)
						g_byte_array_free(gba, TRUE);
					/* ### use something besides 500? */
					return server_create_and_stat_error(resource_get_server_config(stream->r), stream->p,
							HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while compressing data.");
				}
				nb_write = fwrite(gba->data, gba->len, 1, stream->f);
				if (nb_write != 1) {
					if (gba)
						g_byte_array_free(gba, TRUE);
					/* ### use something besides 500? */
					return server_create_and_stat_error(resource_get_server_config(stream->r), stream->p,
							HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while writing to a "
							"resource.");
				}
				stream->compressed_size += gba->len;
				if (gba)
					g_byte_array_free(gba, TRUE);
			}

			stream->buffer = apr_pcalloc(stream->p, stream->blocksize);
			stream->bufsize = 0;
		}
	}

	stream->compress_checksum = checksum;

	/* update the hash and the stats */
	g_checksum_update(stream->md5, buf, bufsize);
	/* update total_size */
	stream->total_size += bufsize;
	return NULL;
}
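
dav_rawx_write_stream copies the request body into a blocksize-sized buffer and flushes one block (optionally compressed) each time it fills; whatever is left stays buffered for the next call or for the final commit. A minimal standalone sketch of that accumulation loop, with stdout standing in for the chunk file and compression left out:

/* Minimal sketch of the blocksize accumulation in dav_rawx_write_stream:
 * copy as much input as fits, flush full blocks, keep the remainder
 * buffered. stdout replaces the chunk FILE*, compression is omitted. */
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 8

struct stream_sketch {
	char buffer[BLOCKSIZE];
	size_t bufsize;     /* bytes currently buffered */
	size_t total_size;  /* running total, as in the real stream */
};

static int write_stream(struct stream_sketch *s, const char *buf, size_t bufsize)
{
	size_t written = 0;

	while (written < bufsize) {
		size_t tmp = bufsize - written;
		if (tmp > BLOCKSIZE - s->bufsize)
			tmp = BLOCKSIZE - s->bufsize;
		memcpy(s->buffer + s->bufsize, buf + written, tmp);
		written += tmp;
		s->bufsize += tmp;

		/* Buffer full: flush one block (the real code may compress it first). */
		if (s->bufsize == BLOCKSIZE) {
			if (fwrite(s->buffer, BLOCKSIZE, 1, stdout) != 1)
				return -1;
			s->bufsize = 0;
		}
	}
	s->total_size += bufsize;
	return 0;
}

int main(void)
{
	struct stream_sketch s = {{0}, 0, 0};
	write_stream(&s, "hello, ", 7);
	write_stream(&s, "rawx block buffering\n", 21);
	/* Final partial block: flushed by the close/commit step in the real module. */
	if (s.bufsize)
		fwrite(s.buffer, s.bufsize, 1, stdout);
	return 0;
}
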
Code example #16
static dav_error *
dav_rawx_get_resource(request_rec *r, const char *root_dir, const char *label,
	int use_checked_in, dav_resource **result_resource)
{
	(void) use_checked_in;
	*result_resource = NULL;

	dav_rawx_server_conf *conf = request_get_server_config(r);

	/* Check if client allowed to work with us */
	if (conf->enabled_acl) {
#if MODULE_MAGIC_COOKIE == 0x41503234UL /* "AP24" */
		if (!authorized_personal_only(r->connection->client_ip, conf->rawx_conf->acl))
#else
		if (!authorized_personal_only(r->connection->remote_ip, conf->rawx_conf->acl))
#endif
		{
			return server_create_and_stat_error(conf, r->pool,
					HTTP_UNAUTHORIZED, 0, "Permission Denied (APO)");
		}
	}

	/* Create private resource context descriptor */
	dav_resource_private ctx = {0};
	ctx.pool = r->pool;
	ctx.request = r;

	dav_error *e = rawx_repo_check_request(r, root_dir, label, use_checked_in,
			&ctx, result_resource);
	/* Return in case we have an error or
	 * if result_resource != null because it was an info request */
	if (e || *result_resource) {
		return e;
	}

	/* Build the hashed path */
	if (conf->hash_width <= 0 || conf->hash_depth <= 0) {
		apr_snprintf(ctx.dirname, sizeof(ctx.dirname),
			"%.*s", (int)sizeof(conf->docroot), conf->docroot);
	} else {
		e = rawx_repo_configure_hash_dir(r, &ctx);
		if ( NULL != e) {
			return e;
		}
	}
	DAV_DEBUG_REQ(r, 0, "Hashed directory: %.*s", (int)sizeof(ctx.dirname), ctx.dirname);

	/* All the checks on the URL have passed, now build a resource */
	dav_resource *resource = apr_pcalloc(r->pool, sizeof(*resource));
	resource->type = DAV_RESOURCE_TYPE_REGULAR;
	resource->info = apr_pcalloc(r->pool, sizeof(ctx));
	memcpy(resource->info, &ctx, sizeof(ctx));
	resource->hooks = &dav_hooks_repository_rawx;
	resource->pool = r->pool;
	memset(&(resource->info->comp_ctx), 0, sizeof(struct compression_ctx_s));

	resource->info->fullpath = apr_pstrcat(resource->pool,
		resource->info->dirname, resource->info->hex_chunkid,
		resource->info->file_extension,
		NULL);

	/* init compression context structure if we are in get method */
	if (r->method_number == M_GET && !ctx.update_only) {
		resource_init_decompression(resource, conf);
	}

	/* Check the chunk's existence */
	int flags = (r->method_number == M_GET ||
			r->method_number == M_OPTIONS ||
			r->method_number == M_DELETE)?
				 RESOURCE_STAT_CHUNK_READ_ATTRS : 0;
	if (r->method_number == M_PUT || r->method_number == M_POST)
		flags |= RESOURCE_STAT_CHUNK_PENDING;

	resource_stat_chunk(resource, flags);

	if (r->method_number == M_PUT || r->method_number == M_POST ||
			r->method_number == M_MOVE ||
			(r->method_number == M_GET && ctx.update_only)) {
		request_load_chunk_info_from_headers(r, &(resource->info->chunk));
		const char *missing = check_chunk_info(&resource->info->chunk);
		if (missing != NULL) {
			return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, apr_pstrcat(r->pool, "missing or invalid header ", missing, NULL));
		}
	}

	if (r->method_number == M_POST || r->method_number == M_PUT) {
		if (resource->info->chunk.chunk_id) {
			if (0 != apr_strnatcasecmp(resource->info->chunk.chunk_id, resource->info->hex_chunkid))
				return server_create_and_stat_error(request_get_server_config(r), r->pool,
						HTTP_BAD_REQUEST, 0, "chunk-id mismatch");
		}
		if (resource->exists)
			return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_CONFLICT, 0, "Resource busy or already exists");
		request_parse_query(r, resource);
	}

	*result_resource = resource;
	return NULL;
}