/* JFS : walks are not managed by this rawx */
static dav_error *
dav_rawx_walk(const dav_walk_params *params, int depth, dav_response **response)
{
	dav_walk_resource wres;
	dav_error *err;

	(void) depth;
	err = NULL;
	memset(&wres, 0x00, sizeof(wres));
	wres.walk_ctx = params->walk_ctx;
	wres.pool = params->pool;
	wres.resource = params->root;

	DAV_XDEBUG_RES(params->root, 0, "sanity checks for %s(%s)",
			__FUNCTION__, resource_get_pathname(wres.resource));

	if (wres.resource->type != DAV_RESOURCE_TYPE_REGULAR)
		return server_create_and_stat_error(resource_get_server_config(params->root),
				params->root->pool, HTTP_CONFLICT, 0,
				"Only regular resources can be deleted with RAWX");
	if (wres.resource->collection)
		return server_create_and_stat_error(resource_get_server_config(params->root),
				params->root->pool, HTTP_CONFLICT, 0,
				"Collection resources cannot be deleted with RAWX");
	if (!wres.resource->exists)
		return server_create_and_stat_error(resource_get_server_config(params->root),
				params->root->pool, HTTP_NOT_FOUND, 0,
				"Resource not found (no chunk)");

	DAV_DEBUG_RES(params->root, 0, "ready for %s(%s)",
			__FUNCTION__, resource_get_pathname(wres.resource));

	err = (*params->func)(&wres, DAV_CALLTYPE_MEMBER);
	*response = wres.response;
	return err;
}
static dav_error *
dav_rawx_remove_resource(dav_resource *resource, dav_response **response)
{
	char buff[128];
	apr_pool_t *pool;
	apr_status_t status;
	dav_error *e = NULL;

	DAV_XDEBUG_RES(resource, 0, "%s(%s)", __FUNCTION__,
			resource_get_pathname(resource));
	pool = resource->pool;
	*response = NULL;

	if (DAV_RESOURCE_TYPE_REGULAR != resource->type) {
		e = server_create_and_stat_error(resource_get_server_config(resource), pool,
				HTTP_CONFLICT, 0, "Cannot DELETE this type of resource.");
		goto end_remove;
	}
	if (resource->collection) {
		e = server_create_and_stat_error(resource_get_server_config(resource), pool,
				HTTP_CONFLICT, 0, "No DELETE on collections");
		goto end_remove;
	}

	status = apr_file_remove(resource_get_pathname(resource), pool);
	if (APR_SUCCESS != status) {
		e = server_create_and_stat_error(resource_get_server_config(resource), pool,
				HTTP_FORBIDDEN, 0, apr_pstrcat(pool,
					"Failed to DELETE this chunk: ",
					apr_strerror(status, buff, sizeof(buff)), NULL));
		goto end_remove;
	}

	send_chunk_event("storage.chunk.deleted", resource);

	resource->exists = 0;
	resource->collection = 0;
	server_inc_stat(resource_get_server_config(resource), RAWX_STATNAME_REP_2XX, 0);

end_remove:
	/* We pass here even if an error occurred, to record the request duration */
	server_inc_request_stat(resource_get_server_config(resource),
			RAWX_STATNAME_REQ_CHUNKDEL,
			request_get_duration(resource->info->request));
	return e;
}
static dav_error *
_load_in_place_chunk_info(const dav_resource *r, const char *path,
		struct content_textinfo_s *content, struct chunk_textinfo_s *chunk,
		GHashTable **comp_opt)
{
	dav_error *e = NULL;
	GError *ge = NULL;
	apr_pool_t *p = r->pool;
	dav_rawx_server_conf *conf = resource_get_server_config(r);
	apr_finfo_t finfo;

	/* Check the chunk presence */
	if (APR_SUCCESS != apr_stat(&finfo, path, APR_FINFO_NORM, p)) {
		return server_create_and_stat_error(conf, r->pool,
				HTTP_NOT_FOUND, 0, "Chunk file not found");
	}

	if (!get_rawx_info_in_attr(path, &ge, content, chunk)) {
		if (NULL != ge) {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					apr_pstrcat(p, "Failed to get chunk attributes: ",
						ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					"Failed to get chunk attributes: no error specified");
		}
		return e;
	}

	str_replace_by_pooled_str(p, &(content->path));
	str_replace_by_pooled_str(p, &(content->size));
	str_replace_by_pooled_str(p, &(content->chunk_nb));
	str_replace_by_pooled_str(p, &(content->metadata));
	str_replace_by_pooled_str(p, &(content->system_metadata));
	str_replace_by_pooled_str(p, &(content->container_id));

	str_replace_by_pooled_str(p, &(chunk->id));
	str_replace_by_pooled_str(p, &(chunk->path));
	str_replace_by_pooled_str(p, &(chunk->size));
	str_replace_by_pooled_str(p, &(chunk->hash));
	str_replace_by_pooled_str(p, &(chunk->position));
	str_replace_by_pooled_str(p, &(chunk->metadata));
	str_replace_by_pooled_str(p, &(chunk->container_id));

	if (!get_compression_info_in_attr(path, &ge, comp_opt)) {
		if (NULL != ge) {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					apr_pstrcat(p, "Failed to get chunk compression attributes: ",
						ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					"Failed to get chunk compression attributes: no error specified");
		}
		return e;
	}

	return NULL;
}
static dav_error *
_load_request_info(const dav_resource *resource, char **full_path,
		struct storage_policy_s **sp)
{
	dav_error *e = NULL;
	const request_rec *r = resource->info->request;

	/* Build the chunk full path */
	e = __build_chunk_full_path(resource, full_path);
	if (NULL != e)
		return e;

	DAV_DEBUG_REQ(r, 0, "Chunk path built from request: %s", *full_path);

	/* Init the storage policy */
	const char *pol_name = apr_table_get(r->headers_in, "storage-policy");
	if (!pol_name) {
		return server_create_and_stat_error(request_get_server_config(r), r->pool,
				HTTP_BAD_REQUEST, 0, "No storage-policy specified");
	}
	DAV_DEBUG_REQ(r, 0, "Policy found in request: %s", pol_name);

	dav_rawx_server_conf *conf = resource_get_server_config(resource);
	*sp = storage_policy_init(conf->rawx_conf->ni, pol_name);
	apr_pool_cleanup_register(r->pool, *sp, apr_storage_policy_clean,
			apr_pool_cleanup_null);

	return NULL;
}
static dav_error *
dav_rawx_close_stream(dav_stream *stream, int commit)
{
	/* LAST STEP OF PUT REQUEST */
	dav_error *e = NULL;

	DAV_DEBUG_REQ(stream->r->info->request, 0,
			"Closing (%s) the stream to [%s]",
			(commit ? "commit" : "rollback"), stream->pathname);

	if (!commit) {
		e = rawx_repo_rollback_upload(stream);
	} else {
		e = rawx_repo_write_last_data_crumble(stream);
		if (e) {
			DAV_DEBUG_REQ(stream->r->info->request, 0,
					"Cannot commit, an error occurred while writing the end of the data");
			dav_error *e_tmp = rawx_repo_rollback_upload(stream);
			if (e_tmp) {
				DAV_ERROR_REQ(stream->r->info->request, 0,
						"Error while rolling back upload: %s", e_tmp->desc);
			}
		} else {
			e = rawx_repo_commit_upload(stream);
		}
	}

	/* Update the stats */
	if (stream->total_size > 0) {
		server_add_stat(resource_get_server_config(stream->r),
				RAWX_STATNAME_REP_BWRITTEN, stream->total_size, 0);
	}
	server_inc_request_stat(resource_get_server_config(stream->r),
			RAWX_STATNAME_REQ_CHUNKPUT,
			request_get_duration(stream->r->info->request));

	if (stream->md5) {
		g_checksum_free(stream->md5);
		stream->md5 = NULL;
	}

	return e;
}
static dav_error *
_load_in_place_chunk_info(const dav_resource *r, const char *path,
		struct chunk_textinfo_s *chunk, GHashTable *comp_opt)
{
	dav_error *e = NULL;
	GError *ge = NULL;
	apr_pool_t *p = r->pool;
	dav_rawx_server_conf *conf = resource_get_server_config(r);

	/* No need to check for the chunk's presence: getting its attributes
	 * will fail if the chunk doesn't exist. */
	if (!get_rawx_info_from_file(path, &ge, chunk)) {
		if (NULL != ge) {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					apr_pstrcat(p, "Failed to get chunk attributes: ",
						ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					"Failed to get chunk attributes: no error specified");
		}
		return e;
	}

	str_replace_by_pooled_str(p, &(chunk->container_id));
	str_replace_by_pooled_str(p, &(chunk->content_id));
	str_replace_by_pooled_str(p, &(chunk->content_path));
	str_replace_by_pooled_str(p, &(chunk->content_version));
	str_replace_by_pooled_str(p, &(chunk->content_size));
	str_replace_by_pooled_str(p, &(chunk->content_chunk_nb));
	str_replace_by_pooled_str(p, &(chunk->content_storage_policy));
	str_replace_by_pooled_str(p, &(chunk->content_chunk_method));
	str_replace_by_pooled_str(p, &(chunk->content_mime_type));
	str_replace_by_pooled_str(p, &(chunk->chunk_id));
	str_replace_by_pooled_str(p, &(chunk->chunk_size));
	str_replace_by_pooled_str(p, &(chunk->chunk_position));
	str_replace_by_pooled_str(p, &(chunk->chunk_hash));

	if (!get_compression_info_in_attr(path, &ge, comp_opt)) {
		if (NULL != ge) {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					apr_pstrcat(p, "Failed to get chunk compression attributes: ",
						ge->message, NULL));
			g_clear_error(&ge);
		} else {
			e = server_create_and_stat_error(conf, p, HTTP_CONFLICT, 0,
					"Failed to get chunk compression attributes: no error specified");
		}
		return e;
	}

	return NULL;
}
static dav_error *
dav_rawx_move_resource(dav_resource *src_res, dav_resource *dst_res,
		dav_response **response)
{
	char buff[128];
	apr_pool_t *pool;
	apr_status_t status;
	dav_error *e = NULL;
	dav_rawx_server_conf *srv_conf = resource_get_server_config(src_res);

	pool = dst_res->pool;
	*response = NULL;

	if (DAV_RESOURCE_TYPE_REGULAR != src_res->type) {
		e = server_create_and_stat_error(srv_conf, pool,
				HTTP_CONFLICT, 0, "Cannot MOVE this type of resource.");
		goto end_move;
	}
	if (src_res->collection) {
		e = server_create_and_stat_error(srv_conf, pool,
				HTTP_CONFLICT, 0, "No MOVE on collections");
		goto end_move;
	}
	if (apr_strnatcasecmp(src_res->info->hex_chunkid, dst_res->info->hex_chunkid)) {
		e = server_create_and_stat_error(srv_conf, pool,
				HTTP_FORBIDDEN, 0,
				"Source and destination chunk ids are not the same");
		goto end_move;
	}

	DAV_DEBUG_RES(src_res, 0, "Moving %s to %s",
			resource_get_pathname(src_res), resource_get_pathname(dst_res));
	status = apr_file_rename(resource_get_pathname(src_res),
			resource_get_pathname(dst_res), pool);
	if (status != APR_SUCCESS) {
		e = server_create_and_stat_error(srv_conf, pool,
				HTTP_INTERNAL_SERVER_ERROR, status, apr_pstrcat(pool,
					"Failed to MOVE this chunk: ",
					apr_strerror(status, buff, sizeof(buff)), NULL));
		goto end_move;
	}

	server_inc_stat(srv_conf, RAWX_STATNAME_REP_2XX, 0);

end_move:
	server_inc_request_stat(srv_conf, RAWX_STATNAME_REQ_OTHER,
			request_get_duration(src_res->info->request));
	return e;
}
static dav_error *
dav_rainx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
	const char *result;
	int result_len;
	apr_status_t status;
	apr_pool_t *pool;
	apr_bucket_brigade *bb;
	apr_bucket *bkt;

	DAV_XDEBUG_RES(resource, 0, "%s()", __FUNCTION__);
	pool = resource->info->request->pool;

	/* Check the resource type */
	if (resource->type != DAV_RESOURCE_TYPE_PRIVATE)
		return server_create_and_stat_error(resource_get_server_config(resource), pool,
				HTTP_CONFLICT, 0, apr_pstrdup(pool, "Cannot GET this type of resource."));
	if (resource->collection)
		return server_create_and_stat_error(resource_get_server_config(resource), pool,
				HTTP_CONFLICT, 0, apr_pstrdup(pool, "No GET on collections"));

	/* Generate the output */
	result = resource->info->generator(resource, pool);
	result_len = strlen(result);

	/* The reply must be delivered as a buffer */
	bkt = apr_bucket_heap_create(result, result_len, NULL, output->c->bucket_alloc);
	bb = apr_brigade_create(pool, output->c->bucket_alloc);
	APR_BRIGADE_INSERT_TAIL(bb, bkt);

	/* Nothing more to reply */
	bkt = apr_bucket_eos_create(output->c->bucket_alloc);
	APR_BRIGADE_INSERT_TAIL(bb, bkt);

	DAV_XDEBUG_RES(resource, 0, "%s : ready to deliver", __FUNCTION__);

	if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS)
		return server_create_and_stat_error(resource_get_server_config(resource), pool,
				HTTP_FORBIDDEN, 0, apr_pstrdup(pool, "Could not write contents to filter."));

	server_inc_stat(resource_get_server_config(resource), RAWX_STATNAME_REP_2XX, 0);

	/* Update the per-request counters */
	switch (resource->info->type) {
		case STAT:
			server_inc_request_stat(resource_get_server_config(resource),
					RAWX_STATNAME_REQ_STAT,
					request_get_duration(resource->info->request));
			break;
		case INFO:
			server_inc_request_stat(resource_get_server_config(resource),
					RAWX_STATNAME_REQ_INFO,
					request_get_duration(resource->info->request));
			break;
		default:
			break;
	}

	return NULL;
}
static dav_error *
_ensure_sys_metadata(const dav_resource *resource, const char *path,
		const char *sp, struct content_textinfo_s *content)
{
	if (!sp) {
		return NULL;
	}

	GError *ge = NULL;
	dav_error *e = NULL;
	int change = 1;

	if (!content->system_metadata) {
		content->system_metadata = apr_pstrcat(resource->pool,
				"storage-policy=", sp, ";", NULL);
	} else {
		const char *p = g_strrstr(content->system_metadata, "storage-policy=");
		if (NULL != p) {
			/* A policy is already present: replace it if it differs.
			 * Note: 'end' may be NULL when the policy is the last token;
			 * apr_pstrcat() stops at the first NULL argument, so the
			 * concatenation below still ends right after the new policy. */
			const char *end = NULL;
			p = p + strlen("storage-policy=");
			end = strchr(p, ';');
			if ((strlen(sp) != (size_t)(end - p))
					|| 0 != g_ascii_strncasecmp(sp, p, strlen(sp))) {
				content->system_metadata = apr_pstrndup(resource->pool,
						content->system_metadata, p - content->system_metadata);
				content->system_metadata = apr_pstrcat(resource->pool,
						content->system_metadata, sp, end, NULL);
			} else {
				change = 0;
			}
		} else {
			/* No policy yet: append one */
			if (g_str_has_suffix(content->system_metadata, ";")) {
				content->system_metadata = apr_pstrcat(resource->pool,
						content->system_metadata, "storage-policy=", sp, NULL);
			} else {
				content->system_metadata = apr_pstrcat(resource->pool,
						content->system_metadata, ";storage-policy=", sp, NULL);
			}
		}
	}

	if (change && !set_content_info_in_attr(path, &ge, content)) {
		e = server_create_and_stat_error(resource_get_server_config(resource),
				resource->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
				apr_pstrcat(resource->pool, "Failed to set chunk xattr: ",
					(NULL != ge) ? ge->message : "No error specified", NULL));
		if (NULL != ge)
			g_clear_error(&ge);
		return e;
	}

	return NULL;
}
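/* The sketch below is NOT part of the module: it is a minimal, standalone
 * illustration of the "storage-policy=" rewrite performed above, assuming
 * the same semicolon-separated "key=value" system-metadata format. It is
 * simplified (it always rewrites instead of comparing first) and its names
 * (demo_set_policy, the sample metadata strings) are hypothetical. */
#include <glib.h>
#include <string.h>

static gchar *
demo_set_policy(const gchar *sys_md, const gchar *sp)
{
	if (!sys_md || !*sys_md)
		return g_strconcat("storage-policy=", sp, ";", NULL);

	const gchar *p = g_strrstr(sys_md, "storage-policy=");
	if (!p) {
		/* No policy yet: append one, keeping a single ';' separator */
		return g_str_has_suffix(sys_md, ";")
			? g_strconcat(sys_md, "storage-policy=", sp, NULL)
			: g_strconcat(sys_md, ";storage-policy=", sp, NULL);
	}

	p += strlen("storage-policy=");
	const gchar *end = strchr(p, ';');   /* NULL if the policy is the last token */
	gchar *prefix = g_strndup(sys_md, p - sys_md);
	gchar *out = g_strconcat(prefix, sp, end ? end : "", NULL);
	g_free(prefix);
	return out;
}

/* Example (hypothetical values):
 *   demo_set_policy("mime-type=app/foo;storage-policy=SINGLE", "THREECOPIES")
 *     -> "mime-type=app/foo;storage-policy=THREECOPIES" */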
void
send_chunk_event(const char *type, const dav_resource *resource)
{
	int rc;
	dav_rawx_server_conf *conf = resource_get_server_config(resource);

	GString *json = g_string_sized_new(128);
	g_string_append_printf(json,
			"{"
			"\"volume_id\":\"%s\","
			"\"container_id\":\"%s\","
			"\"content_id\":\"%s\","
			"\"content_version\":\"%s\","
			"\"content_path\":\"%s\","
			"\"content_storage_policy\":\"%s\","
			"\"content_mime_type\":\"%s\","
			"\"content_chunk_method\":\"%s\","
			"\"chunk_id\":\"%s\","
			"\"chunk_hash\":\"%s\","
			"\"chunk_position\":\"%s\","
			"\"chunk_size\":\"%s\"",
			conf->rawx_id,
			resource->info->content.container_id,
			resource->info->content.content_id,
			resource->info->content.version,
			resource->info->content.path,
			resource->info->content.storage_policy,
			resource->info->content.mime_type,
			resource->info->content.chunk_method,
			resource->info->chunk.id,
			resource->info->chunk.hash,
			resource->info->chunk.position,
			resource->info->chunk.size);

	if (resource->info->content.size)
		g_string_append_printf(json, ",\"content_size\":\"%s\"",
				resource->info->content.size);
	if (resource->info->content.chunk_nb)
		g_string_append_printf(json, ",\"content_nbchunks\":\"%s\"",
				resource->info->content.chunk_nb);

	g_string_append_printf(json, "}");

	rc = rawx_event_send(type, json);
	DAV_DEBUG_REQ(resource->info->request, 0, "Event %s %s", type, rc ? "OK" : "KO");
}
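/* Illustrative payload only: the keys follow the format string above, but
 * every value here is made up. A "storage.chunk.deleted" event would look
 * roughly like:
 *
 *   {"volume_id":"192.168.0.1:6010","container_id":"B1E5...","content_id":"A0F3...",
 *    "content_version":"1","content_path":"movie.avi","content_storage_policy":"SINGLE",
 *    "content_mime_type":"application/octet-stream","content_chunk_method":"plain",
 *    "chunk_id":"C4F2...","chunk_hash":"D41D...","chunk_position":"0",
 *    "chunk_size":"1048576","content_size":"1048576","content_nbchunks":"1"}
 */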
static dav_error *
dav_rawx_seek_stream(dav_stream *stream, apr_off_t abs_pos)
{
	DAV_XDEBUG_POOL(stream->p, 0, "%s(%s)", __FUNCTION__, stream->pathname);

	TRACE("Seek stream: START please contact CDR if you get this TRACE");

	if (fseek(stream->f, abs_pos, SEEK_SET) != 0) {
		/* ### should we check that the stream really ended up at abs_pos? */
		/* ### use something besides 500? */
		return server_create_and_stat_error(resource_get_server_config(stream->r),
				stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
				"Could not seek to the specified position in the resource.");
	}

	return NULL;
}
static const char *
__gen_stats(const dav_resource *resource, apr_pool_t *pool)
{
	DAV_XDEBUG_POOL(pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *c = resource_get_server_config(resource);

	apr_global_mutex_lock(c->lock.handle);
	struct shm_stats_s *stats = apr_shm_baseaddr_get(c->shm.handle);
	apr_global_mutex_unlock(c->lock.handle);

	return apr_pstrcat(pool,
			STR_KV(time_all, "counter req.time"),
			STR_KV(time_put, "counter req.time.put"),
			STR_KV(time_get, "counter req.time.get"),
			STR_KV(time_del, "counter req.time.del"),
			STR_KV(time_stat, "counter req.time.stat"),
			STR_KV(time_info, "counter req.time.info"),
			STR_KV(time_raw, "counter req.time.raw"),
			STR_KV(time_other, "counter req.time.other"),
			STR_KV(req_all, "counter req.hits"),
			STR_KV(req_chunk_put, "counter req.hits.put"),
			STR_KV(req_chunk_get, "counter req.hits.get"),
			STR_KV(req_chunk_del, "counter req.hits.del"),
			STR_KV(req_stat, "counter req.hits.stat"),
			STR_KV(req_info, "counter req.hits.info"),
			STR_KV(req_raw, "counter req.hits.raw"),
			STR_KV(req_other, "counter req.hits.other"),
			STR_KV(rep_2XX, "counter rep.hits.2xx"),
			STR_KV(rep_4XX, "counter rep.hits.4xx"),
			STR_KV(rep_5XX, "counter rep.hits.5xx"),
			STR_KV(rep_other, "counter rep.hits.other"),
			STR_KV(rep_403, "counter rep.hits.403"),
			STR_KV(rep_404, "counter rep.hits.404"),
			STR_KV(rep_bread, "counter rep.bread"),
			STR_KV(rep_bwritten, "counter rep.bwritten"),
			NULL);
}
static dav_error *
dav_rawx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
	(void) output;
	dav_error *e = NULL;
	const request_rec *r = resource->info->request;
	GHashTable *comp_opt = NULL;
	struct chunk_textinfo_s *chunk = NULL;
	char *path = NULL;
	apr_pool_t *p = resource->pool;

	/* Load the request information */
	e = _load_request_info(resource, &path);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load request information: %s", e->desc);
		goto end_deliver;
	}

	comp_opt = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
	apr_pool_cleanup_register(p, comp_opt, apr_hash_table_clean,
			apr_pool_cleanup_null);
	chunk = apr_palloc(p, sizeof(struct chunk_textinfo_s));

	/* Load the in-place information (sys-metadata & metadatacompress) */
	e = _load_in_place_chunk_info(resource, path, chunk, comp_opt);
	if (NULL != e) {
		DAV_ERROR_REQ(r, 0, "Failed to load in-place chunk information: %s", e->desc);
		goto end_deliver;
	}

	DAV_ERROR_REQ(r, 0, "Failed to update chunk storage: PAYMENT REQUIRED"
			" to compress data (we accept bitcoins)");
	dav_rawx_server_conf *conf = resource_get_server_config(resource);
	e = server_create_and_stat_error(conf, p, HTTP_PAYMENT_REQUIRED, 0,
			"Pay more to manage compression");

end_deliver:
	/* stats inc */
	return e;
}
/* XXX JFS : etags are strings that uniquely identify a content.
 * A chunk is unique in a namespace, thus the ETag must contain
 * both fields. */
static const char *
dav_rawx_getetag(const dav_resource *resource)
{
	const char *etag;
	dav_rawx_server_conf *conf;
	dav_resource_private *ctx;

	ctx = resource->info;
	conf = resource_get_server_config(resource);

	if (!resource->exists) {
		DAV_DEBUG_RES(resource, 0, "%s(%s) : resource not found",
				__FUNCTION__, resource_get_pathname(resource));
		return NULL;
	}

	etag = apr_psprintf(resource->pool, "chunk-%s-%s",
			conf->ns_name, ctx->hex_chunkid);
	DAV_DEBUG_RES(resource, 0, "%s(%s) : ETag=[%s]",
			__FUNCTION__, resource_get_pathname(resource), etag);
	return etag;
}
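/* Per the apr_psprintf() format above, the ETag has the shape
 * "chunk-<ns_name>-<hex_chunkid>"; with hypothetical values this would be
 * something like "chunk-NS-0123456789ABCDEF0123456789ABCDEF". */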
static const char *
__gen_stats(const dav_resource *resource, apr_pool_t *pool)
{
	struct shm_stats_s *stats = NULL;

	DAV_XDEBUG_POOL(pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *c = resource_get_server_config(resource);

	apr_global_mutex_lock(c->lock.handle);
	stats = apr_shm_baseaddr_get(c->shm.handle);
	apr_global_mutex_unlock(c->lock.handle);

	apr_uint64_t req = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_sec), 4);
	apr_uint64_t reqavgtime = rainx_stats_rrd_get_delta(&(stats->body.rrd_duration), 4);
	apr_uint64_t req_put = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_put_sec), 4);
	apr_uint64_t reqavgtime_put = rainx_stats_rrd_get_delta(&(stats->body.rrd_put_duration), 4);
	apr_uint64_t req_get = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_get_sec), 4);
	apr_uint64_t reqavgtime_get = rainx_stats_rrd_get_delta(&(stats->body.rrd_get_duration), 4);
	apr_uint64_t req_del = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_del_sec), 4);
	apr_uint64_t reqavgtime_del = rainx_stats_rrd_get_delta(&(stats->body.rrd_del_duration), 4);

	apr_uint64_t r_time = 0, r_put_time = 0, r_get_time = 0, r_del_time = 0;
	if (req > 0)
		r_time = reqavgtime / req;
	if (req_put > 0)
		r_put_time = reqavgtime_put / req_put;
	if (req_get > 0)
		r_get_time = reqavgtime_get / req_get;
	if (req_del > 0)
		r_del_time = reqavgtime_del / req_del;

	double r_rate = (double)req / 4;
	double r_put_rate = (double)req_put / 4;
	double r_get_rate = (double)req_get / 4;
	double r_del_rate = (double)req_del / 4;

	return apr_pstrcat(pool,
			STR_KV(req_all, "req.all"),
			STR_KV(req_chunk_put, "req.put"),
			STR_KV(req_chunk_get, "req.get"),
			STR_KV(req_chunk_del, "req.del"),
			STR_KV(req_stat, "req.stat"),
			STR_KV(req_info, "req.info"),
			STR_KV(req_raw, "req.raw"),
			STR_KV(req_other, "req.other"),
			STR_KV(rep_2XX, "rep.2xx"),
			STR_KV(rep_4XX, "rep.4xx"),
			STR_KV(rep_5XX, "rep.5xx"),
			STR_KV(rep_other, "rep.other"),
			STR_KV(rep_403, "rep.403"),
			STR_KV(rep_404, "rep.404"),
			STR_KV(rep_bread, "rep.bread"),
			STR_KV(rep_bwritten, "rep.bwritten"),
			apr_psprintf(pool, "rainx.reqpersec %f\n", r_rate),
			apr_psprintf(pool, "rainx.avreqtime %"APR_UINT64_T_FMT"\n", r_time),
			apr_psprintf(pool, "rainx.reqputpersec %f\n", r_put_rate),
			apr_psprintf(pool, "rainx.avputreqtime %"APR_UINT64_T_FMT"\n", r_put_time),
			apr_psprintf(pool, "rainx.reqgetpersec %f\n", r_get_rate),
			apr_psprintf(pool, "rainx.avgetreqtime %"APR_UINT64_T_FMT"\n", r_get_time),
			apr_psprintf(pool, "rainx.reqdelpersec %f\n", r_del_rate),
			apr_psprintf(pool, "rainx.avdelreqtime %"APR_UINT64_T_FMT"\n", r_del_time),
			NULL);
}
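/* Illustrative output only: the "rainx.*" lines follow the apr_psprintf()
 * formats above, the values are made up, and the "key value\n" layout of the
 * STR_KV() lines is an assumption. The tail of the generated report would
 * look roughly like:
 *
 *   rep.bwritten 10485760
 *   rainx.reqpersec 12.500000
 *   rainx.avreqtime 1375
 *   rainx.reqputpersec 3.250000
 *   rainx.avputreqtime 2048
 */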
static dav_error *
dav_rawx_write_stream(dav_stream *stream, const void *buf, apr_size_t bufsize)
{
	DAV_XDEBUG_POOL(stream->p, 0, "%s(%s)", __FUNCTION__, stream->pathname);

	guint written = 0;
	gulong checksum = stream->compress_checksum;

	while (written < bufsize) {
		/* Stage as much as fits in the current block */
		guint tmp = MIN(bufsize - written, stream->blocksize - stream->bufsize);
		memcpy(stream->buffer + stream->bufsize, buf + written, tmp);
		written += tmp;
		stream->bufsize += tmp;

		/* If the buffer is full, compress it if needed and write it to the target file */
		if (stream->blocksize - stream->bufsize <= 0) {
			gsize nb_write = 0;
			if (!stream->compression) {
				nb_write = fwrite(stream->buffer, stream->bufsize, 1, stream->f);
				if (nb_write != 1) {
					/* ### use something besides 500? */
					return server_create_and_stat_error(
							resource_get_server_config(stream->r), stream->p,
							HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while writing to a resource.");
				}
			} else {
				GByteArray *gba = g_byte_array_new();
				if (stream->comp_ctx.data_compressor(stream->buffer,
							stream->bufsize, gba, &checksum) != 0) {
					if (gba)
						g_byte_array_free(gba, TRUE);
					/* ### use something besides 500? */
					return server_create_and_stat_error(
							resource_get_server_config(stream->r), stream->p,
							HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while compressing data.");
				}
				nb_write = fwrite(gba->data, gba->len, 1, stream->f);
				if (nb_write != 1) {
					if (gba)
						g_byte_array_free(gba, TRUE);
					/* ### use something besides 500? */
					return server_create_and_stat_error(
							resource_get_server_config(stream->r), stream->p,
							HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while writing to a resource.");
				}
				stream->compressed_size += gba->len;
				if (gba)
					g_byte_array_free(gba, TRUE);
			}

			stream->buffer = apr_pcalloc(stream->p, stream->blocksize);
			stream->bufsize = 0;
		}
	}

	stream->compress_checksum = checksum;

	/* Update the hash and the stats */
	g_checksum_update(stream->md5, buf, bufsize);

	/* Update the total size */
	stream->total_size += bufsize;
	return NULL;
}
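/* The sketch below is NOT part of the module: it is a minimal, standalone
 * illustration of the buffering scheme used above (copy the caller's data
 * into a fixed-size block and flush each time the block fills up). The
 * names (demo_stream, demo_stream_write, flush_cb) are hypothetical. */
#include <stddef.h>
#include <string.h>

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

struct demo_stream {
	char *block;        /* fixed-size staging buffer */
	size_t blocksize;   /* capacity of 'block' */
	size_t used;        /* bytes currently staged */
	int (*flush_cb)(const char *data, size_t len);   /* called on each full block */
};

static int
demo_stream_write(struct demo_stream *s, const char *buf, size_t len)
{
	size_t written = 0;
	while (written < len) {
		/* Stage as much as fits in the current block */
		size_t chunk = MIN(len - written, s->blocksize - s->used);
		memcpy(s->block + s->used, buf + written, chunk);
		s->used += chunk;
		written += chunk;
		if (s->used == s->blocksize) {   /* block full: flush it */
			if (s->flush_cb(s->block, s->used) != 0)
				return -1;
			s->used = 0;
		}
	}
	return 0;
}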
apr_status_t
rainx_http_req(struct req_params_store* rps)
{
	const dav_resource* resource = rps->resource;
	char* remote_uri = rps->service_address;
	char* req_type = rps->req_type;
	char* header = rps->header;
	char* data = rps->data_to_send;
	int data_length = rps->data_to_send_size;
	char** reply = &(rps->reply);
	apr_pool_t *local_pool = rps->pool;

	/* Check the parameters before touching the resource */
	if (NULL == resource || NULL == remote_uri || NULL == req_type) {
		DAV_ERROR_POOL(local_pool, APR_EINVAL, "One of these parameters is wrong: "
				"resource=%p, remote_uri=%p, req_type=%p (%s:%d)",
				(void*)resource, (void*)remote_uri, (void*)req_type,
				__FILE__, __LINE__);
		return APR_EINVAL;
	}

	dav_rainx_server_conf *server_conf = resource_get_server_config(resource);
	if (NULL == server_conf) {
		DAV_ERROR_POOL(local_pool, APR_EINVAL,
				"server_conf is NULL (%s:%d)", __FILE__, __LINE__);
		return APR_EINVAL;
	}

	const gboolean is_get = (0 == g_strcmp0(req_type, "GET"));

	/* Isolate the rawx IP and port */
	char *temp_remote_uri = apr_pstrdup(local_pool, remote_uri);
	char* last;
	char* full_remote_url = apr_strtok(temp_remote_uri, "/", &last);
	char* content_hexid = apr_pstrdup(local_pool,
			remote_uri + strlen(full_remote_url));

	char* remote_ip = NULL;
	char* scope_id = NULL;
	apr_port_t remote_port;
	apr_parse_addr_port(&remote_ip, &scope_id, &remote_port,
			full_remote_url, local_pool);
	/* ------- */

	/* Prepare the socket */
	apr_socket_t* sock;
	apr_sockaddr_t* sockaddr;
	apr_status_t status;

	if ((status = apr_sockaddr_info_get(&sockaddr, remote_ip, APR_INET,
			remote_port, 0, local_pool)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0,
				"unable to connect to the rawx %s", full_remote_url);
		return status;
	}
	if ((status = apr_socket_create(&sock, sockaddr->family, SOCK_STREAM,
			APR_PROTO_TCP, local_pool)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0,
				"unable to create a socket to the rawx %s", full_remote_url);
		return status;
	}
	if ((status = apr_socket_timeout_set(sock, server_conf->socket_timeout))
			!= APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0,
				"unable to set the timeout of the socket to the rawx %s",
				full_remote_url);
		return status;
	}
	if ((status = apr_socket_connect(sock, sockaddr)) != APR_SUCCESS) {
		DAV_DEBUG_REQ(resource->info->request, 0,
				"unable to establish the connection to the rawx %s",
				full_remote_url);
		return status;
	}
	/* ------- */

	/* Forge the message */
	char* forged_header = apr_psprintf(local_pool, "%s %s HTTP/1.1\nHost: %s",
			req_type, content_hexid, full_remote_url);
	if (header)
		forged_header = apr_psprintf(local_pool, "%s\n%s", forged_header, header);
	if (data)
		forged_header = apr_psprintf(local_pool, "%s\nContent-Length: %d\n\n",
				forged_header, data_length);
	else
		forged_header = apr_psprintf(local_pool, "%s\n\n", forged_header);
	/* ------- */

	/* Send the message */
	int remaining_to_send = strlen(forged_header);
	char* ptr_start = forged_header;
	apr_size_t send_buffer_size;
	while (remaining_to_send > 0) {
		if (remaining_to_send < REQUEST_BUFFER_SIZE)
			send_buffer_size = (apr_size_t)remaining_to_send;
		else
			send_buffer_size = REQUEST_BUFFER_SIZE;

		if ((status = apr_socket_send(sock, ptr_start, &send_buffer_size))
				!= APR_SUCCESS) {
			DAV_DEBUG_REQ(resource->info->request, 0,
					"failed to send the %s request to the rawx %s",
					req_type, full_remote_url);
			apr_status_t status_sav = status;
			apr_socket_close(sock);
			return status_sav;
		}

		remaining_to_send -= send_buffer_size;
		ptr_start = ptr_start + send_buffer_size;
	}

	if (NULL != data) {
		remaining_to_send = data_length;
		ptr_start = data;
		while (remaining_to_send > 0) {
			if (remaining_to_send < REQUEST_BUFFER_SIZE)
				send_buffer_size = (apr_size_t)remaining_to_send;
			else
				send_buffer_size = REQUEST_BUFFER_SIZE;

			if ((status = apr_socket_send(sock, ptr_start, &send_buffer_size))
					!= APR_SUCCESS) {
				DAV_DEBUG_REQ(resource->info->request, 0,
						"failed to send the %s request to the rawx %s",
						req_type, full_remote_url);
				apr_status_t status_sav = status;
				apr_socket_close(sock);
				return status_sav;
			}

			remaining_to_send -= send_buffer_size;
			ptr_start = ptr_start + send_buffer_size;
		}
	}

	if (is_get) {
		/* This avoids a ~5s delay in the communication */
		apr_socket_shutdown(sock, APR_SHUTDOWN_WRITE);
	}

	DAV_DEBUG_REQ(resource->info->request, 0,
			"%s request to the rawx %s sent for the content %s",
			req_type, full_remote_url, content_hexid);
	/* ------ */

	/* Get the reply */
	char* reply_ptr = *reply;
	apr_size_t total_size;
	if (!is_get)
		total_size = REPLY_BUFFER_SIZE;                      /* PUT or DELETE */
	else
		total_size = MAX_REPLY_HEADER_SIZE + data_length;    /* GET */
	apr_size_t reply_size = (apr_size_t)total_size;
	apr_size_t total_replied_size;
	do {
		status = apr_socket_recv(sock, reply_ptr, &reply_size);
		reply_ptr += reply_size;
		total_replied_size = reply_ptr - *reply;
		/* Leave when OK, or error != timeout, or buffer full */
		if (status == APR_EOF || (status == APR_SUCCESS && !is_get) ||
				(reply_size == 0) || total_replied_size >= total_size) {
			break;
		}
		/* Take care of overflows! */
		reply_size = total_size - total_replied_size;
	} while (total_replied_size < total_size);
	/* ------- */

	apr_socket_close(sock);

	return status;
}
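/* For reference, the request forged above looks roughly like the following
 * (illustrative values only; note the function uses bare "\n" separators
 * rather than "\r\n", and content_hexid keeps its leading '/'):
 *
 *   PUT /0123456789ABCDEF HTTP/1.1
 *   Host: 127.0.0.1:6010
 *   <extra header line, if any>
 *   Content-Length: 1048576
 *
 *   <data_to_send bytes>
 */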
static dav_error *
dav_rawx_deliver(const dav_resource *resource, ap_filter_t *output)
{
	dav_rawx_server_conf *conf;
	apr_pool_t *pool;
	apr_bucket_brigade *bb = NULL;
	apr_status_t status;
	apr_bucket *bkt = NULL;
	dav_resource_private *ctx;
	dav_error *e = NULL;
	apr_finfo_t info;

	DAV_XDEBUG_RES(resource, 0, "%s(%s)", __FUNCTION__,
			resource_get_pathname(resource));

	pool = resource->pool;
	conf = resource_get_server_config(resource);

	/* Check the resource type */
	if (DAV_RESOURCE_TYPE_REGULAR != resource->type) {
		e = server_create_and_stat_error(conf, pool, HTTP_CONFLICT, 0,
				"Cannot GET this type of resource.");
		goto end_deliver;
	}
	if (resource->collection) {
		e = server_create_and_stat_error(conf, pool, HTTP_CONFLICT, 0,
				"No GET on collections");
		goto end_deliver;
	}

	ctx = resource->info;

	if (ctx->update_only) {
		/* Check that the file is not busy: we accept reads during
		 * compression but not attribute updates. */
		char *pending_file = apr_pstrcat(pool,
				resource_get_pathname(resource), ".pending", NULL);
		status = apr_stat(&info, pending_file, APR_FINFO_ATIME, pool);
		if (status == APR_SUCCESS) {
			e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
					"File in pending mode.");
			goto end_deliver;
		}

		GError *error_local = NULL;

		/* UPDATE the chunk attributes and go on */
		const char *path = resource_get_pathname(resource);
		FILE *f = fopen(path, "r"); /* Open the file, but forbid its creation */
		if (!f) {
			e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
					"File permissions deny server access.");
			goto end_deliver;
		}
		if (!set_rawx_info_to_fd(fileno(f), &error_local, &(ctx->chunk))) {
			fclose(f);
			e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
					apr_pstrdup(pool, gerror_get_message(error_local)));
			g_clear_error(&error_local);
			goto end_deliver;
		}
		fclose(f);
	} else {
		bb = apr_brigade_create(pool, output->c->bucket_alloc);

		if (!ctx->compression) {
			apr_file_t *fd = NULL;

			/* Open the file, but forbid its creation */
			status = apr_file_open(&fd, resource_get_pathname(resource),
					APR_READ|APR_BINARY|APR_BUFFERED, 0, pool);
			if (APR_SUCCESS != status) {
				e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
						"File permissions deny server access.");
				goto end_deliver;
			}

			/* FIXME this does not handle large files, but this is test code anyway */
			bkt = apr_bucket_file_create(fd, 0,
					(apr_size_t)resource->info->finfo.size,
					pool, output->c->bucket_alloc);
		} else {
			DAV_DEBUG_RES(resource, 0, "Building a compressed resource bucket");
			gint64 i64 = g_ascii_strtoll(ctx->cp_chunk.uncompressed_size, NULL, 10);

			/* Create a compression-specific bucket */
			bkt = apr_pcalloc(pool, sizeof(struct apr_bucket));
			bkt->type = &chunk_bucket_type;
			bkt->length = i64;
			bkt->start = 0;
			bkt->data = ctx;
			bkt->free = chunk_bucket_free_noop;
			bkt->list = output->c->bucket_alloc;
		}

		APR_BRIGADE_INSERT_TAIL(bb, bkt);

		/* As soon as the chunk has been sent, end of stream! */
		bkt = apr_bucket_eos_create(output->c->bucket_alloc);
		APR_BRIGADE_INSERT_TAIL(bb, bkt);

		if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS) {
			e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
					"Could not write contents to filter.");
			/* Close the file */
			if (ctx->cp_chunk.fd) {
				fclose(ctx->cp_chunk.fd);
			}
			goto end_deliver;
		}

		if (ctx->cp_chunk.buf) {
			g_free(ctx->cp_chunk.buf);
			ctx->cp_chunk.buf = NULL;
		}
		if (ctx->cp_chunk.uncompressed_size) {
			g_free(ctx->cp_chunk.uncompressed_size);
			ctx->cp_chunk.uncompressed_size = NULL;
		}

		/* Close the file */
		if (ctx->cp_chunk.fd) {
			fclose(ctx->cp_chunk.fd);
		}

		server_inc_stat(conf, RAWX_STATNAME_REP_2XX, 0);
		server_add_stat(conf, RAWX_STATNAME_REP_BWRITTEN,
				resource->info->finfo.size, 0);
	}

end_deliver:
	if (bb) {
		apr_brigade_destroy(bb);
		bb = NULL;
	}

	/* We pass here even if an error occurred, to record the request duration */
	server_inc_request_stat(resource_get_server_config(resource),
			RAWX_STATNAME_REQ_CHUNKGET,
			request_get_duration(resource->info->request));

	return e;
}