int any_request(ne_session *sess, const char *uri)
{
    ne_request *req = ne_request_create(sess, "GET", uri);
    int ret = ne_request_dispatch(req);

    ne_request_destroy(req);

    return ret;
}
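/* A minimal, self-contained sketch of the pattern the examples in this
 * collection share: create a session, dispatch a request, check both the
 * transport-level result (ne_request_dispatch() != NE_OK) and the HTTP
 * status class, then clean up.  The host and path used here are
 * hypothetical illustrations, not taken from any of the projects quoted
 * below; only the neon calls themselves are real API. */
#include <stdio.h>
#include <ne_socket.h>
#include <ne_session.h>
#include <ne_request.h>

static int fetch_example(void)
{
    ne_session *sess;
    ne_request *req;
    int ret;

    if (ne_sock_init())                 /* initialise the socket layer once */
        return -1;

    /* hypothetical endpoint, for illustration only */
    sess = ne_session_create("http", "example.org", 80);
    req = ne_request_create(sess, "GET", "/index.html");

    ret = ne_request_dispatch(req);

    if (ret != NE_OK || ne_get_status(req)->klass != 2) {
        fprintf(stderr, "request failed: %s\n", ne_get_error(sess));
        ret = -1;
    } else {
        ret = 0;
    }

    ne_request_destroy(req);
    ne_session_destroy(sess);
    ne_sock_exit();

    return ret;
}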
static Hbf_State _hbf_transfer_no_chunk(ne_session *session, hbf_transfer_t *transfer, const char *verb)
{
    int res;
    const ne_status *req_status;

    ne_request *req = ne_request_create(session, verb ? verb : "PUT", transfer->url);
    if (!req)
        return HBF_MEMORY_FAIL;

    ne_add_request_header( req, "Content-Type", "application/octet-stream");
    ne_set_request_body_fd(req, transfer->fd, 0, transfer->stat_size);
    DEBUG_HBF("HBF: chunking not supported for %s", transfer->url);

    res = ne_request_dispatch(req);
    req_status = ne_get_status( req );

    if (res == NE_OK && req_status->klass == 2) {
        ne_request_destroy(req);
        return HBF_SUCCESS;
    }

    if( transfer->error_string )
        free( transfer->error_string );
    transfer->error_string = strdup( ne_get_error(session) );
    transfer->status_code = req_status->code;

    ne_request_destroy(req);
    return HBF_FAIL;
}
void get(const StringSlice& url, WriteTarget out) {
    static int inited = ne_sock_init();
    if (inited != 0) {
        throw Exception("ne_sock_init()");
    }

    CString cstr(url);
    ne_uri uri = {};
    if (ne_uri_parse(cstr.data(), &uri)) {
        throw Exception("ne_uri_parse()");
    }
    if (uri.port == 0) {
        uri.port = ne_uri_defaultport(uri.scheme);
    }
    unique_ptr<ne_uri, decltype(&ne_uri_free)> uri_free(&uri, ne_uri_free);

    unique_ptr<ne_session, decltype(&ne_session_destroy)> sess(
            ne_session_create(uri.scheme, uri.host, uri.port), ne_session_destroy);
    unique_ptr<ne_request, decltype(&ne_request_destroy)> req(
            ne_request_create(sess.get(), "GET", uri.path), ne_request_destroy);

    ne_userdata userdata = {out};
    ne_add_response_body_reader(req.get(), accept, reader, &userdata);

    auto err = ne_request_dispatch(req.get());
    if (err != NE_OK) {
        throw Exception("ne_request_dispatch()");
    }

    auto* st = ne_get_status(req.get());
    if (st->code != 200) {
        throw Exception(st->code);
    }
}
static GByteArray*
_download_to_gba(ne_session *session, const gchar *path_url, GError **error)
{
    GByteArray *gba;
    ne_request *http_request;

    DEBUG("About to download [%s] into a memory buffer", path_url);

    gba = g_byte_array_new();
    http_request = _build_request(session, path_url);
    ne_add_response_body_reader(http_request, ne_accept_2xx, read_to_gba, gba);

    switch (ne_request_dispatch(http_request)) {
    case NE_OK:
        if (ne_get_status(http_request)->klass != 2) {
            GSETERROR (error, "Failed to download '%s': %s", path_url, ne_get_error(session));
            g_byte_array_free(gba, TRUE);
            gba = NULL;
        }
        break;
    case NE_AUTH:
    case NE_CONNECT:
    case NE_TIMEOUT:
    case NE_ERROR:
    default:
        GSETERROR(error, "Failed download '%s': %s", path_url, ne_get_error(session));
        g_byte_array_free(gba, TRUE);
        gba = NULL;
        break;
    }

    ne_request_destroy(http_request);
    return gba;
}
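/* The read_to_gba() reader used above is not shown in the excerpt.  A
 * response-body reader registered with ne_add_response_body_reader() has
 * the ne_block_reader signature: it is handed each block of the response
 * body and returns 0 to continue or non-zero to abort the read.  A
 * plausible implementation for the GByteArray case might look like the
 * sketch below; the function name mirrors the one referenced above but
 * the body is reconstructed, not copied from the original project. */
#include <stddef.h>
#include <glib.h>

static int read_to_gba(void *userdata, const char *buf, size_t len)
{
    GByteArray *gba = userdata;

    if (len > 0)
        g_byte_array_append(gba, (const guint8 *) buf, len);

    return 0;  /* a non-zero return would abort the response read */
}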
int ne_getmodtime(ne_session *sess, const char *uri, time_t *modtime)
{
    ne_request *req = ne_request_create(sess, "HEAD", uri);
    const char *value;
    int ret;

    ret = ne_request_dispatch(req);

    value = ne_get_response_header(req, "Last-Modified");

    if (ret == NE_OK && ne_get_status(req)->klass != 2) {
        *modtime = -1;
        ret = NE_ERROR;
    }
    else if (value) {
        *modtime = ne_httpdate_parse(value);
    }
    else {
        *modtime = -1;
    }

    ne_request_destroy(req);

    return ret;
}
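/* Example use of ne_getmodtime() above.  The session is assumed to have
 * been created as in the earlier sketch; ne_getmodtime() is declared in
 * ne_basic.h.  Note that the call can succeed (NE_OK) and still leave
 * *modtime at -1 when the server sent no Last-Modified header, so both
 * conditions are checked. */
#include <stdio.h>
#include <time.h>
#include <ne_basic.h>

static void print_modtime(ne_session *sess, const char *uri)
{
    time_t mtime;

    if (ne_getmodtime(sess, uri, &mtime) == NE_OK && mtime != (time_t) -1)
        printf("%s last modified: %s", uri, ctime(&mtime));
    else
        printf("%s: modification time unavailable\n", uri);
}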
/* PUT's from fd to URI */
int ne_put(ne_session *sess, const char *uri, int fd)
{
    ne_request *req;
    struct stat st;
    int ret;

    if (fstat(fd, &st)) {
        int errnum = errno;
        char buf[200];

        ne_set_error(sess, _("Could not determine file size: %s"),
                     ne_strerror(errnum, buf, sizeof buf));
        return NE_ERROR;
    }

    req = ne_request_create(sess, "PUT", uri);

#ifdef NE_HAVE_DAV
    ne_lock_using_resource(req, uri, 0);
    ne_lock_using_parent(req, uri);
#endif

    ne_set_request_body_fd(req, fd, 0, st.st_size);

    ret = ne_request_dispatch(req);

    if (ret == NE_OK && ne_get_status(req)->klass != 2)
        ret = NE_ERROR;

    ne_request_destroy(req);

    return ret;
}
int any_2xx_request(ne_session *sess, const char *uri)
{
    ne_request *req = ne_request_create(sess, "GET", uri);
    int ret = ne_request_dispatch(req);
    int klass = ne_get_status(req)->klass;

    ne_request_destroy(req);

    ONV(ret != NE_OK || klass != 2,
        ("request failed: %s", ne_get_error(sess)));

    return ret;
}
int any_2xx_request_body(ne_session *sess, const char *uri)
{
    ne_request *req = ne_request_create(sess, "GET", uri);
#define BSIZE 5000
    char *body = memset(ne_malloc(BSIZE), 'A', BSIZE);
    int ret;

    ne_set_request_body_buffer(req, body, BSIZE);

    ret = ne_request_dispatch(req);

    ne_free(body);

    ONV(ret != NE_OK || ne_get_status(req)->klass != 2,
        ("request failed: %s", ne_get_error(sess)));

    ne_request_destroy(req);

    return ret;
}
int ne_options(ne_session *sess, const char *uri, ne_server_capabilities *caps)
{
    ne_request *req = ne_request_create(sess, "OPTIONS", uri);
    int ret = ne_request_dispatch(req);
    const char *header = ne_get_response_header(req, "DAV");

    if (header)
        parse_dav_header(header, caps);

    if (ret == NE_OK && ne_get_status(req)->klass != 2) {
        ret = NE_ERROR;
    }

    ne_request_destroy(req);

    return ret;
}
/* Perform a conditional PUT request with given If: header value,
 * placing response status-code in *code and class in *klass.  Fails
 * if requests cannot be dispatched. */
static int conditional_put(const char *ifhdr, int *klass, int *code)
{
    ne_request *req;

    req = ne_request_create(i_session, "PUT", res);

    ne_set_request_body_fd(req, i_foo_fd, 0, i_foo_len);

    ne_print_request_header(req, "If", "%s", ifhdr);

    ONMREQ("PUT", res, ne_request_dispatch(req));

    if (code) *code = ne_get_status(req)->code;
    if (klass) *klass = ne_get_status(req)->klass;

    ne_request_destroy(req);
    return OK;
}
static gboolean
_ne_request(const char *host, int port, const char *target, const char *method,
        GSList *headers, GError **err)
{
    GRID_TRACE("%s", __FUNCTION__);
    gboolean result = FALSE;
    ne_session* session = ne_session_create("http", host, port);
    ne_set_connect_timeout(session, 10);
    ne_set_read_timeout(session, 30);

    GRID_DEBUG("%s http://%s:%d%s", method, host, port, target);
    ne_request* req = ne_request_create(session, method, target);
    if (NULL != req) {
        for (GSList *l = headers; l; l = l->next) {
            gchar **toks = g_strsplit(l->data, ":", 2);
            ne_add_request_header(req, toks[0], toks[1]);
            g_strfreev(toks);
        }
        switch (ne_request_dispatch(req)) {
            case NE_OK:
                if (ne_get_status(req)->klass != 2) {
                    *err = NEWERROR(0, "cannot %s '%s' (%s)", method,
                            target, ne_get_error(session));
                } else {
                    result = TRUE;
                }
                break;
            case NE_AUTH:
            case NE_CONNECT:
            case NE_TIMEOUT:
            case NE_ERROR:
            default:
                *err = NEWERROR(0, "unexpected error from the WebDAV server (%s)",
                        ne_get_error(session));
                break;
        }
        ne_request_destroy(req);
    } else {
        // This should be an assertion
        *err = NEWERROR(0, "Failed to create request");
    }

    ne_session_destroy (session);
    return result;
}
static int large_put(void)
{
    ne_request *req = ne_request_create(i_session, "PUT", path);
    int count, ret;

#ifdef NE_LFS
    ne_set_request_body_provider64(req, TOTALSIZE, provider, &count);
#else
    ne_set_request_body_provider(req, TOTALSIZE, provider, &count);
#endif

    ret = ne_request_dispatch(req);

    ONNREQ("large PUT request", ret || ne_get_status(req)->klass != 2);

    ne_request_destroy(req);

    return OK;
}
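/* The provider() callback used above is not shown in the excerpt.  A
 * request-body provider registered with ne_set_request_body_provider()
 * is called with buflen == 0 before the body is (re)sent, where it
 * should reset its state and return 0, and with buflen > 0 to fetch the
 * next block, returning the number of bytes written, 0 at end of body,
 * or a negative value to abort.  The sketch below streams TOTALSIZE
 * bytes of filler; it is a reconstruction under those assumptions, not
 * the original test code, and TOTALSIZE is defined here only for the
 * sake of the sketch. */
#include <string.h>
#include <sys/types.h>

#define TOTALSIZE (64 * 1024)   /* assumed size for this sketch */

static ssize_t provider(void *userdata, char *buffer, size_t buflen)
{
    int *remaining = userdata;

    if (buflen == 0) {
        /* body is about to be (re)sent: rewind */
        *remaining = TOTALSIZE;
        return 0;
    }

    if (*remaining == 0)
        return 0;               /* end of body */

    if (buflen > (size_t) *remaining)
        buflen = (size_t) *remaining;

    memset(buffer, 'A', buflen);
    *remaining -= (int) buflen;

    return (ssize_t) buflen;
}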
int s3_head_object(S3 *s3, const char *bucket, const char *key, S3ObjectInfo *oi)
{
    ne_request *req;
    int err;

    if (!s3) return -1;
    if (!bucket) return -1;

    s3_begin_session(s3);

    req = s3_new_request(s3, "HEAD", bucket, key, NULL, NULL);

    // send to server
    err = ne_request_dispatch(req);
    if (err != NE_OK) err = -EIO;

    if (ne_get_status(req)->code != 200) {
        s3_handle_error_response(s3, req);

        if (ne_get_status(req)->code == 404)
            err = -ENOENT;
        else
            err = -EACCES;
    } else if (oi) {
        const char *str;

        str = ne_get_response_header(req, "Content-Length");
        if (str) oi->content_length = strtol(str, NULL, 10);

        str = ne_get_response_header(req, "Content-Type");
        if (str) {
            strncpy(oi->content_type, str, 31);
            oi->content_type[31] = 0;
        }

        str = ne_get_response_header(req, "ETag");
        if (str) {
            strncpy(oi->etag, str, 79);
            oi->etag[79] = 0;
        }
    }

    ne_request_destroy(req);
    s3_end_session(s3);

    return err;
}
/* sends a small segment of the file from a high offset. */
static int send_high_offset(void)
{
    int ret, fd = open64(SPARSE, O_RDONLY);
    ne_session *sess;
    ne_request *req;

    ONN("could not open sparse file", fd < 0);

    CALL(make_session(&sess, serve_check_body, NULL));

    req = ne_request_create(sess, "PUT", "/sparse");
    ne_set_request_body_fd64(req, fd, point, strlen(data));
    ret = ne_request_dispatch(req);
    CALL(await_server());

    ONV(ret != NE_OK || ne_get_status(req)->klass != 2,
        ("request failed: %s", ne_get_error(sess)));

    ne_request_destroy(req);
    ne_session_destroy(sess);
    close(fd);

    return OK;
}
/* Normally, this module uses get and put. But for creation of new files
 * with owncloud_creat, write is still needed. */
static ssize_t owncloud_write(csync_vio_method_handle_t *fhandle, const void *buf, size_t count)
{
    struct transfer_context *writeCtx;
    int rc = 0;
    int neon_stat;
    const ne_status *status;

    writeCtx = (struct transfer_context*) fhandle;

    if (fhandle == NULL) {
        errno = EBADF;
        return -1;   /* the handle must not be dereferenced below */
    }

    ne_set_request_body_buffer(writeCtx->req, buf, count );

    /* Start the request. */
    neon_stat = ne_request_dispatch( writeCtx->req );
    set_errno_from_neon_errcode( neon_stat );

    status = ne_get_status( writeCtx->req );
    if( status->klass != 2 ) {
        DEBUG_WEBDAV("sendfile request failed with http status %d!", status->code);
        set_errno_from_http_errcode( status->code );
        /* decide if soft error or hard error that stops the whole sync. */
        /* Currently all problems concerning one file are soft errors */
        if( status->klass == 4 /* Forbidden and stuff, soft error */ ) {
            rc = 1;
        } else if( status->klass == 5 /* Server errors and such */ ) {
            rc = 1; /* No Abort on individual file errors. */
        } else {
            rc = 1;
        }
    } else {
        DEBUG_WEBDAV("write request all ok, result code %d", status->code);
    }

    return rc;
}
int ne_unlock(ne_session *sess, const struct ne_lock *lock)
{
    ne_request *req = ne_request_create(sess, "UNLOCK", lock->uri.path);
    int ret;

    ne_print_request_header(req, "Lock-Token", "<%s>", lock->token);

    /* UNLOCK of a lock-null resource removes the resource from the
     * parent collection; so an UNLOCK may modify the parent
     * collection. (somewhat counter-intuitive, and not easily derived
     * from 2518.) */
    ne_lock_using_parent(req, lock->uri.path);

    ret = ne_request_dispatch(req);

    if (ret == NE_OK && ne_get_status(req)->klass != 2) {
        ret = NE_ERROR;
    }

    ne_request_destroy(req);

    return ret;
}
static char *
_check_chunk(const char *cid)
{
    ne_session *session = NULL;
    ne_request *request = NULL;
    GString *str = g_string_new("");

    char **split = g_strsplit(cid, "/", 0);
    char **addr_tok = g_strsplit(split[2], ":", 2);

    if (NULL != (session = ne_session_create("http", addr_tok[0], atoi(addr_tok[1])))) {
        ne_set_connect_timeout(session, 10);
        ne_set_read_timeout(session, 30);

        /* FIXME: this blindly assumes the strrchr() call succeeds */
        if (NULL != (request = ne_request_create(session, "HEAD", strrchr(cid, '/')))) {
            switch (ne_request_dispatch(request)) {
                case NE_OK:
                    if (ne_get_status(request)->klass != 2) {
                        g_string_append_printf(str, "(Chunk unavailable : %s)",
                                ne_get_error(session));
                    }
                    break;
                default:
                    g_string_append_printf(str, "(Chunk unavailable : %s)",
                            ne_get_error(session));
            }
            ne_request_destroy(request);
        }
        ne_session_destroy(session);
    }

    g_strfreev(addr_tok);
    g_strfreev(split);

    return g_string_free(str, FALSE);
}
int ne_acl_set(ne_session *sess, const char *uri, ne_acl_entry *entries, int numentries)
{
    int ret;
    ne_request *req = ne_request_create(sess, "ACL", uri);
    ne_buffer *body = acl_body(entries, numentries);

#ifdef NE_HAVE_DAV
    ne_lock_using_resource(req, uri, 0);
#endif

    ne_set_request_body_buffer(req, body->data, ne_buffer_size(body));
    ne_add_request_header(req, "Content-Type", NE_XML_MEDIA_TYPE);

    ret = ne_request_dispatch(req);

    ne_buffer_destroy(body);

    if (ret == NE_OK && ne_get_status(req)->code == 207) {
        ret = NE_ERROR;
    }

    ne_request_destroy(req);
    return ret;
}
static gboolean
_rawx_update_chunk_attrs(chunk_id_t *cid, GSList *attrs, GError **err)
{
    ne_session *s = NULL;
    ne_request *r = NULL;
    int ne_rc;
    gboolean result = FALSE;

    gchar dst[128];
    guint16 port = 0;
    GString *req_str = NULL;
    char idstr[65];

    if (!addr_info_get_addr(&(cid->addr), dst, sizeof(dst), &port))
        return result;

    s = ne_session_create("http", dst, port);
    if (!s) {
        GSETERROR(err, "Failed to create session to rawx %s:%d", dst, port);
        return result;
    }

    ne_set_connect_timeout(s, 10);
    ne_set_read_timeout(s, 30);

    req_str = g_string_new("/rawx/chunk/set/");
    bzero(idstr, sizeof(idstr));
    buffer2str(&(cid->id), sizeof(cid->id), idstr, sizeof(idstr));
    req_str = g_string_append(req_str, idstr);
    GRID_TRACE("Calling %s", req_str->str);

    r = ne_request_create(s, "GET", req_str->str);
    if (!r) {
        goto end_attr;
    }

    for (; attrs != NULL; attrs = attrs->next) {
        struct chunk_attr_s *attr = attrs->data;
        ne_add_request_header(r, attr->key, attr->val);
    }

    switch (ne_rc = ne_request_dispatch(r)) {
        case NE_OK:
            result = TRUE;
            break;
        case NE_ERROR:
            GSETCODE(err, 500, "Request NE_ERROR");
            break;
        case NE_TIMEOUT:
            GSETCODE(err, 500, "Request Timeout");
            break;
        case NE_CONNECT:
            GSETCODE(err, 500, "Request Connection timeout");
            break;
        default:
            GSETCODE(err, 500, "Request failed");
            break;
    }

end_attr:
    if (NULL != req_str)
        g_string_free(req_str, TRUE);
    if (NULL != r)
        ne_request_destroy(r);
    if (NULL != s)
        ne_session_destroy(s);

    return result;
}
svn_error_t *
svn_ra_neon__request_dispatch(int *code_p,
                              svn_ra_neon__request_t *req,
                              apr_hash_t *extra_headers,
                              const char *body,
                              int okay_1,
                              int okay_2,
                              apr_pool_t *pool)
{
  ne_xml_parser *error_parser;
  const ne_status *statstruct;

  /* add any extra headers passed in by caller. */
  if (extra_headers != NULL)
    {
      apr_hash_index_t *hi;
      for (hi = apr_hash_first(pool, extra_headers);
           hi; hi = apr_hash_next(hi))
        {
          const void *key;
          void *val;
          apr_hash_this(hi, &key, NULL, &val);
          ne_add_request_header(req->ne_req,
                                (const char *) key, (const char *) val);
        }
    }

  /* Certain headers must be transmitted unconditionally with every
     request; see issue #3255 ("mod_dav_svn does not pass client
     capabilities to start-commit hooks") for why.  It's okay if one of
     these headers was already added via extra_headers above -- they
     are all idempotent headers.

     Note that at most one could have been sent via extra_headers,
     because extra_headers is a hash and the key would be the same for
     all of them: "DAV".  In a just and righteous world, extra_headers
     would be an array, not a hash, so that callers could send the same
     header with different values too.  But, apparently, that hasn't
     been necessary yet. */
  ne_add_request_header(req->ne_req, "DAV", SVN_DAV_NS_DAV_SVN_DEPTH);
  ne_add_request_header(req->ne_req, "DAV", SVN_DAV_NS_DAV_SVN_MERGEINFO);
  ne_add_request_header(req->ne_req, "DAV", SVN_DAV_NS_DAV_SVN_LOG_REVPROPS);

  if (body)
    ne_set_request_body_buffer(req->ne_req, body, strlen(body));

  /* attach a standard <D:error> body parser to the request */
  error_parser = error_parser_create(req);

  if (req->ne_sess == req->sess->ne_sess) /* We're consuming 'session 1' */
    req->sess->main_session_busy = TRUE;

  /* run the request, see what comes back. */
  req->rv = ne_request_dispatch(req->ne_req);

  if (req->ne_sess == req->sess->ne_sess) /* We're done consuming 'session 1' */
    req->sess->main_session_busy = FALSE;

  /* Save values from the request */
  statstruct = ne_get_status(req->ne_req);
  req->code_desc = apr_pstrdup(pool, statstruct->reason_phrase);
  req->code = statstruct->code;

  /* If we see a successful request that used authentication, we should store
     the credentials for future use. */
  if (req->sess->auth_used && statstruct->code < 400)
    {
      req->sess->auth_used = FALSE;
      SVN_ERR(svn_ra_neon__maybe_store_auth_info(req->sess, pool));
    }

  if (code_p)
    *code_p = req->code;

  if (!req->marshalled_error)
    SVN_ERR(req->err);

  /* If the status code was one of the two that we expected, then go
     ahead and return now.  IGNORE any marshalled error. */
  if (req->rv == NE_OK && (req->code == okay_1 || req->code == okay_2))
    return SVN_NO_ERROR;

  /* Any other errors?  Report them */
  SVN_ERR(req->err);

  SVN_ERR(svn_ra_neon__check_parse_error(req->method, error_parser, req->url));

  /* We either have a neon error, or some other error
     that we didn't expect. */
  return generate_error(req, pool);
}
/* Gets a file from the owncloud url to the open file descriptor. */
static int owncloud_get(csync_vio_method_handle_t *flocal,
                        csync_vio_method_handle_t *fremote,
                        csync_vio_file_stat_t *vfs)
{
    int rc = 0;
    int neon_stat;
    const ne_status *status;
    int fd;

    struct transfer_context *write_ctx = (struct transfer_context*) fremote;

    (void) vfs; /* stat information of the source file */

    fd = csync_vio_getfd(flocal);
    if (fd == -1) {
        errno = EINVAL;
        return -1;
    }

    /* GET a file to the open file descriptor */
    if( write_ctx == NULL ) {
        errno = EINVAL;
        return -1;
    }

    if( write_ctx->req == NULL ) {
        errno = EINVAL;
        return -1;
    }

    DEBUG_WEBDAV(" -- GET on %s", write_ctx->url);

    write_ctx->fd = fd;

    /* Allow compressed content by setting the header */
    ne_add_request_header( write_ctx->req, "Accept-Encoding", "gzip,deflate" );

    /* hook called before the content is parsed to set the correct reader,
     * either the compressed- or uncompressed reader.
     */
    ne_hook_post_headers( dav_session.ctx, install_content_reader, write_ctx );

    neon_stat = ne_request_dispatch(write_ctx->req );

    /* possible return codes are:
     *  NE_OK, NE_AUTH, NE_CONNECT, NE_TIMEOUT, NE_ERROR (from ne_request.h)
     */
    if( neon_stat != NE_OK ) {
        set_errno_from_neon_errcode(neon_stat);
        DEBUG_WEBDAV("Error GET: Neon: %d, errno %d", neon_stat, errno);
        rc = -1;
    } else {
        status = ne_get_status( write_ctx->req );
        if( status->klass != 2 ) {
            DEBUG_WEBDAV("sendfile request failed with http status %d!", status->code);
            set_errno_from_http_errcode( status->code );
            /* decide if soft error or hard error that stops the whole sync. */
            /* Currently all problems concerning one file are soft errors */
            if( status->klass == 4 /* Forbidden and stuff, soft error */ ) {
                rc = 1;
            } else if( status->klass == 5 /* Server errors and such */ ) {
                rc = 1; /* No Abort on individual file errors. */
            } else {
                rc = 1;
            }
        } else {
            DEBUG_WEBDAV("http request all cool, result code %d (%s)", status->code,
                         status->reason_phrase ? status->reason_phrase : "<empty>");
        }
    }

    /* delete the hook again, otherwise they get chained as they are with the session */
    ne_unhook_post_headers( dav_session.ctx, install_content_reader, write_ctx );

    /* if the compression handle is set through the post_header hook, delete it. */
    if( write_ctx->decompress ) {
        ne_decompress_destroy( write_ctx->decompress );
    }

    return rc;
}
static gboolean
_download_to_file(ne_session *session, const gchar *path_url, const gchar *path_local, GError **error)
{
    gchar *dirname, path_tmp[2048];
    gboolean rc = FALSE;
    int rc_dispatch;
    FILE *stream_out;
    ne_request *http_request;

    g_snprintf(path_tmp, sizeof(path_tmp), "%s.%d.%ld", path_local, getpid(), time(0));
    DEBUG("About to download [%s] into [%s]", path_url, path_tmp);

    /* create the destination */
    dirname = g_path_get_dirname(path_tmp);
    if (!dirname) {
        GSETERROR(error, "Failed to extract the dirname of '%s'", path_tmp);
        return FALSE;
    }
    if (-1 == g_mkdir_with_parents(dirname, 0755)) {
        g_free(dirname);
        GSETERROR(error, "Failed to create the dirname of '%s' : %s", path_tmp, strerror(errno));
        return FALSE;
    }
    g_free(dirname);

    /* open the destination */
    stream_out = fopen(path_tmp, "w");
    if (!stream_out) {
        GSETERROR(error, "Failed to open '%s' in write mode : %s", path_local, strerror(errno));
        return FALSE;
    }

    http_request = _build_request(session, path_url);
    ne_add_response_body_reader(http_request, ne_accept_2xx, read_to_stream, stream_out);

    switch (rc_dispatch = ne_request_dispatch(http_request)) {
    case NE_OK:
        if (ne_get_status(http_request)->klass != 2) {
            GSETERROR(error, "Failed to download '%s': %s", path_url, ne_get_error(session));
            goto label_error;
        }
        break;
    case NE_AUTH:
    case NE_CONNECT:
    case NE_TIMEOUT:
    case NE_ERROR:
        GSETERROR(error, "Failed download '%s' (rc=%d) : %s", path_url, rc_dispatch, ne_get_error(session));
        goto label_error;
    }

    if (-1 == g_rename(path_tmp, path_local)) {
        GSETERROR(error, "Failed to commit the temporary download file '%s' : %s", path_tmp, strerror(errno));
        goto label_error;
    }

    g_chmod(path_local, 0644);
    DEBUG("Download of '%s' succeeded", path_url);
    rc = TRUE;

label_error:
    ne_request_destroy(http_request);
    fclose(stream_out);
    if (g_file_test(path_tmp, G_FILE_TEST_IS_REGULAR))
        g_remove(path_tmp);
    return rc;
}
static const char* owncloud_get_etag( const char *path )
{
    ne_request *req = NULL;
    const char *header = NULL;
    char *uri = _cleanPath(path);
    char *cbuf = NULL;
    csync_vio_file_stat_t *fs = NULL;
    bool doHeadRequest = false;

    if (_id_cache.uri && c_streq(path, _id_cache.uri)) {
        header = _id_cache.id;
    }

    doHeadRequest = false; /* ownCloud server doesn't have good support for HEAD yet */

    if( !header && doHeadRequest ) {
        int neon_stat;
        /* Perform a HEAD request to the resource. HEAD delivers the
         * ETag header back. */
        req = ne_request_create(dav_session.ctx, "HEAD", uri);

        neon_stat = ne_request_dispatch(req);
        set_errno_from_neon_errcode( neon_stat );

        header = ne_get_response_header(req, "etag");
    }

    /* If the request went wrong or the server did not respond correctly
     * (that can happen for collections) a stat call is done which translates
     * into a PROPFIND request. */
    if( ! header ) {
        /* ... and do a stat call. */
        fs = csync_vio_file_stat_new();
        if(fs == NULL) {
            DEBUG_WEBDAV( "owncloud_get_etag: memory fault.");
            errno = ENOMEM;
            return NULL;
        }
        if( owncloud_stat( path, fs ) == 0 ) {
            header = fs->etag;
        }
    }

    /* In case the result is surrounded by "" cut them away. */
    if( header ) {
        cbuf = csync_normalize_etag(header);
    }

    /* fix server problem: If we end up with an empty string, set something strange... */
    if( c_streq(cbuf, "") || c_streq(cbuf, "\"\"") ) {
        SAFE_FREE(cbuf);
        cbuf = c_strdup("empty_etag");
    }

    DEBUG_WEBDAV("Get file ID for %s: %s", path, cbuf ? cbuf : "<null>");

    if( fs ) csync_vio_file_stat_destroy(fs);
    if( req ) ne_request_destroy(req);
    SAFE_FREE(uri);

    return cbuf;
}
/*
 * perform one transfer of one block.
 * returns HBF_TRANSFER_SUCCESS if the transfer of this block was a success
 * returns HBF_SUCCESS if the server acknowledged that it received all the blocks
 */
static int _hbf_dav_request(hbf_transfer_t *transfer, ne_request *req, int fd, hbf_block_t *blk )
{
    Hbf_State state = HBF_TRANSFER_SUCCESS;
    int res;
    const ne_status *req_status = NULL;
    const char *etag = NULL;

    (void) transfer;

    if( ! (blk && req) ) return HBF_PARAM_FAIL;

    ne_set_request_body_fd(req, fd, blk->start, blk->size);
    DEBUG_HBF("Block: %d , Start: %" PRId64 " and Size: %" PRId64 "",
              blk->seq_number, blk->start, blk->size );

    res = ne_request_dispatch(req);

    req_status = ne_get_status( req );

    switch(res) {
    case NE_OK:
        blk->state = HBF_TRANSFER_FAILED;
        state = HBF_FAIL;
        etag = 0;
        if( req_status->klass == 2 ) {
            state = HBF_TRANSFER_SUCCESS;
            blk->state = HBF_TRANSFER_SUCCESS;
            etag = ne_get_response_header(req, "ETag");
            if (etag && etag[0]) {
                /* When there is an etag, it means the transfer was complete */
                state = HBF_SUCCESS;

                if( etag[0] == '"' && etag[ strlen(etag)-1] == '"') {
                    int len = strlen( etag )-2;
                    blk->etag = malloc( len+1 );
                    strncpy( blk->etag, etag+1, len );
                    blk->etag[len] = '\0';
                } else {
                    blk->etag = strdup( etag );
                }
            } else {
                /* DEBUG_HBF("OOOOOOOO No etag returned!"); */
            }

            /* check if the server was able to set the mtime already. */
            etag = ne_get_response_header(req, "X-OC-MTime");
            if( etag && strcmp(etag, "accepted") == 0 ) {
                /* the server acknowledged that the mtime was set. */
                transfer->modtime_accepted = 1;
            }

            etag = ne_get_response_header(req, "OC-FileID");
            if( etag ) {
                transfer->file_id = strdup( etag );
            }
        }
        break;
    case NE_AUTH:
        state = HBF_AUTH_FAIL;
        blk->state = HBF_TRANSFER_FAILED;
        break;
    case NE_PROXYAUTH:
        state = HBF_PROXY_AUTH_FAIL;
        blk->state = HBF_TRANSFER_FAILED;
        break;
    case NE_CONNECT:
        state = HBF_CONNECT_FAIL;
        blk->state = HBF_TRANSFER_FAILED;
        break;
    case NE_TIMEOUT:
        state = HBF_TIMEOUT_FAIL;
        blk->state = HBF_TRANSFER_FAILED;
        break;
    case NE_ERROR:
        state = HBF_FAIL;
        blk->state = HBF_TRANSFER_FAILED;
        break;
    }

    blk->http_result_code = req_status->code;
    if( req_status->reason_phrase ) {
        blk->http_error_msg = strdup(req_status->reason_phrase);
    }

    return state;
}
static csync_vio_method_handle_t *owncloud_open(const char *durl,
                                                int flags,
                                                mode_t mode)
{
    char *uri = NULL;
    char *dir = NULL;
    char getUrl[PATH_MAX];
    int put = 0;
    int rc = NE_OK;
#ifdef _WIN32
    int gtp = 0;
    char tmpname[13];
#endif

    struct transfer_context *writeCtx = NULL;
    csync_stat_t statBuf;
    memset( getUrl, '\0', PATH_MAX );

    (void) mode; /* unused on webdav server */
    DEBUG_WEBDAV(( "=> open called for %s\n", durl ));

    uri = _cleanPath( durl );
    if( ! uri ) {
        DEBUG_WEBDAV(("Failed to clean path for %s\n", durl ));
        errno = EACCES;
        rc = NE_ERROR;
    }

    if( rc == NE_OK )
        dav_connect( durl );

    if (flags & O_WRONLY) {
        put = 1;
    }
    if (flags & O_RDWR) {
        put = 1;
    }
    if (flags & O_CREAT) {
        put = 1;
    }

    if( rc == NE_OK && put ) {
        /* check if the dir name exists. Otherwise return ENOENT */
        dir = c_dirname( durl );
        if (dir == NULL) {
            errno = ENOMEM;
            return NULL;
        }
        DEBUG_WEBDAV(("Stating directory %s\n", dir ));
        if( c_streq( dir, _lastDir )) {
            DEBUG_WEBDAV(("Dir %s is there, we know it already.\n", dir));
        } else {
            if( owncloud_stat( dir, (csync_vio_method_handle_t*)(&statBuf) ) == 0 ) {
                DEBUG_WEBDAV(("Directory of file to open exists.\n"));
                SAFE_FREE( _lastDir );
                _lastDir = c_strdup(dir);
            } else {
                DEBUG_WEBDAV(("Directory %s of file to open does NOT exist.\n", dir ));
                /* the directory does not exist. That is an ENOENT */
                errno = ENOENT;
                SAFE_FREE( dir );
                return NULL;
            }
        }
    }

    writeCtx = c_malloc( sizeof(struct transfer_context) );
    writeCtx->bytes_written = 0;

    if( rc == NE_OK ) {
        /* open a temp file to store the incoming data */
#ifdef _WIN32
        memset( tmpname, '\0', 13 );
        gtp = GetTempPath( PATH_MAX, getUrl );
        DEBUG_WEBDAV(("win32 tmp path: %s\n", getUrl ));

        if ( gtp > MAX_PATH || (gtp == 0) ) {
            DEBUG_WEBDAV(("Failed to compute Win32 tmp path, trying /tmp\n"));
            strcpy( getUrl, "/tmp/");
        }
        strcpy( tmpname, "csync.XXXXXX" );
        if( c_tmpname( tmpname ) == 0 ) {
            _fmode = _O_BINARY;
            strcat( getUrl, tmpname );
            writeCtx->tmpFileName = c_strdup( getUrl );
            writeCtx->fd = open( writeCtx->tmpFileName, O_RDWR | O_CREAT | O_EXCL, 0600 );
        } else {
            writeCtx->fd = -1;
        }
#else
        writeCtx->tmpFileName = c_strdup( "/tmp/csync.XXXXXX" );
        writeCtx->fd = mkstemp( writeCtx->tmpFileName );
#endif
        DEBUG_WEBDAV(("opening temp directory %s: %d\n", writeCtx->tmpFileName, writeCtx->fd ));
        if( writeCtx->fd == -1 ) {
            DEBUG_WEBDAV(("Failed to open temp file, errno = %d\n", errno ));
            rc = NE_ERROR;
            /* errno is set by the mkstemp call above. */
        }
    }

    if( rc == NE_OK && put) {
        DEBUG_WEBDAV(("PUT request on %s!\n", uri));

        /* reset the write buffer */
        writeCtx->bytes_written = 0;
        writeCtx->fileWritten = 0;   /* flag to indicate if contents was pushed to file */

        writeCtx->req = ne_request_create(dav_session.ctx, "PUT", uri);
        writeCtx->method = "PUT";
    }

    if( rc == NE_OK && ! put ) {
        writeCtx->req = 0;
        writeCtx->method = "GET";

        /* Download the data into a local temp file. */
        /* the download via the get function requires a full uri */
        snprintf( getUrl, PATH_MAX, "%s://%s%s", ne_get_scheme( dav_session.ctx),
                  ne_get_server_hostport( dav_session.ctx ), uri );
        DEBUG_WEBDAV(("GET request on %s\n", getUrl ));

#define WITH_HTTP_COMPRESSION
#ifdef WITH_HTTP_COMPRESSION
        writeCtx->req = ne_request_create( dav_session.ctx, "GET", getUrl );

        /* Allow compressed content by setting the header */
        ne_add_request_header( writeCtx->req, "Accept-Encoding", "gzip,deflate" );

        /* hook called before the content is parsed to set the correct reader,
         * either the compressed- or uncompressed reader.
         */
        ne_hook_post_headers( dav_session.ctx, install_content_reader, writeCtx );

        /* actually do the request */
        rc = ne_request_dispatch(writeCtx->req );
        /* possible return codes are:
         *  NE_OK, NE_AUTH, NE_CONNECT, NE_TIMEOUT, NE_ERROR (from ne_request.h)
         */

        if( rc != NE_OK || (rc == NE_OK && ne_get_status(writeCtx->req)->klass != 2) ) {
            DEBUG_WEBDAV(("request_dispatch failed with rc=%d\n", rc ));
            if( rc == NE_OK ) rc = NE_ERROR;
            errno = EACCES;
        }

        /* delete the hook again, otherwise they get chained as they are with the session */
        ne_unhook_post_headers( dav_session.ctx, install_content_reader, writeCtx );

        /* if the compression handle is set through the post_header hook, delete it. */
        if( writeCtx->decompress ) {
            ne_decompress_destroy( writeCtx->decompress );
        }

        /* delete the request in any case */
        ne_request_destroy(writeCtx->req);
#else
        DEBUG_WEBDAV(("GET Compression not supported!\n"));
        rc = ne_get( dav_session.ctx, getUrl, writeCtx->fd );  /* FIX_ESCAPE? */
#endif
        if( rc != NE_OK ) {
            DEBUG_WEBDAV(("Download to local file failed: %d.\n", rc));
            errno = EACCES;
        }
        if( close( writeCtx->fd ) == -1 ) {
            DEBUG_WEBDAV(("Close of local download file failed.\n"));
            writeCtx->fd = -1;
            rc = NE_ERROR;
            errno = EACCES;
        }

        writeCtx->fd = -1;
    }

    if( rc != NE_OK ) {
        SAFE_FREE( writeCtx );
    }

    SAFE_FREE( uri );
    SAFE_FREE( dir );

    return (csync_vio_method_handle_t *) writeCtx;
}
static int owncloud_close(csync_vio_method_handle_t *fhandle)
{
    struct transfer_context *writeCtx;
    csync_stat_t st;
    int rc;
    int ret = 0;
    size_t len = 0;

    writeCtx = (struct transfer_context*) fhandle;

    if (fhandle == NULL) {
        errno = EBADF;
        return -1;   /* the handle must not be dereferenced below */
    }

    /* handle the PUT request, means write to the WebDAV server */
    if( ret != -1 && strcmp( writeCtx->method, "PUT" ) == 0 ) {

        /* if there is a valid file descriptor, close it, reopen in read mode and start the PUT request */
        if( writeCtx->fd > -1 ) {
            if( writeCtx->fileWritten && writeCtx->bytes_written > 0 ) { /* was content written to file? */
                /* push the rest of the buffer to file as well. */
                DEBUG_WEBDAV(("Write remaining %lu bytes to disk.\n",
                              (unsigned long) writeCtx->bytes_written ));
                len = write( writeCtx->fd, _buffer, writeCtx->bytes_written );
                if( len != writeCtx->bytes_written ) {
                    DEBUG_WEBDAV(("WRN: write wrote wrong number of remaining bytes\n"));
                }
                writeCtx->bytes_written = 0;
            }

            if( close( writeCtx->fd ) < 0 ) {
                DEBUG_WEBDAV(("Could not close file %s\n", writeCtx->tmpFileName ));
                errno = EBADF;
                ret = -1;
            }

            /* and open it again to read from */
#ifdef _WIN32
            _fmode = _O_BINARY;
#endif
            if( writeCtx->fileWritten ) {
                DEBUG_WEBDAV(("Putting file through file cache.\n"));
                /* we need to go the slow way and close and open the file and read from fd. */

                if (( writeCtx->fd = open( writeCtx->tmpFileName, O_RDONLY )) < 0) {
                    errno = EIO;
                    ret = -1;
                } else {
                    if (fstat( writeCtx->fd, &st ) < 0) {
                        DEBUG_WEBDAV(("Could not stat file %s\n", writeCtx->tmpFileName ));
                        errno = EIO;
                        ret = -1;
                    }

                    /* successfully opened for read. Now start the request via ne_put */
                    ne_set_request_body_fd( writeCtx->req, writeCtx->fd, 0, st.st_size );
                    rc = ne_request_dispatch( writeCtx->req );
                    if( close( writeCtx->fd ) == -1 ) {
                        errno = EBADF;
                        ret = -1;
                    }
                    if (rc == NE_OK) {
                        if ( ne_get_status( writeCtx->req )->klass != 2 ) {
                            DEBUG_WEBDAV(("Error - PUT status value no 2xx\n"));
                            errno = EIO;
                            ret = -1;
                        }
                    } else {
                        DEBUG_WEBDAV(("Error - put request on close failed: %d!\n", rc ));
                        errno = EIO;
                        ret = -1;
                    }
                }
            } else {
                /* all content is in the buffer. */
                DEBUG_WEBDAV(("Putting file through memory cache.\n"));
                ne_set_request_body_buffer( writeCtx->req, _buffer, writeCtx->bytes_written );
                rc = ne_request_dispatch( writeCtx->req );
                if( rc == NE_OK ) {
                    if ( ne_get_status( writeCtx->req )->klass != 2 ) {
                        DEBUG_WEBDAV(("Error - PUT status value no 2xx\n"));
                        errno = EIO;
                        ret = -1;
                    }
                } else {
                    DEBUG_WEBDAV(("Error - put request from memory failed: %d!\n", rc ));
                    errno = EIO;
                    ret = -1;
                }
            }
        }
        ne_request_destroy( writeCtx->req );
    } else {
        /* It's a GET request, not much to do in close. */
        if( writeCtx->fd > -1) {
            if( close( writeCtx->fd ) == -1 ) {
                errno = EBADF;
                ret = -1;
            }
        }
    }
    /* Remove the local file. */
    unlink( writeCtx->tmpFileName );

    /* free mem. Note that the request mem is freed by the ne_request_destroy call */
    SAFE_FREE( writeCtx->tmpFileName );
    SAFE_FREE( writeCtx );

    return ret;
}
GHashTable *
rawx_client_get_statistics(rawx_session_t * session, const gchar *url, GError ** err)
{
    int rc;
    gchar str_addr[64];
    gsize str_addr_size;
    GHashTable *parsed = NULL;
    GHashTable *result = NULL;
    GByteArray *buffer = NULL;
    ne_request *request = NULL;

    if (!session || !url) {
        GSETERROR(err, "Invalid parameter");
        return NULL;
    }

    ne_set_connect_timeout(session->neon_session, session->timeout.cnx / 1000);
    ne_set_read_timeout(session->neon_session, session->timeout.req / 1000);
    request = ne_request_create(session->neon_session, "GET", url);
    if (!request) {
        GSETERROR(err, "neon request creation error");
        return NULL;
    }

    buffer = g_byte_array_new();
    ne_add_response_body_reader(request, ne_accept_2xx, body_reader, buffer);

    switch (rc = ne_request_dispatch(request)) {
    case NE_OK:
        if (ne_get_status(request)->klass != 2) {
            GSETERROR(err, "RAWX returned an error");
            goto exit;
        }
        else if (!(parsed = body_parser(buffer, err))) {
            GSETERROR(err, "No statistics from the RAWX server");
            goto exit;
        }
        break;
    case NE_ERROR:
    case NE_TIMEOUT:
    case NE_CONNECT:
    case NE_AUTH:
        str_addr_size = addr_info_to_string(&(session->addr), str_addr, sizeof(str_addr));
        GSETERROR(err, "cannot download the stats from [%.*s]' (%s)",
            (int)str_addr_size, str_addr, ne_get_error(session->neon_session));
        goto exit;
    default:
        GSETERROR(err, "Unexpected return code from the neon library : %d", rc);
        goto exit;
    }

    result = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
    g_hash_table_foreach(parsed, _convert_string_to_double, result);

exit:
    if (buffer != NULL)
        g_byte_array_free(buffer, TRUE);
    if (request != NULL)
        ne_request_destroy(request);
    if (parsed != NULL)
        g_hash_table_destroy(parsed);

    return result;
}
gboolean
rawx_client_get_directory_data(rawx_session_t * session, hash_sha256_t chunk_id,
    struct content_textinfo_s *content, struct chunk_textinfo_s *chunk, GError ** error)
{
    int rc;
    gchar str_addr[64];
    gsize str_addr_size;
    gchar str_req[2048];
    gchar str_chunk_id[(sizeof(hash_sha256_t) * 2) + 1];
    GHashTable *result = NULL;
    GByteArray *buffer = NULL;
    ne_request *request = NULL;

    if (!session) {
        GSETERROR(error, "Invalid parameter");
        return FALSE;
    }

    memset(str_chunk_id, '\0', sizeof(str_chunk_id));
    oio_str_bin2hex(chunk_id, sizeof(hash_sha256_t), str_chunk_id, sizeof(str_chunk_id));

    memset(str_req, '\0', sizeof(str_req));
    snprintf(str_req, sizeof(str_req) - 1, "%s/%s", RAWX_REQ_GET_DIRINFO, str_chunk_id);

    ne_set_connect_timeout(session->neon_session, session->timeout.cnx / 1000);
    ne_set_read_timeout(session->neon_session, session->timeout.req / 1000);
    request = ne_request_create(session->neon_session, "HEAD", str_req);
    if (!request) {
        GSETERROR(error, "neon request creation error");
        return FALSE;
    }

    buffer = g_byte_array_new();
    ne_add_response_body_reader(request, ne_accept_2xx, body_reader, buffer);

    switch (rc = ne_request_dispatch(request)) {
    case NE_OK:
        if (ne_get_status(request)->klass != 2) {
            GSETERROR(error, "RAWX returned an error %d : %s",
                ne_get_status(request)->code, ne_get_status(request)->reason_phrase);
            goto error;
        }
        else if (!(result = header_parser(request))) {
            GSETERROR(error, "No attr from the RAWX server");
            goto error;
        }
        break;
    case NE_ERROR:
    case NE_TIMEOUT:
    case NE_CONNECT:
    case NE_AUTH:
        str_addr_size = addr_info_to_string(&(session->addr), str_addr, sizeof(str_addr));
        GSETERROR(error, "cannot download the data from [%.*s]' (%s)",
            (int)str_addr_size, str_addr, ne_get_error(session->neon_session));
        goto error;
    default:
        GSETERROR(error, "Unexpected return code from the neon library : %d", rc);
        goto error;
    }

    g_byte_array_free(buffer, TRUE);
    ne_request_destroy(request);

    /* Fill the textinfo structs */
    parse_chunkinfo_from_rawx(result, content, chunk);
    g_hash_table_destroy(result);

    return TRUE;

error:
    g_byte_array_free(buffer, TRUE);
    ne_request_destroy(request);

    return FALSE;
}
/*
 * Puts a file read from the open file descriptor to the ownCloud URL.
 */
static int owncloud_put(csync_vio_method_handle_t *flocal,
                        csync_vio_method_handle_t *fremote,
                        csync_vio_file_stat_t *vfs)
{
    int rc = 0;
    int neon_stat;
    const ne_status *status;
    csync_stat_t sb;
    struct transfer_context *write_ctx = (struct transfer_context*) fremote;
    int fd;
    ne_request *request = NULL;

    fd = csync_vio_getfd(flocal);
    if (fd == -1) {
        errno = EINVAL;
        return -1;
    }

    if( write_ctx == NULL ) {
        errno = EINVAL;
        return -1;
    }

    request = write_ctx->req;
    if( request == NULL) {
        errno = EINVAL;
        return -1;
    }

    /* stat the source-file to get the file size. */
    if( fstat( fd, &sb ) == 0 ) {
        if( sb.st_size != vfs->size ) {
            DEBUG_WEBDAV("WRN: Stat size differs from vfs size!");
        }

        /* Attach the request to the file descriptor */
        ne_set_request_body_fd(request, fd, 0, sb.st_size);
        DEBUG_WEBDAV("Put file size: %lld, variable sizeof: %ld",
                     (long long int) sb.st_size, sizeof(sb.st_size));

        /* Start the request. */
        neon_stat = ne_request_dispatch( write_ctx->req );
        set_errno_from_neon_errcode( neon_stat );

        status = ne_get_status( request );
        if( status->klass != 2 ) {
            DEBUG_WEBDAV("sendfile request failed with http status %d!", status->code);
            set_errno_from_http_errcode( status->code );
            /* decide if soft error or hard error that stops the whole sync. */
            /* Currently all problems concerning one file are soft errors */
            if( status->klass == 4 /* Forbidden and stuff, soft error */ ) {
                rc = 1;
            } else if( status->klass == 5 /* Server errors and such */ ) {
                rc = 1; /* No Abort on individual file errors. */
            } else {
                rc = 1;
            }
        } else {
            DEBUG_WEBDAV("http request all cool, result code %d", status->code);
        }
    } else {
        DEBUG_WEBDAV("Could not stat file descriptor");
        rc = 1;
    }

    return rc;
}
gs_status_t rawx_delete (gs_chunk_t *chunk, GError **err)
{
    char str_req_id [1024];
    char str_addr [STRLEN_ADDRINFO];
    char str_ci [STRLEN_CHUNKID];
    char cPath [CI_FULLPATHLEN];
    char str_hash[STRLEN_CHUNKHASH];

    ne_request *request=NULL;
    ne_session *session=NULL;

    memset(str_req_id, 0x00, sizeof(str_req_id));

    if (!chunk || !chunk->ci || !chunk->content)
    {
        GSETERROR (err,"Invalid parameter (bad chunk structure)");
        goto error_label;
    }

    addr_info_to_string (&(chunk->ci->id.addr), str_addr, sizeof(str_addr));
    chunk_id2str(chunk, str_ci, sizeof(str_ci));
    chunk_getpath (chunk, cPath, sizeof(cPath));
    DEBUG("about to delete %s on %s", str_ci, cPath);

    gscstat_tags_start(GSCSTAT_SERVICE_RAWX, GSCSTAT_TAGS_REQPROCTIME);

    session = rawx_opensession (chunk, err);
    if (!session)
    {
        GSETERROR (err, "Cannot open a webdav session");
        goto error_label;
    }

    /* Create a webdav request */
    do {
        request = ne_request_create (session, RAWX_DELETE, cPath);
        if (!request)
        {
            GSETERROR (err, "cannot create a %s WebDAV request", RAWX_DELETE);
            goto error_label;
        }
    } while (0);

    chunk_id2str (chunk, str_ci, sizeof(str_ci));
    chunk_gethash (chunk, str_hash, sizeof(str_hash));

    /* Add request header */
    add_req_id_header(request, str_req_id, sizeof(str_req_id)-1);
    ne_add_request_header  (request, "chunkid",     str_ci);
    ne_add_request_header  (request, "chunkhash",   str_hash);
    ne_add_request_header  (request, "containerid", C1_IDSTR(chunk->content));
    ne_add_request_header  (request, "contentpath", chunk->content->info.path);
    ne_print_request_header(request, "chunkpos",    "%"G_GUINT32_FORMAT, chunk->ci->position);
    ne_print_request_header(request, "chunknb",     "%"G_GUINT32_FORMAT, chunk->ci->nb);
    ne_print_request_header(request, "chunksize",   "%"G_GINT64_FORMAT, chunk->ci->size);
    ne_print_request_header(request, "contentsize", "%"G_GINT64_FORMAT, chunk->content->info.size);

    /* Now perform the request */
    switch (ne_request_dispatch (request))
    {
        case NE_OK:
            if (ne_get_status(request)->klass != 2) {
                GSETERROR (err, "cannot delete '%s' (%s) (ReqId:%s)",
                        cPath, ne_get_error(session), str_req_id);
                goto error_label;
            }
            DEBUG("chunk deletion finished (success) : %s", cPath);
            break;
        case NE_AUTH:
        case NE_CONNECT:
        case NE_TIMEOUT:
        case NE_ERROR:
            GSETERROR (err, "unexpected error from the WebDAV server (%s) (ReqId:%s)",
                    ne_get_error(session), str_req_id);
            goto error_label;
    }

    ne_request_destroy (request);
    ne_session_destroy (session);

    TRACE("%s deleted (ReqId:%s)", cPath, str_req_id);

    gscstat_tags_end(GSCSTAT_SERVICE_RAWX, GSCSTAT_TAGS_REQPROCTIME);

    return 1;

error_label:
    TRACE("could not delete %s", cPath);
    if (request)
        ne_request_destroy (request);
    if (session)
        ne_session_destroy (session);

    gscstat_tags_end(GSCSTAT_SERVICE_RAWX, GSCSTAT_TAGS_REQPROCTIME);

    return 0;
}