/* Copy a file from one path to another. */
void MVM_file_copy(MVMThreadContext *tc, MVMString *src, MVMString *dest) {
    uv_fs_t req;
    char * const a = MVM_string_utf8_encode_C_string(tc, src);
    const uv_file in_fd = uv_fs_open(tc->loop, &req, (const char *)a, O_RDONLY, 0, NULL);

    if (in_fd >= 0 && uv_fs_stat(tc->loop, &req, a, NULL) >= 0) {
        char * const b = MVM_string_utf8_encode_C_string(tc, dest);
        const uv_file out_fd = uv_fs_open(tc->loop, &req, (const char *)b,
            O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_MODE, NULL);
        MVM_free(a);

        if (out_fd >= 0
            && uv_fs_sendfile(tc->loop, &req, out_fd, in_fd, 0, req.statbuf.st_size, NULL) >= 0) {
            MVM_free(b);

            if (uv_fs_close(tc->loop, &req, in_fd, NULL) < 0) {
                uv_fs_close(tc->loop, &req, out_fd, NULL); /* Should close out_fd before throwing. */
                MVM_exception_throw_adhoc(tc, "Failed to close file: %s", uv_strerror(req.result));
            }

            if (uv_fs_close(tc->loop, &req, out_fd, NULL) < 0)
                MVM_exception_throw_adhoc(tc, "Failed to close file: %s", uv_strerror(req.result));

            return;
        }
        else
            MVM_free(b);
    }
    else
        MVM_free(a);

    MVM_exception_throw_adhoc(tc, "Failed to copy file: %s", uv_strerror(req.result));
}
// Synchronous readfile using the libuv APIs, exposed as a Duktape API function.
static duk_ret_t duv_loadfile(duk_context *ctx) {
  const char* path = duk_require_string(ctx, 0);
  uv_fs_t req;
  int fd = 0;
  uint64_t size;
  char* chunk;
  uv_buf_t buf;

  if (uv_fs_open(&loop, &req, path, O_RDONLY, 0644, NULL) < 0) goto fail;
  fd = req.result;
  if (uv_fs_fstat(&loop, &req, fd, NULL) < 0) goto fail;
  size = req.statbuf.st_size;
  chunk = duk_alloc(ctx, size);
  buf = uv_buf_init(chunk, size);
  if (uv_fs_read(&loop, &req, fd, &buf, 1, 0, NULL) < 0) goto fail;
  duk_push_lstring(ctx, chunk, size);
  duk_free(ctx, chunk);
  uv_fs_close(&loop, &req, fd, NULL);
  uv_fs_req_cleanup(&req);
  return 1;

fail:
  if (fd) uv_fs_close(&loop, &req, fd, NULL);
  uv_fs_req_cleanup(&req);
  duk_error(ctx, DUK_ERR_ERROR, "%s: %s: %s",
            uv_err_name(req.result), uv_strerror(req.result), path);
}
static void read_cb(uv_fs_t* req) {
  int r;
  TUV_ASSERT(req == &read_req);
  TUV_ASSERT(req->fs_type == UV_FS_READ);
  TUV_ASSERT(req->result >= 0); /* FIXME(bnoordhuis) Check if requested size? */
  read_cb_count++;
  uv_fs_req_cleanup(req);

  if (read_cb_count == 1) {
    TUV_ASSERT(strcmp(buf, test_buf) == 0);
#if !defined(__NUTTX__)
    r = uv_fs_ftruncate(loop, &ftruncate_req, open_req1.result, 7, ftruncate_cb);
#else
    /* Cannot truncate on NuttX, so just close the file. */
    r = uv_fs_close(loop, &close_req, open_req1.result, close_cb);
#endif
  } else {
#if !defined(__NUTTX__)
    TUV_ASSERT(strcmp(buf, "test-bu") == 0);
#else
    TUV_ASSERT(strcmp(buf, test_buf) == 0);
#endif
    r = uv_fs_close(loop, &close_req, open_req1.result, close_cb);
  }
  TUV_ASSERT(r == 0);
}
DLLEXPORT void jl_close_uv(uv_handle_t *handle)
{
    if (handle->type == UV_TTY)
        uv_tty_set_mode((uv_tty_t*)handle, 0);

    if ((handle->type == UV_NAMED_PIPE || handle->type == UV_TCP) &&
        uv_is_writable((uv_stream_t*)handle)) {
        uv_shutdown_t *req = (uv_shutdown_t*)malloc(sizeof(uv_shutdown_t));
        req->data = 0;
        /*
         * We are explicitly ignoring the error here for the following reason:
         * There are only two scenarios in which this returns an error:
         * a) The stream is already shut down, in which case we're likely
         *    in the process of closing this stream (since there's no other
         *    call to uv_shutdown).
         * b) The stream is already closed, in which case uv_close would
         *    cause an assertion failure.
         */
        uv_shutdown(req, (uv_stream_t*)handle, &jl_uv_shutdownCallback);
    }
    else if (handle->type == UV_FILE) {
        uv_fs_t req;
        jl_uv_file_t *fd = (jl_uv_file_t*)handle;
        if (fd->file != -1) {
            uv_fs_close(handle->loop, &req, fd->file, NULL);
            fd->file = -1;
        }
    }
    else if (!uv_is_closing((uv_handle_t*)handle)) {
        uv_close(handle, &jl_uv_closeHandle);
    }
}
void read_cb(uv_fs_t *read_req) {
  int err = 0;

  if (read_req->result < 0) {
    UV_CHECK(read_req->result, "uv_fs_read callback");
  }

  /* extracting our context from the read_req */
  context_t *context = read_req->data;

  /* 4. Report the contents of the buffer */
  //log_report("%s", read_req->bufs->base);
  lwlog_info("Read buf: %s", read_req->bufs->base);

  free(read_req->bufs->base);

  /* 5. Close the file descriptor (synchronously) */
  uv_fs_t close_req;
  err = uv_fs_close(uv_default_loop(), &close_req, context->open_req->result, NULL);
  if (err < 0) {
    UV_CHECK(abs(err), "uv_fs_close");
  }

  /* cleanup all requests and context */
  uv_fs_req_cleanup(context->open_req);
  uv_fs_req_cleanup(read_req);
  uv_fs_req_cleanup(&close_req);
  free(context);

  return;
}
void read_cb(uv_fs_t* read_req) {
  int r = 0;

  if (read_req->result < 0)
    CHECK(read_req->result, "uv_fs_read callback");

  /* extracting our context from the read_req */
  context_t* context = read_req->data;

  /* 4. Report the contents of the buffer */
  /*
   * Earlier variants (kept for reference):
   *   log_report("%s", read_req->bufsml->base);
   *   log_info("%s", read_req->bufsml->base);
   *   free(read_req->bufsml->base);
   *   log_report("%s", context->iov->base);
   *   log_info("%s", context->iov->base);
   * If I don't call log_report/log_info on context->iov, then valgrind
   * reports 0 errors!
   */
  log_report("%s", context->iov.base);
  log_info("%s", context->iov.base);

  /* 5. Close the file descriptor (synchronously) */
  uv_fs_t close_req;
  r = uv_fs_close(read_req->loop, &close_req, context->open_req->result, NULL);
  if (r < 0)
    CHECK(abs(r), "uv_fs_close");

  /* cleanup all requests and context */
  uv_fs_req_cleanup(context->open_req);
  uv_fs_req_cleanup(read_req);
  uv_fs_req_cleanup(&close_req);
  free(context);
}
void saveBlock(block_s * block) {
    // Get region path
    char * regionPath;
    int regionPathLength = setRegionPath(&regionPath, 0, 0);

    // Make the path
    makeDir(regionPath);

    // Get chunk path
    char * chunkPath;
    setChunkPath(&chunkPath, regionPath, regionPathLength, block->cx, block->cy);

    uv_fs_t open_req;
    uv_fs_t write_req;
    uv_fs_t close_req;

    // store type in buffer
    unsigned char typeBuffer[2];
    shortToBuffer(4, 0, typeBuffer);

    // Open (open_req.result is still valid after cleanup; uv_fs_req_cleanup
    // does not reset the result field)
    uv_fs_open(uv_default_loop(), &open_req, chunkPath, O_WRONLY | O_CREAT, 0700, NULL);
    uv_fs_req_cleanup(&open_req);

    // Write (old-style, pre-1.0 libuv signature with a raw buffer and length)
    uv_fs_write(uv_default_loop(), &write_req, open_req.result, typeBuffer, 2,
                (block->z * 512 + ((block->x * 2) + block->y * 16 * 2)), NULL);
    uv_fs_req_cleanup(&write_req);

    // Close
    uv_fs_close(uv_default_loop(), &close_req, open_req.result, NULL);
    uv_fs_req_cleanup(&close_req);

    free(regionPath);
    free(chunkPath);
}
static void touch_file(const char* name, unsigned int size) {
  uv_file file;
  uv_fs_t req;
  uv_buf_t buf;
  int r;
  unsigned int i;

  r = uv_fs_open(NULL, &req, name, O_WRONLY | O_CREAT | O_TRUNC,
                 S_IWUSR | S_IRUSR, NULL);
  uv_fs_req_cleanup(&req);
  ASSERT(r >= 0);
  file = r;

  buf = uv_buf_init("a", 1);

  /* Inefficient but simple. */
  for (i = 0; i < size; i++) {
    r = uv_fs_write(NULL, &req, file, &buf, 1, i, NULL);
    uv_fs_req_cleanup(&req);
    ASSERT(r >= 0);
  }

  r = uv_fs_close(NULL, &req, file, NULL);
  uv_fs_req_cleanup(&req);
  ASSERT(r == 0);
}
static bool adc_read_data(uint32_t pin, struct adc_msg_s* msg) {
  int32_t adc_number = ADC_GET_NUMBER(pin);
  char path[ADC_DEVICE_PATH_BUFFER_SIZE] = { 0 };
  adc_get_path(path, adc_number);

  const iotjs_environment_t* env = iotjs_environment_get();
  uv_loop_t* loop = iotjs_environment_loop(env);
  int result, close_result;

  // Open file
  uv_fs_t open_req;
  result = uv_fs_open(loop, &open_req, path, O_RDONLY, 0666, NULL);
  uv_fs_req_cleanup(&open_req);
  if (result < 0) {
    return false;
  }

  // Read value
  uv_fs_t read_req;
  uv_buf_t uvbuf = uv_buf_init((char*)msg, sizeof(*msg));
  result = uv_fs_read(loop, &read_req, open_req.result, &uvbuf, 1, 0, NULL);
  uv_fs_req_cleanup(&read_req);

  // Close file
  uv_fs_t close_req;
  close_result = uv_fs_close(loop, &close_req, open_req.result, NULL);
  uv_fs_req_cleanup(&close_req);

  if (result < 0 || close_result < 0) {
    return false;
  }

  DDDLOG("ADC Read - path: %s, value: %d", path, msg->am_data);
  return true;
}
/*
 * fs.close
 */
static int fs_close(lua_State* L) {
  FSR__SETUP
  int fd = luaL_checkint(L, 1);
  FSR__SET_OPT_CB(2, on_fs_callback)
  uv_fs_close(loop, req, fd, cb);
  FSR__TEARDOWN
}
static int lmz_reader_gc(lua_State *L) {
  lmz_file_t* zip = luaL_checkudata(L, 1, "miniz_reader");
  uv_fs_close(zip->loop, &(zip->req), zip->fd, NULL);
  uv_fs_req_cleanup(&(zip->req));
  mz_zip_reader_end(&(zip->archive));
  return 0;
}
JL_DLLEXPORT int jl_fs_close(int handle)
{
    uv_fs_t req;
    int ret = uv_fs_close(jl_io_loop, &req, handle, NULL);
    uv_fs_req_cleanup(&req);
    return ret;
}
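/*
 * The wrapper above shows the canonical synchronous libuv pattern that most
 * snippets in this collection follow: pass NULL as the callback so the call
 * completes before returning, check the (negative-on-error) result, and
 * always pair the request with uv_fs_req_cleanup(). Below is a minimal,
 * self-contained sketch of that pattern; it is illustrative only, and the
 * helper name and file path are invented for the example, not taken from
 * any project quoted here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <uv.h>

static int close_fd_sync(uv_loop_t* loop, uv_file fd) {
    uv_fs_t req;
    /* NULL callback => the request completes before the call returns. */
    int r = uv_fs_close(loop, &req, fd, NULL);
    if (r < 0)
        fprintf(stderr, "close failed: %s\n", uv_strerror(r));
    /* Release any memory libuv allocated for the request. */
    uv_fs_req_cleanup(&req);
    return r;
}

int main(void) {
    uv_fs_t open_req;
    int fd = uv_fs_open(uv_default_loop(), &open_req, "/tmp/example.txt",
                        O_RDONLY, 0, NULL);
    uv_fs_req_cleanup(&open_req);
    if (fd < 0)
        return 1;
    return close_fd_sync(uv_default_loop(), fd) < 0 ? 1 : 0;
}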
bool xmrig::CommonConfig::save()
{
    if (m_fileName.isNull()) {
        return false;
    }

    uv_fs_t req;
    const int fd = uv_fs_open(uv_default_loop(), &req, m_fileName.data(),
                              O_WRONLY | O_CREAT | O_TRUNC, 0644, nullptr);
    if (fd < 0) {
        return false;
    }

    uv_fs_req_cleanup(&req);

    rapidjson::Document doc;
    getJSON(doc);

    FILE *fp = fdopen(fd, "w");
    char buf[4096];
    rapidjson::FileWriteStream os(fp, buf, sizeof(buf));
    rapidjson::PrettyWriter<rapidjson::FileWriteStream> writer(os);
    doc.Accept(writer);

    fflush(fp);

    uv_fs_close(uv_default_loop(), &req, fd, nullptr);
    uv_fs_req_cleanup(&req);

    LOG_NOTICE("configuration saved to: \"%s\"", m_fileName.data());
    return true;
}
void FileRequestBaton::file_stated(uv_fs_t *req) {
    FileRequestBaton *ptr = (FileRequestBaton *)req->data;
    assert(ptr->thread_id == std::this_thread::get_id());

    if (req->result != 0 || ptr->canceled || !ptr->request) {
        // Stating failed or was canceled. We already have an open file handle
        // though, which we'll have to close.
        notify_error(req);

        uv_fs_req_cleanup(req);
        uv_fs_close(req->loop, req, ptr->fd, file_closed);
    } else {
#if UV_VERSION_MAJOR == 0 && UV_VERSION_MINOR <= 10
        const uv_statbuf_t *stat = static_cast<const uv_statbuf_t *>(req->ptr);
#else
        const uv_stat_t *stat = static_cast<const uv_stat_t *>(req->ptr);
#endif
        if (stat->st_size > std::numeric_limits<int>::max()) {
            // File is too large for us to open this way because uv_buf's only
            // support unsigned ints as maximum size.
            if (ptr->request) {
                ptr->request->response = util::make_unique<Response>();
                ptr->request->response->code = UV_EFBIG;
#if UV_VERSION_MAJOR == 0 && UV_VERSION_MINOR <= 10
                ptr->request->response->message = uv_strerror(uv_err_t {UV_EFBIG, 0});
#else
                ptr->request->response->message = uv_strerror(UV_EFBIG);
#endif
                ptr->request->notify();
            }

            uv_fs_req_cleanup(req);
            uv_fs_close(req->loop, req, ptr->fd, file_closed);
        } else {
            const unsigned int size = (unsigned int)(stat->st_size);
            ptr->body.resize(size);
            ptr->buffer = uv_buf_init(const_cast<char *>(ptr->body.data()), size);
            uv_fs_req_cleanup(req);
#if UV_VERSION_MAJOR == 0 && UV_VERSION_MINOR <= 10
            uv_fs_read(req->loop, req, ptr->fd, ptr->buffer.base, ptr->buffer.len, -1, file_read);
#else
            uv_fs_read(req->loop, req, ptr->fd, &ptr->buffer, 1, 0, file_read);
#endif
        }
    }
}
int uvc_fs_close(uvc_io *io) {
  io->handle->data = io;
  uv_fs_close(uvc_loop_default(), (uv_fs_t *)io->handle, io->file, uvc_fs_cb);
  io->cur = uvc_self();
  uvc_yield();
  uv_fs_req_cleanup((uv_fs_t *)io->handle);
  free(io->handle);
  return 0;
}
static void fsync_cb(uv_fs_t* req) {
  int r;
  ASSERT(req == &fsync_req);
  ASSERT(req->fs_type == UV_FS_FSYNC);
  ASSERT(req->result != -1);
  fsync_cb_count++;
  uv_fs_req_cleanup(req);
  r = uv_fs_close(loop, &close_req, open_req1.result, close_cb);
  ASSERT(r == 0);
}
static void ftruncate_cb(uv_fs_t* req) {
  int r;
  ASSERT(req == &ftruncate_req);
  ASSERT(req->fs_type == UV_FS_FTRUNCATE);
  ASSERT(req->result != -1);
  ftruncate_cb_count++;
  uv_fs_req_cleanup(req);
  r = uv_fs_close(loop, &close_req, open_req1.result, close_cb);
  ASSERT(r == 0);
}
static void destroy_response(http_response* response, int close_handle) {
  if (response->pbuf)
    free(response->pbuf);

  if (response->request)
    destroy_request(response->request, close_handle);

  if (response->fd != -1) {
    uv_fs_t close_req;
    uv_fs_close(loop, &close_req, response->fd, NULL);
  }

  free(response);
}
void FileObject::close(gc<Fiber> fiber)
{
  ASSERT(isOpen_, "IO library should not call close on a closed file.");

  // Mark the file closed immediately so other fibers can't try to use it.
  isOpen_ = false;

  FSTask* task = new FSTask(fiber);
  uv_fs_close(task->loop(), task->request(), file_, closeFileCallback);
}
void fileFinalize(void* data) {
  int fd = *(int*)data;

  // Already closed.
  if (fd == -1) return;

  uv_fs_t request;
  uv_fs_close(getLoop(), &request, fd, NULL);
  uv_fs_req_cleanup(&request);
}
void on_open(uv_fs_t* req) {
  int result = req->result;
  /* libuv 0.x error handling: check for -1 and fetch the error with
   * uv_last_error(). */
  if (result == -1) {
    fprintf(stderr, "Error on opening file: %s.\n",
            uv_strerror(uv_last_error(loop)));
  }
  uv_fs_req_cleanup(req);
  uv_fs_close(loop, &close_req, open_req.result, on_close);
}
void on_read(uv_fs_t *req) {
  if (req->result < 0) {
    fprintf(stderr, "Read error: %s\n", uv_strerror(req->result));
  }
  else if (req->result == 0) {
    uv_fs_t close_req;
    uv_fs_close(uv_default_loop(), &close_req, open_req.result, NULL);
  }
  else if (req->result > 0) {
    iov.len = req->result;
    uv_fs_write(uv_default_loop(), &write_req, 1, &iov, 1, -1, on_write);
  }
}
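/*
 * The callback above is one link in the async read/write chain familiar from
 * the "uvcat" example in the libuv book: on_open starts the first read,
 * on_read either writes the chunk to stdout or closes the file at EOF, and
 * on_write schedules the next read. A minimal, self-contained sketch of the
 * whole chain follows; the file name "input.txt" and the buffer size are
 * arbitrary choices for illustration, not part of the snippet above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <uv.h>

static uv_fs_t open_req, read_req, write_req;
static char buffer[1024];
static uv_buf_t iov;

static void on_read(uv_fs_t* req);

static void on_write(uv_fs_t* req) {
    ssize_t result = req->result;
    uv_fs_req_cleanup(req);
    if (result < 0) {
        fprintf(stderr, "Write error: %s\n", uv_strerror((int)result));
        return;
    }
    /* Continue reading from the current file position (offset -1). */
    uv_fs_read(uv_default_loop(), &read_req, open_req.result, &iov, 1, -1, on_read);
}

static void on_read(uv_fs_t* req) {
    ssize_t result = req->result;
    uv_fs_req_cleanup(req);
    if (result < 0) {
        fprintf(stderr, "Read error: %s\n", uv_strerror((int)result));
    } else if (result == 0) {
        /* EOF: close the descriptor synchronously. */
        uv_fs_t close_req;
        uv_fs_close(uv_default_loop(), &close_req, open_req.result, NULL);
        uv_fs_req_cleanup(&close_req);
    } else {
        /* Write what we just read to stdout (fd 1). */
        iov.len = (size_t)result;
        uv_fs_write(uv_default_loop(), &write_req, 1, &iov, 1, -1, on_write);
    }
}

static void on_open(uv_fs_t* req) {
    ssize_t result = req->result; /* the file descriptor on success */
    uv_fs_req_cleanup(req);
    if (result < 0) {
        fprintf(stderr, "Open error: %s\n", uv_strerror((int)result));
        return;
    }
    iov = uv_buf_init(buffer, sizeof(buffer));
    uv_fs_read(uv_default_loop(), &read_req, (uv_file)result, &iov, 1, -1, on_read);
}

int main(void) {
    uv_fs_open(uv_default_loop(), &open_req, "input.txt", O_RDONLY, 0, on_open);
    uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    uv_fs_req_cleanup(&open_req);
    return 0;
}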
JL_DLLEXPORT void jl_close_uv(uv_handle_t *handle)
{
    if (handle->type == UV_FILE) {
        uv_fs_t req;
        jl_uv_file_t *fd = (jl_uv_file_t*)handle;
        if (fd->file != -1) {
            uv_fs_close(handle->loop, &req, fd->file, NULL);
            fd->file = -1;
        }
        // synchronous (ok since the callback is known to not interact with any global state)
        jl_uv_closeHandle(handle);
        return;
    }

    if (handle->type == UV_NAMED_PIPE || handle->type == UV_TCP) {
#ifdef _OS_WINDOWS_
        if (((uv_stream_t*)handle)->stream.conn.shutdown_req) {
#else
        if (((uv_stream_t*)handle)->shutdown_req) {
#endif
            // don't close the stream while attempting a graceful shutdown
            return;
        }
        if (uv_is_writable((uv_stream_t*)handle)) {
            // attempt graceful shutdown of writable streams to give them a chance to flush first
            uv_shutdown_t *req = (uv_shutdown_t*)malloc(sizeof(uv_shutdown_t));
            req->data = 0;
            /*
             * We are explicitly ignoring the error here for the following reason:
             * There are only two scenarios in which this returns an error:
             * a) The stream is already shut down, in which case we're likely
             *    in the process of closing this stream (since there's no other
             *    call to uv_shutdown).
             * b) The stream is already closed, in which case uv_close would
             *    cause an assertion failure.
             */
            uv_shutdown(req, (uv_stream_t*)handle, &jl_uv_shutdownCallback);
            return;
        }
    }

    if (!uv_is_closing((uv_handle_t*)handle)) {
        // avoid double-closing the stream
        if (handle->type == UV_TTY)
            uv_tty_set_mode((uv_tty_t*)handle, UV_TTY_MODE_NORMAL);
        uv_close(handle, &jl_uv_closeHandle);
    }
}

JL_DLLEXPORT void jl_forceclose_uv(uv_handle_t *handle)
{
    uv_close(handle, &jl_uv_closeHandle);
}
static void create_file(const char* name) {
  int r;
  uv_file file;
  uv_fs_t req;

  r = uv_fs_open(NULL, &req, name, O_WRONLY | O_CREAT, S_IWUSR | S_IRUSR, NULL);
  ASSERT(r >= 0);
  file = r;
  uv_fs_req_cleanup(&req);
  r = uv_fs_close(NULL, &req, file, NULL);
  ASSERT(r == 0);
  uv_fs_req_cleanup(&req);
}
/* Closes the file. */
static MVMint64 closefh(MVMThreadContext *tc, MVMOSHandle *h) {
    MVMIOFileData *data = (MVMIOFileData *)h->body.data;
    uv_fs_t req;
    if (data->ds) {
        MVM_string_decodestream_destory(tc, data->ds);
        data->ds = NULL;
    }
    if (uv_fs_close(tc->loop, &req, data->fd, NULL) < 0) {
        data->fd = -1;
        MVM_exception_throw_adhoc(tc, "Failed to close filehandle: %s", uv_strerror(req.result));
    }
    data->fd = -1;
    return 0;
}
static void create_file(uv_loop_t* loop, const char* name) {
  int r;
  uv_file file;
  uv_fs_t req;

  r = uv_fs_open(loop, &req, name, O_WRONLY | O_CREAT, S_IWRITE | S_IREAD, NULL);
  ASSERT(r != -1);
  file = r;
  uv_fs_req_cleanup(&req);
  r = uv_fs_close(loop, &req, file, NULL);
  ASSERT(r == 0);
  uv_fs_req_cleanup(&req);
}
void on_read(uv_fs_t *req) {
    uv_fs_req_cleanup(req);
    if (req->result < 0) {
        /* libuv 0.x error handling via uv_last_error(). */
        fprintf(stderr, "Read error: %s\n",
                uv_strerror(uv_last_error(uv_default_loop())));
    }
    else if (req->result == 0) {
        uv_fs_t close_req;
        // synchronous
        uv_fs_close(uv_default_loop(), &close_req, open_req.result, NULL);
    }
    else {
        /* Old-style libuv write: raw buffer and length instead of uv_buf_t. */
        uv_fs_write(uv_default_loop(), &write_req, 1, buffer, req->result, -1, on_write);
    }
}
/* Early libuv API: fs calls here take no explicit loop argument. */
static void read_cb(uv_fs_t* req) {
  int r;
  ASSERT(req == &read_req);
  ASSERT(req->fs_type == UV_FS_READ);
  ASSERT(req->result != -1);
  read_cb_count++;
  uv_fs_req_cleanup(req);
  if (read_cb_count == 1) {
    ASSERT(strcmp(buf, test_buf) == 0);
    r = uv_fs_ftruncate(&ftruncate_req, open_req1.result, 7, ftruncate_cb);
  } else {
    ASSERT(strcmp(buf, "test-bu") == 0);
    r = uv_fs_close(&close_req, open_req1.result, close_cb);
  }
  ASSERT(r == 0);
}
static bool_t getPage(EFSRepoRef const repo, HTTPConnectionRef const conn,
                      HTTPMethod const method, strarg_t const URI) {
    if(HTTP_GET != method) return false;
    size_t pathlen = prefix("/", URI);
    if(!pathlen) return false;
    if(!pathterm(URI, (size_t)pathlen)) return false;
    EFSSessionRef const session = auth(repo, conn, method, URI+pathlen);
    if(!session) {
        HTTPConnectionSendStatus(conn, 403);
        return true;
    }

    // TODO: Parse querystring `q` parameter
    EFSFilterRef const filter = EFSFilterCreate(EFSTypeFilter);
    EFSFilterAddStringArg(filter, "text/html; charset=utf-8");

    EFSFileInfo *const files = EFSSessionCreateFileInfoList(session, filter, RESULTS_MAX);

    HTTPConnectionWriteResponse(conn, 200, "OK");
    HTTPConnectionWriteHeader(conn, "Content-Type", "text/html; charset=utf-8");
    HTTPConnectionWriteHeader(conn, "Transfer-Encoding", "chunked");
    HTTPConnectionBeginBody(conn);

    // TODO: Page header

    uv_fs_t req = { .data = co_active() };
    for(index_t i = 0; i < files->count; ++i) {
        // `path` is the per-file internal path, set up elsewhere in the
        // original code.
        uv_fs_open(loop, &req, path, O_RDONLY, 0400, async_fs_cb);
        co_switch(yield);
        uv_fs_req_cleanup(&req);
        uv_file const file = req.result;
        if(file < 0) continue;

        HTTPConnectionWriteChunkLength(conn, files->items[i].size);
        HTTPConnectionWriteFile(conn, file);

        uv_fs_close(loop, &req, file, async_fs_cb);
        co_switch(yield);
        uv_fs_req_cleanup(&req);

        HTTPConnectionWrite(conn, "\r\n", 2);
    }

    // TODO: Page trailer
    HTTPConnectionWriteChunkLength(conn, 0);
    HTTPConnectionWrite(conn, "\r\n", 2);
    HTTPConnectionEnd(conn);

    EFSFileInfoListFree(files);
    return true;
}
static int lluv_file_close(lua_State *L) {
  lluv_file_t *f = lluv_check_file(L, 1, 0);
  lluv_loop_t *loop = f->loop;

  if(IS_(f, OPEN)) {
    const char *path = NULL;
    int argc = 1;
    UNSET_(f, OPEN);
    if(!IS_(f, NOCLOSE)) {
      LLUV_PRE_FILE();
      err = uv_fs_close(loop->handle, &req->req, f->handle, cb);
      LLUV_POST_FILE();
    }
  }

  return 0;
}
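/*
 * Several snippets above (fs_close, FileObject::close, lluv_file_close) close
 * asynchronously by passing a completion callback rather than NULL. A minimal,
 * self-contained sketch of that variant follows; the file name and the
 * on_close callback name are illustrative, not taken from any of the projects
 * quoted here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <uv.h>

static uv_fs_t close_req;

static void on_close(uv_fs_t* req) {
    if (req->result < 0)
        fprintf(stderr, "close failed: %s\n", uv_strerror((int)req->result));
    else
        printf("file closed\n");
    /* The request may only be cleaned up once the callback has fired. */
    uv_fs_req_cleanup(req);
}

int main(void) {
    uv_fs_t open_req;
    int fd = uv_fs_open(uv_default_loop(), &open_req, "input.txt", O_RDONLY, 0, NULL);
    uv_fs_req_cleanup(&open_req);
    if (fd < 0)
        return 1;

    /* Non-NULL callback => uv_fs_close() returns immediately and the result
     * is delivered to on_close() when the loop runs. */
    uv_fs_close(uv_default_loop(), &close_req, fd, on_close);
    uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    return 0;
}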