END_TEST

START_TEST(test_getDatanodeReport)
{
    struct hdfs_object *e, *dns;
    e = dns = NULL;

    dns = hdfs_getDatanodeReport(h, HDFS_DNREPORT_ALL, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    else
        hdfs_object_free(dns);

    dns = hdfs_getDatanodeReport(h, HDFS_DNREPORT_LIVE, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    else
        hdfs_object_free(dns);

    dns = hdfs_getDatanodeReport(h, HDFS_DNREPORT_DEAD, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    else
        hdfs_object_free(dns);
}
END_TEST

START_TEST(test_admin_functions2)
{
    struct hdfs_object *e, *e2;
    e = e2 = NULL;

    hdfs_metaSave(h, "/HADOOFUS_TEST_METASAVE", &e);
    (void)hdfs_delete(h, "/HADOOFUS_TEST_METASAVE", false, &e2);
    if (e2)
        hdfs_object_free(e2);
    if (e)
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
#endif
    e = NULL;   /* reset so a stale pointer is not freed again below */

    (void)hdfs_isFileClosed(h, "/BOGUS", &e);
    if (e)
        hdfs_object_free(e);
    e = NULL;

    hdfs_setBalancerBandwidth(h, 100000000, &e);
    if (e)
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
#endif
}
END_TEST

START_TEST(test_admin_functions)
{
    struct hdfs_object *e;
    e = NULL;

    hdfs_finalizeUpgrade(h, &e);
    if (e)
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
#endif
    e = NULL;   /* reset so a stale pointer is not freed again below */

    hdfs_refreshNodes(h, &e);
    if (e)
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
#endif
    e = NULL;

    hdfs_saveNamespace(h, &e);
    if (e)
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
#endif
}
/**
 * hdfsGetUsed - Return the total raw size of all files in the filesystem.
 *
 * @param fs The configured filesystem handle.
 * @return Returns the total-size; -1 on error.
 */
tOffset
hdfsGetUsed(hdfsFS fs)
{
    tOffset res = -1;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL, *stats_arr = NULL;

    stats_arr = hdfs_getStats(client->fs_namenode, &ex);
    if (ex) {
        ERR(EIO, "getStats(): %s", hdfs_exception_get_message(ex));
        goto out;
    }
    if (stats_arr->ob_type == H_NULL) {
        ERR(EIO, "getStats(): got bogus null array");
        goto out;
    }
    if (stats_arr->ob_val._array_long._len < 2) {
        ERR(EIO, "getStats(): got short stats array");
        goto out;
    }

    res = stats_arr->ob_val._array_long._vals[1];

out:
    if (stats_arr)
        hdfs_object_free(stats_arr);
    if (ex)
        hdfs_object_free(ex);
    return res;
}
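/*
 * Hypothetical caller-side sketch, written as a standalone example rather
 * than part of this file: pairs hdfsGetUsed() with hdfsGetCapacity() to
 * report cluster usage. It assumes this compat layer also provides the
 * usual hdfsConnect()/hdfsDisconnect()/hdfsGetCapacity() entry points; the
 * header name and the "namenode"/8020 connection parameters are
 * placeholders.
 */
#include <stdio.h>
#include <inttypes.h>
#include <hdfs.h>

static void
example_print_cluster_usage(void)
{
    hdfsFS fs = hdfsConnect("namenode", 8020);
    tOffset used, cap;

    if (!fs)
        return;

    used = hdfsGetUsed(fs);
    cap = hdfsGetCapacity(fs);
    if (used >= 0 && cap >= 0)
        printf("used %jd of %jd bytes\n", (intmax_t)used, (intmax_t)cap);

    hdfsDisconnect(fs);
}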
END_TEST

START_TEST(test_symlinks)
{
    const char *tl = "/HADOOFUS_TEST_SYMLINKS",
          *td = "/HADOOFUS_TEST_BOGUS";
    struct hdfs_object *targ, *e, *e2, *fs;
    e = e2 = NULL;

    hdfs2_createSymlink(h, td, tl, 0755, false, &e);
    if (e)
        goto err;

    targ = hdfs2_getLinkTarget(h, tl, &e);
    if (e)
        goto err;
    hdfs_object_free(targ);

    fs = hdfs2_getFileLinkInfo(h, tl, &e);
    if (e)
        goto err;
    hdfs_object_free(fs);

err:
    hdfs_delete(h, tl, false, &e2);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    if (e2)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e2));
}
/**
 * hdfsGetPathInfo - Get information about a path as a (dynamically
 * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
 * called when the pointer is no longer needed.
 *
 * @param fs The configured filesystem handle.
 * @param path The path of the file.
 * @return Returns a dynamically-allocated hdfsFileInfo object;
 *     NULL on error.
 */
hdfsFileInfo *
hdfsGetPathInfo(hdfsFS fs, const char* path)
{
    hdfsFileInfo *res = NULL;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL, *fstatus = NULL;
    char *path_abs = _makeabs(fs, path);

    fstatus = hdfs_getFileInfo(client->fs_namenode, path_abs, &ex);
    if (ex) {
        ERR(EIO, "getFileInfo(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        goto out;
    }
    if (fstatus->ob_type == H_NULL) {
        ERR(ENOENT, "getFileInfo(): %s doesn't exist", path_abs);
        goto out;
    }

    res = malloc(sizeof *res);
    assert(res);
    _hadoofus_file_status_to_libhdfs(client->fs_uri, path_abs, fstatus, res);

out:
    if (fstatus)
        hdfs_object_free(fstatus);
    if (path_abs != path)
        free(path_abs);
    return res;
}
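/*
 * Hypothetical usage sketch, written as a standalone example rather than
 * part of this file: fetches a single hdfsFileInfo with hdfsGetPathInfo()
 * and releases it with hdfsFreeFileInfo(), as the comment above requires.
 * The header name is an assumption.
 */
#include <stdio.h>
#include <inttypes.h>
#include <hdfs.h>

static int
example_stat_path(hdfsFS fs, const char *path)
{
    hdfsFileInfo *fi = hdfsGetPathInfo(fs, path);

    if (!fi)
        return -1;    /* errno already set by hdfsGetPathInfo() */

    printf("%s: %s, %jd bytes\n", fi->mName,
        fi->mKind == kObjectKindDirectory ? "directory" : "file",
        (intmax_t)fi->mSize);

    /* One entry was allocated, so free exactly one. */
    hdfsFreeFileInfo(fi, 1);
    return 0;
}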
/**
 * hdfsExists - Checks if a given path exists on the filesystem
 *
 * @param fs The configured filesystem handle.
 * @param path The path to look for
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsExists(hdfsFS fs, const char *path)
{
    struct hdfs_object *s, *ex = NULL;
    struct hdfsFS_internal *client = fs;
    int res = 0;
    char *path_abs = _makeabs(fs, path);

    s = hdfs_getFileInfo(client->fs_namenode, path_abs, &ex);
    if (ex) {
        ERR(EIO, "getFileInfo(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        res = -1;
        goto out;
    }
    if (s->ob_type == H_NULL) {
        res = -1;
        errno = ENOENT;
    }
    hdfs_object_free(s);

out:
    if (path_abs != path)
        free(path_abs);
    return res;
}
void *
athread(void *v)
{
    struct hdfs_namenode *nn;
    struct hdfs_object *rpc;
    struct hdfs_object *object;
    const char *error;
    struct hdfs_rpc_response_future futures[100];
    unsigned i;
    bool ok;

    nn = v;

    // getProtocolVersion(61)
    rpc = hdfs_rpc_invocation_new(
        "getProtocolVersion",
        hdfs_string_new(HADOOFUS_CLIENT_PROTOCOL_STR),
        hdfs_long_new(61),
        NULL);

    for (i = 0; i < 100; i++) {
        futures[i] = HDFS_RPC_RESPONSE_FUTURE_INITIALIZER;

        error = hdfs_namenode_invoke(nn, rpc, &futures[i]);
        if (error) {
            warnx("namenode_invoke: %s", error);
            goto out;
        }
    }

    for (i = 0; i < 100; i++) {
        // Get the response (should be long(61))
        ok = hdfs_future_get_timeout(&futures[i], &object, 2000/*ms*/);
        if (!ok) {
            warnx("timeout waiting for result from NN server");
            continue;
        }

        if (object->ob_type != H_LONG ||
            object->ob_val._long._val != 61L)
            printf("bad result\n");

        hdfs_object_free(object);
    }

out:
    hdfs_object_free(rpc);
    return NULL;
}
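/*
 * Hypothetical driver sketch for athread() above: connects one namenode
 * handle and hammers it from several threads so a race detector (e.g.
 * helgrind) can watch the shared connection. HDFS_NO_KERB and the
 * host/port/user values are assumptions, as is the availability of
 * <pthread.h> and <err.h> in this translation unit; the real test harness
 * may set this up differently.
 */
static void
example_run_athreads(void)
{
    const char *error = NULL;
    struct hdfs_namenode *nn;
    pthread_t threads[4];
    int i;

    nn = hdfs_namenode_new("localhost", "8020", "root", HDFS_NO_KERB, &error);
    if (!nn) {
        warnx("connect: %s", error);
        return;
    }

    for (i = 0; i < 4; i++)
        if (pthread_create(&threads[i], NULL, athread, nn) != 0)
            err(1, "pthread_create");
    for (i = 0; i < 4; i++)
        pthread_join(threads[i], NULL);

    hdfs_namenode_delete(nn);
}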
END_TEST

START_TEST(test_append)
{
    bool s;
    struct hdfs_object *e = NULL, *lb;
    const char *tf = "/HADOOFUS_TEST_APPEND",
          *client = "HADOOFUS_CLIENT";

    // Create the file first
    hdfs_create(h, tf, 0644, client, true/*overwrite*/,
        false/*createparent*/, 1/*replication*/, 64*1024*1024, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

    s = hdfs_complete(h, tf, client, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert_msg(s, "complete returned false");

    // Open for appending
    lb = hdfs_append(h, tf, client, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    hdfs_object_free(lb);

    // Cleanup
    s = hdfs_delete(h, tf, false/*recurse*/, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert_msg(s, "delete returned false");
}
END_TEST

START_TEST(test_addBlock)
{
    bool s;
    struct hdfs_object *e = NULL, *lb;
    const char *tf = "/HADOOFUS_TEST_ADDBLOCK",
          *client = "HADOOFUS_CLIENT";

    // Create the file first
    hdfs_create(h, tf, 0644, client, true/*overwrite*/,
        false/*createparent*/, 1/*replication*/, 64*1024*1024, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

    mark_point();

    lb = hdfs_addBlock(h, tf, client, NULL, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert(!hdfs_object_is_null(lb));
    hdfs_object_free(lb);

    mark_point();

    // Cleanup
    s = hdfs_delete(h, tf, false/*recurse*/, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert_msg(s, "delete returned false");
}
END_TEST

START_TEST(test_getContentSummary)
{
    bool s;
    struct hdfs_object *e = NULL, *cs;
    const char *tf = "/HADOOFUS_TEST_CSDIR";

    s = hdfs_mkdirs(h, tf, 0755, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert_msg(s, "mkdirs returned false");

    cs = hdfs_getContentSummary(h, tf, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert(!hdfs_object_is_null(cs));
    hdfs_object_free(cs);

    s = hdfs_delete(h, tf, false/*recurse*/, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert_msg(s, "delete returned false");
}
/**
 * hdfsSetReplication - Set the replication of the specified
 * file to the supplied value
 *
 * @param fs The configured filesystem handle.
 * @param path The path of the file.
 * @param replication The new replication factor for the file.
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
{
    int res = 0;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL;
    char *path_abs = _makeabs(fs, path);
    bool b;

    b = hdfs_setReplication(client->fs_namenode, path_abs, replication, &ex);
    if (ex) {
        ERR(EIO, "setReplication(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        res = -1;
        goto out;
    }
    if (!b) {
        ERR(ENOENT, "setReplication(): No such file, or %s is a directory",
            path_abs);
        res = -1;
        goto out;
    }

out:
    if (path_abs != path)
        free(path_abs);
    return res;
}
/**
 * hdfsCreateDirectory - Make the given file and all non-existent
 * parents into directories.
 *
 * @param fs The configured filesystem handle.
 * @param path The path of the directory.
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsCreateDirectory(hdfsFS fs, const char* path)
{
    int res = 0;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL;
    char *path_abs = _makeabs(fs, path);
    bool b = hdfs_mkdirs(client->fs_namenode, path_abs, 0755, &ex);

    if (ex) {
        ERR(EIO, "mkdirs(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        res = -1;
        goto out;
    }
    if (!b) {
        ERR(EINVAL, "CreateDirectory() failed on '%s'", path_abs);
        res = -1;
        goto out;
    }

out:
    if (path_abs != path)
        free(path_abs);
    return res;
}
/**
 * hdfsRename - Rename file.
 *
 * @param fs The configured filesystem handle.
 * @param oldPath The path of the source file.
 * @param newPath The path of the destination file.
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
{
    struct hdfs_object *ex = NULL;
    bool b;
    struct hdfsFS_internal *client = fs;
    char *oldPath_abs, *newPath_abs;
    int res = 0;

    oldPath_abs = _makeabs(fs, oldPath);
    newPath_abs = _makeabs(fs, newPath);

    b = hdfs_rename(client->fs_namenode, oldPath_abs, newPath_abs, &ex);
    if (ex) {
        ERR(EIO, "rename(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        res = -1;
        goto out;
    }
    if (!b) {
        ERR(EINVAL, "rename() failed (on '%s' -> '%s')",
            oldPath_abs, newPath_abs);
        res = -1;
        goto out;
    }

out:
    if (oldPath_abs != oldPath)
        free(oldPath_abs);
    if (newPath_abs != newPath)
        free(newPath_abs);
    return res;
}
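/*
 * Hypothetical end-to-end sketch combining the directory and rename wrappers
 * above: create a staging directory, rename it into place, and fall back to
 * the (recursive) hdfsDelete() on failure. The paths and header name are
 * placeholders, not part of this library.
 */
#include <hdfs.h>

static int
example_stage_and_publish(hdfsFS fs)
{
    if (hdfsCreateDirectory(fs, "/tmp/staging") == -1)
        return -1;

    /* Move the staged tree into its published location. */
    if (hdfsRename(fs, "/tmp/staging", "/data/published") == -1) {
        (void)hdfsDelete(fs, "/tmp/staging");    /* best-effort cleanup */
        return -1;
    }
    return 0;
}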
int
main(int argc, char **argv)
{
    int r;
    int64_t res;
    struct hdfs_object *exception = NULL, *dl;

    r = sasl_client_init(NULL);
    if (r != SASL_OK) {
        fprintf(stderr, "Error initializing sasl: %d\n", r);
        return -1;
    }

    if (argc > 1) {
        if (strcmp(argv[1], "-h") == 0) {
            printf("Usage: ./kerb [host [port [kerb_principal]]]\n");
            exit(0);
        }
        host = argv[1];
        if (argc > 2) {
            port = argv[2];
            if (argc > 3) {
                user = argv[3];
            }
        }
    }

    h = hdfs_namenode_new(host, port, user, HDFS_REQUIRE_KERB, &err);
    if (!h)
        goto out;

    res = hdfs_getProtocolVersion(h, HADOOFUS_CLIENT_PROTOCOL_STR, 61L,
        &exception);
    if (exception) {
        err = exception->ob_val._exception._msg;
        goto out;
    }
    if (res != 61)
        fprintf(stderr, "protocol version != 61: %jd\n", (intmax_t)res);
    else
        fprintf(stderr, "success\n");

    dl = hdfs_getListing(h, "/", NULL, &exception);
    if (exception) {
        err = exception->ob_val._exception._msg;
        goto out;
    }
    hdfs_object_free(dl);
    fprintf(stderr, "dl: success\n");

out:
    if (err)
        fprintf(stderr, "hadoofus error: %s\n", err);
    if (h)
        hdfs_namenode_delete(h);
    sasl_done();
    return 0;
}
END_TEST

START_TEST(test_delegationTokens)
{
    struct hdfs_object *token, *e;
    e = NULL;

    token = hdfs_getDelegationToken(h, "abcde", &e);
    if (e) {
        /*
         * "Delegation Token can be issued only with kerberos or web
         * authentication"
         */
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
        e = NULL;
        token = hdfs_token_new_empty();
#endif
    }

    (void)hdfs_renewDelegationToken(h, token, &e);
    if (e) {
        /* Similar error. */
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
        e = NULL;
#endif
    }

    hdfs_cancelDelegationToken(h, token, &e);
    if (e) {
        /* Similar error. */
#if 0
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
#else
        hdfs_object_free(e);
        e = NULL;
#endif
    }

    hdfs_object_free(token);
}
END_TEST

START_TEST(test_abandonBlock)
{
    bool s;
    struct hdfs_object *e = NULL, *lb, *bl;
    const char *tf = "/HADOOFUS_TEST_ABANDONBLOCK",
          *client = "HADOOFUS_CLIENT";

    // Create the file first
    hdfs_create(h, tf, 0644, client, true/*overwrite*/,
        false/*createparent*/, 1/*replication*/, 64*1024*1024, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

    mark_point();

    // XXX this must be updated to cover v2.0+ (last_block/fileid)
    lb = hdfs_addBlock(h, tf, client, NULL, NULL, 0, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert(!hdfs_object_is_null(lb));

    mark_point();

    bl = hdfs_block_from_located_block(lb);
    hdfs_object_free(lb);

    mark_point();

    hdfs_abandonBlock(h, bl, tf, client, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    hdfs_object_free(bl);

    mark_point();

    // Cleanup
    s = hdfs_delete(h, tf, false/*recurse*/, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert_msg(s, "delete returned false");
}
/**
 * hdfsListDirectory - Get list of files/directories for a given
 * directory-path. hdfsFreeFileInfo should be called to deallocate memory.
 *
 * @param fs The configured filesystem handle.
 * @param path The path of the directory.
 * @param numEntries Set to the number of files/directories in path.
 * @return Returns a dynamically-allocated array of hdfsFileInfo
 *     objects; NULL on error.
 */
hdfsFileInfo *
hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
{
    hdfsFileInfo *res = NULL;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL, *dl = NULL;
    char *path_abs = _makeabs(fs, path);
    int nfiles;

    dl = hdfs_getListing(client->fs_namenode, path_abs, NULL, &ex);
    if (ex) {
        ERR(EIO, "getListing(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        goto out;
    }
    if (dl->ob_type == H_NULL) {
        ERR(ENOENT, "getListing(): %s doesn't exist", path_abs);
        goto out;
    }

    nfiles = dl->ob_val._directory_listing._num_files;
    res = malloc(nfiles * sizeof *res);
    assert(res);

    for (int i = 0; i < nfiles; i++) {
        struct hdfs_object *fstatus =
            dl->ob_val._directory_listing._files[i];
        _hadoofus_file_status_to_libhdfs(client->fs_uri, path_abs,
            fstatus, &res[i]);
    }

    *numEntries = nfiles;

out:
    if (path_abs != path)
        free(path_abs);
    if (dl)
        hdfs_object_free(dl);
    return res;
}
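/*
 * Hypothetical usage sketch for hdfsListDirectory(), written as a standalone
 * example rather than part of this file: iterate the returned array and
 * release it with hdfsFreeFileInfo(), passing the entry count the call
 * reported. The header name and directory path are placeholders.
 */
#include <stdio.h>
#include <inttypes.h>
#include <hdfs.h>

static void
example_ls(hdfsFS fs, const char *dir)
{
    int n = 0;
    hdfsFileInfo *entries = hdfsListDirectory(fs, dir, &n);

    if (!entries)
        return;

    for (int i = 0; i < n; i++)
        printf("%c %10jd %s\n",
            entries[i].mKind == kObjectKindDirectory ? 'd' : '-',
            (intmax_t)entries[i].mSize, entries[i].mName);

    /* Free all n entries at once. */
    hdfsFreeFileInfo(entries, n);
}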
END_TEST

START_TEST(test_getServerDefaults)
{
    struct hdfs_object *object, *e = NULL;

    object = hdfs2_getServerDefaults(h, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    hdfs_object_free(object);
}
END_TEST

START_TEST(test_getStats)
{
    struct hdfs_object *e = NULL, *stats;

    stats = hdfs_getStats(h, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    hdfs_object_free(stats);
}
END_TEST

START_TEST(test_getListing)
{
    struct hdfs_object *e = NULL, *listing;

    listing = hdfs_getListing(h, "/", NULL, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert(!hdfs_object_is_null(listing));
    hdfs_object_free(listing);
}
END_TEST

START_TEST(test_distributedUpgradeProgress)
{
    struct hdfs_object *e, *us;
    e = NULL;

    us = hdfs_distributedUpgradeProgress(h, HDFS_UPGRADEACTION_STATUS, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    hdfs_object_free(us);

    us = hdfs_distributedUpgradeProgress(h, HDFS_UPGRADEACTION_DETAILED, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    hdfs_object_free(us);

#if 0
    us = hdfs_distributedUpgradeProgress(HDFS_UPGRADEACTION_FORCE_PROCEED);
#endif
}
END_TEST

START_TEST(test_getFileInfo)
{
    struct hdfs_object *e = NULL, *fs;

    fs = hdfs_getFileInfo(h, "/", &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
    ck_assert(!hdfs_object_is_null(fs));
    hdfs_object_free(fs);
}
/**
 * hdfsMove - Move file from one filesystem to another.
 *
 * @param srcFS The handle to source filesystem.
 * @param src The path of source file.
 * @param dstFS The handle to destination filesystem.
 * @param dst The path of destination file.
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsMove(hdfsFS srcFS_, const char* src, hdfsFS dstFS, const char* dst)
{
    int res = -1;
    bool b;
    struct hdfs_object *ex = NULL;
    struct hdfsFS_internal *srcFS = srcFS_;
    char *src_abs, *dst_abs;

    src_abs = _makeabs(srcFS, src);
    dst_abs = _makeabs(dstFS, dst);

    // Yeah this comparison isn't perfect. We don't have anything better.
    if (srcFS_ == dstFS) {
        b = hdfs_rename(srcFS->fs_namenode, src_abs, dst_abs, &ex);
        if (ex) {
            ERR(EIO, "rename failed: %s", hdfs_exception_get_message(ex));
            goto out;
        }
        if (!b)
            WARN("rename of '%s' returned false", src_abs);
    } else {
        res = hdfsCopy(srcFS_, src_abs, dstFS, dst_abs);
        if (res == -1) {
            ERR(errno, "hdfsCopy failed");
            goto out;
        }

        b = hdfs_delete(srcFS->fs_namenode, src_abs, false/*recurse*/, &ex);
        if (ex) {
            ERR(EIO, "delete failed: %s", hdfs_exception_get_message(ex));
            goto out;
        }
        if (!b)
            WARN("delete of '%s' returned false", src_abs);
    }

    res = 0;

out:
    if (src_abs != src)
        free(src_abs);
    if (dst_abs != dst)
        free(dst_abs);
    if (ex)
        hdfs_object_free(ex);
    return res;
}
/**
 * hdfsGetDefaultBlockSize - Get the optimum blocksize.
 *
 * @param fs The configured filesystem handle.
 * @return Returns the blocksize; -1 on error.
 */
tOffset
hdfsGetDefaultBlockSize(hdfsFS fs)
{
    tOffset res;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL;

    res = hdfs_getPreferredBlockSize(client->fs_namenode, "/", &ex);
    if (ex) {
        ERR(EIO, "getPreferredBlockSize(): %s",
            hdfs_exception_get_message(ex));
        res = -1;    /* match the documented error return */
        goto out;
    }

out:
    if (ex)
        hdfs_object_free(ex);
    return res;
}
END_TEST

START_TEST(test_reportBadBlocks)
{
    struct hdfs_object *e, *alb, *lb;
    e = NULL;

    lb = hdfs_located_block_new(0, 0, 0, 0);
    alb = hdfs_array_locatedblock_new();
    hdfs_array_locatedblock_append_located_block(alb, lb);
    lb = NULL;

    hdfs_reportBadBlocks(h, alb, &e);
    if (e)
        ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

    hdfs_object_free(alb);
}
/**
 * hdfsDelete - Delete file.
 *
 * @param fs The configured filesystem handle.
 * @param path The path of the file.
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsDelete(hdfsFS fs, const char* path)
{
    struct hdfs_object *ex = NULL;
    struct hdfsFS_internal *client = fs;
    char *path_abs = _makeabs(fs, path);
    int res = 0;

    /*bool b = */hdfs_delete(client->fs_namenode, path_abs,
        true/*recurse*/, &ex);
    if (ex) {
        ERR(EIO, "delete(): %s", hdfs_exception_get_message(ex));
        hdfs_object_free(ex);
        res = -1;
    }

    if (path_abs != path)
        free(path_abs);
    return res;
}
/**
 * hdfsCloseFile - Close an open file.
 *
 * @param fs The configured filesystem handle.
 * @param file The file handle.
 * @return Returns 0 on success, -1 on error.
 */
int
hdfsCloseFile(hdfsFS fs, hdfsFile file)
{
    struct hdfsFile_internal *f = file;
    struct hdfsFS_internal *client = fs;
    int res = 0;

    if (f->fi_mode == FILE_WRITE || f->fi_mode == FILE_APPEND) {
        struct hdfs_object *ex = NULL;
        bool succ = false;

        if (f->fi_wbuf_used > 0) {
            res = _flush(client->fs_namenode, f, NULL, -1);
            if (res == -1)
                WARN("Flushing '%s' failed: %m", f->fi_path);
        }

        while (!succ) {
            succ = hdfs_complete(client->fs_namenode, f->fi_path,
                f->fi_client, &ex);
            if (ex) {
                ERR(EIO, "Could not complete '%s', abandoning "
                    "write: %s", f->fi_path,
                    hdfs_exception_get_message(ex));
                hdfs_object_free(ex);
                res = -1;
                break;
            }
            if (!succ) {
                WARN("Could not complete '%s'", f->fi_path);
                usleep(400*1000);
            }
        }

        free(f->fi_wbuf);
    }

    free(f->fi_client);
    free(f->fi_path);
    free(f);
    return res;
}
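/*
 * Hypothetical write-path sketch, written as a standalone example rather
 * than part of this file: open for write, write a buffer, and let
 * hdfsCloseFile() above flush buffered data and complete() the file on the
 * namenode. It assumes this compat layer provides hdfsOpenFile() and
 * hdfsWrite() with the usual libhdfs signatures; the path, sizes, and header
 * name are placeholders.
 */
#include <fcntl.h>
#include <string.h>
#include <hdfs.h>

static int
example_write_file(hdfsFS fs)
{
    const char *msg = "hello, hadoofus\n";
    hdfsFile f;
    int res = -1;

    f = hdfsOpenFile(fs, "/tmp/example.txt", O_WRONLY, 0/*bufsz*/,
        0/*replication*/, 0/*blocksize*/);
    if (!f)
        return -1;

    if (hdfsWrite(fs, f, msg, (tSize)strlen(msg)) == (tSize)strlen(msg))
        res = 0;

    /* Close flushes any buffered bytes and retries complete() as needed. */
    if (hdfsCloseFile(fs, f) == -1)
        res = -1;
    return res;
}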
/**
 * hdfsChmod
 *
 * @param fs The configured filesystem handle.
 * @param path the path to the file or directory
 * @param mode the bitmask to set it to
 * @return 0 on success else -1
 */
int
hdfsChmod(hdfsFS fs, const char* path, short mode)
{
    int res = 0;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL;
    char *path_abs = _makeabs(fs, path);

    hdfs_setPermission(client->fs_namenode, path_abs, mode, &ex);
    if (ex) {
        ERR(EIO, "setPermission(): %s", hdfs_exception_get_message(ex));
        res = -1;
        goto out;
    }

out:
    if (ex)
        hdfs_object_free(ex);
    if (path_abs != path)
        free(path_abs);
    return res;
}
/**
 * hdfsChown
 *
 * @param fs The configured filesystem handle.
 * @param path the path to the file or directory
 * @param owner this is a string in Hadoop land. Set to null or "" if only setting group
 * @param group this is a string in Hadoop land. Set to null or "" if only setting user
 * @return 0 on success else -1
 */
int
hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
{
    int res = 0;
    struct hdfsFS_internal *client = fs;
    struct hdfs_object *ex = NULL;
    char *path_abs = _makeabs(fs, path);

    hdfs_setOwner(client->fs_namenode, path_abs, owner, group, &ex);
    if (ex) {
        ERR(EIO, "setOwner(): %s", hdfs_exception_get_message(ex));
        res = -1;
        goto out;
    }

out:
    if (ex)
        hdfs_object_free(ex);
    if (path_abs != path)
        free(path_abs);
    return res;
}