/*
 * setup()
 *
 * Sanity-check the environment, make sure no postmaster is servicing
 * either cluster (unless this is a live check), and record the directory
 * containing the pg_upgrade executable.
 */
static void
setup(char *argv0, bool live_check)
{
	char		my_exec_path[MAXPGPATH];	/* full path to my executable */

	/*
	 * make sure the user has a clean environment, otherwise, we may confuse
	 * libpq when we connect to one (or both) of the servers.
	 */
	check_pghost_envvar();

	verify_directories();

	/* no postmasters should be running */
	if (!live_check && is_server_running(old_cluster.pgdata))
		pg_log(PG_FATAL,
			   "There seems to be a postmaster servicing the old cluster.\n"
			   "Please shutdown that postmaster and try again.\n");

	/* same goes for the new postmaster */
	if (is_server_running(new_cluster.pgdata))
		pg_log(PG_FATAL,
			   "There seems to be a postmaster servicing the new cluster.\n"
			   "Please shutdown that postmaster and try again.\n");

	/* get path to pg_upgrade executable */
	if (find_my_exec(argv0, my_exec_path) < 0)
		pg_log(PG_FATAL, "Could not get path name to pg_upgrade: %s\n",
			   getErrorText(errno));

	/* Trim off program name and keep just path */
	*last_dir_separator(my_exec_path) = '\0';
	canonicalize_path(my_exec_path);
	os_info.exec_path = pg_strdup(my_exec_path);
}
/*
 * get_pkglibdir()
 *
 * Run "pg_config --pkglibdir" from the given bin directory and return the
 * reported package-library directory as a freshly allocated string.
 *
 * Fixes over the previous version:
 *  - the fgets() result is now checked; before, a read failure left bufin
 *    uninitialized and it was still passed to strlen()/pg_strdup().
 *  - the "if (output)" guard before pclose() was dead code (popen failure
 *    already exits via pg_log(PG_FATAL)); removed.
 *  - an empty result made "strlen(bufin) - 1" index bufin[-1]; now guarded.
 */
static char *
get_pkglibdir(const char *bindir)
{
	char		cmd[MAXPGPATH];
	char		bufin[MAX_STRING];
	FILE	   *output;
	int			i;

	snprintf(cmd, sizeof(cmd), "\"%s/pg_config\" --pkglibdir", bindir);

	if ((output = popen(cmd, "r")) == NULL)
		pg_log(PG_FATAL, "Could not get pkglibdir data: %s\n",
			   getErrorText(errno));

	if (fgets(bufin, sizeof(bufin), output) == NULL)
		bufin[0] = '\0';		/* treat read failure as empty output */

	pclose(output);

	/* Remove trailing newline (guarding against empty output) */
	i = strlen(bufin) - 1;
	if (i >= 0 && bufin[i] == '\n')
		bufin[i] = '\0';

	return pg_strdup(bufin);
}
/*
 * @brief Draw the sensor status.
 * @param index Current sensor index.
 * @param x Coordinate x where to draw the values.
 * @param y Coordinate y where to draw the values.
 */
void drawSensorStatus(int index, int x, int y)
{
	char buffer[BUFFER_SIZE];

	/*
	 * Bug fix: sprintf() had no bound, so a long sensor name or error text
	 * could overflow buffer.  snprintf() truncates safely at BUFFER_SIZE.
	 */
	snprintf(buffer, sizeof(buffer), "%s: %s",
			 gSensorName[index], getErrorText(gSensorError[index]));
	maDrawText(x, y, buffer);
}
/*
 * check_data_dir()
 *
 * This function validates the given cluster directory - we search for a
 * small set of subdirectories that we expect to find in a valid $PGDATA
 * directory.  If any of the subdirectories are missing (or secured against
 * us) we display an error message and exit()
 *
 */
static void
check_data_dir(const char *pg_data)
{
	char		subDirName[MAXPGPATH];
	int			subdirnum;

	/* start check with top-most directory */
	const char *requiredSubdirs[] = {"", "base", "global", "pg_clog",
		"pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase",
	"pg_xlog"};
	const int	n_subdirs = sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);

	for (subdirnum = 0; subdirnum < n_subdirs; ++subdirnum)
	{
		struct stat statBuf;
		const char *subdir = requiredSubdirs[subdirnum];

		snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data,
		/* Win32 can't stat() a directory with a trailing slash. */
				 *subdir ? "/" : "",
				 subdir);

		if (stat(subDirName, &statBuf) != 0)
			report_status(PG_FATAL, "check for \"%s\" failed: %s\n",
						  subDirName, getErrorText(errno));
		else if (!S_ISDIR(statBuf.st_mode))
			report_status(PG_FATAL, "%s is not a directory\n",
						  subDirName);
	}
}
/*
 * check_for_support_lib()
 *
 * Test pg_upgrade_support.so is in the proper place.  We cannot copy it
 * ourselves because install directories are typically root-owned.
 *
 * Bug fix: the fgets() result was ignored, so a failed read left libdir
 * uninitialized and it was still used to build the library path.  We now
 * fail hard if pg_config produced no output.
 */
static void
check_for_support_lib(ClusterInfo *cluster)
{
	char		cmd[MAX_PG_PATH];
	char		libdir[MAX_STRING];
	char		libfile[MAX_PG_PATH];
	FILE	   *lib_test;
	FILE	   *output;

	snprintf(cmd, sizeof(cmd), "\"%s/pg_config\" --pkglibdir", cluster->bindir);

	if ((output = popen(cmd, "r")) == NULL)
		pg_log(PG_FATAL, "Could not get pkglibdir data: %s\n",
			   getErrorText(errno));

	if (fgets(libdir, sizeof(libdir), output) == NULL)
	{
		pclose(output);
		pg_log(PG_FATAL, "Could not get pkglibdir data: %s\n",
			   getErrorText(errno));
	}

	pclose(output);

	/* Remove trailing newline */
	if (strchr(libdir, '\n') != NULL)
		*strchr(libdir, '\n') = '\0';

	snprintf(libfile, sizeof(libfile), "%s/pg_upgrade_support%s", libdir,
			 DLSUFFIX);

	if ((lib_test = fopen(libfile, "r")) == NULL)
		pg_log(PG_FATAL,
			   "The pg_upgrade_support module must be created and installed in the %s cluster.\n",
			   CLUSTER_NAME(cluster));

	fclose(lib_test);
}
bool DVR::connectControl(const string& host, uint16_t port) { m_host = host; m_port = port; struct sockaddr_in serv_addr; m_controlFd = socket(AF_INET, SOCK_STREAM, 0); memset(&serv_addr, 0, sizeof(serv_addr)); serv_addr.sin_family = AF_INET; serv_addr.sin_port = htons(port); hostent* ent = gethostbyname(m_host.c_str()); if (!ent) { setError(ConnectError, "unable to resolve name"); return false; } memcpy(&serv_addr.sin_addr, ent->h_addr_list[0], 4); if (connect(m_controlFd, (sockaddr*)&serv_addr, sizeof(serv_addr))) { setError(ConnectError, getErrorText("connect error")); return false; } return true; }
/*
 * check_data_dir()
 *
 * This function validates the given cluster directory - we search for a
 * small set of subdirectories that we expect to find in a valid $PGDATA
 * directory.  If any of the subdirectories are missing (or secured against
 * us) we display an error message and exit()
 *
 */
static void
check_data_dir(const char *pg_data)
{
	char		subDirName[MAXPGPATH];
	int			subdirnum;
	const char *requiredSubdirs[] = {"base", "global", "pg_clog",
		"pg_multixact", "pg_subtrans",
	"pg_tblspc", "pg_twophase", "pg_xlog"};
	const int	n_subdirs = sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);

	for (subdirnum = 0; subdirnum < n_subdirs; ++subdirnum)
	{
		struct stat statBuf;

		snprintf(subDirName, sizeof(subDirName), "%s/%s", pg_data,
				 requiredSubdirs[subdirnum]);

		if (stat(subDirName, &statBuf) != 0)
			report_status(PG_FATAL, "check for %s failed: %s\n",
						  requiredSubdirs[subdirnum], getErrorText(errno));
		else if (!S_ISDIR(statBuf.st_mode))
			report_status(PG_FATAL, "%s is not a directory\n",
						  requiredSubdirs[subdirnum]);
	}
}
/*
 * get_bin_version()
 *
 * Parse "pg_ctl --version" output from the cluster's bin directory and
 * store the result in cluster->bin_version, encoded as
 * (major * 100 + minor) * 100.
 *
 * Bug fix: the fgets() result was ignored; a read failure left cmd_output
 * uninitialized before the sscanf() below.  We now route that case through
 * the existing "could not get version" fatal error.
 */
static void
get_bin_version(ClusterInfo *cluster)
{
	char		cmd[MAX_PG_PATH],
				cmd_output[MAX_STRING];
	FILE	   *output;
	int			pre_dot,
				post_dot;

	snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir);

	if ((output = popen(cmd, "r")) == NULL)
		pg_log(PG_FATAL, "Could not get pg_ctl version data: %s\n",
			   getErrorText(errno));

	if (fgets(cmd_output, sizeof(cmd_output), output) == NULL)
	{
		pclose(output);
		pg_log(PG_FATAL, "could not get version from %s\n", cmd);
	}

	pclose(output);

	/* Remove trailing newline */
	if (strchr(cmd_output, '\n') != NULL)
		*strchr(cmd_output, '\n') = '\0';

	if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) != 2)
		pg_log(PG_FATAL, "could not get version from %s\n", cmd);

	cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
}
/*
 * hdfsMergeFiles()
 *
 * Merge the HDFS files under srcPath into dstPath by invoking the static
 * Java method JM_HDFS_MERGE_FILES through JNI.
 *
 * Returns HDFS_CLIENT_OK on success,
 * HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM on JNI setup / argument
 * conversion failure, and HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_EXCEPTION if
 * the Java call throws or reports failure.
 */
HDFS_Client_RetCode HdfsClient::hdfsMergeFiles( const NAString& srcPath,
                                                const NAString& dstPath)
{
  QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsMergeFiles(%s, %s) called.",
                  srcPath.data(), dstPath.data());

  /*
   * NOTE(review): the two early returns below do not PopLocalFrame(),
   * unlike every later exit path — presumably no local frame exists yet
   * when initJNIEnv()/getInstance() fail; confirm against initJNIEnv().
   */
  if (initJNIEnv() != JOI_OK)
     return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM;
  if (getInstance() == NULL)
     return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM;

  /* Convert both C++ paths to Java strings; each failure records the JNI
     error text and pops the local frame before returning. */
  jstring js_SrcPath = jenv_->NewStringUTF(srcPath.data());
  if (js_SrcPath == NULL) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM));
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM;
  }

  jstring js_DstPath= jenv_->NewStringUTF(dstPath.data());
  if (js_DstPath == NULL) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM));
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM;
  }

  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_MERGE_FILES].jm_full_name;
  jboolean jresult = jenv_->CallStaticBooleanMethod(javaClass_,
                 JavaMethods_[JM_HDFS_MERGE_FILES].methodID,
                 js_SrcPath, js_DstPath);

  /* A pending Java exception takes precedence over the boolean result. */
  if (jenv_->ExceptionCheck())
  {
    getExceptionDetails();
    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
    logError(CAT_SQL_HDFS, "HdfsClient::hdfsMergeFiles()", getLastError());
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_EXCEPTION;
  }

  /* Java returned false: the merge itself failed. */
  if (jresult == false)
  {
    logError(CAT_SQL_HDFS, "HdfsClient::hdfsMergeFiles()", getLastError());
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_EXCEPTION;
  }

  jenv_->PopLocalFrame(NULL);
  return HDFS_CLIENT_OK;
}
/*
 * linkFile()
 *
 * Creates a hard link between the given relation files.  We use
 * this function to perform a true in-place update.  If the on-disk
 * format of the new cluster is bit-for-bit compatible with the on-disk
 * format of the old cluster, we can simply link each relation
 * instead of copying the data from the old cluster to the new cluster.
 *
 * Returns NULL on success, or the OS error text on failure.
 */
const char *
linkFile(const char *src, const char *dst)
{
	return (pg_link_file(src, dst) == -1) ? getErrorText() : NULL;
}
/*
 * check_loadable_libraries()
 *
 * Check that the new cluster contains all required libraries.
 * We do this by actually trying to LOAD each one, thereby testing
 * compatibility as well as presence.
 */
void
check_loadable_libraries(void)
{
	PGconn	   *conn = connectToServer(&new_cluster, "template1");
	int			libnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for presence of required libraries");

	snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
			 os_info.cwd);

	for (libnum = 0; libnum < os_info.num_libraries; libnum++)
	{
		char	   *lib = os_info.libraries[libnum];
		int			llen = strlen(lib);

		/*
		 * Buffer size: 6 bytes for "LOAD '", up to 2*llen for the escaped
		 * name (each byte can double), 1 for the closing quote, 1 for NUL —
		 * the extra 2 bytes of the "8" are slack for the quote and NUL.
		 */
		char	   *cmd = (char *) pg_malloc(8 + 2 * llen + 1);
		PGresult   *res;

		strcpy(cmd, "LOAD '");
		PQescapeStringConn(conn, cmd + 6, lib, llen, NULL);
		strcat(cmd, "'");

		res = PQexec(conn, cmd);
		if (PQresultStatus(res) != PGRES_COMMAND_OK)
		{
			found = true;
			/* Lazily open the report file on the first failure only. */
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
					   output_path, getErrorText(errno));
			fprintf(script, "Could not load library \"%s\"\n%s\n",
					lib,
					PQerrorMessage(conn));
		}

		PQclear(res);
		pg_free(cmd);
	}

	PQfinish(conn);

	if (found)
	{
		/*
		 * found == true guarantees script is open: a failed fopen above
		 * would already have exited via pg_log(PG_FATAL).
		 */
		fclose(script);
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "Your installation references loadable libraries that are missing from the\n"
			   "new installation.  You can add these libraries to the new installation,\n"
			   "or remove the functions using them from the old installation.  A list of\n"
			   "problem libraries is in the file:\n"
			   "    %s\n\n", output_path);
	}
	else
		check_ok();
}
/*
 * Build the human-readable representation of the exception:
 * "<type>: [<file>:<line>] - <error text>", omitting the type prefix
 * and/or file location when they are empty.
 */
std::string _TheoraGenericException::repr()
{
	std::string result = getType();

	if (!(result == ""))
		result += ": ";

	if (!(mFile == ""))
		result += "[" + mFile + ":" + str(mLineNumber) + "] - ";

	return result + getErrorText();
}
/*
 * load_directory()
 *
 * Read all the file names in the specified directory, and return them as
 * an array of "char *" pointers.  The array address is returned in
 * *namelist, and the function result is the count of file names.
 *
 * To free the result data, free each (char *) array member, then free the
 * namelist array itself.
 */
int
load_directory(const char *dirname, char ***namelist)
{
	DIR		   *dirdesc;
	struct dirent *direntry;
	int			count = 0;
	int			allocsize = 64;		/* initial array size */

	*namelist = (char **) pg_malloc(allocsize * sizeof(char *));

	if ((dirdesc = opendir(dirname)) == NULL)
		pg_log(PG_FATAL, "could not open directory \"%s\": %s\n",
			   dirname, getErrorText(errno));

	/*
	 * Clear errno before each readdir() call so that, at loop exit, a
	 * nonzero errno distinguishes a read error from normal end-of-directory
	 * (readdir returns NULL in both cases).
	 */
	while (errno = 0, (direntry = readdir(dirdesc)) != NULL)
	{
		/* grow the array geometrically as needed */
		if (count >= allocsize)
		{
			allocsize *= 2;
			*namelist = (char **)
				pg_realloc(*namelist, allocsize * sizeof(char *));
		}

		(*namelist)[count++] = pg_strdup(direntry->d_name);
	}

#ifdef WIN32
	/*
	 * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
	 * released version
	 */
	if (GetLastError() == ERROR_NO_MORE_FILES)
		errno = 0;
#endif

	if (errno)
		pg_log(PG_FATAL, "could not read directory \"%s\": %s\n",
			   dirname, getErrorText(errno));

	closedir(dirdesc);

	return count;
}
/*
 * setup()
 *
 * Validate the environment and decide whether each cluster's server is
 * running.  A stale postmaster.pid in the old cluster may flip *live_check
 * to true (when --check was requested); otherwise a running server in
 * either cluster is fatal.  Also records the pg_upgrade executable's
 * directory in os_info.exec_path.
 */
static void
setup(char *argv0, bool *live_check)
{
	char		exec_path[MAXPGPATH];	/* full path to my executable */

	/*
	 * make sure the user has a clean environment, otherwise, we may confuse
	 * libpq when we connect to one (or both) of the servers.
	 */
	check_pghost_envvar();

	verify_directories();

	/* no postmasters should be running, except for a live check */
	if (pid_lock_file_exists(old_cluster.pgdata))
	{
		/*
		 * If we have a postmaster.pid file, try to start the server.  If it
		 * starts, the pid file was stale, so stop the server.  If it doesn't
		 * start, assume the server is running.  If the pid file is left over
		 * from a server crash, this also allows any committed transactions
		 * stored in the WAL to be replayed so they are not lost, because WAL
		 * files are not transfered from old to new servers.
		 */
		if (start_postmaster(&old_cluster, false))
			stop_postmaster(false);
		else
		{
			/* server is genuinely running: fatal unless we're in --check mode */
			if (!user_opts.check)
				pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
						 "Please shutdown that postmaster and try again.\n");
			else
				*live_check = true;
		}
	}

	/* same goes for the new postmaster */
	if (pid_lock_file_exists(new_cluster.pgdata))
	{
		/* the new cluster has no live-check mode: running is always fatal */
		if (start_postmaster(&new_cluster, false))
			stop_postmaster(false);
		else
			pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
					 "Please shutdown that postmaster and try again.\n");
	}

	/* get path to pg_upgrade executable */
	if (find_my_exec(argv0, exec_path) < 0)
		pg_fatal("Could not get path name to pg_upgrade: %s\n",
				 getErrorText(errno));

	/* Trim off program name and keep just path */
	*last_dir_separator(exec_path) = '\0';
	canonicalize_path(exec_path);
	os_info.exec_path = pg_strdup(exec_path);
}
/*
 * Handle an error code reported for the given server: auth failures and
 * timeouts both close the connection; every code is then logged with the
 * server's host and port.
 */
void onErrorFromServer(Server *pServer, int code)
{
	switch (code)
	{
		case SOCK_RC_AUTH_FAILURE:
		case SOCK_RC_TIMEOUT:
			/* both conditions tear the connection down the same way */
			closeConnection(pServer, FALSE);
			break;
	}

	debug("%s[%d] host [%s:%d]", getErrorText(code), code,
		  pServer->host, pServer->port);
}
/*
 * exists()
 *
 * Ask the Java side whether Hive table schName.tabName exists.
 *
 * Tri-state result: HVC_OK if the table exists, HVC_DONE if it does not,
 * HVC_ERROR_* on JNI setup, argument-conversion, or exception failures.
 */
HVC_RetCode HiveClient_JNI::exists(const char* schName, const char* tabName)
{
  QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HiveClient_JNI::exists(%s, %s) called.", schName, tabName);

  /*
   * NOTE(review): these two early returns do not PopLocalFrame(), unlike
   * every later exit — presumably no local frame exists yet when
   * initJNIEnv() fails; confirm against initJNIEnv().
   */
  if (initJNIEnv() != JOI_OK)
     return HVC_ERROR_INIT_PARAM;
  if (getInstance() == NULL)
     return HVC_ERROR_INIT_PARAM;

  /* Convert both names to Java strings; record JNI error text on failure. */
  jstring js_schName = jenv_->NewStringUTF(schName);
  if (js_schName == NULL) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HVC_ERROR_EXISTS_PARAM));
    jenv_->PopLocalFrame(NULL);
    return HVC_ERROR_EXISTS_PARAM;
  }
  jstring js_tabName = jenv_->NewStringUTF(tabName);
  if (js_tabName == NULL) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HVC_ERROR_EXISTS_PARAM));
    jenv_->PopLocalFrame(NULL);
    return HVC_ERROR_EXISTS_PARAM;
  }

  // boolean exists(java.lang.String, java.lang.String);
  tsRecentJMFromJNI = JavaMethods_[JM_EXISTS].jm_full_name;
  jboolean jresult = jenv_->CallStaticBooleanMethod(javaClass_, JavaMethods_[JM_EXISTS].methodID, js_schName, js_tabName);

  if (jenv_->ExceptionCheck())
  {
    getExceptionDetails(__FILE__, __LINE__, "HiveClient_JNI::exists()");
    jenv_->PopLocalFrame(NULL);
    return HVC_ERROR_EXISTS_EXCEPTION;
  }

  if (jresult == false) {
     jenv_->PopLocalFrame(NULL);
     return HVC_DONE; // Table does not exist
  }

  jenv_->PopLocalFrame(NULL);
  return HVC_OK;  // Table exists.
}
/*
 * linkAndUpdateFile()
 *
 * Creates a hard link between the given relation files.  (Comment fix:
 * this previously said "symbolic link", but pg_link_file() creates a hard
 * link.)  We use this function to perform a true in-place update.  If the
 * on-disk format of the new cluster is bit-for-bit compatible with the
 * on-disk format of the old cluster, we can simply link each relation
 * instead of copying the data from the old cluster to the new cluster.
 *
 * Returns NULL on success, or an error-message string on failure.
 */
const char *
linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, const char *dst)
{
	/* Page-format conversion cannot be done in place via linking. */
	if (pageConverter != NULL)
		return "Can't in-place update this cluster, page-by-page conversion is required";

	if (pg_link_file(src, dst) == -1)
		return getErrorText(errno);
	else
		return NULL;
}
/*
 * copyFile()
 *
 * Copies a relation file from src to dst.  Returns NULL on success,
 * otherwise the OS error text.
 */
const char *
copyFile(const char *src, const char *dst, bool force)
{
	bool		failed;

#ifndef WIN32
	failed = (copy_file(src, dst, force) == -1);
#else
	failed = (CopyFile(src, dst, !force) == 0);
#endif

	return failed ? getErrorText() : NULL;
}
/*
 * adjust_data_dir
 *
 * If a configuration-only directory was specified, find the real data dir
 * by quering the running server.  This has limited checking because we
 * can't check for a running server because we can't find postmaster.pid.
 */
void
adjust_data_dir(ClusterInfo *cluster)
{
	char		filename[MAXPGPATH];
	char		cmd[MAXPGPATH],
				cmd_output[MAX_STRING];
	FILE	   *fp,
			   *output;

	/* If there is no postgresql.conf, it can't be a config-only dir */
	snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig);
	if ((fp = fopen(filename, "r")) == NULL)
		return;
	fclose(fp);

	/* If PG_VERSION exists, it can't be a config-only dir */
	snprintf(filename, sizeof(filename), "%s/PG_VERSION", cluster->pgconfig);
	if ((fp = fopen(filename, "r")) != NULL)
	{
		fclose(fp);
		return;
	}

	/* Must be a configuration directory, so find the real data directory. */

	prep_status("Finding the real data directory for the %s cluster",
				CLUSTER_NAME(cluster));

	/*
	 * We don't have a data directory yet, so we can't check the PG version,
	 * so this might fail --- only works for PG 9.2+.   If this fails,
	 * pg_upgrade will fail anyway because the data files will not be found.
	 */
	snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
			 cluster->bindir, cluster->pgconfig);

	/*
	 * The || short-circuits: fgets() is only reached when popen() succeeded,
	 * and either failure ends in pg_fatal (which does not return), so the
	 * pclose() below only runs on success.
	 */
	if ((output = popen(cmd, "r")) == NULL ||
		fgets(cmd_output, sizeof(cmd_output), output) == NULL)
		pg_fatal("Could not get data directory using %s: %s\n",
				 cmd, getErrorText(errno));

	pclose(output);

	/* Remove trailing newline */
	if (strchr(cmd_output, '\n') != NULL)
		*strchr(cmd_output, '\n') = '\0';

	cluster->pgdata = pg_strdup(cmd_output);

	check_ok();
}
/*
 * linkAndUpdateFile()
 *
 * Creates a hard link between the given relation files.  We use
 * this function to perform a true in-place update.  If the on-disk
 * format of the new cluster is bit-for-bit compatible with the on-disk
 * format of the old cluster, we can simply link each relation
 * instead of copying the data from the old cluster to the new cluster.
 *
 * Returns NULL on success, or an error-message string on failure.
 */
const char *
linkAndUpdateFile(pageCnvCtx *pageConverter,
				  const char *src, const char *dst)
{
	report_progress(NULL, FILE_COPY, "Link \"%s\" to \"%s\"", src, dst);

	/* linking cannot coexist with page-format conversion */
	if (pageConverter != NULL)
		return "Cannot in-place update this cluster, page-by-page conversion is required";

	return (pg_link_file(src, dst) == -1) ? getErrorText() : NULL;
}
/*
 * Write `size` bytes from `data` to the FSUIPC offset, (re)opening the
 * link on demand.  On failure the error is logged and the link is closed
 * so the next call retries from scratch.  Returns the FSUIPC result.
 */
bool FSUIPC::write(DWORD offset, DWORD size, void* data)
{
	if (!m_link_ok)
		m_link_ok = openLink();
	if (!m_link_ok)
		return false;

	DWORD error = 0;
	bool succeeded = FSUIPC_Write(offset, size, data, &error);

	if (!succeeded)
	{
		Logger::log(QString("FSUIPC:write: Error: %1").arg(getErrorText(error)));
		fflush(stdout);
		closeLink();
	}

	return succeeded;
}
/*
 * load_directory()
 *
 * Returns count of files that meet the selection criteria coded in
 * the function pointed to by selector.  Creates an array of pointers
 * to dirent structures.  Address of array returned in namelist.
 *
 * Note that the number of dirent structures needed is dynamically
 * allocated using realloc.  Realloc can be inefficient if invoked a
 * large number of times.
 *
 * Returns -1 (with *namelist set to NULL) on allocation failure.
 *
 * Bug fixes:
 *  - the realloc() result was assigned straight to *namelist, leaking the
 *    old array on failure; we now use a temporary.
 *  - on any failure path, all previously allocated entries and the array
 *    itself were leaked; they are now freed.
 */
int
load_directory(const char *dirname, struct dirent ***namelist)
{
	DIR		   *dirdesc;
	struct dirent *direntry;
	int			count = 0;
	int			name_num = 0;
	size_t		entrysize;

	if ((dirdesc = opendir(dirname)) == NULL)
		pg_log(PG_FATAL, "could not open directory \"%s\": %s\n",
			   dirname, getErrorText(errno));

	*namelist = NULL;

	while ((direntry = readdir(dirdesc)) != NULL)
	{
		struct dirent **newlist;

		count++;

		newlist = (struct dirent **) realloc((void *) (*namelist),
						(size_t) ((name_num + 1) * sizeof(struct dirent *)));
		if (newlist == NULL)
		{
			/* free everything collected so far instead of leaking it */
			while (--name_num >= 0)
				free((*namelist)[name_num]);
			free(*namelist);
			*namelist = NULL;
			closedir(dirdesc);
			return -1;
		}
		*namelist = newlist;

		/* allocate only as much as this entry's name actually needs */
		entrysize = sizeof(struct dirent) - sizeof(direntry->d_name) +
			strlen(direntry->d_name) + 1;

		(*namelist)[name_num] = (struct dirent *) malloc(entrysize);
		if ((*namelist)[name_num] == NULL)
		{
			while (--name_num >= 0)
				free((*namelist)[name_num]);
			free(*namelist);
			*namelist = NULL;
			closedir(dirdesc);
			return -1;
		}

		memcpy((*namelist)[name_num], direntry, entrysize);
		name_num++;
	}

	closedir(dirdesc);
	return count;
}
/*
 * hdfsWrite()
 *
 * Write `len` bytes from `data` to the currently open HDFS file via the
 * Java JM_HDFS_WRITE method.
 *
 * The Java side returns the CUMULATIVE byte count for this writer, so the
 * per-call result is computed as the delta against totalBytesWritten_
 * (which is updated here).  Returns the bytes written by this call, or 0
 * with hdfsClientRetcode set to an error code on failure.
 */
Int32 HdfsClient::hdfsWrite(const char* data, Int64 len, HDFS_Client_RetCode &hdfsClientRetcode)
{
  QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsWrite(%ld) called.", len);

  if (initJNIEnv() != JOI_OK) {
     hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
     return 0;
  }

  //Write the requisite bytes into the file
  jbyteArray jbArray = jenv_->NewByteArray( len);
  if (!jbArray) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM));
    jenv_->PopLocalFrame(NULL);
    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM;
    return 0;
  }
  jenv_->SetByteArrayRegion(jbArray, 0, len, (const jbyte*)data);

  /* Time the JNI call for the optional HDFS statistics collector. */
  if (hdfsStats_ != NULL)
     hdfsStats_->getHdfsTimer().start();

  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_WRITE].jm_full_name;
  // Java method returns the cumulative bytes written
  jint totalBytesWritten = jenv_->CallIntMethod(javaObj_, JavaMethods_[JM_HDFS_WRITE].methodID, jbArray);

  if (hdfsStats_ != NULL) {
      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
      hdfsStats_->incHdfsCalls();
  }

  if (jenv_->ExceptionCheck())
  {
    getExceptionDetails();
    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
    logError(CAT_SQL_HDFS, "HdfsClient::hdfsWrite()", getLastError());
    jenv_->PopLocalFrame(NULL);
    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
    return 0;
  }

  jenv_->PopLocalFrame(NULL);
  hdfsClientRetcode = HDFS_CLIENT_OK;

  /* Convert the cumulative count into this call's delta. */
  Int32 bytesWritten = totalBytesWritten - totalBytesWritten_;
  totalBytesWritten_ = totalBytesWritten;
  return bytesWritten;
}
/*
 * hdfsCreate()
 *
 * Create (or overwrite) an HDFS file at `path` via the Java JM_HDFS_CREATE
 * method, optionally with compression.
 *
 * Returns HDFS_CLIENT_OK on success, ..._CREATE_PARAM on JNI setup /
 * argument failure or when the Java call reports failure, and
 * ..._CREATE_EXCEPTION when the Java call throws.
 */
HDFS_Client_RetCode HdfsClient::hdfsCreate(const char* path, NABoolean overwrite, NABoolean compress)
{
  QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsCreate(%s) called.", path);

  if (initJNIEnv() != JOI_OK)
     return HDFS_CLIENT_ERROR_HDFS_CREATE_PARAM;

  /* remember the path for later operations on this client */
  setPath(path);

  jstring js_path = jenv_->NewStringUTF(path);
  if (js_path == NULL) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_CREATE_PARAM));
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_CREATE_PARAM;
  }

  jboolean j_compress = compress;
  jboolean j_overwrite = overwrite;

  /* Time the JNI call for the optional HDFS statistics collector. */
  if (hdfsStats_ != NULL)
     hdfsStats_->getHdfsTimer().start();

  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_CREATE].jm_full_name;
  jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_CREATE].methodID, js_path, j_overwrite, j_compress);

  if (hdfsStats_ != NULL) {
      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
      hdfsStats_->incHdfsCalls();
  }

  if (jenv_->ExceptionCheck())
  {
    getExceptionDetails();
    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
    logError(CAT_SQL_HDFS, "HdfsClient::hdfsCreate()", getLastError());
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_CREATE_EXCEPTION;
  }

  /* Java returned false: creation failed without throwing. */
  if (jresult == false)
  {
    logError(CAT_SQL_HDFS, "HdfsClient::hdfsCreate()", getLastError());
    jenv_->PopLocalFrame(NULL);
    return HDFS_CLIENT_ERROR_HDFS_CREATE_PARAM;
  }

  jenv_->PopLocalFrame(NULL);
  return HDFS_CLIENT_OK;
}
/*
 * validate_exec()
 *
 * validate "path" as an executable file
 *
 * Builds "dir/cmdName" (appending EXE_EXT on Windows when missing) and
 * exits via pg_log(PG_FATAL) unless it is a regular file that is both
 * readable and executable.
 */
static void
validate_exec(const char *dir, const char *cmdName)
{
	char		path[MAXPGPATH];
	struct stat buf;

	snprintf(path, sizeof(path), "%s/%s", dir, cmdName);

#ifdef WIN32
	/* Windows requires a .exe suffix for stat() */
	if (strlen(path) <= strlen(EXE_EXT) ||
		pg_strcasecmp(path + strlen(path) - strlen(EXE_EXT), EXE_EXT) != 0)
		strlcat(path, EXE_EXT, sizeof(path));
#endif

	/*
	 * Ensure that the file exists and is a regular file.
	 */
	if (stat(path, &buf) < 0)
		pg_log(PG_FATAL, "check for %s failed - %s\n",
			   cmdName, getErrorText(errno));

	if (!S_ISREG(buf.st_mode))
		pg_log(PG_FATAL, "check for %s failed - not an executable file\n",
			   cmdName);

	/*
	 * Ensure that the file is both executable and readable (required for
	 * dynamic loading).  On Windows, access() cannot check execute
	 * permission, so fall back to the stat() mode bits.
	 */
#ifndef WIN32
	if (access(path, R_OK) != 0)
#else
	if ((buf.st_mode & S_IRUSR) == 0)
#endif
		pg_log(PG_FATAL, "check for %s failed - cannot read file (permission denied)\n",
			   cmdName);

#ifndef WIN32
	if (access(path, X_OK) != 0)
#else
	if ((buf.st_mode & S_IXUSR) == 0)
#endif
		pg_log(PG_FATAL, "check for %s failed - cannot execute (permission denied)\n",
			   cmdName);
}
/*
 * check_hard_link()
 *
 * Verify that the old and new data directories are on the same file system
 * by creating (and immediately removing) a test hard link from the old
 * cluster's PG_VERSION into the new cluster.  Fatal error if linking fails.
 */
void
check_hard_link(void)
{
	char		existing_file[MAXPGPATH];
	char		new_link_file[MAXPGPATH];

	snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION",
			 old_cluster.pgdata);
	snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest",
			 new_cluster.pgdata);

	unlink(new_link_file);		/* might fail */

	if (pg_link_file(existing_file, new_link_file) == -1)
		pg_fatal("Could not create hard link between old and new data directories: %s\n"
				 "In link mode the old and new data directories must be on the same file system volume.\n",
				 getErrorText());

	unlink(new_link_file);
}
/*
 * check_hard_link()
 *
 * Verify that the old and new data directories are on the same file system
 * by creating (and immediately removing) a test hard link from the old
 * cluster's PG_VERSION into the new cluster.  Fatal error if linking fails.
 */
void
check_hard_link(migratorContext *ctx)
{
	char		existing_file[MAXPGPATH];
	char		new_link_file[MAXPGPATH];

	snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION",
			 ctx->old.pgdata);
	snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest",
			 ctx->new.pgdata);

	unlink(new_link_file);		/* might fail */

	if (pg_link_file(existing_file, new_link_file) == -1)
		pg_log(ctx, PG_FATAL,
			   "Could not create hard link between old and new data directories: %s\n"
			   "In link mode the old and new data directories must be on the same file system volume.\n",
			   getErrorText(errno));

	unlink(new_link_file);
}
/*
 * pid_lock_file_exists()
 *
 * Checks whether the postmaster.pid file exists.
 */
bool
pid_lock_file_exists(const char *datadir)
{
	char		path[MAXPGPATH];
	int			fd;

	snprintf(path, sizeof(path), "%s/postmaster.pid", datadir);

	fd = open(path, O_RDONLY, 0);
	if (fd >= 0)
	{
		close(fd);
		return true;
	}

	/* ENOTDIR means we will throw a more useful error later */
	if (errno != ENOENT && errno != ENOTDIR)
		pg_log(PG_FATAL, "could not open file \"%s\" for reading: %s\n",
			   path, getErrorText(errno));

	return false;
}
/*
 * hdfsRead()
 *
 * Read up to `len` bytes from the open HDFS file directly into `data` via
 * the Java JM_HDFS_READ method (zero-copy through a direct ByteBuffer).
 * Returns the number of bytes read; on failure returns 0 with
 * hdfsClientRetcode set to an error code.
 *
 * Bug fixes:
 *  - the debug log message said "hdfsWrite"; corrected to "hdfsRead".
 *  - on NewDirectByteBuffer failure, the error enum was returned as the
 *    byte count and hdfsClientRetcode was never set; now follows the same
 *    convention as hdfsWrite (set retcode, return 0).
 */
Int32 HdfsClient::hdfsRead(const char* data, Int64 len, HDFS_Client_RetCode &hdfsClientRetcode)
{
  QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsRead(%ld) called.", len);

  if (initJNIEnv() != JOI_OK) {
     hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_READ_EXCEPTION;
     return 0;
  }

  /* Wrap the caller's buffer so Java can fill it without a copy. */
  jobject j_buf = jenv_->NewDirectByteBuffer((BYTE *)data, len);
  if (j_buf == NULL) {
    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_READ_PARAM));
    jenv_->PopLocalFrame(NULL);
    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_READ_PARAM;
    return 0;
  }

  /* Time the JNI call for the optional HDFS statistics collector. */
  if (hdfsStats_ != NULL)
     hdfsStats_->getHdfsTimer().start();

  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_READ].jm_full_name;
  jint bytesRead = 0;
  bytesRead = jenv_->CallIntMethod(javaObj_, JavaMethods_[JM_HDFS_READ].methodID, j_buf);

  if (hdfsStats_ != NULL) {
      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
      hdfsStats_->incHdfsCalls();
  }

  if (jenv_->ExceptionCheck())
  {
    getExceptionDetails();
    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
    logError(CAT_SQL_HDFS, "HdfsClient::hdfsRead()", getLastError());
    jenv_->PopLocalFrame(NULL);
    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_READ_EXCEPTION;
    return 0;
  }

  jenv_->PopLocalFrame(NULL);
  hdfsClientRetcode = HDFS_CLIENT_OK;
  return bytesRead;
}
/*
 * check_bin_dir()
 *
 * This function searches for the executables that we expect to find
 * in the binaries directory.  If we find that a required executable
 * is missing (or secured against us), we display an error message and
 * exit().
 */
static void
check_bin_dir(ClusterInfo *cluster)
{
	struct stat statBuf;

	/* check bindir */
	if (stat(cluster->bindir, &statBuf) != 0)
		report_status(PG_FATAL, "check for \"%s\" failed: %s\n",
					  cluster->bindir, getErrorText(errno));
	if (!S_ISDIR(statBuf.st_mode))
		report_status(PG_FATAL, "%s is not a directory\n",
					  cluster->bindir);

	/* required in both clusters */
	validate_exec(cluster->bindir, "postgres");
	validate_exec(cluster->bindir, "pg_ctl");
	validate_exec(cluster->bindir, "pg_resetxlog");

	/* these are only needed in the new cluster */
	if (cluster == &new_cluster)
	{
		validate_exec(cluster->bindir, "psql");
		validate_exec(cluster->bindir, "pg_dumpall");
	}
}