/**
 * @brief Initialize a BufferedWriter.
 *
 * Locks the target relation with AccessExclusiveLock, validates it as a
 * load target, and sets up the spooler, bulk-insert state, command ID and
 * tuple checker used during the buffered load.
 *
 * Ordering matters here: the relation must be opened and verified before
 * the tuple descriptor is read, and the spooler must be opened before its
 * estate's per-tuple memory context can be borrowed as base.context.
 *
 * On error this function does not return; backend error handling
 * (ereport/elog inside the callees) aborts the transaction, which releases
 * the lock and spooler resources.
 *
 * @param self  Writer instance to initialize; self->base.relid must be set.
 */
static void
BufferedWriterInit(BufferedWriter *self)
{
	/*
	 * Set defaults to unspecified parameters.
	 * Values below -1 mean "unspecified"; -1 itself is presumably the
	 * "unlimited duplicate errors" sentinel — confirm against option parsing.
	 */
	if (self->base.max_dup_errors < -1)
		self->base.max_dup_errors = DEFAULT_MAX_DUP_ERRORS;

	/* Exclusive lock: no concurrent readers/writers during the bulk load. */
	self->base.rel = heap_open(self->base.relid, AccessExclusiveLock);
	VerifyTarget(self->base.rel, self->base.max_dup_errors);

	self->base.desc = RelationGetDescr(self->base.rel);

	/*
	 * NOTE(review): the third argument differs from DirectWriterInit (true
	 * here, false there) — likely whether the spooler uses the buffered
	 * heap-insert path; confirm against SpoolerOpen's signature.
	 */
	SpoolerOpen(&self->spooler, self->base.rel, true, self->base.on_duplicate,
				self->base.max_dup_errors, self->base.dup_badfile);

	/* Reuse the spooler executor state's per-tuple context for our work. */
	self->base.context = GetPerTupleMemoryContext(self->spooler.estate);

	/* Bulk-insert state and a command ID that sees our own prior changes. */
	self->bistate = GetBulkInsertState();
	self->cid = GetCurrentCommandId(true);

	/* Tuples are validated/coerced against the target descriptor. */
	self->base.tchecker = CreateTupleChecker(self->base.desc);
	self->base.tchecker->checker = (CheckerTupleProc) CoercionCheckerTuple;
}
/**
 * @brief Initialize a ParallelWriter.
 *
 * Sets up the front-end side of a parallel load: resolves the target tuple
 * descriptor (either from the relation itself or from the wrapped writer),
 * creates the tuple-transfer queue, connects back to this server over
 * libpq, starts a transaction on that connection, and asks the wrapped
 * writer to launch the actual load query in the worker backend.
 *
 * The relation is deliberately opened with NoLock: the real lock is taken
 * by the parallel writer (worker) process, and taking one here as well
 * could deadlock against it.
 *
 * On failure this function does not return: ereport(ERROR) aborts the
 * transaction, and finish_and_get_message() tears down the libpq
 * connection while extracting the error text.
 *
 * @param self  Writer instance to initialize; truncate must be false
 *              (asserted — truncation is unsupported in parallel mode).
 */
static void
ParallelWriterInit(ParallelWriter *self)
{
	unsigned	queryKey;
	char		queueName[MAXPGPATH];
	PGresult   *res;

	Assert(self->base.truncate == false);

	if (self->base.relid != InvalidOid)
	{
		TupleDesc	resultDesc;

		/* open relation without any relation locks */
		self->base.rel = heap_open(self->base.relid, NoLock);
		self->base.desc = RelationGetDescr(self->base.rel);

		self->base.tchecker = CreateTupleChecker(self->base.desc);
		self->base.tchecker->checker = (CheckerTupleProc) CoercionCheckerTuple;

		/*
		 * If the return value of the filter function or input function is a
		 * target table, lookup_rowtype_tupdesc grab AccessShareLock on the
		 * table in the first call. We call lookup_rowtype_tupdesc here to
		 * avoid deadlock when lookup_rowtype_tupdesc is called by the internal
		 * routine of the filter function or input function, because a parallel
		 * writer process holds an AccessExclusiveLock.
		 */
		resultDesc = lookup_rowtype_tupdesc(self->base.desc->tdtypeid, -1);
		ReleaseTupleDesc(resultDesc);
	}
	else
	{
		/* No target relid: delegate descriptor/checker to the inner writer. */
		self->writer->init(self->writer);
		self->base.desc = self->writer->desc;
		self->base.tchecker = self->writer->tchecker;
	}

	/* Private memory context for this writer's allocations. */
	self->base.context = AllocSetContextCreate(
							CurrentMemoryContext,
							"ParallelWriter",
							ALLOCSET_DEFAULT_MINSIZE,
							ALLOCSET_DEFAULT_INITSIZE,
							ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * Create the tuple queue; queryKey identifies it and is encoded into
	 * the ":<key>" name passed to the worker so it can attach.
	 */
	self->queue = QueueCreate(&queryKey, DEFAULT_BUFFER_SIZE);
	snprintf(queueName, lengthof(queueName), ":%u", queryKey);

	/* connect to localhost */
	self->conn = connect_to_localhost();

	/* start transaction */
	res = PQexec(self->conn, "BEGIN");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		/*
		 * NOTE(review): res is not PQclear'd on this path; ereport longjmps
		 * and finish_and_get_message closes the connection, so the PGresult
		 * leaks process memory until exit — consider clearing it first.
		 */
		ereport(ERROR,
			(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
			 errmsg("could not start transaction"),
			 errdetail("%s", finish_and_get_message(self))));
	}
	PQclear(res);

	/* Inherit the duplicate-badfile path unless the inner writer set one. */
	if (!self->writer->dup_badfile)
		self->writer->dup_badfile = self->base.dup_badfile;

	/* Launch the load query in the worker; 1 is the success return code. */
	if (1 != self->writer->sendQuery(self->writer, self->conn, queueName,
									 self->base.logfile, self->base.verbose))
	{
		ereport(ERROR,
			(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
			 errmsg("could not send query"),
			 errdetail("%s", finish_and_get_message(self))));
	}
}
/**
 * @brief Initialize a DirectWriter.
 *
 * Prepares a direct heap-block load: locks and verifies the target
 * relation, opens the spooler, initializes the first data page, obtains
 * the XID/CID the loaded tuples will carry, and creates the load status
 * file (LSF) used for crash recovery.
 *
 * The LSF is created with O_CREAT|O_EXCL so that a leftover file from a
 * previously failed load makes this open fail — forcing the recovery
 * process to run before the same table can be loaded again.
 *
 * On error this function does not return (ereport aborts the transaction);
 * the write/fsync failure path unlinks the just-created LSF first so it is
 * not mistaken for a crashed load.
 *
 * @param self  Writer instance to initialize; self->base.relid must be set.
 */
static void
DirectWriterInit(DirectWriter *self)
{
	LoadStatus *ls;

	/*
	 * Set defaults to unspecified parameters.
	 * Values below -1 mean "unspecified"; -1 itself is presumably the
	 * "unlimited duplicate errors" sentinel — confirm against option parsing.
	 */
	if (self->base.max_dup_errors < -1)
		self->base.max_dup_errors = DEFAULT_MAX_DUP_ERRORS;

	/* Exclusive lock: direct block writes cannot tolerate concurrency. */
	self->base.rel = heap_open(self->base.relid, AccessExclusiveLock);
	VerifyTarget(self->base.rel, self->base.max_dup_errors);

	self->base.desc = RelationGetDescr(self->base.rel);

	SpoolerOpen(&self->spooler, self->base.rel, false, self->base.on_duplicate,
				self->base.max_dup_errors, self->base.dup_badfile);
	self->base.context = GetPerTupleMemoryContext(self->spooler.estate);

	/* Verify DataDir/pg_bulkload directory */
	ValidateLSFDirectory(BULKLOAD_LSF_DIR);

	/* Initialize first block */
	PageInit(GetCurrentPage(self), BLCKSZ, 0);
	PageSetTLI(GetCurrentPage(self), ThisTimeLineID);

	/* Obtain transaction ID and command ID. */
	self->xid = GetCurrentTransactionId();
	self->cid = GetCurrentCommandId(true);

	/*
	 * Initialize load status information: target relation, its relfilenode,
	 * and the pre-load block count (blocks at/after exist_cnt belong to
	 * this load and can be discarded on recovery).
	 */
	ls = &self->ls;
	ls->ls.relid = self->base.relid;
	ls->ls.rnode = self->base.rel->rd_node;
	ls->ls.exist_cnt = RelationGetNumberOfBlocks(self->base.rel);
	ls->ls.create_cnt = 0;

	/*
	 * Create a load status file and write the initial status to it.
	 * If an existing load status file is found (O_EXCL fails), exit with
	 * an error because the recovery process has not been executed after a
	 * failed load into the same table.
	 */
	BULKLOAD_LSF_PATH(self->lsf_path, ls);
	self->lsf_fd = BasicOpenFile(self->lsf_path,
						 O_CREAT | O_EXCL | O_RDWR | PG_BINARY,
						 S_IRUSR | S_IWUSR);
	if (self->lsf_fd == -1)
		ereport(ERROR, (errcode_for_file_access(),
			errmsg("could not create loadstatus file \"%s\": %m", self->lsf_path)));

	/*
	 * The status must be durably on disk before any data blocks are
	 * written, hence the immediate fsync; on failure remove the LSF so a
	 * later load is not blocked by a bogus recovery marker.
	 */
	if (write(self->lsf_fd, ls, sizeof(LoadStatus)) != sizeof(LoadStatus) ||
		pg_fsync(self->lsf_fd) != 0)
	{
		UnlinkLSF(self);
		ereport(ERROR, (errcode_for_file_access(),
			errmsg("could not write loadstatus file \"%s\": %m", self->lsf_path)));
	}

	self->base.tchecker = CreateTupleChecker(self->base.desc);
	self->base.tchecker->checker = (CheckerTupleProc) CoercionCheckerTuple;
}