Example #1
static void
writetup_heap(Tuplestorestate *state, void *tup)
{
	MinimalTuple tuple = (MinimalTuple) tup;

	/* the part of the MinimalTuple we'll write: */
	char	   *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
	unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;

	/* total on-disk footprint: */
	unsigned int tuplen = tupbodylen + sizeof(int);

	if (BufFileWrite(state->myfile, (void *) &tuplen,
					 sizeof(tuplen)) != sizeof(tuplen))
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not write to tuplestore temporary file: %m")));
	if (BufFileWrite(state->myfile, (void *) tupbody,
					 tupbodylen) != (size_t) tupbodylen)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not write to tuplestore temporary file: %m")));
	if (state->backward)		/* need trailing length word? */
		if (BufFileWrite(state->myfile, (void *) &tuplen,
						 sizeof(tuplen)) != sizeof(tuplen))
			ereport(ERROR,
					(errcode_for_file_access(),
				errmsg("could not write to tuplestore temporary file: %m")));

	FREEMEM(state, GetMemoryChunkSpace(tuple));
	heap_free_minimal_tuple(tuple);
}
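
For context, here is a minimal sketch of the matching read path, modeled on PostgreSQL's readtup_heap(). It assumes the caller has already consumed the leading length word and passes it in as len; exact error handling and message wording vary by version.

static void *
readtup_heap(Tuplestorestate *state, unsigned int len)
{
	unsigned int tupbodylen = len - sizeof(int);
	unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
	MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
	char	   *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;

	USEMEM(state, GetMemoryChunkSpace(tuple));

	/* read in the tuple body and reconstruct the suppressed header prefix */
	tuple->t_len = tuplen;
	if (BufFileRead(state->myfile, (void *) tupbody,
					tupbodylen) != (size_t) tupbodylen)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not read from tuplestore temporary file: %m")));
	if (state->backward)		/* need trailing length word? */
		if (BufFileRead(state->myfile, (void *) &tuplen,
						sizeof(tuplen)) != sizeof(tuplen))
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not read from tuplestore temporary file: %m")));
	return (void *) tuple;
}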
Example #2
static void
writetup_heap(Tuplestorestate *state, TuplestorePos *pos, void *tup)
{
	uint32		tuplen = 0;
	Size		memsize = 0;

	if(is_heaptuple_memtuple((HeapTuple) tup))
		tuplen = memtuple_get_size((MemTuple) tup, NULL);
	else
	{
		Assert(!is_heaptuple_splitter((HeapTuple) tup));
		tuplen = heaptuple_get_size((HeapTuple) tup);
	}

	if (BufFileWrite(state->myfile, (void *) tup, tuplen) != (size_t) tuplen)
		elog(ERROR, "write failed");
	if (state->eflags & EXEC_FLAG_BACKWARD)		/* need trailing length word? */
		if (BufFileWrite(state->myfile, (void *) &tuplen,
						 sizeof(tuplen)) != sizeof(tuplen))
			elog(ERROR, "write failed");

	memsize = GetMemoryChunkSpace(tup);

	state->spilledBytes += memsize;
	FREEMEM(state, memsize);

	pfree(tup);
}
Example #3
/*
 * Write a block-sized buffer to the specified block of the underlying file.
 *
 * NB: should not attempt to write beyond current end of file (ie, create
 * "holes" in file), since BufFile doesn't allow that.  The first write pass
 * must write blocks sequentially.
 *
 * No need for an error return convention; we ereport() on any error.
 */
static void
ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
	if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
		BufFileWrite(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not write block %ld of temporary file: %m",
						blocknum)));
}
Example #4
/*
 * Write a block-sized buffer to the specified block of the underlying file.
 *
 * NB: should not attempt to write beyond current end of file (ie, create
 * "holes" in file), since BufFile doesn't allow that.  The first write pass
 * must write blocks sequentially.
 *
 * No need for an error return convention; we ereport() on any error.
 */
static void
ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
	if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
		BufFileWrite(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
		ereport(ERROR,
				/* XXX is it okay to assume errno is correct? */
				(errcode_for_file_access(),
				 errmsg("could not write block %ld of temporary file: %m",
						blocknum),
				 errhint("Perhaps out of disk space?")));
}
Example #5
/*
 * Write a block-sized buffer to the specified block of the underlying file.
 *
 * No need for an error return convention; we ereport() on any error.
 */
static void
ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
	/*
	 * BufFile does not support "holes", so if we're about to write a block
	 * that's past the current end of file, fill the space between the current
	 * end of file and the target block with zeros.
	 *
	 * This should happen rarely, otherwise you are not writing very
	 * sequentially.  In current use, this only happens when the sort ends
	 * writing a run, and switches to another tape.  The last block of the
	 * previous tape isn't flushed to disk until the end of the sort, so you
	 * get one-block hole, where the last block of the previous tape will
	 * later go.
	 *
	 * Note that BufFile concatenation can leave "holes" in BufFile between
	 * worker-owned block ranges.  These are tracked for reporting purposes
	 * only.  We never read from nor write to these hole blocks, and so they
	 * are not considered here.
	 */
	while (blocknum > lts->nBlocksWritten)
	{
		PGAlignedBlock zerobuf;

		MemSet(zerobuf.data, 0, sizeof(zerobuf));

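		/*
		 * Recurse to append a zero-filled block at the current end of file;
		 * that call takes the blocknum == nBlocksWritten path below and
		 * advances nBlocksWritten, so this loop terminates.
		 */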
		ltsWriteBlock(lts, lts->nBlocksWritten, zerobuf.data);
	}

	/* Write the requested block */
	if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
		BufFileWrite(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not write block %ld of temporary file: %m",
						blocknum)));

	/* Update nBlocksWritten, if we extended the file */
	if (blocknum == lts->nBlocksWritten)
		lts->nBlocksWritten++;
}
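
For comparison, a minimal sketch of the corresponding read path, modeled on PostgreSQL's ltsReadBlock(); message wording and surrounding checks vary by version.

static void
ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
	if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
		BufFileRead(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not read block %ld of temporary file: %m",
						blocknum)));
}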
Example #6
/*
 * ExecWorkFile_Write
 *    write the given data from the end of the last write position.
 *
 * This function returns true if the write succeeds. Otherwise, return false.
 */
bool
ExecWorkFile_Write(ExecWorkFile *workfile,
				   void *data,
				   uint64 size)
{
	uint64		bytes;

	Assert(workfile != NULL);

	if (data == NULL || size == 0)
	{
		return false;
	}

	/* Test the per-query and per-segment limit */
	if ((workfile->flags & EXEC_WORKFILE_LIMIT_SIZE) &&
			!WorkfileDiskspace_Reserve(size))
	{
		/* Failed to reserve additional disk space, notify caller */
		workfile_mgr_report_error();
	}

	switch(workfile->fileType)
	{
		case BUFFILE:
		{
			BufFile *buffile = (BufFile *)workfile->file;

			int64 current_size = BufFileGetSize(buffile);
			int64 new_size = 0;

			PG_TRY();
			{
				bytes = BufFileWrite(buffile, data, size);
			}
			PG_CATCH();
			{
				new_size = BufFileGetSize(buffile);
				workfile->size = new_size;
				WorkfileDiskspace_Commit( (new_size - current_size), size, true /* update_query_size */);

				int64 size_evicted = workfile_mgr_evict(MIN_EVICT_SIZE);
				elog(gp_workfile_caching_loglevel, "Hit out of disk space, evicted " INT64_FORMAT " bytes", size_evicted);

				PG_RE_THROW();
			}
			PG_END_TRY();

			new_size = BufFileGetSize(buffile);
			workfile->size = new_size;

			WorkfileDiskspace_Commit( (new_size - current_size), size, true /* update_query_size */);
			workfile_update_in_progress_size(workfile, new_size - current_size);

			if (bytes != size)
			{
				workfile_mgr_report_error();
			}

			break;
		}
		case BFZ:

			PG_TRY();
			{
				bfz_append((bfz_t *)workfile->file, data, size);
			}
			PG_CATCH();
			{
				Assert(WorkfileDiskspace_IsFull());
				WorkfileDiskspace_Commit(0, size, true /* update_query_size */);

				int64 size_evicted = workfile_mgr_evict(MIN_EVICT_SIZE);
				elog(gp_workfile_caching_loglevel, "Hit out of disk space, evicted " INT64_FORMAT " bytes", size_evicted);

				PG_RE_THROW();
			}
			PG_END_TRY();

			/* bfz_append always adds to the file size */
			workfile->size += size;
			if ((workfile->flags & EXEC_WORKFILE_LIMIT_SIZE))
			{
				WorkfileDiskspace_Commit(size, size, true /* update_query_size */);
			}
			workfile_update_in_progress_size(workfile, size);

			break;
		default:
			insist_log(false, "invalid work file type: %d", workfile->fileType);
	}

	return true;
}
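
For illustration, a hypothetical caller might layer a simple length-word framing on top of ExecWorkFile_Write(). spill_tuple and this framing are assumptions for the sketch, not part of the Greenplum API; a false return (or the error raised through workfile_mgr_report_error()) is treated as failure.

/* Hypothetical helper: write a length word followed by the tuple body. */
static void
spill_tuple(ExecWorkFile *workfile, void *tup, uint32 tuplen)
{
	if (!ExecWorkFile_Write(workfile, (void *) &tuplen, sizeof(tuplen)) ||
		!ExecWorkFile_Write(workfile, tup, tuplen))
		elog(ERROR, "could not write tuple to workfile");
}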
Example #7
void
dumpSharedComboCommandId(TransactionId xmin, CommandId cmin, CommandId cmax, CommandId combocid)
{
	/*
	 * In any given segment, there are many readers, but only one writer. The
	 * combo cid file information is stored in the MyProc of the writer process,
	 * and is referenced by reader processes via lockHolderProcPtr.  The writer
	 * will set up and/or dump combocids to a combo cid file when appropriate.
	 * The writer keeps track of the number of entries in the combo cid file in
	 * MyProc->combocid_map_count. Readers reference the count via
	 * lockHolderProcPtr->combocid_map_count.
	 *
	 * Since combo cid file entries are always appended to the end of a combo
	 * cid file and because there is only one writer, it is not necessary to
	 * lock the combo cid file during reading or writing. A new combo cid will
	 * not become visible to the reader until the combocid_map_count variable
	 * has been incremented.
	 */

	ComboCidEntryData entry;

	Assert(Gp_role != GP_ROLE_EXECUTE || Gp_is_writer);

	if (combocid_map == NULL)
	{
		/* This is the first time a combo cid is to be written by this writer. */
		MemoryContext oldCtx;
		char			path[MAXPGPATH];

		MyProc->combocid_map_count = 0;

		ComboCidMapName(path, gp_session_id, MyProc->pid);

		/* open our file, as appropriate: this will throw an error if the create fails. */
		oldCtx = MemoryContextSwitchTo(TopMemoryContext);

		/*
		 * XXX: We could probably close and delete the file at the end of
		 * transaction.  We would then need to keep combocid_map_count
		 * synchronized with open files at (sub-) xact boundaries.
		 */
		combocid_map = BufFileCreateTemp_ReaderWriter(path, true, true);
		MemoryContextSwitchTo(oldCtx);
	}
	Assert(combocid_map != NULL);

	/* Seek to the end: BufFileSeek() doesn't support SEEK_END! */

	/* build our entry */
	memset(&entry, 0, sizeof(entry));
	entry.key.cmin = cmin;
	entry.key.cmax = cmax;
	entry.key.xmin = xmin;
	entry.combocid = combocid;

	/* write our entry */
	if (BufFileWrite(combocid_map, &entry, sizeof(entry)) != sizeof(entry))
	{
		elog(ERROR, "Combocid map I/O error!");
	}

	/* flush our output */
	BufFileFlush(combocid_map);

	/* Increment combocid count to make new combocid visible to Readers */
	MyProc->combocid_map_count += 1;
}
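
For illustration, the reader side can be sketched as a sequential scan over the entries, bounded by the count published through lockHolderProcPtr->combocid_map_count. The function below is hypothetical, not the actual Greenplum reader; it assumes an already-open BufFile for the same map file and PostgreSQL's BufFileSeek(file, fileno, offset, whence) signature.

/*
 * Hypothetical reader-side lookup, for illustration only: rewind the map
 * file and scan 'count' entries for a matching (xmin, cmin, cmax) key.
 */
static CommandId
lookupSharedComboCommandId(BufFile *map, int count,
						   TransactionId xmin, CommandId cmin, CommandId cmax)
{
	ComboCidEntryData entry;
	int			i;

	/* rewind to the start of the map file */
	if (BufFileSeek(map, 0 /* fileno */, 0L /* offset */, SEEK_SET) != 0)
		elog(ERROR, "could not seek in combo cid map file");

	for (i = 0; i < count; i++)
	{
		if (BufFileRead(map, &entry, sizeof(entry)) != sizeof(entry))
			elog(ERROR, "could not read combo cid map entry");

		if (entry.key.xmin == xmin &&
			entry.key.cmin == cmin &&
			entry.key.cmax == cmax)
			return entry.combocid;
	}

	elog(ERROR, "combo cid entry not found in map file");
	return InvalidCommandId;	/* keep compiler quiet */
}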