Example #1
/** Constructor.
 * @param plugin name of the plugin that caused the exception
 * @param message message describing the exception
 * @param e exception to copy further messages from
 */
PluginLoadException::PluginLoadException(const char *plugin, const char *message,
					 Exception &e)
  : Exception(), __plugin_name(plugin)
{
  append("Plugin '%s' could not be loaded: %s", plugin, message);
  copy_messages(e);
}
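A hypothetical usage sketch of this constructor, wrapping a lower-level exception while loading a plugin (the plugin name and the open_plugin_module() helper are made up for illustration):

// Hypothetical usage: wrap a lower-level failure in a PluginLoadException.
try {
  open_plugin_module("my_plugin");   // assumed to throw an Exception on failure
} catch (Exception &e) {
  throw PluginLoadException("my_plugin", "cannot open module file", e);
}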
Example #2
/** Assign an Exception.
 * As this is one of the Big Three (see the C++ FAQ at
 * http://www.parashift.com/c++-faq-lite/coding-standards.html#faq-27.10) it
 * is needed because we already need a copy constructor. See the documentation
 * of the copy constructor for why this is the case.
 * @see Exception(const Exception &exc)
 * @param exc The exception with the values to assign to this exception.
 * @return reference to this object. Allows assignment chaining.
 */
Exception &
Exception::operator=(const Exception &exc) throw()
{
  messages_mutex = new Mutex();
  copy_messages(exc);

  return *this;
}
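The "Big Three" mentioned above are the destructor, the copy constructor and the copy assignment operator. A minimal sketch, using a hypothetical Message class that is not part of the Exception API, of why a type owning heap memory needs all three:

#include <cstdlib>
#include <cstring>

class Message {
 public:
  explicit Message(const char *text) : text_(strdup(text)) {}
  ~Message() { free(text_); }                     // 1. destructor frees the copy we own

  Message(const Message &other)                   // 2. copy constructor duplicates the data
    : text_(strdup(other.text_)) {}

  Message &operator=(const Message &other) {      // 3. assignment frees the old data first
    if (this != &other) {
      free(text_);
      text_ = strdup(other.text_);
    }
    return *this;
  }

 private:
  char *text_;
};

Without the last two, the compiler-generated copies would share the pointer and both destructors would free the same memory, which is exactly the situation described for the copy constructor below.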
Example #3
/** Copy constructor.
 * The copy constructor is worth some extra discussion. If you catch an
 * exception by value (which you shouldn't do in the first place, since this
 * generates a copy; only do it if you really cannot avoid it, not if you
 * merely THINK that you cannot avoid it) the copy constructor is called.
 * If your catch statement reads like
 * @code
 *   try {
 *     ...
 *   } catch (Exception e) {
 *     ...
 *   }
 * @endcode
 * then a copy will be created for the catch block. You throw the exception with
 * something like
 * @code
 *   throw Exception("Boom");
 * @endcode
 * This creates an Exception that is valid in the block where you throw the
 * exception. For the catch block a copy is then created. Since the exception
 * holds a pointer to data on the heap, the implicit copy constructor would
 * only copy the pointer, not the data, so both exceptions point to the same
 * data (the message of the base exception). When the destructors of both
 * exceptions run, they both try to free the very same memory, and the second
 * one causes a disaster. If you are lucky, your glibc detects the problem and
 * kills the application. If you are not that fortunate, your application will
 * show very strange behaviour.
 *
 * In general you should not have to worry about this. But if you add your own
 * storage on the heap, using either new, malloc() or a method that returns
 * memory on the heap (like strdup()), you have to write your own copy
 * constructor and either copy the memory area or take care that only one
 * exception frees the memory.
 * @param exc Exception to copy
 */
Exception::Exception(const Exception &exc) throw()
{
  messages_mutex = new Mutex();

  messages = NULL;
  messages_end = NULL;
  messages_iterator = NULL;

  _errno = exc._errno;
  __type_id = exc.__type_id;
  copy_messages(exc);
}
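As the documentation above suggests, the copy (and with it the whole concern) is usually avoided by catching by reference instead of by value. A minimal usage sketch, with a made-up do_work() helper:

// Hypothetical usage: catching by reference avoids the extra copy entirely.
try {
  do_work();                     // assumed to throw Exception("Boom")
} catch (const Exception &e) {   // no copy is created for the catch block
  // handle or re-throw; the thrown exception object is used directly
}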
Example #4
/*
 * Background worker entrypoint.
 *
 * This is intended to demonstrate how a background worker can be used to
 * facilitate a parallel computation.  Most of the logic here is fairly
 * boilerplate stuff, designed to attach to the shared memory segment,
 * notify the user backend that we're alive, and so on.  The
 * application-specific bits of logic that you'd replace for your own worker
 * are attach_to_queues() and copy_messages().
 */
void
test_shm_mq_main(Datum main_arg)
{
	dsm_segment *seg;
	shm_toc    *toc;
	shm_mq_handle *inqh;
	shm_mq_handle *outqh;
	volatile test_shm_mq_header *hdr;
	int			myworkernumber;
	PGPROC	   *registrant;

	/*
	 * Establish signal handlers.
	 *
	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
	 * it would a normal user backend.  To make that happen, we establish a
	 * signal handler that is a stripped-down version of die().  We don't have
	 * any equivalent of the backend's command-read loop, where interrupts can
	 * be processed immediately, so make sure ImmediateInterruptOK is turned
	 * off.
	 */
	pqsignal(SIGTERM, handle_sigterm);
	ImmediateInterruptOK = false;
	BackgroundWorkerUnblockSignals();

	/*
	 * Connect to the dynamic shared memory segment.
	 *
	 * The backend that registered this worker passed us the ID of a shared
	 * memory segment to which we must attach for further instructions.  In
	 * order to attach to dynamic shared memory, we need a resource owner.
	 * Once we've mapped the segment in our address space, attach to the table
	 * of contents so we can locate the various data structures we'll need to
	 * find within the segment.
	 */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
	seg = dsm_attach(DatumGetInt32(main_arg));
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
			   errmsg("bad magic number in dynamic shared memory segment")));

	/*
	 * Acquire a worker number.
	 *
	 * By convention, the process registering this background worker should
	 * have stored the control structure at key 0.  We look up that key to
	 * find it.  Our worker number gives our identity: there may be just one
	 * worker involved in this parallel operation, or there may be many.
	 */
	hdr = shm_toc_lookup(toc, 0);
	SpinLockAcquire(&hdr->mutex);
	myworkernumber = ++hdr->workers_attached;
	SpinLockRelease(&hdr->mutex);
	if (myworkernumber > hdr->workers_total)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("too many message queue testing workers already")));

	/*
	 * Attach to the appropriate message queues.
	 */
	attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);

	/*
	 * Indicate that we're fully initialized and ready to begin the main part
	 * of the parallel operation.
	 *
	 * Once we signal that we're ready, the user backend is entitled to assume
	 * that our on_dsm_detach callbacks will fire before we disconnect from
	 * the shared memory segment and exit.  Generally, that means we must have
	 * attached to all relevant dynamic shared memory data structures by now.
	 */
	SpinLockAcquire(&hdr->mutex);
	++hdr->workers_ready;
	SpinLockRelease(&hdr->mutex);
	registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
	if (registrant == NULL)
	{
		elog(DEBUG1, "registrant backend has exited prematurely");
		proc_exit(1);
	}
	SetLatch(&registrant->procLatch);

	/* Do the work. */
	copy_messages(inqh, outqh);

	/*
	 * We're done.  Explicitly detach the shared memory segment so that we
	 * don't get a resource leak warning at commit time.  This will fire any
	 * on_dsm_detach callbacks we've registered, as well.  Once that's done,
	 * we can go ahead and exit.
	 */
	dsm_detach(seg);
	proc_exit(1);
}
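The application-specific copy_messages() called above is not shown in this example. A plausible sketch of such a loop, assuming the usual shm_mq_receive()/shm_mq_send() calls and not claiming to match the original implementation:

/* Hypothetical sketch: forward every message from the inbound queue to the
 * outbound queue until either queue is detached. */
static void
copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh)
{
	Size		len;
	void	   *data;
	shm_mq_result res;

	for (;;)
	{
		/* Notice any interrupts that have occurred. */
		CHECK_FOR_INTERRUPTS();

		/* Receive a message from the inbound queue. */
		res = shm_mq_receive(inqh, &len, &data, false);
		if (res != SHM_MQ_SUCCESS)
			break;

		/* Forward it unchanged to the outbound queue. */
		res = shm_mq_send(outqh, len, data, false);
		if (res != SHM_MQ_SUCCESS)
			break;
	}
}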
Example #5
/** Append messages from another Exception.
 * @param e Exception to copy messages from
 */
void
Exception::append(const Exception &e) throw()
{
  copy_messages(e);  
}
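A hypothetical usage sketch: collecting the messages of a caught exception into a new, higher-level one (the run_step() helper is made up for illustration):

// Hypothetical usage: aggregate the messages of a caught exception.
try {
  run_step();                      // assumed to throw an Exception
} catch (Exception &inner) {
  Exception outer("Step failed");  // assuming the printf-style constructor
  outer.append(inner);             // copies all messages from inner
  throw outer;
}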