/*
 * pgstrom_get_device_info
 *
 * Returns the shared-memory device info entry at the given index,
 * or NULL when the index is out of range.
 */
const pgstrom_device_info *
pgstrom_get_device_info(unsigned int index)
{
	/* ensure we observe the device list already published in shared memory */
	pg_memory_barrier();

	if (index >= devinfo_shm_values->num_devices)
		return NULL;

	return devinfo_shm_values->devinfo_array[index];
}
/* * Sets a latch and wakes up anyone waiting on it. * * This is cheap if the latch is already set, otherwise not so much. * * NB: when calling this in a signal handler, be sure to save and restore * errno around it. (That's standard practice in most signal handlers, of * course, but we used to omit it in handlers that only set a flag.) * * NB: this function is called from critical sections and signal handlers so * throwing an error is not a good idea. */ void SetLatch(volatile Latch *latch) { pid_t owner_pid; /* * The memory barrier has be to be placed here to ensure that any flag * variables possibly changed by this process have been flushed to main * memory, before we check/set is_set. */ pg_memory_barrier(); /* Quick exit if already set */ if (latch->is_set) return; latch->is_set = true; /* * See if anyone's waiting for the latch. It can be the current process if * we're in a signal handler. We use the self-pipe to wake up the select() * in that case. If it's another process, send a signal. * * Fetch owner_pid only once, in case the latch is concurrently getting * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't * guaranteed to be true! In practice, the effective range of pid_t fits * in a 32 bit integer, and so should be atomic. In the worst case, we * might end up signaling the wrong process. Even then, you're very * unlucky if a process with that bogus pid exists and belongs to * Postgres; and PG database processes should handle excess SIGUSR1 * interrupts without a problem anyhow. * * Another sort of race condition that's possible here is for a new * process to own the latch immediately after we look, so we don't signal * it. This is okay so long as all callers of ResetLatch/WaitLatch follow * the standard coding convention of waiting at the bottom of their loops, * not the top, so that they'll correctly process latch-setting events * that happen before they enter the loop. 
*/ owner_pid = latch->owner_pid; if (owner_pid == 0) return; else if (owner_pid == MyProcPid) { if (waiting) sendSelfPipeByte(); } else kill(owner_pid, SIGUSR1); }
/*
 * Clears the latch.  Only the process owning the latch may call this.
 */
void
ResetLatch(volatile Latch *latch)
{
	/* resetting somebody else's latch is a coding error */
	Assert(latch->owner_pid == MyProcPid);

	latch->is_set = false;

	/*
	 * Force the cleared is_set out to main memory before the caller goes on
	 * to examine any flag variables.  Otherwise a concurrent SetLatch could
	 * falsely conclude that no wakeup is needed, even though we have missed
	 * seeing some flag updates that SetLatch was supposed to inform us of.
	 */
	pg_memory_barrier();
}
/* * The comments above the unix implementation (unix_latch.c) of this function * apply here as well. */ void SetLatch(volatile Latch *latch) { HANDLE handle; /* * The memory barrier has be to be placed here to ensure that any flag * variables possibly changed by this process have been flushed to main * memory, before we check/set is_set. */ pg_memory_barrier(); /* Quick exit if already set */ if (latch->is_set) return; latch->is_set = true; /* * See if anyone's waiting for the latch. It can be the current process if * we're in a signal handler. * * Use a local variable here just in case somebody changes the event field * concurrently (which really should not happen). */ handle = latch->event; if (handle) { SetEvent(handle); /* * Note that we silently ignore any errors. We might be in a signal * handler or other critical path where it's not safe to call elog(). */ } }
/*
 * Routines to get device properties.
 */

/*
 * pgstrom_get_device_nums
 *
 * Returns the number of devices recorded in shared memory.
 */
int
pgstrom_get_device_nums(void)
{
	/* ensure we read an up-to-date view of the shared-memory counter */
	pg_memory_barrier();

	return devinfo_shm_values->num_devices;
}