shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
shm_toc_estimate_keys(&pcxt->estimator, 1);
+ /*
+ * If we manage to reach here while non-interruptible, it's unsafe to
+ * launch any workers: we would fail to process interrupts sent by them.
+ * We can deal with that edge case by pretending no workers were
+ * requested.
+ */
+ if (!INTERRUPTS_CAN_BE_PROCESSED())
+ pcxt->nworkers = 0;
+
/*
* Normally, the user will have requested at least one worker process, but
* if by chance they have not, we can skip a bunch of things here.
shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
}
+ /* Update nworkers_to_launch, in case we changed nworkers above. */
+ pcxt->nworkers_to_launch = pcxt->nworkers;
+
/* Restore previous memory context. */
MemoryContextSwitchTo(oldcontext);
}
{
/*
* The number of workers that need to be launched must be less than the
- * number of workers with which the parallel context is initialized.
+ * number of workers with which the parallel context is initialized
+ * (equality is allowed; Min trims any excess). The caller might not know
+ * that InitializeParallelDSM reduced nworkers, so silently trim the request.
*/
- Assert(pcxt->nworkers >= nworkers_to_launch);
- pcxt->nworkers_to_launch = nworkers_to_launch;
+ pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
}
/*
ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
{
int plan_node_id = state->js.ps.plan->plan_node_id;
- ParallelHashJoinState *pstate =
- shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ ParallelHashJoinState *pstate;
+
+ /* Nothing to do if we failed to create a DSM segment. */
+ if (pcxt->seg == NULL)
+ return;
+
+ pstate = shm_toc_lookup(pcxt->toc, plan_node_id, false);
/*
* It would be possible to reuse the shared hash table in single-batch
* we want to allow parallel inserts in general; updates and deletes have
* additional problems especially around combo CIDs.)
*
- * We don't try to use parallel mode unless interruptible. The leader
- * expects ProcessInterrupts() calls to reach HandleParallelMessages().
- * Even if we called HandleParallelMessages() another way, starting a
- * parallel worker is too delay-prone to be prudent when uncancellable.
- *
* For now, we don't try to use parallel mode if we're running inside a
* parallel worker. We might eventually be able to relax this
* restriction, but for now it seems best not to have parallel workers
parse->commandType == CMD_SELECT &&
!parse->hasModifyingCTE &&
max_parallel_workers_per_gather > 0 &&
- INTERRUPTS_CAN_BE_PROCESSED() &&
!IsParallelWorker())
{
/* all the cheap tests pass, so scan the query tree */