Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * portalmem.c
4 : : * backend portal memory management
5 : : *
6 : : * Portals are objects representing the execution state of a query.
7 : : * This module provides memory management services for portals, but it
8 : : * doesn't actually run the executor for them.
9 : : *
10 : : *
11 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
12 : : * Portions Copyright (c) 1994, Regents of the University of California
13 : : *
14 : : * IDENTIFICATION
15 : : * src/backend/utils/mmgr/portalmem.c
16 : : *
17 : : *-------------------------------------------------------------------------
18 : : */
19 : : #include "postgres.h"
20 : :
21 : : #include "access/xact.h"
22 : : #include "commands/portalcmds.h"
23 : : #include "funcapi.h"
24 : : #include "miscadmin.h"
25 : : #include "storage/ipc.h"
26 : : #include "utils/builtins.h"
27 : : #include "utils/memutils.h"
28 : : #include "utils/snapmgr.h"
29 : : #include "utils/timestamp.h"
30 : :
31 : : /*
32 : : * Estimate of the maximum number of open portals a user would have,
33 : : * used in initially sizing the PortalHashTable in EnablePortalManager().
34 : : * Since the hash table can expand, there's no need to make this overly
35 : : * generous, and keeping it small avoids unnecessary overhead in the
36 : : * hash_seq_search() calls executed during transaction end.
37 : : */
38 : : #define PORTALS_PER_USER 16
39 : :
40 : :
41 : : /* ----------------
42 : : * Global state
43 : : * ----------------
44 : : */
45 : :
46 : : #define MAX_PORTALNAME_LEN NAMEDATALEN
47 : :
48 : : typedef struct portalhashent
49 : : {
50 : : char portalname[MAX_PORTALNAME_LEN];
51 : : Portal portal;
52 : : } PortalHashEnt;
53 : :
54 : : static HTAB *PortalHashTable = NULL;
55 : :
56 : : #define PortalHashTableLookup(NAME, PORTAL) \
57 : : do { \
58 : : PortalHashEnt *hentry; \
59 : : \
60 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
61 : : (NAME), HASH_FIND, NULL); \
62 : : if (hentry) \
63 : : PORTAL = hentry->portal; \
64 : : else \
65 : : PORTAL = NULL; \
66 : : } while(0)
67 : :
68 : : #define PortalHashTableInsert(PORTAL, NAME) \
69 : : do { \
70 : : PortalHashEnt *hentry; bool found; \
71 : : \
72 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
73 : : (NAME), HASH_ENTER, &found); \
74 : : if (found) \
75 : : elog(ERROR, "duplicate portal name"); \
76 : : hentry->portal = PORTAL; \
77 : : /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
78 : : PORTAL->name = hentry->portalname; \
79 : : } while(0)
80 : :
81 : : #define PortalHashTableDelete(PORTAL) \
82 : : do { \
83 : : PortalHashEnt *hentry; \
84 : : \
85 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
86 : : PORTAL->name, HASH_REMOVE, NULL); \
87 : : if (hentry == NULL) \
88 : : elog(WARNING, "trying to delete portal name that does not exist"); \
89 : : } while(0)
90 : :
91 : : static MemoryContext TopPortalContext = NULL;
92 : :
93 : :
94 : : /* ----------------------------------------------------------------
95 : : * public portal interface functions
96 : : * ----------------------------------------------------------------
97 : : */
98 : :
99 : : /*
100 : : * EnablePortalManager
101 : : * Enables the portal management module at backend startup.
102 : : */
103 : : void
9391 tgl@sss.pgh.pa.us 104 :CBC 17084 : EnablePortalManager(void)
105 : : {
106 : : HASHCTL ctl;
107 : :
3011 peter_e@gmx.net 108 [ - + ]: 17084 : Assert(TopPortalContext == NULL);
109 : :
110 : 17084 : TopPortalContext = AllocSetContextCreate(TopMemoryContext,
111 : : "TopPortalContext",
112 : : ALLOCSET_DEFAULT_SIZES);
113 : :
9391 tgl@sss.pgh.pa.us 114 : 17084 : ctl.keysize = MAX_PORTALNAME_LEN;
8931 115 : 17084 : ctl.entrysize = sizeof(PortalHashEnt);
116 : :
117 : : /*
118 : : * use PORTALS_PER_USER as a guess of how many hash table entries to
119 : : * create, initially
120 : : */
8927 121 : 17084 : PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
122 : : &ctl, HASH_ELEM | HASH_STRINGS);
10841 scrappy@hub.org 123 : 17084 : }
124 : :
125 : : /*
126 : : * GetPortalByName
127 : : * Returns a portal given a portal name, or NULL if name not found.
128 : : */
129 : : Portal
8476 tgl@sss.pgh.pa.us 130 : 448620 : GetPortalByName(const char *name)
131 : : {
132 : : Portal portal;
133 : :
172 peter@eisentraut.org 134 [ + - ]:GNC 448620 : if (name)
10416 bruce@momjian.us 135 [ + + ]:CBC 448620 : PortalHashTableLookup(name, portal);
136 : : else
9391 tgl@sss.pgh.pa.us 137 :UBC 0 : portal = NULL;
138 : :
10057 bruce@momjian.us 139 :CBC 448620 : return portal;
140 : : }
141 : :
142 : : /*
143 : : * PortalGetPrimaryStmt
144 : : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 : : *
146 : : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 : : * portal are marked canSetTag, returns the first one. Neither of these
148 : : * cases should occur in present usages of this function.
149 : : */
150 : : PlannedStmt *
3347 tgl@sss.pgh.pa.us 151 : 198113 : PortalGetPrimaryStmt(Portal portal)
152 : : {
153 : : ListCell *lc;
154 : :
155 [ + - + - : 198113 : foreach(lc, portal->stmts)
+ - ]
156 : : {
3261 157 : 198113 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 : :
3347 159 [ + - ]: 198113 : if (stmt->canSetTag)
160 : 198113 : return stmt;
161 : : }
7153 tgl@sss.pgh.pa.us 162 :UBC 0 : return NULL;
163 : : }
164 : :
165 : : /*
166 : : * CreatePortal
167 : : * Returns a new portal given a name.
168 : : *
169 : : * allowDup: if true, automatically drop any pre-existing portal of the
170 : : * same name (if false, an error is raised).
171 : : *
172 : : * dupSilent: if true, don't even emit a WARNING.
173 : : */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
	Portal		portal;

	Assert(name);

	/* Handle a pre-existing portal of the same name per allowDup/dupSilent */
	portal = GetPortalByName(name);
	if (PortalIsValid(portal))
	{
		if (!allowDup)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("cursor \"%s\" already exists", name)));
		if (!dupSilent)
			ereport(WARNING,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("closing existing cursor \"%s\"",
							name)));
		PortalDrop(portal, false);
	}

	/* make new portal structure (zeroed, allocated in TopPortalContext) */
	portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);

	/* initialize portal context; typically it won't store much */
	portal->portalContext = AllocSetContextCreate(TopPortalContext,
												  "PortalContext",
												  ALLOCSET_SMALL_SIZES);

	/* create a resource owner for the portal */
	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
										   "Portal");

	/* initialize portal fields that don't start off zero */
	portal->status = PORTAL_NEW;
	portal->cleanup = PortalCleanup;
	portal->createSubid = GetCurrentSubTransactionId();
	portal->activeSubid = portal->createSubid;
	portal->createLevel = GetCurrentTransactionNestLevel();
	portal->strategy = PORTAL_MULTI_QUERY;
	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
	portal->atStart = true;
	portal->atEnd = true;		/* disallow fetches until query is set */
	portal->visible = true;
	portal->creation_time = GetCurrentStatementStartTimestamp();

	/* put portal in table (sets portal->name to the hash entry's key copy) */
	PortalHashTableInsert(portal, name);

	/* for named portals reuse portal->name copy */
	MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");

	return portal;
}
229 : :
230 : : /*
231 : : * CreateNewPortal
232 : : * Create a new portal, assigning it a random nonconflicting name.
233 : : */
234 : : Portal
8353 tgl@sss.pgh.pa.us 235 : 14356 : CreateNewPortal(void)
236 : : {
237 : : static unsigned int unnamed_portal_count = 0;
238 : :
239 : : char portalname[MAX_PORTALNAME_LEN];
240 : :
241 : : /* Select a nonconflicting name */
242 : : for (;;)
243 : : {
244 : 14356 : unnamed_portal_count++;
245 : 14356 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
246 [ + - ]: 14356 : if (GetPortalByName(portalname) == NULL)
247 : 14356 : break;
248 : : }
249 : :
250 : 14356 : return CreatePortal(portalname, false, false);
251 : : }
252 : :
253 : : /*
254 : : * PortalDefineQuery
255 : : * A simple subroutine to establish a portal's query.
256 : : *
257 : : * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
258 : : * allowed anymore to pass NULL. (If you really don't have source text,
259 : : * you can pass a constant string, perhaps "(query not available)".)
260 : : *
261 : : * commandTag shall be NULL if and only if the original query string
262 : : * (before rewriting) was an empty string. Also, the passed commandTag must
263 : : * be a pointer to a constant string, since it is not copied.
264 : : *
265 : : * If cplan is provided, then it is a cached plan containing the stmts, and
266 : : * the caller must have done GetCachedPlan(), causing a refcount increment.
267 : : * The refcount will be released when the portal is destroyed.
268 : : *
269 : : * If cplan is NULL, then it is the caller's responsibility to ensure that
270 : : * the passed plan trees have adequate lifetime. Typically this is done by
271 : : * copying them into the portal's context.
272 : : *
273 : : * The caller is also responsible for ensuring that the passed prepStmtName
274 : : * (if not NULL) and sourceText have adequate lifetime.
275 : : *
276 : : * NB: this function mustn't do much beyond storing the passed values; in
277 : : * particular don't do anything that risks elog(ERROR). If that were to
278 : : * happen here before storing the cplan reference, we'd leak the plancache
279 : : * refcount that the caller is trying to hand off to us.
280 : : */
281 : : void
282 : 395968 : PortalDefineQuery(Portal portal,
283 : : const char *prepStmtName,
284 : : const char *sourceText,
285 : : CommandTag commandTag,
286 : : List *stmts,
287 : : CachedPlan *cplan)
288 : : {
1234 peter@eisentraut.org 289 [ - + ]: 395968 : Assert(PortalIsValid(portal));
290 [ - + ]: 395968 : Assert(portal->status == PORTAL_NEW);
291 : :
292 [ - + ]: 395968 : Assert(sourceText != NULL);
293 [ - + - - ]: 395968 : Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
294 : :
6556 tgl@sss.pgh.pa.us 295 : 395968 : portal->prepStmtName = prepStmtName;
296 : 395968 : portal->sourceText = sourceText;
8353 297 : 395968 : portal->commandTag = commandTag;
44 alvherre@kurilemu.de 298 :GNC 395968 : SetQueryCompletion(&portal->qc, commandTag, 0);
6963 tgl@sss.pgh.pa.us 299 :CBC 395968 : portal->stmts = stmts;
6942 300 : 395968 : portal->cplan = cplan;
301 : 395968 : portal->status = PORTAL_DEFINED;
302 : 395968 : }
303 : :
304 : : /*
305 : : * PortalReleaseCachedPlan
306 : : * Release a portal's reference to its cached plan, if any.
307 : : */
308 : : static void
309 : 412329 : PortalReleaseCachedPlan(Portal portal)
310 : : {
311 [ + + ]: 412329 : if (portal->cplan)
312 : : {
1875 313 : 19454 : ReleaseCachedPlan(portal->cplan, NULL);
6942 314 : 19454 : portal->cplan = NULL;
315 : :
316 : : /*
317 : : * We must also clear portal->stmts which is now a dangling reference
318 : : * to the cached plan's plan list. This protects any code that might
319 : : * try to examine the Portal later.
320 : : */
5900 321 : 19454 : portal->stmts = NIL;
322 : : }
8353 323 : 412329 : }
324 : :
325 : : /*
326 : : * PortalCreateHoldStore
327 : : * Create the tuplestore for a portal.
328 : : */
329 : : void
8349 330 : 25915 : PortalCreateHoldStore(Portal portal)
331 : : {
332 : : MemoryContext oldcxt;
333 : :
334 [ - + ]: 25915 : Assert(portal->holdContext == NULL);
335 [ - + ]: 25915 : Assert(portal->holdStore == NULL);
3507 336 [ - + ]: 25915 : Assert(portal->holdSnapshot == NULL);
337 : :
338 : : /*
339 : : * Create the memory context that is used for storage of the tuple set.
340 : : * Note this is NOT a child of the portal's portalContext.
341 : : */
8349 342 : 25915 : portal->holdContext =
3011 peter_e@gmx.net 343 : 25915 : AllocSetContextCreate(TopPortalContext,
344 : : "PortalHoldContext",
345 : : ALLOCSET_DEFAULT_SIZES);
346 : :
347 : : /*
348 : : * Create the tuple store, selecting cross-transaction temp files, and
349 : : * enabling random access only if cursor requires scrolling.
350 : : *
351 : : * XXX: Should maintenance_work_mem be used for the portal size?
352 : : */
8349 tgl@sss.pgh.pa.us 353 : 25915 : oldcxt = MemoryContextSwitchTo(portal->holdContext);
354 : :
6346 355 : 25915 : portal->holdStore =
356 : 25915 : tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
357 : : true, work_mem);
358 : :
8349 359 : 25915 : MemoryContextSwitchTo(oldcxt);
360 : 25915 : }
361 : :
362 : : /*
363 : : * PinPortal
364 : : * Protect a portal from dropping.
365 : : *
366 : : * A pinned portal is still unpinned and dropped at transaction or
367 : : * subtransaction abort.
368 : : */
369 : : void
5732 heikki.linnakangas@i 370 : 6081 : PinPortal(Portal portal)
371 : : {
372 [ - + ]: 6081 : if (portal->portalPinned)
5732 heikki.linnakangas@i 373 [ # # ]:UBC 0 : elog(ERROR, "portal already pinned");
374 : :
5732 heikki.linnakangas@i 375 :CBC 6081 : portal->portalPinned = true;
376 : 6081 : }
377 : :
378 : : void
379 : 6057 : UnpinPortal(Portal portal)
380 : : {
381 [ - + ]: 6057 : if (!portal->portalPinned)
5732 heikki.linnakangas@i 382 [ # # ]:UBC 0 : elog(ERROR, "portal not pinned");
383 : :
5732 heikki.linnakangas@i 384 :CBC 6057 : portal->portalPinned = false;
385 : 6057 : }
386 : :
387 : : /*
388 : : * MarkPortalActive
389 : : * Transition a portal from READY to ACTIVE state.
390 : : *
391 : : * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
392 : : */
393 : : void
3845 tgl@sss.pgh.pa.us 394 : 413332 : MarkPortalActive(Portal portal)
395 : : {
396 : : /* For safety, this is a runtime test not just an Assert */
397 [ + + ]: 413332 : if (portal->status != PORTAL_READY)
398 [ + - ]: 9 : ereport(ERROR,
399 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
400 : : errmsg("portal \"%s\" cannot be run", portal->name)));
401 : : /* Perform the state transition */
402 : 413323 : portal->status = PORTAL_ACTIVE;
403 : 413323 : portal->activeSubid = GetCurrentSubTransactionId();
404 : 413323 : }
405 : :
406 : : /*
407 : : * MarkPortalDone
408 : : * Transition a portal from ACTIVE to DONE state.
409 : : *
410 : : * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
411 : : */
412 : : void
5491 413 : 198596 : MarkPortalDone(Portal portal)
414 : : {
415 : : /* Perform the state transition */
416 [ - + ]: 198596 : Assert(portal->status == PORTAL_ACTIVE);
417 : 198596 : portal->status = PORTAL_DONE;
418 : :
419 : : /*
420 : : * Allow portalcmds.c to clean up the state it knows about. We might as
421 : : * well do that now, since the portal can't be executed any more.
422 : : *
423 : : * In some cases involving execution of a ROLLBACK command in an already
424 : : * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
425 : : * with the cleanup hook still unexecuted.
426 : : */
172 peter@eisentraut.org 427 [ + + ]:GNC 198596 : if (portal->cleanup)
428 : : {
3111 peter_e@gmx.net 429 :CBC 198561 : portal->cleanup(portal);
5142 tgl@sss.pgh.pa.us 430 : 198561 : portal->cleanup = NULL;
431 : : }
432 : 198596 : }
433 : :
434 : : /*
435 : : * MarkPortalFailed
436 : : * Transition a portal into FAILED state.
437 : : *
438 : : * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
439 : : */
440 : : void
441 : 16184 : MarkPortalFailed(Portal portal)
442 : : {
443 : : /* Perform the state transition */
444 [ - + ]: 16184 : Assert(portal->status != PORTAL_DONE);
445 : 16184 : portal->status = PORTAL_FAILED;
446 : :
447 : : /*
448 : : * Allow portalcmds.c to clean up the state it knows about. We might as
449 : : * well do that now, since the portal can't be executed any more.
450 : : *
451 : : * In some cases involving cleanup of an already aborted transaction, this
452 : : * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
453 : : * still unexecuted.
454 : : */
172 peter@eisentraut.org 455 [ + + ]:GNC 16184 : if (portal->cleanup)
456 : : {
3111 peter_e@gmx.net 457 :CBC 16177 : portal->cleanup(portal);
5491 tgl@sss.pgh.pa.us 458 : 16177 : portal->cleanup = NULL;
459 : : }
460 : 16184 : }
461 : :
462 : : /*
463 : : * PortalDrop
464 : : * Destroy the portal.
465 : : */
void
PortalDrop(Portal portal, bool isTopCommit)
{
	Assert(PortalIsValid(portal));

	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));

	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */
	if (portal->cleanup)
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}

	/* There shouldn't be an active snapshot anymore, except after error */
	Assert(portal->portalSnapshot == NULL || !isTopCommit);

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */
	PortalHashTableDelete(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	if (portal->resowner &&
		(!isTopCommit || portal->status == PORTAL_FAILED))
	{
		/* on failure, release resources as if aborting, not committing */
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */
	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}

	/* delete tuplestore storage, if any */
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	/* release subsidiary storage */
	MemoryContextDelete(portal->portalContext);

	/* release portal struct (it's in TopPortalContext) */
	pfree(portal);
}
599 : :
600 : : /*
601 : : * Delete all declared cursors.
602 : : *
603 : : * Used by commands: CLOSE ALL, DISCARD ALL
604 : : */
605 : : void
6912 neilc@samurai.com 606 : 9 : PortalHashTableDeleteAll(void)
607 : : {
608 : : HASH_SEQ_STATUS status;
609 : : PortalHashEnt *hentry;
610 : :
611 [ - + ]: 9 : if (PortalHashTable == NULL)
6912 neilc@samurai.com 612 :UBC 0 : return;
613 : :
6912 neilc@samurai.com 614 :CBC 9 : hash_seq_init(&status, PortalHashTable);
615 [ + + ]: 36 : while ((hentry = hash_seq_search(&status)) != NULL)
616 : : {
6695 bruce@momjian.us 617 : 27 : Portal portal = hentry->portal;
618 : :
619 : : /* Can't close the active portal (the one running the command) */
5495 tgl@sss.pgh.pa.us 620 [ + + ]: 27 : if (portal->status == PORTAL_ACTIVE)
621 : 15 : continue;
622 : :
623 : 12 : PortalDrop(portal, false);
624 : :
625 : : /* Restart the iteration in case that led to other drops */
626 : 12 : hash_seq_term(&status);
627 : 12 : hash_seq_init(&status, PortalHashTable);
628 : : }
629 : : }
630 : :
631 : : /*
632 : : * "Hold" a portal. Prepare it for access by later transactions.
633 : : */
static void
HoldPortal(Portal portal)
{
	/*
	 * Materialize the portal's results into a cross-transaction tuplestore.
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
	portal->createLevel = 0;
}
662 : :
663 : : /*
664 : : * Pre-commit processing for portals.
665 : : *
666 : : * Holdable cursors created in this transaction need to be converted to
667 : : * materialized form, since we are going to close down the executor and
668 : : * release locks. Non-holdable portals created in this transaction are
669 : : * simply removed. Portals remaining from prior transactions should be
670 : : * left untouched.
671 : : *
672 : : * Returns true if any portals changed state (possibly causing user-defined
673 : : * code to be run), false if not.
674 : : */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * There should be no pinned portals anymore.  Complain if someone
		 * leaked one.  Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */
		if (portal->status == PORTAL_ACTIVE)
		{
			if (portal->holdSnapshot)
			{
				if (portal->resowner)
					UnregisterSnapshotFromOwner(portal->holdSnapshot,
												portal->resowner);
				portal->holdSnapshot = NULL;
			}
			portal->resowner = NULL;
			/* Clear portalSnapshot too, for cleanliness */
			portal->portalSnapshot = NULL;
			continue;
		}

		/* Is it a holdable portal created in the current xact? */
		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
			portal->createSubid != InvalidSubTransactionId &&
			portal->status == PORTAL_READY)
		{
			/*
			 * We are exiting the transaction that created a holdable cursor.
			 * Instead of dropping the portal, prepare it for access by later
			 * transactions.
			 *
			 * However, if this is PREPARE TRANSACTION rather than COMMIT,
			 * refuse PREPARE, because the semantics seem pretty unclear.
			 */
			if (isPrepare)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

			HoldPortal(portal);

			/* Report we changed state */
			result = true;
		}
		else if (portal->createSubid == InvalidSubTransactionId)
		{
			/*
			 * Do nothing to cursors held over from a previous transaction
			 * (including ones we just froze in a previous cycle of this loop)
			 */
			continue;
		}
		else
		{
			/* Zap all non-holdable portals */
			PortalDrop(portal, true);

			/* Report we changed state */
			result = true;
		}

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}

	return result;
}
772 : :
773 : : /*
774 : : * Abort processing for portals.
775 : : *
776 : : * At this point we run the cleanup hook if present, but we can't release the
777 : : * portal's memory until the cleanup call.
778 : : */
void
AtAbort_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * While elog(FATAL) is in progress, we need to set the active portal
		 * to failed, so that PortalCleanup() doesn't run the executor
		 * shutdown.
		 */
		if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
			MarkPortalFailed(portal);

		/*
		 * Do nothing else to cursors held over from a previous transaction.
		 */
		if (portal->createSubid == InvalidSubTransactionId)
			continue;

		/*
		 * Do nothing to auto-held cursors.  This is similar to the case of a
		 * cursor from a previous transaction, but it could also be that the
		 * cursor was auto-held in this transaction, so it wants to live on.
		 */
		if (portal->autoHeld)
			continue;

		/*
		 * If it was created in the current transaction, we can't do normal
		 * shutdown on a READY portal either; it might refer to objects
		 * created in the failed transaction.  See comments in
		 * AtSubAbort_Portals.
		 */
		if (portal->status == PORTAL_READY)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (portal->cleanup)
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.  But leave active portals alone.
		 */
		if (portal->status != PORTAL_ACTIVE)
			MemoryContextDeleteChildren(portal->portalContext);
	}
}
851 : :
852 : : /*
853 : : * Post-abort cleanup for portals.
854 : : *
855 : : * Delete all portals not held over from prior transactions.
856 : : */
857 : : void
8353 858 : 26803 : AtCleanup_Portals(void)
859 : : {
860 : : HASH_SEQ_STATUS status;
861 : : PortalHashEnt *hentry;
862 : :
863 : 26803 : hash_seq_init(&status, PortalHashTable);
864 : :
865 [ + + ]: 41845 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
866 : : {
8259 bruce@momjian.us 867 : 15042 : Portal portal = hentry->portal;
868 : :
869 : : /*
870 : : * Do not touch active portals --- this can only happen in the case of
871 : : * a multi-transaction command.
872 : : */
2974 peter_e@gmx.net 873 [ + + ]: 15042 : if (portal->status == PORTAL_ACTIVE)
874 : 100 : continue;
875 : :
876 : : /*
877 : : * Do nothing to cursors held over from a previous transaction or
878 : : * auto-held ones.
879 : : */
2909 880 [ + + - + ]: 14942 : if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
881 : : {
7895 tgl@sss.pgh.pa.us 882 [ - + ]: 68 : Assert(portal->status != PORTAL_ACTIVE);
883 [ - + ]: 68 : Assert(portal->resowner == NULL);
8353 884 : 68 : continue;
885 : : }
886 : :
887 : : /*
888 : : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
889 : : * let us drop the portal otherwise. Whoever pinned the portal was
890 : : * interrupted by the abort too and won't try to use it anymore.
891 : : */
5732 heikki.linnakangas@i 892 [ + + ]: 14874 : if (portal->portalPinned)
893 : 19 : portal->portalPinned = false;
894 : :
895 : : /*
896 : : * We had better not call any user-defined code during cleanup, so if
897 : : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
898 : : */
172 peter@eisentraut.org 899 [ - + ]:GNC 14874 : if (portal->cleanup)
900 : : {
3135 tgl@sss.pgh.pa.us 901 [ # # ]:UBC 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
902 : 0 : portal->cleanup = NULL;
903 : : }
904 : :
905 : : /* Zap it. */
7911 tgl@sss.pgh.pa.us 906 :CBC 14874 : PortalDrop(portal, false);
907 : : }
8353 908 : 26803 : }
909 : :
910 : : /*
911 : : * Portal-related cleanup when we return to the main loop on error.
912 : : *
913 : : * This is different from the cleanup at transaction abort. Auto-held portals
914 : : * are cleaned up on error but not on transaction abort.
915 : : */
916 : : void
2909 peter_e@gmx.net 917 : 22891 : PortalErrorCleanup(void)
918 : : {
919 : : HASH_SEQ_STATUS status;
920 : : PortalHashEnt *hentry;
921 : :
922 : 22891 : hash_seq_init(&status, PortalHashTable);
923 : :
924 [ + + ]: 47344 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
925 : : {
926 : 1562 : Portal portal = hentry->portal;
927 : :
928 [ + + ]: 1562 : if (portal->autoHeld)
929 : : {
930 : 2 : portal->portalPinned = false;
931 : 2 : PortalDrop(portal, false);
932 : : }
933 : : }
934 : 22891 : }
935 : :
936 : : /*
937 : : * Pre-subcommit processing for portals.
938 : : *
939 : : * Reassign portals created or used in the current subtransaction to the
940 : : * parent subtransaction.
941 : : */
942 : : void
7850 tgl@sss.pgh.pa.us 943 : 6991 : AtSubCommit_Portals(SubTransactionId mySubid,
944 : : SubTransactionId parentSubid,
945 : : int parentLevel,
946 : : ResourceOwner parentXactOwner)
947 : : {
948 : : HASH_SEQ_STATUS status;
949 : : PortalHashEnt *hentry;
950 : :
7927 951 : 6991 : hash_seq_init(&status, PortalHashTable);
952 : :
953 [ + + ]: 20358 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
954 : : {
7868 bruce@momjian.us 955 : 6376 : Portal portal = hentry->portal;
956 : :
7850 tgl@sss.pgh.pa.us 957 [ + + ]: 6376 : if (portal->createSubid == mySubid)
958 : : {
959 : 30 : portal->createSubid = parentSubid;
1626 960 : 30 : portal->createLevel = parentLevel;
7911 961 [ + - ]: 30 : if (portal->resowner)
962 : 30 : ResourceOwnerNewParent(portal->resowner, parentXactOwner);
963 : : }
3845 964 [ + + ]: 6376 : if (portal->activeSubid == mySubid)
965 : 110 : portal->activeSubid = parentSubid;
966 : : }
7927 967 : 6991 : }
968 : :
/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
				   SubTransactionId parentSubid,
				   ResourceOwner myXactOwner,
				   ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE.  This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction.  This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}

		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here.  If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.  (Must happen before we release the cached plan
		 * or the resource owner below.)
		 */
		if (portal->cleanup)
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
		MemoryContextDeleteChildren(portal->portalContext);
	}
}
1084 : :
1085 : : /*
1086 : : * Post-subabort cleanup for portals.
1087 : : *
1088 : : * Drop all portals created in the failed subtransaction (but note that
1089 : : * we will not drop any that were reassigned to the parent above).
1090 : : */
1091 : : void
7850 1092 : 4713 : AtSubCleanup_Portals(SubTransactionId mySubid)
1093 : : {
1094 : : HASH_SEQ_STATUS status;
1095 : : PortalHashEnt *hentry;
1096 : :
7927 1097 : 4713 : hash_seq_init(&status, PortalHashTable);
1098 : :
1099 [ + + ]: 11306 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1100 : : {
1101 : 6593 : Portal portal = hentry->portal;
1102 : :
7850 1103 [ + + ]: 6593 : if (portal->createSubid != mySubid)
7927 1104 : 6061 : continue;
1105 : :
1106 : : /*
1107 : : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1108 : : * let us drop the portal otherwise. Whoever pinned the portal was
1109 : : * interrupted by the abort too and won't try to use it anymore.
1110 : : */
5724 heikki.linnakangas@i 1111 [ + + ]: 532 : if (portal->portalPinned)
1112 : 3 : portal->portalPinned = false;
1113 : :
1114 : : /*
1115 : : * We had better not call any user-defined code during cleanup, so if
1116 : : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1117 : : */
172 peter@eisentraut.org 1118 [ - + ]:GNC 532 : if (portal->cleanup)
1119 : : {
3135 tgl@sss.pgh.pa.us 1120 [ # # ]:UBC 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1121 : 0 : portal->cleanup = NULL;
1122 : : }
1123 : :
1124 : : /* Zap it. */
7911 tgl@sss.pgh.pa.us 1125 :CBC 532 : PortalDrop(portal, false);
1126 : : }
7927 1127 : 4713 : }
1128 : :
1129 : : /* Find all available cursors */
1130 : : Datum
7361 neilc@samurai.com 1131 : 60 : pg_cursor(PG_FUNCTION_ARGS)
1132 : : {
6898 tgl@sss.pgh.pa.us 1133 : 60 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1134 : : HASH_SEQ_STATUS hash_seq;
1135 : : PortalHashEnt *hentry;
1136 : :
1137 : : /*
1138 : : * We put all the tuples into a tuplestore in one scan of the hashtable.
1139 : : * This avoids any issue of the hashtable possibly changing between calls.
1140 : : */
1244 michael@paquier.xyz 1141 : 60 : InitMaterializedSRF(fcinfo, 0);
1142 : :
6898 tgl@sss.pgh.pa.us 1143 : 60 : hash_seq_init(&hash_seq, PortalHashTable);
1144 [ + + ]: 186 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1145 : : {
1146 : 126 : Portal portal = hentry->portal;
1147 : : Datum values[6];
1338 peter@eisentraut.org 1148 : 126 : bool nulls[6] = {0};
1149 : :
1150 : : /* report only "visible" entries */
6898 tgl@sss.pgh.pa.us 1151 [ + + ]: 126 : if (!portal->visible)
1152 : 63 : continue;
1153 : : /* also ignore it if PortalDefineQuery hasn't been called yet */
525 1154 [ - + ]: 63 : if (!portal->sourceText)
525 tgl@sss.pgh.pa.us 1155 :UBC 0 : continue;
1156 : :
6564 tgl@sss.pgh.pa.us 1157 :CBC 63 : values[0] = CStringGetTextDatum(portal->name);
6449 1158 : 63 : values[1] = CStringGetTextDatum(portal->sourceText);
7361 neilc@samurai.com 1159 : 63 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1160 : 63 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1161 : 63 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1162 : 63 : values[5] = TimestampTzGetDatum(portal->creation_time);
1163 : :
1469 michael@paquier.xyz 1164 : 63 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1165 : : }
1166 : :
6898 tgl@sss.pgh.pa.us 1167 : 60 : return (Datum) 0;
1168 : : }
1169 : :
1170 : : bool
4852 simon@2ndQuadrant.co 1171 : 30 : ThereAreNoReadyPortals(void)
1172 : : {
1173 : : HASH_SEQ_STATUS status;
1174 : : PortalHashEnt *hentry;
1175 : :
1176 : 30 : hash_seq_init(&status, PortalHashTable);
1177 : :
1178 [ + + ]: 60 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1179 : : {
1180 : 30 : Portal portal = hentry->portal;
1181 : :
1182 [ - + ]: 30 : if (portal->status == PORTAL_READY)
4852 simon@2ndQuadrant.co 1183 :UBC 0 : return false;
1184 : : }
1185 : :
4852 simon@2ndQuadrant.co 1186 :CBC 30 : return true;
1187 : : }
1188 : :
1189 : : /*
1190 : : * Hold all pinned portals.
1191 : : *
1192 : : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1193 : : * called to protect internally-generated cursors from being dropped during
1194 : : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1195 : : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1196 : : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1197 : : * because we need to run user-defined code while persisting a portal.
1198 : : * It's too late to do that once transaction abort has started.)
1199 : : *
1200 : : * We protect such portals by converting them to held cursors. We mark them
1201 : : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1202 : : * non-exception code paths, the PL needs to clean such portals itself, since
1203 : : * transaction end won't do it anymore; but that should be normal practice
1204 : : * anyway.)
1205 : : */
1206 : : void
2909 peter_e@gmx.net 1207 : 2217 : HoldPinnedPortals(void)
1208 : : {
1209 : : HASH_SEQ_STATUS status;
1210 : : PortalHashEnt *hentry;
1211 : :
2974 1212 : 2217 : hash_seq_init(&status, PortalHashTable);
1213 : :
1214 [ + + ]: 4494 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1215 : : {
1216 : 2280 : Portal portal = hentry->portal;
1217 : :
2909 1218 [ + + + + ]: 2280 : if (portal->portalPinned && !portal->autoHeld)
1219 : : {
1220 : : /*
1221 : : * Doing transaction control, especially abort, inside a cursor
1222 : : * loop that is not read-only, for example using UPDATE ...
1223 : : * RETURNING, has weird semantics issues. Also, this
1224 : : * implementation wouldn't work, because such portals cannot be
1225 : : * held. (The core grammar enforces that only SELECT statements
1226 : : * can drive a cursor, but for example PL/pgSQL does not restrict
1227 : : * it.)
1228 : : */
1229 [ + + ]: 19 : if (portal->strategy != PORTAL_ONE_SELECT)
1230 [ + - ]: 1 : ereport(ERROR,
1231 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1232 : : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1233 : :
1234 : : /* Verify it's in a suitable state to be held */
2522 tgl@sss.pgh.pa.us 1235 [ - + ]: 18 : if (portal->status != PORTAL_READY)
2522 tgl@sss.pgh.pa.us 1236 [ # # ]:UBC 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1237 : :
2909 peter_e@gmx.net 1238 :CBC 18 : HoldPortal(portal);
2522 tgl@sss.pgh.pa.us 1239 : 16 : portal->autoHeld = true;
1240 : : }
1241 : : }
2974 peter_e@gmx.net 1242 : 2214 : }
1243 : :
1244 : : /*
1245 : : * Drop the outer active snapshots for all portals, so that no snapshots
1246 : : * remain active.
1247 : : *
1248 : : * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1249 : : * ROLLBACK inside a procedure. This has to be separate from that since it
1250 : : * should not be run until we're done with steps that are likely to fail.
1251 : : *
1252 : : * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1253 : : * need to clean up snapshot management in VACUUM and perhaps other places.
1254 : : */
1255 : : void
1759 tgl@sss.pgh.pa.us 1256 : 2214 : ForgetPortalSnapshots(void)
1257 : : {
1258 : : HASH_SEQ_STATUS status;
1259 : : PortalHashEnt *hentry;
1260 : 2214 : int numPortalSnaps = 0;
1261 : 2214 : int numActiveSnaps = 0;
1262 : :
1263 : : /* First, scan PortalHashTable and clear portalSnapshot fields */
1264 : 2214 : hash_seq_init(&status, PortalHashTable);
1265 : :
1266 [ + + ]: 6705 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1267 : : {
1268 : 2277 : Portal portal = hentry->portal;
1269 : :
1270 [ + + ]: 2277 : if (portal->portalSnapshot != NULL)
1271 : : {
1272 : 2214 : portal->portalSnapshot = NULL;
1273 : 2214 : numPortalSnaps++;
1274 : : }
1275 : : /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1276 : : }
1277 : :
1278 : : /*
1279 : : * Now, pop all the active snapshots, which should be just those that were
1280 : : * portal snapshots. Ideally we'd drive this directly off the portal
1281 : : * scan, but there's no good way to visit the portals in the correct
1282 : : * order. So just cross-check after the fact.
1283 : : */
1284 [ + + ]: 4428 : while (ActiveSnapshotSet())
1285 : : {
1286 : 2214 : PopActiveSnapshot();
1287 : 2214 : numActiveSnaps++;
1288 : : }
1289 : :
1290 [ - + ]: 2214 : if (numPortalSnaps != numActiveSnaps)
1759 tgl@sss.pgh.pa.us 1291 [ # # ]:UBC 0 : elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1292 : : numPortalSnaps, numActiveSnaps);
1759 tgl@sss.pgh.pa.us 1293 :CBC 2214 : }
|