Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * portalmem.c
4 : : * backend portal memory management
5 : : *
6 : : * Portals are objects representing the execution state of a query.
7 : : * This module provides memory management services for portals, but it
8 : : * doesn't actually run the executor for them.
9 : : *
10 : : *
11 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
12 : : * Portions Copyright (c) 1994, Regents of the University of California
13 : : *
14 : : * IDENTIFICATION
15 : : * src/backend/utils/mmgr/portalmem.c
16 : : *
17 : : *-------------------------------------------------------------------------
18 : : */
19 : : #include "postgres.h"
20 : :
21 : : #include "access/xact.h"
22 : : #include "commands/portalcmds.h"
23 : : #include "funcapi.h"
24 : : #include "miscadmin.h"
25 : : #include "storage/ipc.h"
26 : : #include "utils/builtins.h"
27 : : #include "utils/memutils.h"
28 : : #include "utils/snapmgr.h"
29 : : #include "utils/timestamp.h"
30 : :
31 : : /*
32 : : * Estimate of the maximum number of open portals a user would have,
33 : : * used in initially sizing the PortalHashTable in EnablePortalManager().
34 : : * Since the hash table can expand, there's no need to make this overly
35 : : * generous, and keeping it small avoids unnecessary overhead in the
36 : : * hash_seq_search() calls executed during transaction end.
37 : : */
38 : : #define PORTALS_PER_USER 16
39 : :
40 : :
41 : : /* ----------------
42 : : * Global state
43 : : * ----------------
44 : : */
45 : :
46 : : #define MAX_PORTALNAME_LEN NAMEDATALEN
47 : :
48 : : typedef struct portalhashent
49 : : {
50 : : char portalname[MAX_PORTALNAME_LEN];
51 : : Portal portal;
52 : : } PortalHashEnt;
53 : :
54 : : static HTAB *PortalHashTable = NULL;
55 : :
56 : : #define PortalHashTableLookup(NAME, PORTAL) \
57 : : do { \
58 : : PortalHashEnt *hentry; \
59 : : \
60 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
61 : : (NAME), HASH_FIND, NULL); \
62 : : if (hentry) \
63 : : PORTAL = hentry->portal; \
64 : : else \
65 : : PORTAL = NULL; \
66 : : } while(0)
67 : :
68 : : #define PortalHashTableInsert(PORTAL, NAME) \
69 : : do { \
70 : : PortalHashEnt *hentry; bool found; \
71 : : \
72 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
73 : : (NAME), HASH_ENTER, &found); \
74 : : if (found) \
75 : : elog(ERROR, "duplicate portal name"); \
76 : : hentry->portal = PORTAL; \
77 : : /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
78 : : PORTAL->name = hentry->portalname; \
79 : : } while(0)
80 : :
81 : : #define PortalHashTableDelete(PORTAL) \
82 : : do { \
83 : : PortalHashEnt *hentry; \
84 : : \
85 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
86 : : PORTAL->name, HASH_REMOVE, NULL); \
87 : : if (hentry == NULL) \
88 : : elog(WARNING, "trying to delete portal name that does not exist"); \
89 : : } while(0)
90 : :
91 : : static MemoryContext TopPortalContext = NULL;
92 : :
93 : :
94 : : /* ----------------------------------------------------------------
95 : : * public portal interface functions
96 : : * ----------------------------------------------------------------
97 : : */
98 : :
99 : : /*
100 : : * EnablePortalManager
101 : : * Enables the portal management module at backend startup.
102 : : */
103 : : void
9302 tgl@sss.pgh.pa.us 104 :CBC 15569 : EnablePortalManager(void)
105 : : {
106 : : HASHCTL ctl;
107 : :
2922 peter_e@gmx.net 108 [ - + ]: 15569 : Assert(TopPortalContext == NULL);
109 : :
110 : 15569 : TopPortalContext = AllocSetContextCreate(TopMemoryContext,
111 : : "TopPortalContext",
112 : : ALLOCSET_DEFAULT_SIZES);
113 : :
9302 tgl@sss.pgh.pa.us 114 : 15569 : ctl.keysize = MAX_PORTALNAME_LEN;
8842 115 : 15569 : ctl.entrysize = sizeof(PortalHashEnt);
116 : :
117 : : /*
118 : : * use PORTALS_PER_USER as a guess of how many hash table entries to
119 : : * create, initially
120 : : */
8838 121 : 15569 : PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
122 : : &ctl, HASH_ELEM | HASH_STRINGS);
10752 scrappy@hub.org 123 : 15569 : }
124 : :
125 : : /*
126 : : * GetPortalByName
127 : : * Returns a portal given a portal name, or NULL if name not found.
128 : : */
129 : : Portal
8387 tgl@sss.pgh.pa.us 130 : 438938 : GetPortalByName(const char *name)
131 : : {
132 : : Portal portal;
133 : :
83 peter@eisentraut.org 134 [ + - ]:GNC 438938 : if (name)
10327 bruce@momjian.us 135 [ + + ]:CBC 438938 : PortalHashTableLookup(name, portal);
136 : : else
9302 tgl@sss.pgh.pa.us 137 :UBC 0 : portal = NULL;
138 : :
9968 bruce@momjian.us 139 :CBC 438938 : return portal;
140 : : }
141 : :
142 : : /*
143 : : * PortalGetPrimaryStmt
144 : : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 : : *
146 : : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 : : * portal are marked canSetTag, returns the first one. Neither of these
148 : : * cases should occur in present usages of this function.
149 : : */
150 : : PlannedStmt *
3258 tgl@sss.pgh.pa.us 151 : 187231 : PortalGetPrimaryStmt(Portal portal)
152 : : {
153 : : ListCell *lc;
154 : :
155 [ + - + - : 187231 : foreach(lc, portal->stmts)
+ - ]
156 : : {
3172 157 : 187231 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 : :
3258 159 [ + - ]: 187231 : if (stmt->canSetTag)
160 : 187231 : return stmt;
161 : : }
7064 tgl@sss.pgh.pa.us 162 :UBC 0 : return NULL;
163 : : }
164 : :
165 : : /*
166 : : * CreatePortal
167 : : * Returns a new portal given a name.
168 : : *
169 : : * allowDup: if true, automatically drop any pre-existing portal of the
170 : : * same name (if false, an error is raised).
171 : : *
172 : : * dupSilent: if true, don't even emit a WARNING.
173 : : */
174 : : Portal
8264 tgl@sss.pgh.pa.us 175 :CBC 386953 : CreatePortal(const char *name, bool allowDup, bool dupSilent)
176 : : {
177 : : Portal portal;
178 : :
83 peter@eisentraut.org 179 [ - + ]:GNC 386953 : Assert(name);
180 : :
10327 bruce@momjian.us 181 :CBC 386953 : portal = GetPortalByName(name);
182 [ + + ]: 386953 : if (PortalIsValid(portal))
183 : : {
8264 tgl@sss.pgh.pa.us 184 [ - + ]: 5534 : if (!allowDup)
8180 tgl@sss.pgh.pa.us 185 [ # # ]:UBC 0 : ereport(ERROR,
186 : : (errcode(ERRCODE_DUPLICATE_CURSOR),
187 : : errmsg("cursor \"%s\" already exists", name)));
8264 tgl@sss.pgh.pa.us 188 [ - + ]:CBC 5534 : if (!dupSilent)
8180 tgl@sss.pgh.pa.us 189 [ # # ]:UBC 0 : ereport(WARNING,
190 : : (errcode(ERRCODE_DUPLICATE_CURSOR),
191 : : errmsg("closing existing cursor \"%s\"",
192 : : name)));
8264 tgl@sss.pgh.pa.us 193 :CBC 5534 : PortalDrop(portal, false);
194 : : }
195 : :
196 : : /* make new portal structure */
2922 peter_e@gmx.net 197 : 386953 : portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
198 : :
199 : : /* initialize portal context; typically it won't store much */
200 : 386953 : portal->portalContext = AllocSetContextCreate(TopPortalContext,
201 : : "PortalContext",
202 : : ALLOCSET_SMALL_SIZES);
203 : :
204 : : /* create a resource owner for the portal */
7822 tgl@sss.pgh.pa.us 205 : 386953 : portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
206 : : "Portal");
207 : :
208 : : /* initialize portal fields that don't start off zero */
7272 neilc@samurai.com 209 : 386953 : portal->status = PORTAL_NEW;
8267 tgl@sss.pgh.pa.us 210 : 386953 : portal->cleanup = PortalCleanup;
7761 211 : 386953 : portal->createSubid = GetCurrentSubTransactionId();
3756 212 : 386953 : portal->activeSubid = portal->createSubid;
1537 213 : 386953 : portal->createLevel = GetCurrentTransactionNestLevel();
8264 214 : 386953 : portal->strategy = PORTAL_MULTI_QUERY;
215 : 386953 : portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
8316 216 : 386953 : portal->atStart = true;
217 : 386953 : portal->atEnd = true; /* disallow fetches until query is set */
7272 neilc@samurai.com 218 : 386953 : portal->visible = true;
7119 tgl@sss.pgh.pa.us 219 : 386953 : portal->creation_time = GetCurrentStatementStartTimestamp();
220 : :
221 : : /* put portal in table (sets portal->name) */
8264 222 [ - + - - ]: 386953 : PortalHashTableInsert(portal, name);
223 : :
224 : : /* for named portals, reuse the portal->name copy as the context identifier */
1925 peter@eisentraut.org 225 [ + + ]: 386953 : MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
226 : :
9968 bruce@momjian.us 227 : 386953 : return portal;
228 : : }
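/*
 * Illustrative sketch (hypothetical caller, arguments approximate and
 * version-dependent): a typical user of this API, such as the simple-query
 * path, walks a portal through its whole life cycle roughly like this:
 *
 *		portal = CreatePortal("", true, true);
 *		PortalDefineQuery(portal, NULL, query_string, commandTag,
 *						  plantree_list, NULL);
 *		PortalStart(portal, NULL, 0, InvalidSnapshot);
 *		(void) PortalRun(portal, FETCH_ALL, true, receiver, receiver, &qc);
 *		PortalDrop(portal, false);
 *
 * Here query_string, commandTag, plantree_list, receiver, and qc stand for
 * caller-supplied values; PortalStart/PortalRun live in pquery.c.
 */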
229 : :
230 : : /*
231 : : * CreateNewPortal
232 : : * Create a new portal, assigning it a random nonconflicting name.
233 : : */
234 : : Portal
8264 tgl@sss.pgh.pa.us 235 : 14082 : CreateNewPortal(void)
236 : : {
237 : : static unsigned int unnamed_portal_count = 0;
238 : :
239 : : char portalname[MAX_PORTALNAME_LEN];
240 : :
241 : : /* Select a nonconflicting name */
242 : : for (;;)
243 : : {
244 : 14082 : unnamed_portal_count++;
245 : 14082 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
246 [ + - ]: 14082 : if (GetPortalByName(portalname) == NULL)
247 : 14082 : break;
248 : : }
249 : :
250 : 14082 : return CreatePortal(portalname, false, false);
251 : : }
252 : :
253 : : /*
254 : : * PortalDefineQuery
255 : : * A simple subroutine to establish a portal's query.
256 : : *
257 : : * Notes: as of PG 8.4, the caller MUST supply a sourceText string; it is
258 : : * no longer allowed to pass NULL. (If you really don't have source text,
259 : : * you can pass a constant string, perhaps "(query not available)".)
260 : : *
261 : : * commandTag shall be NULL if and only if the original query string
262 : : * (before rewriting) was an empty string. Also, the passed commandTag must
263 : : * be a pointer to a constant string, since it is not copied.
264 : : *
265 : : * If cplan is provided, then it is a cached plan containing the stmts, and
266 : : * the caller must have done GetCachedPlan(), causing a refcount increment.
267 : : * The refcount will be released when the portal is destroyed.
268 : : *
269 : : * If cplan is NULL, then it is the caller's responsibility to ensure that
270 : : * the passed plan trees have adequate lifetime. Typically this is done by
271 : : * copying them into the portal's context.
272 : : *
273 : : * The caller is also responsible for ensuring that the passed prepStmtName
274 : : * (if not NULL) and sourceText have adequate lifetime.
275 : : *
276 : : * NB: this function mustn't do much beyond storing the passed values; in
277 : : * particular don't do anything that risks elog(ERROR). If that were to
278 : : * happen here before storing the cplan reference, we'd leak the plancache
279 : : * refcount that the caller is trying to hand off to us.
280 : : */
281 : : void
282 : 386936 : PortalDefineQuery(Portal portal,
283 : : const char *prepStmtName,
284 : : const char *sourceText,
285 : : CommandTag commandTag,
286 : : List *stmts,
287 : : CachedPlan *cplan)
288 : : {
1145 peter@eisentraut.org 289 [ - + ]: 386936 : Assert(PortalIsValid(portal));
290 [ - + ]: 386936 : Assert(portal->status == PORTAL_NEW);
291 : :
292 [ - + ]: 386936 : Assert(sourceText != NULL);
293 [ - + - - ]: 386936 : Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
294 : :
6467 tgl@sss.pgh.pa.us 295 : 386936 : portal->prepStmtName = prepStmtName;
296 : 386936 : portal->sourceText = sourceText;
2115 alvherre@alvh.no-ip. 297 : 386936 : portal->qc.commandTag = commandTag;
298 : 386936 : portal->qc.nprocessed = 0;
8264 tgl@sss.pgh.pa.us 299 : 386936 : portal->commandTag = commandTag;
6874 300 : 386936 : portal->stmts = stmts;
6853 301 : 386936 : portal->cplan = cplan;
302 : 386936 : portal->status = PORTAL_DEFINED;
303 : 386936 : }
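/*
 * Illustrative sketch (hypothetical caller, arguments approximate): when the
 * statements come from the plan cache, the caller acquires the plan reference
 * first and hands it off here, making the portal responsible for releasing it
 * later (see PortalReleaseCachedPlan below):
 *
 *		cplan = GetCachedPlan(psrc, params, NULL, NULL);
 *		PortalDefineQuery(portal,
 *						  prepStmtName,
 *						  psrc->query_string,
 *						  psrc->commandTag,
 *						  cplan->stmts,
 *						  cplan);
 *
 * psrc here stands for a CachedPlanSource; passing a NULL owner to
 * GetCachedPlan() means the reference is not tracked by a resource owner, so
 * the portal must release it explicitly.
 */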
304 : :
305 : : /*
306 : : * PortalReleaseCachedPlan
307 : : * Release a portal's reference to its cached plan, if any.
308 : : */
309 : : static void
310 : 402945 : PortalReleaseCachedPlan(Portal portal)
311 : : {
312 [ + + ]: 402945 : if (portal->cplan)
313 : : {
1786 314 : 19055 : ReleaseCachedPlan(portal->cplan, NULL);
6853 315 : 19055 : portal->cplan = NULL;
316 : :
317 : : /*
318 : : * We must also clear portal->stmts which is now a dangling reference
319 : : * to the cached plan's plan list. This protects any code that might
320 : : * try to examine the Portal later.
321 : : */
5811 322 : 19055 : portal->stmts = NIL;
323 : : }
8264 324 : 402945 : }
325 : :
326 : : /*
327 : : * PortalCreateHoldStore
328 : : * Create the tuplestore for a portal.
329 : : */
330 : : void
8260 331 : 25088 : PortalCreateHoldStore(Portal portal)
332 : : {
333 : : MemoryContext oldcxt;
334 : :
335 [ - + ]: 25088 : Assert(portal->holdContext == NULL);
336 [ - + ]: 25088 : Assert(portal->holdStore == NULL);
3418 337 [ - + ]: 25088 : Assert(portal->holdSnapshot == NULL);
338 : :
339 : : /*
340 : : * Create the memory context that is used for storage of the tuple set.
341 : : * Note this is NOT a child of the portal's portalContext.
342 : : */
8260 343 : 25088 : portal->holdContext =
2922 peter_e@gmx.net 344 : 25088 : AllocSetContextCreate(TopPortalContext,
345 : : "PortalHoldContext",
346 : : ALLOCSET_DEFAULT_SIZES);
347 : :
348 : : /*
349 : : * Create the tuple store, selecting cross-transaction temp files, and
350 : : * enabling random access only if cursor requires scrolling.
351 : : *
352 : : * XXX: Should maintenance_work_mem be used for the portal size?
353 : : */
8260 tgl@sss.pgh.pa.us 354 : 25088 : oldcxt = MemoryContextSwitchTo(portal->holdContext);
355 : :
6257 356 : 25088 : portal->holdStore =
357 : 25088 : tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
358 : : true, work_mem);
359 : :
8260 360 : 25088 : MemoryContextSwitchTo(oldcxt);
361 : 25088 : }
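/*
 * Sketch of the other half (hypothetical reader, details approximate): once a
 * portal has been persisted into the hold store, fetching from it switches
 * into holdContext and pulls rows back out of the tuplestore, roughly:
 *
 *		oldcxt = MemoryContextSwitchTo(portal->holdContext);
 *		while (tuplestore_gettupleslot(portal->holdStore, forward, false, slot))
 *			... hand the tuple to the DestReceiver ...
 *		MemoryContextSwitchTo(oldcxt);
 *
 * The real code for this lives in pquery.c (RunFromStore) and portalcmds.c.
 */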
362 : :
363 : : /*
364 : : * PinPortal
365 : : * Protect a portal from dropping.
366 : : *
367 : : * Note that even a pinned portal is still unpinned and dropped at
368 : : * transaction or subtransaction abort.
369 : : */
370 : : void
5643 heikki.linnakangas@i 371 : 6042 : PinPortal(Portal portal)
372 : : {
373 [ - + ]: 6042 : if (portal->portalPinned)
5643 heikki.linnakangas@i 374 [ # # ]:UBC 0 : elog(ERROR, "portal already pinned");
375 : :
5643 heikki.linnakangas@i 376 :CBC 6042 : portal->portalPinned = true;
377 : 6042 : }
378 : :
379 : : void
380 : 6018 : UnpinPortal(Portal portal)
381 : : {
382 [ - + ]: 6018 : if (!portal->portalPinned)
5643 heikki.linnakangas@i 383 [ # # ]:UBC 0 : elog(ERROR, "portal not pinned");
384 : :
5643 heikki.linnakangas@i 385 :CBC 6018 : portal->portalPinned = false;
386 : 6018 : }
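/*
 * Usage sketch (hypothetical caller): a PL that iterates over a query result,
 * such as PL/pgSQL's FOR-loop execution, pins the driving portal so that user
 * code run inside the loop cannot drop it out from under the loop:
 *
 *		PinPortal(portal);
 *		... repeatedly fetch from the portal, running user code per row ...
 *		UnpinPortal(portal);
 *		... close/drop the portal in the normal way ...
 */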
387 : :
388 : : /*
389 : : * MarkPortalActive
390 : : * Transition a portal from READY to ACTIVE state.
391 : : *
392 : : * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
393 : : */
394 : : void
3756 tgl@sss.pgh.pa.us 395 : 404251 : MarkPortalActive(Portal portal)
396 : : {
397 : : /* For safety, this is a runtime test not just an Assert */
398 [ + + ]: 404251 : if (portal->status != PORTAL_READY)
399 [ + - ]: 9 : ereport(ERROR,
400 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
401 : : errmsg("portal \"%s\" cannot be run", portal->name)));
402 : : /* Perform the state transition */
403 : 404242 : portal->status = PORTAL_ACTIVE;
404 : 404242 : portal->activeSubid = GetCurrentSubTransactionId();
405 : 404242 : }
406 : :
407 : : /*
408 : : * MarkPortalDone
409 : : * Transition a portal from ACTIVE to DONE state.
410 : : *
411 : : * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
412 : : */
413 : : void
5402 414 : 200085 : MarkPortalDone(Portal portal)
415 : : {
416 : : /* Perform the state transition */
417 [ - + ]: 200085 : Assert(portal->status == PORTAL_ACTIVE);
418 : 200085 : portal->status = PORTAL_DONE;
419 : :
420 : : /*
421 : : * Allow portalcmds.c to clean up the state it knows about. We might as
422 : : * well do that now, since the portal can't be executed any more.
423 : : *
424 : : * In some cases involving execution of a ROLLBACK command in an already
425 : : * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
426 : : * with the cleanup hook still unexecuted.
427 : : */
83 peter@eisentraut.org 428 [ + + ]:GNC 200085 : if (portal->cleanup)
429 : : {
3022 peter_e@gmx.net 430 :CBC 200060 : portal->cleanup(portal);
5053 tgl@sss.pgh.pa.us 431 : 200060 : portal->cleanup = NULL;
432 : : }
433 : 200085 : }
434 : :
435 : : /*
436 : : * MarkPortalFailed
437 : : * Transition a portal into FAILED state.
438 : : *
439 : : * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
440 : : */
441 : : void
442 : 15843 : MarkPortalFailed(Portal portal)
443 : : {
444 : : /* Perform the state transition */
445 [ - + ]: 15843 : Assert(portal->status != PORTAL_DONE);
446 : 15843 : portal->status = PORTAL_FAILED;
447 : :
448 : : /*
449 : : * Allow portalcmds.c to clean up the state it knows about. We might as
450 : : * well do that now, since the portal can't be executed any more.
451 : : *
452 : : * In some cases involving cleanup of an already aborted transaction, this
453 : : * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
454 : : * still unexecuted.
455 : : */
83 peter@eisentraut.org 456 [ + + ]:GNC 15843 : if (portal->cleanup)
457 : : {
3022 peter_e@gmx.net 458 :CBC 15836 : portal->cleanup(portal);
5402 tgl@sss.pgh.pa.us 459 : 15836 : portal->cleanup = NULL;
460 : : }
461 : 15843 : }
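/*
 * For orientation, the normal sequence of status values (see PortalStatus in
 * portal.h) is approximately:
 *
 *		PORTAL_NEW     -- just created (CreatePortal)
 *		PORTAL_DEFINED -- query supplied (PortalDefineQuery)
 *		PORTAL_READY   -- PortalStart complete, can be run
 *		PORTAL_ACTIVE  -- being run (MarkPortalActive)
 *		PORTAL_DONE    -- run to completion (MarkPortalDone)
 *		PORTAL_FAILED  -- error during execution or abort (MarkPortalFailed)
 *
 * This summary is descriptive only; portal.h remains the authoritative
 * definition of the states and their meaning.
 */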
462 : :
463 : : /*
464 : : * PortalDrop
465 : : * Destroy the portal.
466 : : */
467 : : void
7822 468 : 386944 : PortalDrop(Portal portal, bool isTopCommit)
469 : : {
1145 peter@eisentraut.org 470 [ - + ]: 386944 : Assert(PortalIsValid(portal));
471 : :
472 : : /*
473 : : * Don't allow dropping a pinned portal, it's still needed by whoever
474 : : * pinned it.
475 : : */
2897 peter_e@gmx.net 476 [ - + ]: 386944 : if (portal->portalPinned)
2897 peter_e@gmx.net 477 [ # # ]:UBC 0 : ereport(ERROR,
478 : : (errcode(ERRCODE_INVALID_CURSOR_STATE),
479 : : errmsg("cannot drop pinned portal \"%s\"", portal->name)));
480 : :
481 : : /*
482 : : * Not sure if the PORTAL_ACTIVE case can validly happen or not...
483 : : */
2897 peter_e@gmx.net 484 [ - + ]:CBC 386944 : if (portal->status == PORTAL_ACTIVE)
5643 heikki.linnakangas@i 485 [ # # ]:UBC 0 : ereport(ERROR,
486 : : (errcode(ERRCODE_INVALID_CURSOR_STATE),
487 : : errmsg("cannot drop active portal \"%s\"", portal->name)));
488 : :
489 : : /*
490 : : * Allow portalcmds.c to clean up the state it knows about, in particular
491 : : * shutting down the executor if still active. This step potentially runs
492 : : * user-defined code so failure has to be expected. It's the cleanup
493 : : * hook's responsibility to not try to do that more than once, in the case
494 : : * that failure occurs and then we come back to drop the portal again
495 : : * during transaction abort.
496 : : *
497 : : * Note: in most paths of control, this will have been done already in
498 : : * MarkPortalDone or MarkPortalFailed. We're just making sure.
499 : : */
83 peter@eisentraut.org 500 [ + + ]:GNC 386944 : if (portal->cleanup)
501 : : {
3022 peter_e@gmx.net 502 :CBC 170998 : portal->cleanup(portal);
5406 tgl@sss.pgh.pa.us 503 : 170998 : portal->cleanup = NULL;
504 : : }
505 : :
506 : : /* There shouldn't be an active snapshot anymore, except after error */
1670 507 [ + + - + ]: 386944 : Assert(portal->portalSnapshot == NULL || !isTopCommit);
508 : :
509 : : /*
510 : : * Remove portal from hash table. Because we do this here, we will not
511 : : * come back to try to remove the portal again if there's any error in the
512 : : * subsequent steps. Better to leak a little memory than to get into an
513 : : * infinite error-recovery loop.
514 : : */
9302 515 [ - + - - ]: 386944 : PortalHashTableDelete(portal);
516 : :
517 : : /* drop cached plan reference, if any */
5811 518 : 386944 : PortalReleaseCachedPlan(portal);
519 : :
520 : : /*
521 : : * If portal has a snapshot protecting its data, release that. This needs
522 : : * a little care since the registration will be attached to the portal's
523 : : * resowner; if the portal failed, we will already have released the
524 : : * resowner (and the snapshot) during transaction abort.
525 : : */
3418 526 [ + + ]: 386944 : if (portal->holdSnapshot)
527 : : {
528 [ + + ]: 21785 : if (portal->resowner)
529 : 21603 : UnregisterSnapshotFromOwner(portal->holdSnapshot,
530 : : portal->resowner);
531 : 21785 : portal->holdSnapshot = NULL;
532 : : }
533 : :
534 : : /*
535 : : * Release any resources still attached to the portal. There are several
536 : : * cases being covered here:
537 : : *
538 : : * Top transaction commit (indicated by isTopCommit): normally we should
539 : : * do nothing here and let the regular end-of-transaction resource
540 : : * releasing mechanism handle these resources too. However, if we have a
541 : : * FAILED portal (eg, a cursor that got an error), we'd better clean up
542 : : * its resources to avoid resource-leakage warning messages.
543 : : *
544 : : * Sub transaction commit: never comes here at all, since we don't kill
545 : : * any portals in AtSubCommit_Portals().
546 : : *
547 : : * Main or sub transaction abort: we will do nothing here because
548 : : * portal->resowner was already set NULL; the resources were already
549 : : * cleaned up in transaction abort.
550 : : *
551 : : * Ordinary portal drop: must release resources. However, if the portal
552 : : * is not FAILED then we do not release its locks. The locks become the
553 : : * responsibility of the transaction's ResourceOwner (since it is the
554 : : * parent of the portal's owner) and will be released when the transaction
555 : : * eventually ends.
556 : : */
7822 557 [ + + ]: 386944 : if (portal->resowner &&
558 [ + + - + ]: 365412 : (!isTopCommit || portal->status == PORTAL_FAILED))
559 : : {
7779 bruce@momjian.us 560 : 360357 : bool isCommit = (portal->status != PORTAL_FAILED);
561 : :
7822 tgl@sss.pgh.pa.us 562 : 360357 : ResourceOwnerRelease(portal->resowner,
563 : : RESOURCE_RELEASE_BEFORE_LOCKS,
564 : : isCommit, false);
565 : 360357 : ResourceOwnerRelease(portal->resowner,
566 : : RESOURCE_RELEASE_LOCKS,
567 : : isCommit, false);
568 : 360357 : ResourceOwnerRelease(portal->resowner,
569 : : RESOURCE_RELEASE_AFTER_LOCKS,
570 : : isCommit, false);
7783 571 : 360357 : ResourceOwnerDelete(portal->resowner);
572 : : }
7822 573 : 386944 : portal->resowner = NULL;
574 : :
575 : : /*
576 : : * Delete tuplestore if present. We should do this even under error
577 : : * conditions; since the tuplestore would have been using cross-
578 : : * transaction storage, its temp files need to be explicitly deleted.
579 : : */
8260 580 [ + + ]: 386944 : if (portal->holdStore)
581 : : {
582 : : MemoryContext oldcontext;
583 : :
584 : 25079 : oldcontext = MemoryContextSwitchTo(portal->holdContext);
585 : 25079 : tuplestore_end(portal->holdStore);
586 : 25079 : MemoryContextSwitchTo(oldcontext);
587 : 25079 : portal->holdStore = NULL;
588 : : }
589 : :
590 : : /* delete tuplestore storage, if any */
8300 bruce@momjian.us 591 [ + + ]: 386944 : if (portal->holdContext)
592 : 25079 : MemoryContextDelete(portal->holdContext);
593 : :
594 : : /* release subsidiary storage */
2922 peter_e@gmx.net 595 : 386944 : MemoryContextDelete(portal->portalContext);
596 : :
597 : : /* release portal struct (it's in TopPortalContext) */
9302 tgl@sss.pgh.pa.us 598 : 386944 : pfree(portal);
10752 scrappy@hub.org 599 : 386944 : }
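/*
 * Illustrative sketch (hypothetical caller): an explicit CLOSE of a named
 * cursor reaches this function through roughly this sequence in portalcmds.c:
 *
 *		portal = GetPortalByName(name);
 *		if (!PortalIsValid(portal))
 *			ereport(ERROR,
 *					(errcode(ERRCODE_UNDEFINED_CURSOR),
 *					 errmsg("cursor \"%s\" does not exist", name)));
 *		PortalDrop(portal, false);
 */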
600 : :
601 : : /*
602 : : * Delete all declared cursors.
603 : : *
604 : : * Used by commands: CLOSE ALL, DISCARD ALL
605 : : */
606 : : void
6823 neilc@samurai.com 607 : 9 : PortalHashTableDeleteAll(void)
608 : : {
609 : : HASH_SEQ_STATUS status;
610 : : PortalHashEnt *hentry;
611 : :
612 [ - + ]: 9 : if (PortalHashTable == NULL)
6823 neilc@samurai.com 613 :UBC 0 : return;
614 : :
6823 neilc@samurai.com 615 :CBC 9 : hash_seq_init(&status, PortalHashTable);
616 [ + + ]: 36 : while ((hentry = hash_seq_search(&status)) != NULL)
617 : : {
6606 bruce@momjian.us 618 : 27 : Portal portal = hentry->portal;
619 : :
620 : : /* Can't close the active portal (the one running the command) */
5406 tgl@sss.pgh.pa.us 621 [ + + ]: 27 : if (portal->status == PORTAL_ACTIVE)
622 : 15 : continue;
623 : :
624 : 12 : PortalDrop(portal, false);
625 : :
626 : : /* Restart the iteration in case that led to other drops */
627 : 12 : hash_seq_term(&status);
628 : 12 : hash_seq_init(&status, PortalHashTable);
629 : : }
630 : : }
631 : :
632 : : /*
633 : : * "Hold" a portal. Prepare it for access by later transactions.
634 : : */
635 : : static void
2820 peter_e@gmx.net 636 : 42 : HoldPortal(Portal portal)
637 : : {
638 : : /*
639 : : * Note that PersistHoldablePortal() must release all resources used by
640 : : * the portal that are local to the creating transaction.
641 : : */
642 : 42 : PortalCreateHoldStore(portal);
643 : 42 : PersistHoldablePortal(portal);
644 : :
645 : : /* drop cached plan reference, if any */
646 : 40 : PortalReleaseCachedPlan(portal);
647 : :
648 : : /*
649 : : * Any resources belonging to the portal will be released in the upcoming
650 : : * transaction-wide cleanup; the portal will no longer have its own
651 : : * resources.
652 : : */
653 : 40 : portal->resowner = NULL;
654 : :
655 : : /*
656 : : * Having successfully exported the holdable cursor, mark it as not
657 : : * belonging to this transaction.
658 : : */
659 : 40 : portal->createSubid = InvalidSubTransactionId;
660 : 40 : portal->activeSubid = InvalidSubTransactionId;
1537 tgl@sss.pgh.pa.us 661 : 40 : portal->createLevel = 0;
2820 peter_e@gmx.net 662 : 40 : }
663 : :
664 : : /*
665 : : * Pre-commit processing for portals.
666 : : *
667 : : * Holdable cursors created in this transaction need to be converted to
668 : : * materialized form, since we are going to close down the executor and
669 : : * release locks. Non-holdable portals created in this transaction are
670 : : * simply removed. Portals remaining from prior transactions should be
671 : : * left untouched.
672 : : *
673 : : * Returns true if any portals changed state (possibly causing user-defined
674 : : * code to be run), false if not.
675 : : */
676 : : bool
5406 tgl@sss.pgh.pa.us 677 : 312442 : PreCommit_Portals(bool isPrepare)
678 : : {
7367 bruce@momjian.us 679 : 312442 : bool result = false;
680 : : HASH_SEQ_STATUS status;
681 : : PortalHashEnt *hentry;
682 : :
8838 tgl@sss.pgh.pa.us 683 : 312442 : hash_seq_init(&status, PortalHashTable);
684 : :
685 [ + + ]: 348538 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
686 : : {
8170 bruce@momjian.us 687 : 36096 : Portal portal = hentry->portal;
688 : :
689 : : /*
690 : : * There should be no pinned portals anymore. Complain if someone
691 : : * leaked one. Auto-held portals are allowed; we assume that whoever
692 : : * pinned them is managing them.
693 : : */
2820 peter_e@gmx.net 694 [ + + - + ]: 36096 : if (portal->portalPinned && !portal->autoHeld)
5406 tgl@sss.pgh.pa.us 695 [ # # ]:UBC 0 : elog(ERROR, "cannot commit while a portal is pinned");
696 : :
697 : : /*
698 : : * Do not touch active portals --- this can only happen in the case of
699 : : * a multi-transaction utility command, such as VACUUM, or a commit in
700 : : * a procedure.
701 : : *
702 : : * Note however that any resource owner attached to such a portal is
703 : : * still going to go away, so don't leave a dangling pointer. Also
704 : : * unregister any snapshots held by the portal, mainly to avoid
705 : : * snapshot leak warnings from ResourceOwnerRelease().
706 : : */
5406 tgl@sss.pgh.pa.us 707 [ + + ]:CBC 36096 : if (portal->status == PORTAL_ACTIVE)
708 : : {
2672 peter_e@gmx.net 709 [ + + ]: 30712 : if (portal->holdSnapshot)
710 : : {
711 [ + - ]: 1 : if (portal->resowner)
712 : 1 : UnregisterSnapshotFromOwner(portal->holdSnapshot,
713 : : portal->resowner);
714 : 1 : portal->holdSnapshot = NULL;
715 : : }
5406 tgl@sss.pgh.pa.us 716 : 30712 : portal->resowner = NULL;
717 : : /* Clear portalSnapshot too, for cleanliness */
1670 718 : 30712 : portal->portalSnapshot = NULL;
5406 719 : 30712 : continue;
720 : : }
721 : :
722 : : /* Is it a holdable portal created in the current xact? */
7822 723 [ + + ]: 5384 : if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
7554 724 [ + + ]: 249 : portal->createSubid != InvalidSubTransactionId &&
7822 725 [ + - ]: 24 : portal->status == PORTAL_READY)
726 : : {
727 : : /*
728 : : * We are exiting the transaction that created a holdable cursor.
729 : : * Instead of dropping the portal, prepare it for access by later
730 : : * transactions.
731 : : *
732 : : * However, if this is PREPARE TRANSACTION rather than COMMIT,
733 : : * refuse PREPARE, because the semantics seem pretty unclear.
734 : : */
5406 735 [ - + ]: 24 : if (isPrepare)
5406 tgl@sss.pgh.pa.us 736 [ # # ]:UBC 0 : ereport(ERROR,
737 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
738 : : errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
739 : :
2820 peter_e@gmx.net 740 :CBC 24 : HoldPortal(portal);
741 : :
742 : : /* Report we changed state */
7554 tgl@sss.pgh.pa.us 743 : 24 : result = true;
744 : : }
5406 745 [ + + ]: 5360 : else if (portal->createSubid == InvalidSubTransactionId)
746 : : {
747 : : /*
748 : : * Do nothing to cursors held over from a previous transaction
749 : : * (including ones we just froze in a previous cycle of this loop)
750 : : */
7554 751 : 267 : continue;
752 : : }
753 : : else
754 : : {
755 : : /* Zap all non-holdable portals */
5406 756 : 5093 : PortalDrop(portal, true);
757 : :
758 : : /* Report we changed state */
759 : 5093 : result = true;
760 : : }
761 : :
762 : : /*
763 : : * After either freezing or dropping a portal, we have to restart the
764 : : * iteration, because we could have invoked user-defined code that
765 : : * caused a drop of the next portal in the hash chain.
766 : : */
6809 767 : 5117 : hash_seq_term(&status);
7524 bruce@momjian.us 768 : 5117 : hash_seq_init(&status, PortalHashTable);
769 : : }
770 : :
5406 tgl@sss.pgh.pa.us 771 : 312442 : return result;
772 : : }
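/*
 * Caller-side sketch (approximate): CommitTransaction() interleaves this with
 * deferred-trigger firing, because closing portals can run user-defined code
 * that queues more triggers (and vice versa):
 *
 *		for (;;)
 *		{
 *			AfterTriggerFireDeferred();
 *			if (!PreCommit_Portals(false))
 *				break;
 *		}
 *
 * PrepareTransaction() runs the same loop with PreCommit_Portals(true).
 */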
773 : :
774 : : /*
775 : : * Abort processing for portals.
776 : : *
777 : : * At this point we run the cleanup hook if present, but we can't release the
778 : : * portal's memory until the later AtCleanup_Portals() call.
779 : : */
780 : : void
8264 781 : 26193 : AtAbort_Portals(void)
782 : : {
783 : : HASH_SEQ_STATUS status;
784 : : PortalHashEnt *hentry;
785 : :
786 : 26193 : hash_seq_init(&status, PortalHashTable);
787 : :
788 [ + + ]: 41565 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
789 : : {
8170 bruce@momjian.us 790 : 15372 : Portal portal = hentry->portal;
791 : :
792 : : /*
793 : : * When elog(FATAL) is in progress, we need to set the active portal to
794 : : * failed, so that PortalCleanup() doesn't run the executor shutdown.
795 : : */
2875 peter_e@gmx.net 796 [ + + + + ]: 15372 : if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
797 : 4 : MarkPortalFailed(portal);
798 : :
799 : : /*
800 : : * Do nothing else to cursors held over from a previous transaction.
801 : : */
7761 tgl@sss.pgh.pa.us 802 [ + + ]: 15372 : if (portal->createSubid == InvalidSubTransactionId)
8264 803 : 68 : continue;
804 : :
805 : : /*
806 : : * Do nothing to auto-held cursors. This is similar to the case of a
807 : : * cursor from a previous transaction, but it could also be that the
808 : : * cursor was auto-held in this transaction, so it wants to live on.
809 : : */
2820 peter_e@gmx.net 810 [ - + ]: 15304 : if (portal->autoHeld)
2820 peter_e@gmx.net 811 :UBC 0 : continue;
812 : :
813 : : /*
814 : : * If it was created in the current transaction, we can't do normal
815 : : * shutdown on a READY portal either; it might refer to objects
816 : : * created in the failed transaction. See comments in
817 : : * AtSubAbort_Portals.
818 : : */
5780 tgl@sss.pgh.pa.us 819 [ + + ]:CBC 15304 : if (portal->status == PORTAL_READY)
5053 820 : 484 : MarkPortalFailed(portal);
821 : :
822 : : /*
823 : : * Allow portalcmds.c to clean up the state it knows about, if we
824 : : * haven't already.
825 : : */
83 peter@eisentraut.org 826 [ + + ]:GNC 15304 : if (portal->cleanup)
827 : : {
3022 peter_e@gmx.net 828 :CBC 50 : portal->cleanup(portal);
8264 tgl@sss.pgh.pa.us 829 : 50 : portal->cleanup = NULL;
830 : : }
831 : :
832 : : /* drop cached plan reference, if any */
5811 833 : 15304 : PortalReleaseCachedPlan(portal);
834 : :
835 : : /*
836 : : * Any resources belonging to the portal will be released in the
837 : : * upcoming transaction-wide cleanup; they will be gone before we run
838 : : * PortalDrop.
839 : : */
7822 840 : 15304 : portal->resowner = NULL;
841 : :
842 : : /*
843 : : * Although we can't delete the portal data structure proper, we can
844 : : * release any memory in subsidiary contexts, such as executor state.
845 : : * The cleanup hook was the last thing that might have needed data
846 : : * there. But leave active portals alone.
847 : : */
2885 peter_e@gmx.net 848 [ + + ]: 15304 : if (portal->status != PORTAL_ACTIVE)
849 : 15214 : MemoryContextDeleteChildren(portal->portalContext);
850 : : }
9114 tgl@sss.pgh.pa.us 851 : 26193 : }
852 : :
853 : : /*
854 : : * Post-abort cleanup for portals.
855 : : *
856 : : * Delete all portals not held over from prior transactions.
857 : : */
858 : : void
8264 859 : 26181 : AtCleanup_Portals(void)
860 : : {
861 : : HASH_SEQ_STATUS status;
862 : : PortalHashEnt *hentry;
863 : :
864 : 26181 : hash_seq_init(&status, PortalHashTable);
865 : :
866 [ + + ]: 40881 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
867 : : {
8170 bruce@momjian.us 868 : 14700 : Portal portal = hentry->portal;
869 : :
870 : : /*
871 : : * Do not touch active portals --- this can only happen in the case of
872 : : * a multi-transaction command.
873 : : */
2885 peter_e@gmx.net 874 [ + + ]: 14700 : if (portal->status == PORTAL_ACTIVE)
875 : 90 : continue;
876 : :
877 : : /*
878 : : * Do nothing to cursors held over from a previous transaction or
879 : : * auto-held ones.
880 : : */
2820 881 [ + + - + ]: 14610 : if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
882 : : {
7806 tgl@sss.pgh.pa.us 883 [ - + ]: 68 : Assert(portal->status != PORTAL_ACTIVE);
884 [ - + ]: 68 : Assert(portal->resowner == NULL);
8264 885 : 68 : continue;
886 : : }
887 : :
888 : : /*
889 : : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
890 : : * let us drop the portal otherwise. Whoever pinned the portal was
891 : : * interrupted by the abort too and won't try to use it anymore.
892 : : */
5643 heikki.linnakangas@i 893 [ + + ]: 14542 : if (portal->portalPinned)
894 : 19 : portal->portalPinned = false;
895 : :
896 : : /*
897 : : * We had better not call any user-defined code during cleanup, so if
898 : : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
899 : : */
83 peter@eisentraut.org 900 [ - + ]:GNC 14542 : if (portal->cleanup)
901 : : {
3046 tgl@sss.pgh.pa.us 902 [ # # ]:UBC 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
903 : 0 : portal->cleanup = NULL;
904 : : }
905 : :
906 : : /* Zap it. */
7822 tgl@sss.pgh.pa.us 907 :CBC 14542 : PortalDrop(portal, false);
908 : : }
8264 909 : 26181 : }
910 : :
911 : : /*
912 : : * Portal-related cleanup when we return to the main loop on error.
913 : : *
914 : : * This is different from the cleanup at transaction abort. Auto-held portals
915 : : * are cleaned up on error but not on transaction abort.
916 : : */
917 : : void
2820 peter_e@gmx.net 918 : 22501 : PortalErrorCleanup(void)
919 : : {
920 : : HASH_SEQ_STATUS status;
921 : : PortalHashEnt *hentry;
922 : :
923 : 22501 : hash_seq_init(&status, PortalHashTable);
924 : :
925 [ + + ]: 46554 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
926 : : {
927 : 1552 : Portal portal = hentry->portal;
928 : :
929 [ + + ]: 1552 : if (portal->autoHeld)
930 : : {
931 : 2 : portal->portalPinned = false;
932 : 2 : PortalDrop(portal, false);
933 : : }
934 : : }
935 : 22501 : }
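/*
 * Orientation note (summary, approximate): on error, the usual order of
 * operations is
 *
 *		AbortTransaction()   -> AtAbort_Portals()    run cleanup hooks,
 *		                                             detach resource owners
 *		CleanupTransaction() -> AtCleanup_Portals()  PortalDrop the remains
 *
 * while PortalErrorCleanup() is invoked separately from the outer error
 * handler in postgres.c, mainly to get rid of auto-held portals left behind
 * by procedures.
 */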
936 : :
937 : : /*
938 : : * Pre-subcommit processing for portals.
939 : : *
940 : : * Reassign portals created or used in the current subtransaction to the
941 : : * parent subtransaction.
942 : : */
943 : : void
7761 tgl@sss.pgh.pa.us 944 : 4431 : AtSubCommit_Portals(SubTransactionId mySubid,
945 : : SubTransactionId parentSubid,
946 : : int parentLevel,
947 : : ResourceOwner parentXactOwner)
948 : : {
949 : : HASH_SEQ_STATUS status;
950 : : PortalHashEnt *hentry;
951 : :
7838 952 : 4431 : hash_seq_init(&status, PortalHashTable);
953 : :
954 [ + + ]: 12701 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
955 : : {
7779 bruce@momjian.us 956 : 3839 : Portal portal = hentry->portal;
957 : :
7761 tgl@sss.pgh.pa.us 958 [ + + ]: 3839 : if (portal->createSubid == mySubid)
959 : : {
960 : 30 : portal->createSubid = parentSubid;
1537 961 : 30 : portal->createLevel = parentLevel;
7822 962 [ + - ]: 30 : if (portal->resowner)
963 : 30 : ResourceOwnerNewParent(portal->resowner, parentXactOwner);
964 : : }
3756 965 [ + + ]: 3839 : if (portal->activeSubid == mySubid)
966 : 110 : portal->activeSubid = parentSubid;
967 : : }
7838 968 : 4431 : }
969 : :
970 : : /*
971 : : * Subtransaction abort handling for portals.
972 : : *
973 : : * Deactivate portals created or used during the failed subtransaction.
974 : : * Note that per AtSubCommit_Portals, this will catch portals created/used
975 : : * in descendants of the subtransaction too.
976 : : *
977 : : * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
978 : : */
979 : : void
7761 980 : 4689 : AtSubAbort_Portals(SubTransactionId mySubid,
981 : : SubTransactionId parentSubid,
982 : : ResourceOwner myXactOwner,
983 : : ResourceOwner parentXactOwner)
984 : : {
985 : : HASH_SEQ_STATUS status;
986 : : PortalHashEnt *hentry;
987 : :
7838 988 : 4689 : hash_seq_init(&status, PortalHashTable);
989 : :
990 [ + + ]: 11381 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
991 : : {
7779 bruce@momjian.us 992 : 6692 : Portal portal = hentry->portal;
993 : :
994 : : /* Was it created in this subtransaction? */
7761 tgl@sss.pgh.pa.us 995 [ + + ]: 6692 : if (portal->createSubid != mySubid)
996 : : {
997 : : /* No, but maybe it was used in this subtransaction? */
3756 998 [ + + ]: 6035 : if (portal->activeSubid == mySubid)
999 : : {
1000 : : /* Maintain activeSubid until the portal is removed */
1001 : 25 : portal->activeSubid = parentSubid;
1002 : :
1003 : : /*
1004 : : * A MarkPortalActive() caller ran an upper-level portal in
1005 : : * this subtransaction and left the portal ACTIVE. This can't
1006 : : * happen, but force the portal into FAILED state for the same
1007 : : * reasons discussed below.
1008 : : *
1009 : : * We assume we can get away without forcing upper-level READY
1010 : : * portals to fail, even if they were run and then suspended.
1011 : : * In theory a suspended upper-level portal could have
1012 : : * acquired some references to objects that are about to be
1013 : : * destroyed, but there should be sufficient defenses against
1014 : : * such cases: the portal's original query cannot contain such
1015 : : * references, and any references within, say, cached plans of
1016 : : * PL/pgSQL functions are not from active queries and should
1017 : : * be protected by revalidation logic.
1018 : : */
1019 [ - + ]: 25 : if (portal->status == PORTAL_ACTIVE)
3756 tgl@sss.pgh.pa.us 1020 :UBC 0 : MarkPortalFailed(portal);
1021 : :
1022 : : /*
1023 : : * Also, if we failed it during the current subtransaction
1024 : : * (either just above, or earlier), reattach its resource
1025 : : * owner to the current subtransaction's resource owner, so
1026 : : * that any resources it still holds will be released while
1027 : : * cleaning up this subtransaction. This prevents some corner
1028 : : * cases wherein we might get Asserts or worse while cleaning
1029 : : * up objects created during the current subtransaction
1030 : : * (because they're still referenced within this portal).
1031 : : */
3756 tgl@sss.pgh.pa.us 1032 [ + + + - ]:CBC 25 : if (portal->status == PORTAL_FAILED && portal->resowner)
1033 : : {
1034 : 7 : ResourceOwnerNewParent(portal->resowner, myXactOwner);
1035 : 7 : portal->resowner = NULL;
1036 : : }
1037 : : }
1038 : : /* Done if it wasn't created in this subtransaction */
7838 1039 : 6035 : continue;
1040 : : }
1041 : :
1042 : : /*
1043 : : * Force any live portals of my own subtransaction into FAILED state.
1044 : : * We have to do this because they might refer to objects created or
1045 : : * changed in the failed subtransaction, leading to crashes within
1046 : : * ExecutorEnd when portalcmds.c tries to close down the portal.
1047 : : * Currently, every MarkPortalActive() caller ensures it updates the
1048 : : * portal status again before relinquishing control, so ACTIVE can't
1049 : : * happen here. If it does happen, dispose of the portal like existing
1050 : : * MarkPortalActive() callers would.
1051 : : */
5780 1052 [ + + ]: 657 : if (portal->status == PORTAL_READY ||
1053 [ - + ]: 129 : portal->status == PORTAL_ACTIVE)
5053 1054 : 528 : MarkPortalFailed(portal);
1055 : :
1056 : : /*
1057 : : * Allow portalcmds.c to clean up the state it knows about, if we
1058 : : * haven't already.
1059 : : */
83 peter@eisentraut.org 1060 [ - + ]:GNC 657 : if (portal->cleanup)
1061 : : {
3022 peter_e@gmx.net 1062 :UBC 0 : portal->cleanup(portal);
5780 tgl@sss.pgh.pa.us 1063 : 0 : portal->cleanup = NULL;
1064 : : }
1065 : :
1066 : : /* drop cached plan reference, if any */
5780 tgl@sss.pgh.pa.us 1067 :CBC 657 : PortalReleaseCachedPlan(portal);
1068 : :
1069 : : /*
1070 : : * Any resources belonging to the portal will be released in the
1071 : : * upcoming transaction-wide cleanup; they will be gone before we run
1072 : : * PortalDrop.
1073 : : */
1074 : 657 : portal->resowner = NULL;
1075 : :
1076 : : /*
1077 : : * Although we can't delete the portal data structure proper, we can
1078 : : * release any memory in subsidiary contexts, such as executor state.
1079 : : * The cleanup hook was the last thing that might have needed data
1080 : : * there.
1081 : : */
2922 peter_e@gmx.net 1082 : 657 : MemoryContextDeleteChildren(portal->portalContext);
1083 : : }
7838 tgl@sss.pgh.pa.us 1084 : 4689 : }
1085 : :
1086 : : /*
1087 : : * Post-subabort cleanup for portals.
1088 : : *
1089 : : * Drop all portals created in the failed subtransaction (but note that
1090 : : * we will not drop any that were reassigned to the parent above).
1091 : : */
1092 : : void
7761 1093 : 4689 : AtSubCleanup_Portals(SubTransactionId mySubid)
1094 : : {
1095 : : HASH_SEQ_STATUS status;
1096 : : PortalHashEnt *hentry;
1097 : :
7838 1098 : 4689 : hash_seq_init(&status, PortalHashTable);
1099 : :
1100 [ + + ]: 11256 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1101 : : {
1102 : 6567 : Portal portal = hentry->portal;
1103 : :
7761 1104 [ + + ]: 6567 : if (portal->createSubid != mySubid)
7838 1105 : 6035 : continue;
1106 : :
1107 : : /*
1108 : : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1109 : : * let us drop the portal otherwise. Whoever pinned the portal was
1110 : : * interrupted by the abort too and won't try to use it anymore.
1111 : : */
5635 heikki.linnakangas@i 1112 [ + + ]: 532 : if (portal->portalPinned)
1113 : 3 : portal->portalPinned = false;
1114 : :
1115 : : /*
1116 : : * We had better not call any user-defined code during cleanup, so if
1117 : : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1118 : : */
83 peter@eisentraut.org 1119 [ - + ]:GNC 532 : if (portal->cleanup)
1120 : : {
3046 tgl@sss.pgh.pa.us 1121 [ # # ]:UBC 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1122 : 0 : portal->cleanup = NULL;
1123 : : }
1124 : :
1125 : : /* Zap it. */
7822 tgl@sss.pgh.pa.us 1126 :CBC 532 : PortalDrop(portal, false);
1127 : : }
7838 1128 : 4689 : }
1129 : :
1130 : : /* Find all available cursors */
1131 : : Datum
7272 neilc@samurai.com 1132 : 60 : pg_cursor(PG_FUNCTION_ARGS)
1133 : : {
6809 tgl@sss.pgh.pa.us 1134 : 60 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1135 : : HASH_SEQ_STATUS hash_seq;
1136 : : PortalHashEnt *hentry;
1137 : :
1138 : : /*
1139 : : * We put all the tuples into a tuplestore in one scan of the hashtable.
1140 : : * This avoids any issue of the hashtable possibly changing between calls.
1141 : : */
1155 michael@paquier.xyz 1142 : 60 : InitMaterializedSRF(fcinfo, 0);
1143 : :
6809 tgl@sss.pgh.pa.us 1144 : 60 : hash_seq_init(&hash_seq, PortalHashTable);
1145 [ + + ]: 186 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1146 : : {
1147 : 126 : Portal portal = hentry->portal;
1148 : : Datum values[6];
1249 peter@eisentraut.org 1149 : 126 : bool nulls[6] = {0};
1150 : :
1151 : : /* report only "visible" entries */
6809 tgl@sss.pgh.pa.us 1152 [ + + ]: 126 : if (!portal->visible)
1153 : 63 : continue;
1154 : : /* also ignore it if PortalDefineQuery hasn't been called yet */
436 1155 [ - + ]: 63 : if (!portal->sourceText)
436 tgl@sss.pgh.pa.us 1156 :UBC 0 : continue;
1157 : :
6475 tgl@sss.pgh.pa.us 1158 :CBC 63 : values[0] = CStringGetTextDatum(portal->name);
6360 1159 : 63 : values[1] = CStringGetTextDatum(portal->sourceText);
7272 neilc@samurai.com 1160 : 63 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1161 : 63 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1162 : 63 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1163 : 63 : values[5] = TimestampTzGetDatum(portal->creation_time);
1164 : :
1380 michael@paquier.xyz 1165 : 63 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1166 : : }
1167 : :
6809 tgl@sss.pgh.pa.us 1168 : 60 : return (Datum) 0;
1169 : : }
1170 : :
1171 : : bool
4763 simon@2ndQuadrant.co 1172 : 30 : ThereAreNoReadyPortals(void)
1173 : : {
1174 : : HASH_SEQ_STATUS status;
1175 : : PortalHashEnt *hentry;
1176 : :
1177 : 30 : hash_seq_init(&status, PortalHashTable);
1178 : :
1179 [ + + ]: 60 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1180 : : {
1181 : 30 : Portal portal = hentry->portal;
1182 : :
1183 [ - + ]: 30 : if (portal->status == PORTAL_READY)
4763 simon@2ndQuadrant.co 1184 :UBC 0 : return false;
1185 : : }
1186 : :
4763 simon@2ndQuadrant.co 1187 :CBC 30 : return true;
1188 : : }
1189 : :
1190 : : /*
1191 : : * Hold all pinned portals.
1192 : : *
1193 : : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1194 : : * called to protect internally-generated cursors from being dropped during
1195 : : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1196 : : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1197 : : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1198 : : * because we need to run user-defined code while persisting a portal.
1199 : : * It's too late to do that once transaction abort has started.)
1200 : : *
1201 : : * We protect such portals by converting them to held cursors. We mark them
1202 : : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1203 : : * non-exception code paths, the PL needs to clean such portals itself, since
1204 : : * transaction end won't do it anymore; but that should be normal practice
1205 : : * anyway.)
1206 : : */
1207 : : void
2820 peter_e@gmx.net 1208 : 2207 : HoldPinnedPortals(void)
1209 : : {
1210 : : HASH_SEQ_STATUS status;
1211 : : PortalHashEnt *hentry;
1212 : :
2885 1213 : 2207 : hash_seq_init(&status, PortalHashTable);
1214 : :
1215 [ + + ]: 4474 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1216 : : {
1217 : 2270 : Portal portal = hentry->portal;
1218 : :
2820 1219 [ + + + + ]: 2270 : if (portal->portalPinned && !portal->autoHeld)
1220 : : {
1221 : : /*
1222 : : * Doing transaction control, especially abort, inside a cursor
1223 : : * loop that is not read-only, for example using UPDATE ...
1224 : : * RETURNING, has weird semantics issues. Also, this
1225 : : * implementation wouldn't work, because such portals cannot be
1226 : : * held. (The core grammar enforces that only SELECT statements
1227 : : * can drive a cursor, but for example PL/pgSQL does not restrict
1228 : : * it.)
1229 : : */
1230 [ + + ]: 19 : if (portal->strategy != PORTAL_ONE_SELECT)
1231 [ + - ]: 1 : ereport(ERROR,
1232 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1233 : : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1234 : :
1235 : : /* Verify it's in a suitable state to be held */
2433 tgl@sss.pgh.pa.us 1236 [ - + ]: 18 : if (portal->status != PORTAL_READY)
2433 tgl@sss.pgh.pa.us 1237 [ # # ]:UBC 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1238 : :
2820 peter_e@gmx.net 1239 :CBC 18 : HoldPortal(portal);
2433 tgl@sss.pgh.pa.us 1240 : 16 : portal->autoHeld = true;
1241 : : }
1242 : : }
2885 peter_e@gmx.net 1243 : 2204 : }
1244 : :
1245 : : /*
1246 : : * Drop the outer active snapshots for all portals, so that no snapshots
1247 : : * remain active.
1248 : : *
1249 : : * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1250 : : * ROLLBACK inside a procedure. This has to be separate from that since it
1251 : : * should not be run until we're done with steps that are likely to fail.
1252 : : *
1253 : : * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1254 : : * need to clean up snapshot management in VACUUM and perhaps other places.
1255 : : */
1256 : : void
1670 tgl@sss.pgh.pa.us 1257 : 2204 : ForgetPortalSnapshots(void)
1258 : : {
1259 : : HASH_SEQ_STATUS status;
1260 : : PortalHashEnt *hentry;
1261 : 2204 : int numPortalSnaps = 0;
1262 : 2204 : int numActiveSnaps = 0;
1263 : :
1264 : : /* First, scan PortalHashTable and clear portalSnapshot fields */
1265 : 2204 : hash_seq_init(&status, PortalHashTable);
1266 : :
1267 [ + + ]: 6675 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1268 : : {
1269 : 2267 : Portal portal = hentry->portal;
1270 : :
1271 [ + + ]: 2267 : if (portal->portalSnapshot != NULL)
1272 : : {
1273 : 2204 : portal->portalSnapshot = NULL;
1274 : 2204 : numPortalSnaps++;
1275 : : }
1276 : : /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1277 : : }
1278 : :
1279 : : /*
1280 : : * Now, pop all the active snapshots, which should be just those that were
1281 : : * portal snapshots. Ideally we'd drive this directly off the portal
1282 : : * scan, but there's no good way to visit the portals in the correct
1283 : : * order. So just cross-check after the fact.
1284 : : */
1285 [ + + ]: 4408 : while (ActiveSnapshotSet())
1286 : : {
1287 : 2204 : PopActiveSnapshot();
1288 : 2204 : numActiveSnaps++;
1289 : : }
1290 : :
1291 [ - + ]: 2204 : if (numPortalSnaps != numActiveSnaps)
1670 tgl@sss.pgh.pa.us 1292 [ # # ]:UBC 0 : elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1293 : : numPortalSnaps, numActiveSnaps);
1670 tgl@sss.pgh.pa.us 1294 :CBC 2204 : }
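/*
 * Usage sketch (hypothetical caller, order approximate): SPI's transaction
 * control for procedures uses HoldPinnedPortals() and ForgetPortalSnapshots()
 * together when COMMIT or ROLLBACK is executed inside a procedure:
 *
 *		HoldPinnedPortals();		// may run user code, so do it first
 *		ForgetPortalSnapshots();	// no failure-prone work after this
 *		CommitTransactionCommand();	// or the abort equivalent
 *		StartTransactionCommand();
 *
 * The real logic lives in spi.c (_SPI_commit / _SPI_rollback).
 */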