Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * portalmem.c
4 : : * backend portal memory management
5 : : *
6 : : * Portals are objects representing the execution state of a query.
7 : : * This module provides memory management services for portals, but it
8 : : * doesn't actually run the executor for them.
9 : : *
10 : : *
11 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
12 : : * Portions Copyright (c) 1994, Regents of the University of California
13 : : *
14 : : * IDENTIFICATION
15 : : * src/backend/utils/mmgr/portalmem.c
16 : : *
17 : : *-------------------------------------------------------------------------
18 : : */
19 : : #include "postgres.h"
20 : :
21 : : #include "access/xact.h"
22 : : #include "commands/portalcmds.h"
23 : : #include "funcapi.h"
24 : : #include "miscadmin.h"
25 : : #include "storage/ipc.h"
26 : : #include "utils/builtins.h"
27 : : #include "utils/hsearch.h"
28 : : #include "utils/memutils.h"
29 : : #include "utils/snapmgr.h"
30 : : #include "utils/timestamp.h"
31 : : #include "utils/tuplestore.h"
32 : :
33 : : /*
34 : : * Estimate of the maximum number of open portals a user would have,
35 : : * used in initially sizing the PortalHashTable in EnablePortalManager().
36 : : * Since the hash table can expand, there's no need to make this overly
37 : : * generous, and keeping it small avoids unnecessary overhead in the
38 : : * hash_seq_search() calls executed during transaction end.
39 : : */
40 : : #define PORTALS_PER_USER 16
41 : :
42 : :
43 : : /* ----------------
44 : : * Global state
45 : : * ----------------
46 : : */
47 : :
48 : : #define MAX_PORTALNAME_LEN NAMEDATALEN
49 : :
50 : : typedef struct portalhashent
51 : : {
52 : : char portalname[MAX_PORTALNAME_LEN];
53 : : Portal portal;
54 : : } PortalHashEnt;
55 : :
56 : : static HTAB *PortalHashTable = NULL;
57 : :
58 : : #define PortalHashTableLookup(NAME, PORTAL) \
59 : : do { \
60 : : PortalHashEnt *hentry; \
61 : : \
62 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
63 : : (NAME), HASH_FIND, NULL); \
64 : : if (hentry) \
65 : : PORTAL = hentry->portal; \
66 : : else \
67 : : PORTAL = NULL; \
68 : : } while(0)
69 : :
70 : : #define PortalHashTableInsert(PORTAL, NAME) \
71 : : do { \
72 : : PortalHashEnt *hentry; bool found; \
73 : : \
74 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
75 : : (NAME), HASH_ENTER, &found); \
76 : : if (found) \
77 : : elog(ERROR, "duplicate portal name"); \
78 : : hentry->portal = PORTAL; \
79 : : /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
80 : : PORTAL->name = hentry->portalname; \
81 : : } while(0)
82 : :
83 : : #define PortalHashTableDelete(PORTAL) \
84 : : do { \
85 : : PortalHashEnt *hentry; \
86 : : \
87 : : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
88 : : PORTAL->name, HASH_REMOVE, NULL); \
89 : : if (hentry == NULL) \
90 : : elog(WARNING, "trying to delete portal name that does not exist"); \
91 : : } while(0)
92 : :
93 : : static MemoryContext TopPortalContext = NULL;
94 : :
95 : :
96 : : /* ----------------------------------------------------------------
97 : : * public portal interface functions
98 : : * ----------------------------------------------------------------
99 : : */
100 : :
101 : : /*
102 : : * EnablePortalManager
103 : : * Enables the portal management module at backend startup.
104 : : */
105 : : void
9442 tgl@sss.pgh.pa.us 106 :CBC 18714 : EnablePortalManager(void)
107 : : {
108 : : HASHCTL ctl;
109 : :
3062 peter_e@gmx.net 110 [ - + ]: 18714 : Assert(TopPortalContext == NULL);
111 : :
112 : 18714 : TopPortalContext = AllocSetContextCreate(TopMemoryContext,
113 : : "TopPortalContext",
114 : : ALLOCSET_DEFAULT_SIZES);
115 : :
9442 tgl@sss.pgh.pa.us 116 : 18714 : ctl.keysize = MAX_PORTALNAME_LEN;
8982 117 : 18714 : ctl.entrysize = sizeof(PortalHashEnt);
118 : :
119 : : /*
120 : : * use PORTALS_PER_USER as a guess of how many hash table entries to
121 : : * create, initially
122 : : */
8978 123 : 18714 : PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
124 : : &ctl, HASH_ELEM | HASH_STRINGS);
10892 scrappy@hub.org 125 : 18714 : }
126 : :
127 : : /*
128 : : * GetPortalByName
129 : : * Returns a portal given a portal name, or NULL if name not found.
130 : : */
131 : : Portal
8527 tgl@sss.pgh.pa.us 132 : 532779 : GetPortalByName(const char *name)
133 : : {
134 : : Portal portal;
135 : :
223 peter@eisentraut.org 136 [ + - ]:GNC 532779 : if (name)
10467 bruce@momjian.us 137 [ + + ]:CBC 532779 : PortalHashTableLookup(name, portal);
138 : : else
9442 tgl@sss.pgh.pa.us 139 :UBC 0 : portal = NULL;
140 : :
10108 bruce@momjian.us 141 :CBC 532779 : return portal;
142 : : }
143 : :
144 : : /*
145 : : * PortalGetPrimaryStmt
146 : : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
147 : : *
148 : : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
149 : : * portal are marked canSetTag, returns the first one. Neither of these
150 : : * cases should occur in present usages of this function.
151 : : */
152 : : PlannedStmt *
3398 tgl@sss.pgh.pa.us 153 : 235721 : PortalGetPrimaryStmt(Portal portal)
154 : : {
155 : : ListCell *lc;
156 : :
157 [ + - + - : 235721 : foreach(lc, portal->stmts)
+ - ]
158 : : {
3312 159 : 235721 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
160 : :
3398 161 [ + - ]: 235721 : if (stmt->canSetTag)
162 : 235721 : return stmt;
163 : : }
7204 tgl@sss.pgh.pa.us 164 :UBC 0 : return NULL;
165 : : }
166 : :
167 : : /*
168 : : * CreatePortal
169 : : * Returns a new portal given a name.
170 : : *
171 : : * allowDup: if true, automatically drop any pre-existing portal of the
172 : : * same name (if false, an error is raised).
173 : : *
174 : : * dupSilent: if true, don't even emit a WARNING.
175 : : */
176 : : Portal
8404 tgl@sss.pgh.pa.us 177 :CBC 474750 : CreatePortal(const char *name, bool allowDup, bool dupSilent)
178 : : {
179 : : Portal portal;
180 : :
223 peter@eisentraut.org 181 [ - + ]:GNC 474750 : Assert(name);
182 : :
10467 bruce@momjian.us 183 :CBC 474750 : portal = GetPortalByName(name);
184 [ + + ]: 474750 : if (PortalIsValid(portal))
185 : : {
8404 tgl@sss.pgh.pa.us 186 [ - + ]: 5820 : if (!allowDup)
8320 tgl@sss.pgh.pa.us 187 [ # # ]:UBC 0 : ereport(ERROR,
188 : : (errcode(ERRCODE_DUPLICATE_CURSOR),
189 : : errmsg("cursor \"%s\" already exists", name)));
8404 tgl@sss.pgh.pa.us 190 [ - + ]:CBC 5820 : if (!dupSilent)
8320 tgl@sss.pgh.pa.us 191 [ # # ]:UBC 0 : ereport(WARNING,
192 : : (errcode(ERRCODE_DUPLICATE_CURSOR),
193 : : errmsg("closing existing cursor \"%s\"",
194 : : name)));
8404 tgl@sss.pgh.pa.us 195 :CBC 5820 : PortalDrop(portal, false);
196 : : }
197 : :
198 : : /* make new portal structure */
3062 peter_e@gmx.net 199 : 474750 : portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
200 : :
201 : : /* initialize portal context; typically it won't store much */
202 : 474750 : portal->portalContext = AllocSetContextCreate(TopPortalContext,
203 : : "PortalContext",
204 : : ALLOCSET_SMALL_SIZES);
205 : :
206 : : /* create a resource owner for the portal */
7962 tgl@sss.pgh.pa.us 207 : 474750 : portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
208 : : "Portal");
209 : :
210 : : /* initialize portal fields that don't start off zero */
7412 neilc@samurai.com 211 : 474750 : portal->status = PORTAL_NEW;
8407 tgl@sss.pgh.pa.us 212 : 474750 : portal->cleanup = PortalCleanup;
7901 213 : 474750 : portal->createSubid = GetCurrentSubTransactionId();
3896 214 : 474750 : portal->activeSubid = portal->createSubid;
1677 215 : 474750 : portal->createLevel = GetCurrentTransactionNestLevel();
8404 216 : 474750 : portal->strategy = PORTAL_MULTI_QUERY;
217 : 474750 : portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
8456 218 : 474750 : portal->atStart = true;
219 : 474750 : portal->atEnd = true; /* disallow fetches until query is set */
7412 neilc@samurai.com 220 : 474750 : portal->visible = true;
7259 tgl@sss.pgh.pa.us 221 : 474750 : portal->creation_time = GetCurrentStatementStartTimestamp();
222 : :
223 : : /* put portal in table (sets portal->name) */
8404 224 [ - + - - ]: 474750 : PortalHashTableInsert(portal, name);
225 : :
226 : : /* for named portals reuse portal->name copy */
2065 peter@eisentraut.org 227 [ + + ]: 474750 : MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
228 : :
10108 bruce@momjian.us 229 : 474750 : return portal;
230 : : }
231 : :
232 : : /*
233 : : * CreateNewPortal
234 : : * Create a new portal, assigning it a random nonconflicting name.
235 : : */
236 : : Portal
8404 tgl@sss.pgh.pa.us 237 : 16968 : CreateNewPortal(void)
238 : : {
239 : : static unsigned int unnamed_portal_count = 0;
240 : :
241 : : char portalname[MAX_PORTALNAME_LEN];
242 : :
243 : : /* Select a nonconflicting name */
244 : : for (;;)
245 : : {
246 : 16968 : unnamed_portal_count++;
247 : 16968 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
248 [ + - ]: 16968 : if (GetPortalByName(portalname) == NULL)
249 : 16968 : break;
250 : : }
251 : :
252 : 16968 : return CreatePortal(portalname, false, false);
253 : : }
254 : :
255 : : /*
256 : : * PortalDefineQuery
257 : : * A simple subroutine to establish a portal's query.
258 : : *
259 : : * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
260 : : * allowed anymore to pass NULL. (If you really don't have source text,
261 : : * you can pass a constant string, perhaps "(query not available)".)
262 : : *
263 : : * commandTag shall be NULL if and only if the original query string
264 : : * (before rewriting) was an empty string. Also, the passed commandTag must
265 : : * be a pointer to a constant string, since it is not copied.
266 : : *
267 : : * If cplan is provided, then it is a cached plan containing the stmts, and
268 : : * the caller must have done GetCachedPlan(), causing a refcount increment.
269 : : * The refcount will be released when the portal is destroyed.
270 : : *
271 : : * If cplan is NULL, then it is the caller's responsibility to ensure that
272 : : * the passed plan trees have adequate lifetime. Typically this is done by
273 : : * copying them into the portal's context.
274 : : *
275 : : * The caller is also responsible for ensuring that the passed prepStmtName
276 : : * (if not NULL) and sourceText have adequate lifetime.
277 : : *
278 : : * NB: this function mustn't do much beyond storing the passed values; in
279 : : * particular don't do anything that risks elog(ERROR). If that were to
280 : : * happen here before storing the cplan reference, we'd leak the plancache
281 : : * refcount that the caller is trying to hand off to us.
282 : : */
283 : : void
284 : 474704 : PortalDefineQuery(Portal portal,
285 : : const char *prepStmtName,
286 : : const char *sourceText,
287 : : CommandTag commandTag,
288 : : List *stmts,
289 : : CachedPlan *cplan)
290 : : {
1285 peter@eisentraut.org 291 [ - + ]: 474704 : Assert(PortalIsValid(portal));
292 [ - + ]: 474704 : Assert(portal->status == PORTAL_NEW);
293 : :
294 [ - + ]: 474704 : Assert(sourceText != NULL);
295 [ - + - - ]: 474704 : Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
296 : :
6607 tgl@sss.pgh.pa.us 297 : 474704 : portal->prepStmtName = prepStmtName;
298 : 474704 : portal->sourceText = sourceText;
8404 299 : 474704 : portal->commandTag = commandTag;
95 alvherre@kurilemu.de 300 :GNC 474704 : SetQueryCompletion(&portal->qc, commandTag, 0);
7014 tgl@sss.pgh.pa.us 301 :CBC 474704 : portal->stmts = stmts;
6993 302 : 474704 : portal->cplan = cplan;
303 : 474704 : portal->status = PORTAL_DEFINED;
304 : 474704 : }
305 : :
306 : : /*
307 : : * PortalReleaseCachedPlan
308 : : * Release a portal's reference to its cached plan, if any.
309 : : */
310 : : static void
311 : 496915 : PortalReleaseCachedPlan(Portal portal)
312 : : {
313 [ + + ]: 496915 : if (portal->cplan)
314 : : {
1926 315 : 20570 : ReleaseCachedPlan(portal->cplan, NULL);
6993 316 : 20570 : portal->cplan = NULL;
317 : :
318 : : /*
319 : : * We must also clear portal->stmts which is now a dangling reference
320 : : * to the cached plan's plan list. This protects any code that might
321 : : * try to examine the Portal later.
322 : : */
5951 323 : 20570 : portal->stmts = NIL;
324 : : }
8404 325 : 496915 : }
326 : :
327 : : /*
328 : : * PortalCreateHoldStore
329 : : * Create the tuplestore for a portal.
330 : : */
331 : : void
8400 332 : 31160 : PortalCreateHoldStore(Portal portal)
333 : : {
334 : : MemoryContext oldcxt;
335 : :
336 [ - + ]: 31160 : Assert(portal->holdContext == NULL);
337 [ - + ]: 31160 : Assert(portal->holdStore == NULL);
3558 338 [ - + ]: 31160 : Assert(portal->holdSnapshot == NULL);
339 : :
340 : : /*
341 : : * Create the memory context that is used for storage of the tuple set.
342 : : * Note this is NOT a child of the portal's portalContext.
343 : : */
8400 344 : 31160 : portal->holdContext =
3062 peter_e@gmx.net 345 : 31160 : AllocSetContextCreate(TopPortalContext,
346 : : "PortalHoldContext",
347 : : ALLOCSET_DEFAULT_SIZES);
348 : :
349 : : /*
350 : : * Create the tuple store, selecting cross-transaction temp files, and
351 : : * enabling random access only if cursor requires scrolling.
352 : : *
353 : : * XXX: Should maintenance_work_mem be used for the portal size?
354 : : */
8400 tgl@sss.pgh.pa.us 355 : 31160 : oldcxt = MemoryContextSwitchTo(portal->holdContext);
356 : :
6397 357 : 31160 : portal->holdStore =
358 : 31160 : tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
359 : : true, work_mem);
360 : :
8400 361 : 31160 : MemoryContextSwitchTo(oldcxt);
362 : 31160 : }
363 : :
364 : : /*
365 : : * PinPortal
366 : : * Protect a portal from dropping.
367 : : *
368 : : * A pinned portal is still unpinned and dropped at transaction or
369 : : * subtransaction abort.
370 : : */
371 : : void
5783 heikki.linnakangas@i 372 : 8121 : PinPortal(Portal portal)
373 : : {
374 [ - + ]: 8121 : if (portal->portalPinned)
5783 heikki.linnakangas@i 375 [ # # ]:UBC 0 : elog(ERROR, "portal already pinned");
376 : :
5783 heikki.linnakangas@i 377 :CBC 8121 : portal->portalPinned = true;
378 : 8121 : }
379 : :
380 : : void
381 : 8093 : UnpinPortal(Portal portal)
382 : : {
383 [ - + ]: 8093 : if (!portal->portalPinned)
5783 heikki.linnakangas@i 384 [ # # ]:UBC 0 : elog(ERROR, "portal not pinned");
385 : :
5783 heikki.linnakangas@i 386 :CBC 8093 : portal->portalPinned = false;
387 : 8093 : }
388 : :
389 : : /*
390 : : * MarkPortalActive
391 : : * Transition a portal from READY to ACTIVE state.
392 : : *
393 : : * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
394 : : */
395 : : void
3896 tgl@sss.pgh.pa.us 396 : 497507 : MarkPortalActive(Portal portal)
397 : : {
398 : : /* For safety, this is a runtime test not just an Assert */
399 [ + + ]: 497507 : if (portal->status != PORTAL_READY)
400 [ + - ]: 12 : ereport(ERROR,
401 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
402 : : errmsg("portal \"%s\" cannot be run", portal->name)));
403 : : /* Perform the state transition */
404 : 497495 : portal->status = PORTAL_ACTIVE;
405 : 497495 : portal->activeSubid = GetCurrentSubTransactionId();
406 : 497495 : }
407 : :
408 : : /*
409 : : * MarkPortalDone
410 : : * Transition a portal from ACTIVE to DONE state.
411 : : *
412 : : * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
413 : : */
414 : : void
5542 415 : 237920 : MarkPortalDone(Portal portal)
416 : : {
417 : : /* Perform the state transition */
418 [ - + ]: 237920 : Assert(portal->status == PORTAL_ACTIVE);
419 : 237920 : portal->status = PORTAL_DONE;
420 : :
421 : : /*
422 : : * Allow portalcmds.c to clean up the state it knows about. We might as
423 : : * well do that now, since the portal can't be executed any more.
424 : : *
425 : : * In some cases involving execution of a ROLLBACK command in an already
426 : : * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
427 : : * with the cleanup hook still unexecuted.
428 : : */
223 peter@eisentraut.org 429 [ + + ]:GNC 237920 : if (portal->cleanup)
430 : : {
3162 peter_e@gmx.net 431 :CBC 237885 : portal->cleanup(portal);
5193 tgl@sss.pgh.pa.us 432 : 237885 : portal->cleanup = NULL;
433 : : }
434 : 237920 : }
435 : :
436 : : /*
437 : : * MarkPortalFailed
438 : : * Transition a portal into FAILED state.
439 : : *
440 : : * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
441 : : */
442 : : void
443 : 21971 : MarkPortalFailed(Portal portal)
444 : : {
445 : : /* Perform the state transition */
446 [ - + ]: 21971 : Assert(portal->status != PORTAL_DONE);
447 : 21971 : portal->status = PORTAL_FAILED;
448 : :
449 : : /*
450 : : * Allow portalcmds.c to clean up the state it knows about. We might as
451 : : * well do that now, since the portal can't be executed any more.
452 : : *
453 : : * In some cases involving cleanup of an already aborted transaction, this
454 : : * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
455 : : * still unexecuted.
456 : : */
223 peter@eisentraut.org 457 [ + + ]:GNC 21971 : if (portal->cleanup)
458 : : {
3162 peter_e@gmx.net 459 :CBC 21964 : portal->cleanup(portal);
5542 tgl@sss.pgh.pa.us 460 : 21964 : portal->cleanup = NULL;
461 : : }
462 : 21971 : }
463 : :
464 : : /*
465 : : * PortalDrop
466 : : * Destroy the portal.
467 : : */
468 : : void
7962 469 : 474738 : PortalDrop(Portal portal, bool isTopCommit)
470 : : {
1285 peter@eisentraut.org 471 [ - + ]: 474738 : Assert(PortalIsValid(portal));
472 : :
473 : : /*
474 : : * Don't allow dropping a pinned portal, it's still needed by whoever
475 : : * pinned it.
476 : : */
3037 peter_e@gmx.net 477 [ - + ]: 474738 : if (portal->portalPinned)
3037 peter_e@gmx.net 478 [ # # ]:UBC 0 : ereport(ERROR,
479 : : (errcode(ERRCODE_INVALID_CURSOR_STATE),
480 : : errmsg("cannot drop pinned portal \"%s\"", portal->name)));
481 : :
482 : : /*
483 : : * Not sure if the PORTAL_ACTIVE case can validly happen or not...
484 : : */
3037 peter_e@gmx.net 485 [ - + ]:CBC 474738 : if (portal->status == PORTAL_ACTIVE)
5783 heikki.linnakangas@i 486 [ # # ]:UBC 0 : ereport(ERROR,
487 : : (errcode(ERRCODE_INVALID_CURSOR_STATE),
488 : : errmsg("cannot drop active portal \"%s\"", portal->name)));
489 : :
490 : : /*
491 : : * Allow portalcmds.c to clean up the state it knows about, in particular
492 : : * shutting down the executor if still active. This step potentially runs
493 : : * user-defined code so failure has to be expected. It's the cleanup
494 : : * hook's responsibility to not try to do that more than once, in the case
495 : : * that failure occurs and then we come back to drop the portal again
496 : : * during transaction abort.
497 : : *
498 : : * Note: in most paths of control, this will have been done already in
499 : : * MarkPortalDone or MarkPortalFailed. We're just making sure.
500 : : */
223 peter@eisentraut.org 501 [ + + ]:GNC 474738 : if (portal->cleanup)
502 : : {
3162 peter_e@gmx.net 503 :CBC 214800 : portal->cleanup(portal);
5546 tgl@sss.pgh.pa.us 504 : 214800 : portal->cleanup = NULL;
505 : : }
506 : :
507 : : /* There shouldn't be an active snapshot anymore, except after error */
1810 508 [ + + - + ]: 474738 : Assert(portal->portalSnapshot == NULL || !isTopCommit);
509 : :
510 : : /*
511 : : * Remove portal from hash table. Because we do this here, we will not
512 : : * come back to try to remove the portal again if there's any error in the
513 : : * subsequent steps. Better to leak a little memory than to get into an
514 : : * infinite error-recovery loop.
515 : : */
9442 516 [ - + - - ]: 474738 : PortalHashTableDelete(portal);
517 : :
518 : : /* drop cached plan reference, if any */
5951 519 : 474738 : PortalReleaseCachedPlan(portal);
520 : :
521 : : /*
522 : : * If portal has a snapshot protecting its data, release that. This needs
523 : : * a little care since the registration will be attached to the portal's
524 : : * resowner; if the portal failed, we will already have released the
525 : : * resowner (and the snapshot) during transaction abort.
526 : : */
3558 527 [ + + ]: 474738 : if (portal->holdSnapshot)
528 : : {
529 [ + + ]: 27228 : if (portal->resowner)
530 : 26938 : UnregisterSnapshotFromOwner(portal->holdSnapshot,
531 : : portal->resowner);
532 : 27228 : portal->holdSnapshot = NULL;
533 : : }
534 : :
535 : : /*
536 : : * Release any resources still attached to the portal. There are several
537 : : * cases being covered here:
538 : : *
539 : : * Top transaction commit (indicated by isTopCommit): normally we should
540 : : * do nothing here and let the regular end-of-transaction resource
541 : : * releasing mechanism handle these resources too. However, if we have a
542 : : * FAILED portal (eg, a cursor that got an error), we'd better clean up
543 : : * its resources to avoid resource-leakage warning messages.
544 : : *
545 : : * Sub transaction commit: never comes here at all, since we don't kill
546 : : * any portals in AtSubCommit_Portals().
547 : : *
548 : : * Main or sub transaction abort: we will do nothing here because
549 : : * portal->resowner was already set NULL; the resources were already
550 : : * cleaned up in transaction abort.
551 : : *
552 : : * Ordinary portal drop: must release resources. However, if the portal
553 : : * is not FAILED then we do not release its locks. The locks become the
554 : : * responsibility of the transaction's ResourceOwner (since it is the
555 : : * parent of the portal's owner) and will be released when the transaction
556 : : * eventually ends.
557 : : */
7962 558 [ + + ]: 474738 : if (portal->resowner &&
559 [ + + - + ]: 446009 : (!isTopCommit || portal->status == PORTAL_FAILED))
560 : : {
7919 bruce@momjian.us 561 : 440619 : bool isCommit = (portal->status != PORTAL_FAILED);
562 : :
7962 tgl@sss.pgh.pa.us 563 : 440619 : ResourceOwnerRelease(portal->resowner,
564 : : RESOURCE_RELEASE_BEFORE_LOCKS,
565 : : isCommit, false);
566 : 440619 : ResourceOwnerRelease(portal->resowner,
567 : : RESOURCE_RELEASE_LOCKS,
568 : : isCommit, false);
569 : 440619 : ResourceOwnerRelease(portal->resowner,
570 : : RESOURCE_RELEASE_AFTER_LOCKS,
571 : : isCommit, false);
7923 572 : 440619 : ResourceOwnerDelete(portal->resowner);
573 : : }
7962 574 : 474738 : portal->resowner = NULL;
575 : :
576 : : /*
577 : : * Delete tuplestore if present. We should do this even under error
578 : : * conditions; since the tuplestore would have been using cross-
579 : : * transaction storage, its temp files need to be explicitly deleted.
580 : : */
8400 581 [ + + ]: 474738 : if (portal->holdStore)
582 : : {
583 : : MemoryContext oldcontext;
584 : :
585 : 31148 : oldcontext = MemoryContextSwitchTo(portal->holdContext);
586 : 31148 : tuplestore_end(portal->holdStore);
587 : 31148 : MemoryContextSwitchTo(oldcontext);
588 : 31148 : portal->holdStore = NULL;
589 : : }
590 : :
591 : : /* delete tuplestore storage, if any */
8440 bruce@momjian.us 592 [ + + ]: 474738 : if (portal->holdContext)
593 : 31148 : MemoryContextDelete(portal->holdContext);
594 : :
595 : : /* release subsidiary storage */
3062 peter_e@gmx.net 596 : 474738 : MemoryContextDelete(portal->portalContext);
597 : :
598 : : /* release portal struct (it's in TopPortalContext) */
9442 tgl@sss.pgh.pa.us 599 : 474738 : pfree(portal);
10892 scrappy@hub.org 600 : 474738 : }
601 : :
602 : : /*
603 : : * Delete all declared cursors.
604 : : *
605 : : * Used by commands: CLOSE ALL, DISCARD ALL
606 : : */
607 : : void
6963 neilc@samurai.com 608 : 12 : PortalHashTableDeleteAll(void)
609 : : {
610 : : HASH_SEQ_STATUS status;
611 : : PortalHashEnt *hentry;
612 : :
613 [ - + ]: 12 : if (PortalHashTable == NULL)
6963 neilc@samurai.com 614 :UBC 0 : return;
615 : :
6963 neilc@samurai.com 616 :CBC 12 : hash_seq_init(&status, PortalHashTable);
617 [ + + ]: 48 : while ((hentry = hash_seq_search(&status)) != NULL)
618 : : {
6746 bruce@momjian.us 619 : 36 : Portal portal = hentry->portal;
620 : :
621 : : /* Can't close the active portal (the one running the command) */
5546 tgl@sss.pgh.pa.us 622 [ + + ]: 36 : if (portal->status == PORTAL_ACTIVE)
623 : 20 : continue;
624 : :
625 : 16 : PortalDrop(portal, false);
626 : :
627 : : /* Restart the iteration in case that led to other drops */
628 : 16 : hash_seq_term(&status);
629 : 16 : hash_seq_init(&status, PortalHashTable);
630 : : }
631 : : }
632 : :
633 : : /*
634 : : * "Hold" a portal. Prepare it for access by later transactions.
635 : : */
636 : : static void
2960 peter_e@gmx.net 637 : 49 : HoldPortal(Portal portal)
638 : : {
639 : : /*
640 : : * Note that PersistHoldablePortal() must release all resources used by
641 : : * the portal that are local to the creating transaction.
642 : : */
643 : 49 : PortalCreateHoldStore(portal);
644 : 49 : PersistHoldablePortal(portal);
645 : :
646 : : /* drop cached plan reference, if any */
647 : 47 : PortalReleaseCachedPlan(portal);
648 : :
649 : : /*
650 : : * Any resources belonging to the portal will be released in the upcoming
651 : : * transaction-wide cleanup; the portal will no longer have its own
652 : : * resources.
653 : : */
654 : 47 : portal->resowner = NULL;
655 : :
656 : : /*
657 : : * Having successfully exported the holdable cursor, mark it as not
658 : : * belonging to this transaction.
659 : : */
660 : 47 : portal->createSubid = InvalidSubTransactionId;
661 : 47 : portal->activeSubid = InvalidSubTransactionId;
1677 tgl@sss.pgh.pa.us 662 : 47 : portal->createLevel = 0;
2960 peter_e@gmx.net 663 : 47 : }
664 : :
665 : : /*
666 : : * Pre-commit processing for portals.
667 : : *
668 : : * Holdable cursors created in this transaction need to be converted to
669 : : * materialized form, since we are going to close down the executor and
670 : : * release locks. Non-holdable portals created in this transaction are
671 : : * simply removed. Portals remaining from prior transactions should be
672 : : * left untouched.
673 : : *
674 : : * Returns true if any portals changed state (possibly causing user-defined
675 : : * code to be run), false if not.
676 : : */
677 : : bool
5546 tgl@sss.pgh.pa.us 678 : 394866 : PreCommit_Portals(bool isPrepare)
679 : : {
7507 bruce@momjian.us 680 : 394866 : bool result = false;
681 : : HASH_SEQ_STATUS status;
682 : : PortalHashEnt *hentry;
683 : :
8978 tgl@sss.pgh.pa.us 684 : 394866 : hash_seq_init(&status, PortalHashTable);
685 : :
686 [ + + ]: 437112 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
687 : : {
8310 bruce@momjian.us 688 : 42246 : Portal portal = hentry->portal;
689 : :
690 : : /*
691 : : * There should be no pinned portals anymore. Complain if someone
692 : : * leaked one. Auto-held portals are allowed; we assume that whoever
693 : : * pinned them is managing them.
694 : : */
2960 peter_e@gmx.net 695 [ + + - + ]: 42246 : if (portal->portalPinned && !portal->autoHeld)
5546 tgl@sss.pgh.pa.us 696 [ # # ]:UBC 0 : elog(ERROR, "cannot commit while a portal is pinned");
697 : :
698 : : /*
699 : : * Do not touch active portals --- this can only happen in the case of
700 : : * a multi-transaction utility command, such as VACUUM, or a commit in
701 : : * a procedure.
702 : : *
703 : : * Note however that any resource owner attached to such a portal is
704 : : * still going to go away, so don't leave a dangling pointer. Also
705 : : * unregister any snapshots held by the portal, mainly to avoid
706 : : * snapshot leak warnings from ResourceOwnerRelease().
707 : : */
5546 tgl@sss.pgh.pa.us 708 [ + + ]:CBC 42246 : if (portal->status == PORTAL_ACTIVE)
709 : : {
2812 peter_e@gmx.net 710 [ + + ]: 36452 : if (portal->holdSnapshot)
711 : : {
712 [ + - ]: 1 : if (portal->resowner)
713 : 1 : UnregisterSnapshotFromOwner(portal->holdSnapshot,
714 : : portal->resowner);
715 : 1 : portal->holdSnapshot = NULL;
716 : : }
5546 tgl@sss.pgh.pa.us 717 : 36452 : portal->resowner = NULL;
718 : : /* Clear portalSnapshot too, for cleanliness */
1810 719 : 36452 : portal->portalSnapshot = NULL;
5546 720 : 36452 : continue;
721 : : }
722 : :
723 : : /* Is it a holdable portal created in the current xact? */
7962 724 [ + + ]: 5794 : if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
7694 725 [ + + ]: 329 : portal->createSubid != InvalidSubTransactionId &&
7962 726 [ + - ]: 31 : portal->status == PORTAL_READY)
727 : : {
728 : : /*
729 : : * We are exiting the transaction that created a holdable cursor.
730 : : * Instead of dropping the portal, prepare it for access by later
731 : : * transactions.
732 : : *
733 : : * However, if this is PREPARE TRANSACTION rather than COMMIT,
734 : : * refuse PREPARE, because the semantics seem pretty unclear.
735 : : */
5546 736 [ - + ]: 31 : if (isPrepare)
5546 tgl@sss.pgh.pa.us 737 [ # # ]:UBC 0 : ereport(ERROR,
738 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
739 : : errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
740 : :
2960 peter_e@gmx.net 741 :CBC 31 : HoldPortal(portal);
742 : :
743 : : /* Report we changed state */
7694 tgl@sss.pgh.pa.us 744 : 31 : result = true;
745 : : }
5546 746 [ + + ]: 5763 : else if (portal->createSubid == InvalidSubTransactionId)
747 : : {
748 : : /*
749 : : * Do nothing to cursors held over from a previous transaction
750 : : * (including ones we just froze in a previous cycle of this loop)
751 : : */
7694 752 : 340 : continue;
753 : : }
754 : : else
755 : : {
756 : : /* Zap all non-holdable portals */
5546 757 : 5423 : PortalDrop(portal, true);
758 : :
759 : : /* Report we changed state */
760 : 5423 : result = true;
761 : : }
762 : :
763 : : /*
764 : : * After either freezing or dropping a portal, we have to restart the
765 : : * iteration, because we could have invoked user-defined code that
766 : : * caused a drop of the next portal in the hash chain.
767 : : */
6949 768 : 5454 : hash_seq_term(&status);
7664 bruce@momjian.us 769 : 5454 : hash_seq_init(&status, PortalHashTable);
770 : : }
771 : :
5546 tgl@sss.pgh.pa.us 772 : 394866 : return result;
773 : : }
774 : :
775 : : /*
776 : : * Abort processing for portals.
777 : : *
778 : : * At this point we run the cleanup hook if present, but we can't release the
779 : : * portal's memory until the cleanup call.
780 : : */
781 : : void
8404 782 : 35394 : AtAbort_Portals(void)
783 : : {
784 : : HASH_SEQ_STATUS status;
785 : : PortalHashEnt *hentry;
786 : :
787 : 35394 : hash_seq_init(&status, PortalHashTable);
788 : :
789 [ + + ]: 56731 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
790 : : {
8310 bruce@momjian.us 791 : 21337 : Portal portal = hentry->portal;
792 : :
793 : : /*
794 : : * When elog(FATAL) is in progress, we need to set the active portal
795 : : * to failed, so that PortalCleanup() doesn't run the executor
796 : : * shutdown.
797 : : */
3015 peter_e@gmx.net 797 [ + + + + ]: 21337 : if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
798 : 4 : MarkPortalFailed(portal);
799 : :
800 : : /*
801 : : * Do nothing else to cursors held over from a previous transaction.
802 : : */
7901 tgl@sss.pgh.pa.us 803 [ + + ]: 21337 : if (portal->createSubid == InvalidSubTransactionId)
8404 804 : 87 : continue;
805 : :
806 : : /*
807 : : * Do nothing to auto-held cursors. This is similar to the case of a
808 : : * cursor from a previous transaction, but it could also be that the
809 : : * cursor was auto-held in this transaction, so it wants to live on.
810 : : */
2960 peter_e@gmx.net 811 [ - + ]: 21250 : if (portal->autoHeld)
2960 peter_e@gmx.net 812 :UBC 0 : continue;
813 : :
814 : : /*
815 : : * If it was created in the current transaction, we can't do normal
816 : : * shutdown on a READY portal either; it might refer to objects
817 : : * created in the failed transaction. See comments in
818 : : * AtSubAbort_Portals.
819 : : */
5920 tgl@sss.pgh.pa.us 820 [ + + ]:CBC 21250 : if (portal->status == PORTAL_READY)
5193 821 : 632 : MarkPortalFailed(portal);
822 : :
823 : : /*
824 : : * Allow portalcmds.c to clean up the state it knows about, if we
825 : : * haven't already.
826 : : */
223 peter@eisentraut.org 827 [ + + ]:GNC 21250 : if (portal->cleanup)
828 : : {
3162 peter_e@gmx.net 829 :CBC 89 : portal->cleanup(portal);
8404 tgl@sss.pgh.pa.us 830 : 89 : portal->cleanup = NULL;
831 : : }
832 : :
833 : : /* drop cached plan reference, if any */
5951 834 : 21250 : PortalReleaseCachedPlan(portal);
835 : :
836 : : /*
837 : : * Any resources belonging to the portal will be released in the
838 : : * upcoming transaction-wide cleanup; they will be gone before we run
839 : : * PortalDrop.
840 : : */
7962 841 : 21250 : portal->resowner = NULL;
842 : :
843 : : /*
844 : : * Although we can't delete the portal data structure proper, we can
845 : : * release any memory in subsidiary contexts, such as executor state.
846 : : * The cleanup hook was the last thing that might have needed data
847 : : * there. But leave active portals alone.
848 : : */
3025 peter_e@gmx.net 849 [ + + ]: 21250 : if (portal->status != PORTAL_ACTIVE)
850 : 21150 : MemoryContextDeleteChildren(portal->portalContext);
851 : : }
9254 tgl@sss.pgh.pa.us 852 : 35394 : }
853 : :
854 : : /*
855 : : * Post-abort cleanup for portals.
856 : : *
857 : : * Delete all portals not held over from prior transactions (still-ACTIVE
858 : : * portals are skipped; they can only exist during multi-transaction
859 : : * commands).
860 : : */
861 : : void
8404 860 : 35380 : AtCleanup_Portals(void)
861 : : {
862 : : HASH_SEQ_STATUS status;
863 : : PortalHashEnt *hentry;
864 : :
865 : 35380 : hash_seq_init(&status, PortalHashTable);
866 : :
867 [ + + ]: 55860 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
868 : : {
8310 bruce@momjian.us 869 : 20480 : Portal portal = hentry->portal;
870 : :
871 : : /*
872 : : * Do not touch active portals --- this can only happen in the case of
873 : : * a multi-transaction command.
874 : : */
3025 peter_e@gmx.net 875 [ + + ]: 20480 : if (portal->status == PORTAL_ACTIVE)
876 : 100 : continue;
877 : :
878 : : /*
879 : : * Do nothing to cursors held over from a previous transaction or
880 : : * auto-held ones.
881 : : */
2960 882 [ + + - + ]: 20380 : if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
883 : : {
7946 tgl@sss.pgh.pa.us 884 [ - + ]: 87 : Assert(portal->status != PORTAL_ACTIVE);
885 [ - + ]: 87 : Assert(portal->resowner == NULL);
8404 886 : 87 : continue;
887 : : }
888 : :
889 : : /*
890 : : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
891 : : * let us drop the portal otherwise. Whoever pinned the portal was
892 : : * interrupted by the abort too and won't try to use it anymore.
893 : : */
5783 heikki.linnakangas@i 894 [ + + ]: 20293 : if (portal->portalPinned)
895 : 23 : portal->portalPinned = false;
896 : :
897 : : /*
898 : : * We had better not call any user-defined code during cleanup, so if
899 : : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
900 : : */
223 peter@eisentraut.org 901 [ - + ]:GNC 20293 : if (portal->cleanup)
902 : : {
3186 tgl@sss.pgh.pa.us 903 [ # # ]:UBC 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
904 : 0 : portal->cleanup = NULL;
905 : : }
906 : :
907 : : /* Zap it. */
7962 tgl@sss.pgh.pa.us 908 :CBC 20293 : PortalDrop(portal, false);
909 : : }
8404 910 : 35380 : }
911 : :
912 : : /*
913 : : * Portal-related cleanup when we return to the main loop on error.
914 : : *
915 : : * This is different from the cleanup at transaction abort. Auto-held portals
916 : : * are dropped here on error but are not touched at transaction abort.
917 : : */
918 : : void
2960 peter_e@gmx.net 919 : 31115 : PortalErrorCleanup(void)
920 : : {
921 : : HASH_SEQ_STATUS status;
922 : : PortalHashEnt *hentry;
923 : :
924 : 31115 : hash_seq_init(&status, PortalHashTable);
925 : :
926 [ + + ]: 64263 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
927 : : {
928 : 2033 : Portal portal = hentry->portal;
929 : :
930 [ + + ]: 2033 : if (portal->autoHeld)
931 : : {
932 : 2 : portal->portalPinned = false;
933 : 2 : PortalDrop(portal, false);
934 : : }
935 : : }
936 : 31115 : }
937 : :
938 : : /*
939 : : * Pre-subcommit processing for portals.
940 : : *
941 : : * Reassign portals created or used in the current subtransaction to the
942 : : * parent subtransaction, reparenting their resource owners to
943 : : * parentXactOwner.
944 : : */
945 : : void
7901 tgl@sss.pgh.pa.us 945 : 7281 : AtSubCommit_Portals(SubTransactionId mySubid,
946 : : SubTransactionId parentSubid,
947 : : int parentLevel,
948 : : ResourceOwner parentXactOwner)
949 : : {
950 : : HASH_SEQ_STATUS status;
951 : : PortalHashEnt *hentry;
952 : :
7978 953 : 7281 : hash_seq_init(&status, PortalHashTable);
954 : :
955 [ + + ]: 21109 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
956 : : {
7919 bruce@momjian.us 957 : 6547 : Portal portal = hentry->portal;
958 : :
7901 tgl@sss.pgh.pa.us 959 [ + + ]: 6547 : if (portal->createSubid == mySubid)
960 : : {
961 : 30 : portal->createSubid = parentSubid;
1677 962 : 30 : portal->createLevel = parentLevel;
7962 963 [ + - ]: 30 : if (portal->resowner)
964 : 30 : ResourceOwnerNewParent(portal->resowner, parentXactOwner);
965 : : }
3896 966 [ + + ]: 6547 : if (portal->activeSubid == mySubid)
967 : 111 : portal->activeSubid = parentSubid;
968 : : }
7978 969 : 7281 : }
970 : :
971 : : /*
972 : : * Subtransaction abort handling for portals.
973 : : *
974 : : * Deactivate portals created or used during the failed subtransaction.
975 : : * Note that per AtSubCommit_Portals, this will catch portals created/used
976 : : * in descendants of the subtransaction too.
977 : : *
978 : : * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
979 : : */
980 : : void
7901 981 : 5388 : AtSubAbort_Portals(SubTransactionId mySubid,
982 : : SubTransactionId parentSubid,
983 : : ResourceOwner myXactOwner,
984 : : ResourceOwner parentXactOwner)
985 : : {
986 : : HASH_SEQ_STATUS status;
987 : : PortalHashEnt *hentry;
988 : :
7978 989 : 5388 : hash_seq_init(&status, PortalHashTable);
990 : :
991 [ + + ]: 13551 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
992 : : {
7919 bruce@momjian.us 993 : 8163 : Portal portal = hentry->portal;
994 : :
995 : : /* Was it created in this subtransaction? */
7901 tgl@sss.pgh.pa.us 996 [ + + ]: 8163 : if (portal->createSubid != mySubid)
997 : : {
998 : : /* No, but maybe it was used in this subtransaction? */
3896 999 [ + + ]: 7283 : if (portal->activeSubid == mySubid)
1000 : : {
1001 : : /* Maintain activeSubid until the portal is removed */
1002 : 33 : portal->activeSubid = parentSubid;
1003 : :
1004 : : /*
1005 : : * A MarkPortalActive() caller ran an upper-level portal in
1006 : : * this subtransaction and left the portal ACTIVE. This can't
1007 : : * happen, but force the portal into FAILED state for the same
1008 : : * reasons discussed below.
1009 : : *
1010 : : * We assume we can get away without forcing upper-level READY
1011 : : * portals to fail, even if they were run and then suspended.
1012 : : * In theory a suspended upper-level portal could have
1013 : : * acquired some references to objects that are about to be
1014 : : * destroyed, but there should be sufficient defenses against
1015 : : * such cases: the portal's original query cannot contain such
1016 : : * references, and any references within, say, cached plans of
1017 : : * PL/pgSQL functions are not from active queries and should
1018 : : * be protected by revalidation logic.
1019 : : */
1020 [ - + ]: 33 : if (portal->status == PORTAL_ACTIVE)
3896 tgl@sss.pgh.pa.us 1021 :UBC 0 : MarkPortalFailed(portal);
1022 : :
1023 : : /*
1024 : : * Also, if we failed it during the current subtransaction
1025 : : * (either just above, or earlier), reattach its resource
1026 : : * owner to the current subtransaction's resource owner, so
1027 : : * that any resources it still holds will be released while
1028 : : * cleaning up this subtransaction. This prevents some corner
1029 : : * cases wherein we might get Asserts or worse while cleaning
1030 : : * up objects created during the current subtransaction
1031 : : * (because they're still referenced within this portal).
1032 : : */
3896 tgl@sss.pgh.pa.us 1033 [ + + + - ]:CBC 33 : if (portal->status == PORTAL_FAILED && portal->resowner)
1034 : : {
1035 : 9 : ResourceOwnerNewParent(portal->resowner, myXactOwner);
1036 : 9 : portal->resowner = NULL;
1037 : : }
1038 : : }
1039 : : /* Done if it wasn't created in this subtransaction */
7978 1040 : 7283 : continue;
1041 : : }
1042 : :
1043 : : /*
1044 : : * Force any live portals of my own subtransaction into FAILED state.
1045 : : * We have to do this because they might refer to objects created or
1046 : : * changed in the failed subtransaction, leading to crashes within
1047 : : * ExecutorEnd when portalcmds.c tries to close down the portal.
1048 : : * Currently, every MarkPortalActive() caller ensures it updates the
1049 : : * portal status again before relinquishing control, so ACTIVE can't
1050 : : * happen here. If it does happen, dispose of the portal as existing
1051 : : * MarkPortalActive() callers would.
1052 : : */
5920 1053 [ + + ]: 880 : if (portal->status == PORTAL_READY ||
1054 [ - + ]: 177 : portal->status == PORTAL_ACTIVE)
5193 1055 : 703 : MarkPortalFailed(portal);
1056 : :
1057 : : /*
1058 : : * Allow portalcmds.c to clean up the state it knows about, if we
1059 : : * haven't already.
1060 : : */
223 peter@eisentraut.org 1061 [ - + ]:GNC 880 : if (portal->cleanup)
1062 : : {
3162 peter_e@gmx.net 1063 :UBC 0 : portal->cleanup(portal);
5920 tgl@sss.pgh.pa.us 1064 : 0 : portal->cleanup = NULL;
1065 : : }
1066 : :
1067 : : /* drop cached plan reference, if any */
5920 tgl@sss.pgh.pa.us 1068 :CBC 880 : PortalReleaseCachedPlan(portal);
1069 : :
1070 : : /*
1071 : : * Any resources belonging to the portal will be released in the
1072 : : * upcoming transaction-wide cleanup; they will be gone before we run
1073 : : * PortalDrop.
1074 : : */
1075 : 880 : portal->resowner = NULL;
1076 : :
1077 : : /*
1078 : : * Although we can't delete the portal data structure proper, we can
1079 : : * release any memory in subsidiary contexts, such as executor state.
1080 : : * The cleanup hook was the last thing that might have needed data
1081 : : * there.
1082 : : */
3062 peter_e@gmx.net 1083 : 880 : MemoryContextDeleteChildren(portal->portalContext);
1084 : : }
7978 tgl@sss.pgh.pa.us 1085 : 5388 : }
1086 : :
1087 : : /*
1088 : : * Post-subabort cleanup for portals.
1089 : : *
1090 : : * Drop all portals created in the failed subtransaction (but note that
1091 : : * we will not drop any that AtSubCommit_Portals reassigned to the parent).
1092 : : */
1093 : : void
7901 1094 : 5388 : AtSubCleanup_Portals(SubTransactionId mySubid)
1095 : : {
1096 : : HASH_SEQ_STATUS status;
1097 : : PortalHashEnt *hentry;
1098 : :
7978 1099 : 5388 : hash_seq_init(&status, PortalHashTable);
1100 : :
1101 [ + + ]: 13383 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1102 : : {
1103 : 7995 : Portal portal = hentry->portal;
1104 : :
7901 1105 [ + + ]: 7995 : if (portal->createSubid != mySubid)
7978 1106 : 7283 : continue;
1107 : :
1108 : : /*
1109 : : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1110 : : * let us drop the portal otherwise. Whoever pinned the portal was
1111 : : * interrupted by the abort too and won't try to use it anymore.
1112 : : */
5775 heikki.linnakangas@i 1113 [ + + ]: 712 : if (portal->portalPinned)
1114 : 3 : portal->portalPinned = false;
1115 : :
1116 : : /*
1117 : : * We had better not call any user-defined code during cleanup, so if
1118 : : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1119 : : */
223 peter@eisentraut.org 1120 [ - + ]:GNC 712 : if (portal->cleanup)
1121 : : {
3186 tgl@sss.pgh.pa.us 1122 [ # # ]:UBC 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1123 : 0 : portal->cleanup = NULL;
1124 : : }
1125 : :
1126 : : /* Zap it. */
7962 tgl@sss.pgh.pa.us 1127 :CBC 712 : PortalDrop(portal, false);
1128 : : }
7978 1129 : 5388 : }
1130 : :
1131 : : /* pg_cursor: set-returning function reporting all visible cursors */
1132 : : Datum
7412 neilc@samurai.com 1133 : 75 : pg_cursor(PG_FUNCTION_ARGS)
1134 : : {
6949 tgl@sss.pgh.pa.us 1135 : 75 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1136 : : HASH_SEQ_STATUS hash_seq;
1137 : : PortalHashEnt *hentry;
1138 : :
1139 : : /*
1140 : : * We put all the tuples into a tuplestore in one scan of the hashtable.
1141 : : * This avoids any issue of the hashtable possibly changing between calls.
1142 : : */
1295 michael@paquier.xyz 1143 : 75 : InitMaterializedSRF(fcinfo, 0);
1144 : :
6949 tgl@sss.pgh.pa.us 1145 : 75 : hash_seq_init(&hash_seq, PortalHashTable);
1146 [ + + ]: 238 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1147 : : {
1148 : 163 : Portal portal = hentry->portal;
1149 : : Datum values[6];
1389 peter@eisentraut.org 1150 : 163 : bool nulls[6] = {0};
1151 : :
1152 : : /* report only "visible" entries */
6949 tgl@sss.pgh.pa.us 1153 [ + + ]: 163 : if (!portal->visible)
1154 : 79 : continue;
1155 : : /* also ignore it if PortalDefineQuery hasn't been called yet */
576 1156 [ - + ]: 84 : if (!portal->sourceText)
576 tgl@sss.pgh.pa.us 1157 :UBC 0 : continue;
1158 : :
6615 tgl@sss.pgh.pa.us 1159 :CBC 84 : values[0] = CStringGetTextDatum(portal->name);
6500 1160 : 84 : values[1] = CStringGetTextDatum(portal->sourceText);
7412 neilc@samurai.com 1161 : 84 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1162 : 84 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1163 : 84 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1164 : 84 : values[5] = TimestampTzGetDatum(portal->creation_time);
1165 : :
1520 michael@paquier.xyz 1166 : 84 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1167 : : }
1168 : :
6949 tgl@sss.pgh.pa.us 1169 : 75 : return (Datum) 0;
1170 : : }
1171 : :
 : : /* Return true if no portal is currently in PORTAL_READY state. */
1172 : : bool
4903 simon@2ndQuadrant.co 1173 : 37 : ThereAreNoReadyPortals(void)
1174 : : {
1175 : : HASH_SEQ_STATUS status;
1176 : : PortalHashEnt *hentry;
1177 : :
1178 : 37 : hash_seq_init(&status, PortalHashTable);
1179 : :
1180 [ + + ]: 74 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1181 : : {
1182 : 37 : Portal portal = hentry->portal;
1183 : :
1184 [ - + ]: 37 : if (portal->status == PORTAL_READY)
4903 simon@2ndQuadrant.co 1185 :UBC 0 : return false;
1186 : : }
1187 : :
4903 simon@2ndQuadrant.co 1188 :CBC 37 : return true;
1189 : : }
1190 : :
1191 : : /*
1192 : : * Hold all pinned portals.
1193 : : *
1194 : : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1195 : : * called to protect internally-generated cursors from being dropped during
1196 : : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1197 : : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1198 : : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1199 : : * because we need to run user-defined code while persisting a portal.
1200 : : * It's too late to do that once transaction abort has started.)
1201 : : *
1202 : : * We protect such portals by converting them to held cursors. We mark them
1203 : : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1204 : : * non-exception code paths, the PL needs to clean such portals itself, since
1205 : : * transaction end won't do it anymore; but that should be normal practice
1206 : : * anyway.)
1207 : : */
1208 : : void
2960 peter_e@gmx.net 1209 : 2217 : HoldPinnedPortals(void)
1210 : : {
1211 : : HASH_SEQ_STATUS status;
1212 : : PortalHashEnt *hentry;
1213 : :
3025 1214 : 2217 : hash_seq_init(&status, PortalHashTable);
1215 : :
1216 [ + + ]: 4494 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1217 : : {
1218 : 2280 : Portal portal = hentry->portal;
1219 : :
2960 1220 [ + + + + ]: 2280 : if (portal->portalPinned && !portal->autoHeld)
1221 : : {
1222 : : /*
1223 : : * Doing transaction control, especially abort, inside a cursor
1224 : : * loop that is not read-only, for example using UPDATE ...
1225 : : * RETURNING, has weird semantics issues. Also, this
1226 : : * implementation wouldn't work, because such portals cannot be
1227 : : * held. (The core grammar enforces that only SELECT statements
1228 : : * can drive a cursor, but for example PL/pgSQL does not restrict
1229 : : * it.)
1230 : : */
1231 [ + + ]: 19 : if (portal->strategy != PORTAL_ONE_SELECT)
1232 [ + - ]: 1 : ereport(ERROR,
1233 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1234 : : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1235 : :
1236 : : /* Verify it's in a suitable state to be held (must be READY) */
2573 tgl@sss.pgh.pa.us 1237 [ - + ]: 18 : if (portal->status != PORTAL_READY)
2573 tgl@sss.pgh.pa.us 1238 [ # # ]:UBC 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1239 : :
2960 peter_e@gmx.net 1240 :CBC 18 : HoldPortal(portal);
2573 tgl@sss.pgh.pa.us 1241 : 16 : portal->autoHeld = true;
1242 : : }
1243 : : }
3025 peter_e@gmx.net 1244 : 2214 : }
1245 : :
1246 : : /*
1247 : : * Drop the outer active snapshots for all portals, so that no snapshots
1248 : : * remain active.
1249 : : *
1250 : : * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1251 : : * ROLLBACK inside a procedure. This has to be separate from that since it
1252 : : * should not be run until we're done with steps that are likely to fail.
1253 : : *
1254 : : * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1255 : : * need to clean up snapshot management in VACUUM and perhaps other places.
1256 : : */
1257 : : void
1810 tgl@sss.pgh.pa.us 1258 : 2214 : ForgetPortalSnapshots(void)
1259 : : {
1260 : : HASH_SEQ_STATUS status;
1261 : : PortalHashEnt *hentry;
1262 : 2214 : int numPortalSnaps = 0; /* portalSnapshot fields cleared */
1263 : 2214 : int numActiveSnaps = 0; /* snapshots popped off the active stack */
1264 : :
1265 : : /* First, scan PortalHashTable and clear portalSnapshot fields */
1266 : 2214 : hash_seq_init(&status, PortalHashTable);
1267 : :
1268 [ + + ]: 6705 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1269 : : {
1270 : 2277 : Portal portal = hentry->portal;
1271 : :
1272 [ + + ]: 2277 : if (portal->portalSnapshot != NULL)
1273 : : {
1274 : 2214 : portal->portalSnapshot = NULL;
1275 : 2214 : numPortalSnaps++;
1276 : : }
1277 : : /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1278 : : }
1279 : :
1280 : : /*
1281 : : * Now, pop all the active snapshots, which should be just those that were
1282 : : * portal snapshots. Ideally we'd drive this directly off the portal
1283 : : * scan, but there's no good way to visit the portals in the correct
1284 : : * order. So just cross-check after the fact.
1285 : : */
1286 [ + + ]: 4428 : while (ActiveSnapshotSet())
1287 : : {
1288 : 2214 : PopActiveSnapshot();
1289 : 2214 : numActiveSnaps++;
1290 : : }
1291 : :
1292 [ - + ]: 2214 : if (numPortalSnaps != numActiveSnaps)
1810 tgl@sss.pgh.pa.us 1293 [ # # ]:UBC 0 : elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1294 : : numPortalSnaps, numActiveSnaps);
1810 tgl@sss.pgh.pa.us 1295 :CBC 2214 : }
|