Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * waiteventset.c
4 : : * ppoll()/pselect() like abstraction
5 : : *
6 : : * WaitEvents are an abstraction for waiting for one or more events at a time.
7 : : * The waiting can be done in a race free fashion, similar ppoll() or
8 : : * pselect() (as opposed to plain poll()/select()).
9 : : *
10 : : * You can wait for:
11 : : * - a latch being set from another process or from signal handler in the same
12 : : * process (WL_LATCH_SET)
13 : : * - data to become readable or writeable on a socket (WL_SOCKET_*)
14 : : * - postmaster death (WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH)
15 : : * - timeout (WL_TIMEOUT)
16 : : *
17 : : * Implementation
18 : : * --------------
19 : : *
20 : : * The poll() implementation uses the so-called self-pipe trick to overcome the
21 : : * race condition involved with poll() and setting a global flag in the signal
22 : : * handler. When a latch is set and the current process is waiting for it, the
23 : : * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
24 : : * A signal by itself doesn't interrupt poll() on all platforms, and even on
25 : : * platforms where it does, a signal that arrives just before the poll() call
26 : : * does not prevent poll() from entering sleep. An incoming byte on a pipe
27 : : * however reliably interrupts the sleep, and causes poll() to return
28 : : * immediately even if the signal arrives before poll() begins.
29 : : *
30 : : * The epoll() implementation overcomes the race with a different technique: it
31 : : * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
32 : : * don't need to register a signal handler or create our own self-pipe. We
33 : : * assume that any system that has Linux epoll() also has Linux signalfd().
34 : : *
35 : : * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
36 : : *
37 : : * The Windows implementation uses Windows events that are inherited by all
38 : : * postmaster child processes. There's no need for the self-pipe trick there.
39 : : *
40 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
41 : : * Portions Copyright (c) 1994, Regents of the University of California
42 : : *
43 : : * IDENTIFICATION
44 : : * src/backend/storage/ipc/waiteventset.c
45 : : *
46 : : *-------------------------------------------------------------------------
47 : : */
48 : : #include "postgres.h"
49 : :
50 : : #include <fcntl.h>
51 : : #include <limits.h>
52 : : #include <signal.h>
53 : : #include <unistd.h>
54 : : #ifdef HAVE_SYS_EPOLL_H
55 : : #include <sys/epoll.h>
56 : : #endif
57 : : #ifdef HAVE_SYS_EVENT_H
58 : : #include <sys/event.h>
59 : : #endif
60 : : #ifdef HAVE_SYS_SIGNALFD_H
61 : : #include <sys/signalfd.h>
62 : : #endif
63 : : #ifdef HAVE_POLL_H
64 : : #include <poll.h>
65 : : #endif
66 : :
67 : : #include "libpq/pqsignal.h"
68 : : #include "miscadmin.h"
69 : : #include "pgstat.h"
70 : : #include "port/atomics.h"
71 : : #include "portability/instr_time.h"
72 : : #include "postmaster/postmaster.h"
73 : : #include "storage/fd.h"
74 : : #include "storage/ipc.h"
75 : : #include "storage/pmsignal.h"
76 : : #include "storage/latch.h"
77 : : #include "storage/waiteventset.h"
78 : : #include "utils/memutils.h"
79 : : #include "utils/resowner.h"
80 : :
81 : : /*
82 : : * Select the fd readiness primitive to use. Normally the "most modern"
83 : : * primitive supported by the OS will be used, but for testing it can be
84 : : * useful to manually specify the used primitive. If desired, just add a
85 : : * define somewhere before this block.
86 : : */
87 : : #if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
88 : : defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
89 : : /* don't overwrite manual choice */
90 : : #elif defined(HAVE_SYS_EPOLL_H)
91 : : #define WAIT_USE_EPOLL
92 : : #elif defined(HAVE_KQUEUE)
93 : : #define WAIT_USE_KQUEUE
94 : : #elif defined(HAVE_POLL)
95 : : #define WAIT_USE_POLL
96 : : #elif WIN32
97 : : #define WAIT_USE_WIN32
98 : : #else
99 : : #error "no wait set implementation available"
100 : : #endif
101 : :
102 : : /*
103 : : * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
104 : : * available. For testing the choice can also be manually specified.
105 : : */
106 : : #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
107 : : #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
108 : : /* don't overwrite manual choice */
109 : : #elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
110 : : #define WAIT_USE_SIGNALFD
111 : : #else
112 : : #define WAIT_USE_SELF_PIPE
113 : : #endif
114 : : #endif
115 : :
/* typedef in waiteventset.h */
struct WaitEventSet
{
	/* ResourceOwner tracking this set, or NULL for session lifetime */
	ResourceOwner owner;

	int			nevents;		/* number of registered events */
	int			nevents_space;	/* maximum number of events in this set */

	/*
	 * Array, of nevents_space length, storing the definition of events this
	 * set is waiting for.
	 */
	WaitEvent  *events;

	/*
	 * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
	 * said latch, and latch_pos the offset in the ->events array. This is
	 * useful because we check the state of the latch before performing
	 * syscalls related to waiting.
	 */
	Latch	   *latch;
	int			latch_pos;

	/*
	 * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
	 * is set so that we'll exit immediately if postmaster death is detected,
	 * instead of returning.
	 */
	bool		exit_on_postmaster_death;

#if defined(WAIT_USE_EPOLL)
	int			epoll_fd;
	/* epoll_wait returns events in a user provided arrays, allocate once */
	struct epoll_event *epoll_ret_events;
#elif defined(WAIT_USE_KQUEUE)
	int			kqueue_fd;
	/* kevent returns events in a user provided arrays, allocate once */
	struct kevent *kqueue_ret_events;
	bool		report_postmaster_not_running;
#elif defined(WAIT_USE_POLL)
	/* poll expects events to be waited on every poll() call, prepare once */
	struct pollfd *pollfds;
#elif defined(WAIT_USE_WIN32)

	/*
	 * Array of windows events. The first element always contains
	 * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
	 * event->pos + 1).
	 */
	HANDLE	   *handles;
#endif
};
168 : :
169 : : #ifndef WIN32
170 : : /* Are we currently in WaitLatch? The signal handler would like to know. */
171 : : static volatile sig_atomic_t waiting = false;
172 : : #endif
173 : :
174 : : #ifdef WAIT_USE_SIGNALFD
175 : : /* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
176 : : static int signal_fd = -1;
177 : : #endif
178 : :
179 : : #ifdef WAIT_USE_SELF_PIPE
180 : : /* Read and write ends of the self-pipe */
181 : : static int selfpipe_readfd = -1;
182 : : static int selfpipe_writefd = -1;
183 : :
184 : : /* Process owning the self-pipe --- needed for checking purposes */
185 : : static int selfpipe_owner_pid = 0;
186 : :
187 : : /* Private function prototypes */
188 : : static void latch_sigurg_handler(SIGNAL_ARGS);
189 : : static void sendSelfPipeByte(void);
190 : : #endif
191 : :
192 : : #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
193 : : static void drain(void);
194 : : #endif
195 : :
196 : : #if defined(WAIT_USE_EPOLL)
197 : : static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
198 : : #elif defined(WAIT_USE_KQUEUE)
199 : : static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
200 : : #elif defined(WAIT_USE_POLL)
201 : : static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
202 : : #elif defined(WAIT_USE_WIN32)
203 : : static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
204 : : #endif
205 : :
206 : : static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
207 : : WaitEvent *occurred_events, int nevents);
208 : :
209 : : /* ResourceOwner support to hold WaitEventSets */
210 : : static void ResOwnerReleaseWaitEventSet(Datum res);
211 : :
/* How the resource-owner machinery describes and releases a WaitEventSet */
static const ResourceOwnerDesc wait_event_set_resowner_desc =
{
	.name = "WaitEventSet",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_WAITEVENTSETS,
	.ReleaseResource = ResOwnerReleaseWaitEventSet,
	.DebugPrint = NULL
};
220 : :
221 : : /* Convenience wrappers over ResourceOwnerRemember/Forget */
/* Register 'set' with 'owner' so it is freed on resource-owner release */
static inline void
ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
{
	ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
}
/* Drop the resource-owner registration made by ResourceOwnerRememberWaitEventSet */
static inline void
ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
{
	ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
}
232 : :
233 : :
234 : : /*
235 : : * Initialize the process-local wait event infrastructure.
236 : : *
237 : : * This must be called once during startup of any process that can wait on
238 : : * latches, before it issues any InitLatch() or OwnLatch() calls.
239 : : */
void
InitializeWaitEventSupport(void)
{
#if defined(WAIT_USE_SELF_PIPE)
	int			pipefd[2];

	if (IsUnderPostmaster)
	{
		/*
		 * We might have inherited connections to a self-pipe created by the
		 * postmaster.  It's critical that child processes create their own
		 * self-pipes, of course, and we really want them to close the
		 * inherited FDs for safety's sake.
		 */
		if (selfpipe_owner_pid != 0)
		{
			/* Assert we go through here but once in a child process */
			Assert(selfpipe_owner_pid != MyProcPid);
			/* Release postmaster's pipe FDs; ignore any error */
			(void) close(selfpipe_readfd);
			(void) close(selfpipe_writefd);
			/* Clean up, just for safety's sake; we'll set these below */
			selfpipe_readfd = selfpipe_writefd = -1;
			selfpipe_owner_pid = 0;
			/* Keep fd.c's accounting straight */
			ReleaseExternalFD();
			ReleaseExternalFD();
		}
		else
		{
			/*
			 * Postmaster didn't create a self-pipe ... or else we're in an
			 * EXEC_BACKEND build, in which case it doesn't matter since the
			 * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
			 * fd.c won't have state to clean up, either.
			 */
			Assert(selfpipe_readfd == -1);
		}
	}
	else
	{
		/* In postmaster or standalone backend, assert we do this but once */
		Assert(selfpipe_readfd == -1);
		Assert(selfpipe_owner_pid == 0);
	}

	/*
	 * Set up the self-pipe that allows a signal handler to wake up the
	 * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
	 * that SetLatch won't block if the event has already been set many times
	 * filling the kernel buffer. Make the read-end non-blocking too, so that
	 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
	 * Also, make both FDs close-on-exec, since we surely do not want any
	 * child processes messing with them.
	 */
	if (pipe(pipefd) < 0)
		elog(FATAL, "pipe() failed: %m");
	if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
		elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
		elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
	if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
		elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
		elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");

	selfpipe_readfd = pipefd[0];
	selfpipe_writefd = pipefd[1];
	selfpipe_owner_pid = MyProcPid;

	/* Tell fd.c about these two long-lived FDs */
	ReserveExternalFD();
	ReserveExternalFD();

	/* The handler writes a wakeup byte to the self-pipe on SIGURG */
	pqsignal(SIGURG, latch_sigurg_handler);
#endif

#ifdef WAIT_USE_SIGNALFD
	sigset_t	signalfd_mask;

	if (IsUnderPostmaster)
	{
		/*
		 * It would probably be safe to re-use the inherited signalfd since
		 * signalfds only see the current process's pending signals, but it
		 * seems less surprising to close it and create our own.
		 */
		if (signal_fd != -1)
		{
			/* Release postmaster's signal FD; ignore any error */
			(void) close(signal_fd);
			signal_fd = -1;
			ReleaseExternalFD();
		}
	}

	/* Block SIGURG, because we'll receive it through a signalfd. */
	sigaddset(&UnBlockSig, SIGURG);

	/* Set up the signalfd to receive SIGURG notifications. */
	sigemptyset(&signalfd_mask);
	sigaddset(&signalfd_mask, SIGURG);
	signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
	if (signal_fd < 0)
		elog(FATAL, "signalfd() failed");
	/* Tell fd.c about this long-lived FD */
	ReserveExternalFD();
#endif

#ifdef WAIT_USE_KQUEUE
	/* Ignore SIGURG, because we'll receive it via kqueue. */
	pqsignal(SIGURG, SIG_IGN);
#endif
}
353 : :
354 : : /*
355 : : * Create a WaitEventSet with space for nevents different events to wait for.
356 : : *
357 : : * These events can then be efficiently waited upon together, using
358 : : * WaitEventSetWait().
359 : : *
360 : : * The WaitEventSet is tracked by the given 'resowner'. Use NULL for session
361 : : * lifetime.
362 : : */
363 : : WaitEventSet *
364 : 118144 : CreateWaitEventSet(ResourceOwner resowner, int nevents)
365 : : {
366 : : WaitEventSet *set;
367 : : char *data;
368 : 118144 : Size sz = 0;
369 : :
370 : : /*
371 : : * Use MAXALIGN size/alignment to guarantee that later uses of memory are
372 : : * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
373 : : * platforms, but earlier allocations like WaitEventSet and WaitEvent
374 : : * might not be sized to guarantee that when purely using sizeof().
375 : : */
376 : 118144 : sz += MAXALIGN(sizeof(WaitEventSet));
377 : 118144 : sz += MAXALIGN(sizeof(WaitEvent) * nevents);
378 : :
379 : : #if defined(WAIT_USE_EPOLL)
380 : 118144 : sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
381 : : #elif defined(WAIT_USE_KQUEUE)
382 : : sz += MAXALIGN(sizeof(struct kevent) * nevents);
383 : : #elif defined(WAIT_USE_POLL)
384 : : sz += MAXALIGN(sizeof(struct pollfd) * nevents);
385 : : #elif defined(WAIT_USE_WIN32)
386 : : /* need space for the pgwin32_signal_event */
387 : : sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
388 : : #endif
389 : :
390 [ + + ]: 118144 : if (resowner != NULL)
391 : 73775 : ResourceOwnerEnlarge(resowner);
392 : :
393 : 118144 : data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);
394 : :
395 : 118144 : set = (WaitEventSet *) data;
396 : 118144 : data += MAXALIGN(sizeof(WaitEventSet));
397 : :
398 : 118144 : set->events = (WaitEvent *) data;
399 : 118144 : data += MAXALIGN(sizeof(WaitEvent) * nevents);
400 : :
401 : : #if defined(WAIT_USE_EPOLL)
402 : 118144 : set->epoll_ret_events = (struct epoll_event *) data;
403 : 118144 : data += MAXALIGN(sizeof(struct epoll_event) * nevents);
404 : : #elif defined(WAIT_USE_KQUEUE)
405 : : set->kqueue_ret_events = (struct kevent *) data;
406 : : data += MAXALIGN(sizeof(struct kevent) * nevents);
407 : : #elif defined(WAIT_USE_POLL)
408 : : set->pollfds = (struct pollfd *) data;
409 : : data += MAXALIGN(sizeof(struct pollfd) * nevents);
410 : : #elif defined(WAIT_USE_WIN32)
411 : : set->handles = (HANDLE) data;
412 : : data += MAXALIGN(sizeof(HANDLE) * nevents);
413 : : #endif
414 : :
415 : 118144 : set->latch = NULL;
416 : 118144 : set->nevents_space = nevents;
417 : 118144 : set->exit_on_postmaster_death = false;
418 : :
419 [ + + ]: 118144 : if (resowner != NULL)
420 : : {
421 : 73775 : ResourceOwnerRememberWaitEventSet(resowner, set);
422 : 73775 : set->owner = resowner;
423 : : }
424 : :
425 : : #if defined(WAIT_USE_EPOLL)
426 [ - + ]: 118144 : if (!AcquireExternalFD())
235 heikki.linnakangas@i 427 [ # # ]:UBC 0 : elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
235 heikki.linnakangas@i 428 :CBC 118144 : set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
429 [ - + ]: 118144 : if (set->epoll_fd < 0)
430 : : {
235 heikki.linnakangas@i 431 :UBC 0 : ReleaseExternalFD();
432 [ # # ]: 0 : elog(ERROR, "epoll_create1 failed: %m");
433 : : }
434 : : #elif defined(WAIT_USE_KQUEUE)
435 : : if (!AcquireExternalFD())
436 : : elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
437 : : set->kqueue_fd = kqueue();
438 : : if (set->kqueue_fd < 0)
439 : : {
440 : : ReleaseExternalFD();
441 : : elog(ERROR, "kqueue failed: %m");
442 : : }
443 : : if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
444 : : {
445 : : int save_errno = errno;
446 : :
447 : : close(set->kqueue_fd);
448 : : ReleaseExternalFD();
449 : : errno = save_errno;
450 : : elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
451 : : }
452 : : set->report_postmaster_not_running = false;
453 : : #elif defined(WAIT_USE_WIN32)
454 : :
455 : : /*
456 : : * To handle signals while waiting, we need to add a win32 specific event.
457 : : * We accounted for the additional event at the top of this routine. See
458 : : * port/win32/signal.c for more details.
459 : : *
460 : : * Note: pgwin32_signal_event should be first to ensure that it will be
461 : : * reported when multiple events are set. We want to guarantee that
462 : : * pending signals are serviced.
463 : : */
464 : : set->handles[0] = pgwin32_signal_event;
465 : : StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
466 : : #endif
467 : :
235 heikki.linnakangas@i 468 :CBC 118144 : return set;
469 : : }
470 : :
471 : : /*
472 : : * Free a previously created WaitEventSet.
473 : : *
474 : : * Note: preferably, this shouldn't have to free any resources that could be
475 : : * inherited across an exec(). If it did, we'd likely leak those resources in
476 : : * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
477 : : * when the FD is created. For the Windows case, we assume that the handles
478 : : * involved are non-inheritable.
479 : : */
void
FreeWaitEventSet(WaitEventSet *set)
{
	/* Detach from the resource owner first, so release won't double-free */
	if (set->owner)
	{
		ResourceOwnerForgetWaitEventSet(set->owner, set);
		set->owner = NULL;
	}

#if defined(WAIT_USE_EPOLL)
	close(set->epoll_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
	close(set->kqueue_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_WIN32)
	/*
	 * Only socket events own a Windows event object created by us; latch and
	 * postmaster-death entries reference handles owned elsewhere.  handles[0]
	 * (pgwin32_signal_event) is shared and is not closed here either.
	 */
	for (WaitEvent *cur_event = set->events;
		 cur_event < (set->events + set->nevents);
		 cur_event++)
	{
		if (cur_event->events & WL_LATCH_SET)
		{
			/* uses the latch's HANDLE */
		}
		else if (cur_event->events & WL_POSTMASTER_DEATH)
		{
			/* uses PostmasterHandle */
		}
		else
		{
			/* Clean up the event object we created for the socket */
			WSAEventSelect(cur_event->fd, NULL, 0);
			WSACloseEvent(set->handles[cur_event->pos + 1]);
		}
	}
#endif

	/* Everything, including the per-primitive arrays, is one allocation */
	pfree(set);
}
519 : :
520 : : /*
521 : : * Free a previously created WaitEventSet in a child process after a fork().
522 : : */
void
FreeWaitEventSetAfterFork(WaitEventSet *set)
{
#if defined(WAIT_USE_EPOLL)
	/* the epoll FD was inherited across fork(), close our copy */
	close(set->epoll_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
	/* kqueues are not normally inherited by child processes */
	ReleaseExternalFD();
#endif

	/* No resource-owner bookkeeping here: owner state belongs to the parent */
	pfree(set);
}
536 : :
537 : : /* ---
538 : : * Add an event to the set. Possible events are:
539 : : * - WL_LATCH_SET: Wait for the latch to be set
540 : : * - WL_POSTMASTER_DEATH: Wait for postmaster to die
541 : : * - WL_SOCKET_READABLE: Wait for socket to become readable,
542 : : * can be combined in one event with other WL_SOCKET_* events
543 : : * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
544 : : * can be combined with other WL_SOCKET_* events
545 : : * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
546 : : * can be combined with other WL_SOCKET_* events (on non-Windows
547 : : * platforms, this is the same as WL_SOCKET_WRITEABLE)
548 : : * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
549 : : * can be combined with other WL_SOCKET_* events (on non-Windows
550 : : * platforms, this is the same as WL_SOCKET_READABLE)
551 : : * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
552 : : * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
553 : : *
554 : : * Returns the offset in WaitEventSet->events (starting from 0), which can be
555 : : * used to modify previously added wait events using ModifyWaitEvent().
556 : : *
557 : : * In the WL_LATCH_SET case the latch must be owned by the current process,
558 : : * i.e. it must be a process-local latch initialized with InitLatch, or a
559 : : * shared latch associated with the current process by calling OwnLatch.
560 : : *
561 : : * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
562 : : * conditions cause the socket to be reported as readable/writable/connected,
563 : : * so that the caller can deal with the condition.
564 : : *
565 : : * The user_data pointer specified here will be set for the events returned
566 : : * by WaitEventSetWait(), allowing to easily associate additional data with
567 : : * events.
568 : : */
int
AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
				  void *user_data)
{
	WaitEvent  *event;

	/* not enough space */
	Assert(set->nevents < set->nevents_space);

	/* WL_EXIT_ON_PM_DEATH is WL_POSTMASTER_DEATH plus the exit-on-death flag */
	if (events == WL_EXIT_ON_PM_DEATH)
	{
		events = WL_POSTMASTER_DEATH;
		set->exit_on_postmaster_death = true;
	}

	if (latch)
	{
		if (latch->owner_pid != MyProcPid)
			elog(ERROR, "cannot wait on a latch owned by another process");
		if (set->latch)
			elog(ERROR, "cannot wait on more than one latch");
		if ((events & WL_LATCH_SET) != WL_LATCH_SET)
			elog(ERROR, "latch events only support being set");
	}
	else
	{
		if (events & WL_LATCH_SET)
			elog(ERROR, "cannot wait on latch without a specified latch");
	}

	/* waiting for socket readiness without a socket indicates a bug */
	if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
		elog(ERROR, "cannot wait on socket event without a socket");

	event = &set->events[set->nevents];
	event->pos = set->nevents++;
	event->fd = fd;
	event->events = events;
	event->user_data = user_data;
#ifdef WIN32
	event->reset = false;
#endif

	if (events == WL_LATCH_SET)
	{
		set->latch = latch;
		set->latch_pos = event->pos;
		/* the latch is monitored via the wakeup FD of the chosen mechanism */
#if defined(WAIT_USE_SELF_PIPE)
		event->fd = selfpipe_readfd;
#elif defined(WAIT_USE_SIGNALFD)
		event->fd = signal_fd;
#else
		/* no FD to watch for the latch in this configuration */
		event->fd = PGINVALID_SOCKET;
#ifdef WAIT_USE_EPOLL
		return event->pos;
#endif
#endif
	}
	else if (events == WL_POSTMASTER_DEATH)
	{
#ifndef WIN32
		/* postmaster death is detected via EOF on its lifetime pipe */
		event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
#endif
	}

	/* perform wait primitive specific initialization, if needed */
#if defined(WAIT_USE_EPOLL)
	WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
#elif defined(WAIT_USE_KQUEUE)
	WaitEventAdjustKqueue(set, event, 0);
#elif defined(WAIT_USE_POLL)
	WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
	WaitEventAdjustWin32(set, event);
#endif

	return event->pos;
}
648 : : /*
649 : : * Change the event mask and, in the WL_LATCH_SET case, the latch associated
650 : : * with the WaitEvent. The latch may be changed to NULL to disable the latch
651 : : * temporarily, and then set back to a latch later.
652 : : *
653 : : * 'pos' is the id returned by AddWaitEventToSet.
654 : : */
void
ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
	WaitEvent  *event;
#if defined(WAIT_USE_KQUEUE)
	int			old_events;
#endif

	Assert(pos < set->nevents);

	event = &set->events[pos];
#if defined(WAIT_USE_KQUEUE)
	/* kqueue needs the previous mask to compute filter changes */
	old_events = event->events;
#endif

	/*
	 * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
	 *
	 * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
	 * in AddWaitEventToSet(), this needs to be checked before the fast-path
	 * below that checks if 'events' has changed.
	 */
	if (event->events == WL_POSTMASTER_DEATH)
	{
		if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
			elog(ERROR, "cannot remove postmaster death event");
		set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
		return;
	}

	/*
	 * If neither the event mask nor the associated latch changes, return
	 * early. That's an important optimization for some sockets, where
	 * ModifyWaitEvent is frequently used to switch from waiting for reads to
	 * waiting on writes.
	 */
	if (events == event->events &&
		(!(event->events & WL_LATCH_SET) || set->latch == latch))
		return;

	if (event->events & WL_LATCH_SET && events != event->events)
		elog(ERROR, "cannot modify latch event");

	/* FIXME: validate event mask */
	event->events = events;

	if (events == WL_LATCH_SET)
	{
		/* latch may be NULL here, which disables the latch temporarily */
		if (latch && latch->owner_pid != MyProcPid)
			elog(ERROR, "cannot wait on a latch owned by another process");
		set->latch = latch;

		/*
		 * On Unix, we don't need to modify the kernel object because the
		 * underlying pipe (if there is one) is the same for all latches so we
		 * can return immediately. On Windows, we need to update our array of
		 * handles, but we leave the old one in place and tolerate spurious
		 * wakeups if the latch is disabled.
		 */
#if defined(WAIT_USE_WIN32)
		if (!latch)
			return;
#else
		return;
#endif
	}

#if defined(WAIT_USE_EPOLL)
	WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
#elif defined(WAIT_USE_KQUEUE)
	WaitEventAdjustKqueue(set, event, old_events);
#elif defined(WAIT_USE_POLL)
	WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
	WaitEventAdjustWin32(set, event);
#endif
}
732 : :
733 : : #if defined(WAIT_USE_EPOLL)
734 : : /*
735 : : * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
736 : : */
static void
WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
{
	struct epoll_event epoll_ev;
	int			rc;

	/* pointer to our event, returned by epoll_wait */
	epoll_ev.data.ptr = event;
	/* always wait for errors; EPOLLERR/EPOLLHUP are reported unconditionally */
	epoll_ev.events = EPOLLERR | EPOLLHUP;

	/* prepare pollfd entry once */
	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		epoll_ev.events |= EPOLLIN;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		epoll_ev.events |= EPOLLIN;
	}
	else
	{
		Assert(event->fd != PGINVALID_SOCKET);
		Assert(event->events & (WL_SOCKET_READABLE |
								WL_SOCKET_WRITEABLE |
								WL_SOCKET_CLOSED));

		if (event->events & WL_SOCKET_READABLE)
			epoll_ev.events |= EPOLLIN;
		if (event->events & WL_SOCKET_WRITEABLE)
			epoll_ev.events |= EPOLLOUT;
		if (event->events & WL_SOCKET_CLOSED)
			epoll_ev.events |= EPOLLRDHUP;
	}

	/*
	 * Even though unused, we also pass epoll_ev as the data argument if
	 * EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
	 * requiring that, and actually it makes the code simpler...
	 */
	rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);

	if (rc < 0)
		ereport(ERROR,
				(errcode_for_socket_access(),
				 errmsg("%s() failed: %m",
						"epoll_ctl")));
}
786 : : #endif
787 : :
788 : : #if defined(WAIT_USE_POLL)
/*
 * Set up the pollfd entry for 'event', translating the WL_* event mask
 * into poll(2) event flags.  WL_LATCH_SET and WL_POSTMASTER_DEATH both
 * wait for POLLIN on their associated file descriptors.
 */
789 : : static void
790 : : WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
791 : : {
792 : : struct pollfd *pollfd = &set->pollfds[event->pos];
793 : :
794 : : pollfd->revents = 0;
795 : : pollfd->fd = event->fd;
796 : :
797 : : /* prepare pollfd entry once */
798 : : if (event->events == WL_LATCH_SET)
799 : : {
800 : : Assert(set->latch != NULL);
801 : : pollfd->events = POLLIN;
802 : : }
803 : : else if (event->events == WL_POSTMASTER_DEATH)
804 : : {
805 : : pollfd->events = POLLIN;
806 : : }
807 : : else
808 : : {
809 : : Assert(event->events & (WL_SOCKET_READABLE |
810 : : WL_SOCKET_WRITEABLE |
811 : : WL_SOCKET_CLOSED));
812 : : pollfd->events = 0;
813 : : if (event->events & WL_SOCKET_READABLE)
814 : : pollfd->events |= POLLIN;
815 : : if (event->events & WL_SOCKET_WRITEABLE)
816 : : pollfd->events |= POLLOUT;
817 : : #ifdef POLLRDHUP
818 : : if (event->events & WL_SOCKET_CLOSED)
819 : : pollfd->events |= POLLRDHUP;
820 : : #endif
821 : : }
822 : :
823 : : Assert(event->fd != PGINVALID_SOCKET);
824 : : }
825 : : #endif
826 : :
827 : : #if defined(WAIT_USE_KQUEUE)
828 : :
829 : : /*
830 : : * On most BSD family systems, the udata member of struct kevent is of type
831 : : * void *, so we could directly convert to/from WaitEvent *. Unfortunately,
832 : : * NetBSD has it as intptr_t, so here we wallpaper over that difference with
833 : : * an lvalue cast.
834 : : */
835 : : #define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
836 : :
/*
 * Fill in a kevent to add or delete (per 'action') the given 'filter'
 * (EVFILT_READ or EVFILT_WRITE) for the event's file descriptor.
 */
837 : : static inline void
838 : : WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
839 : : WaitEvent *event)
840 : : {
841 : : k_ev->ident = event->fd;
842 : : k_ev->filter = filter;
843 : : k_ev->flags = action;
844 : : k_ev->fflags = 0;
845 : : k_ev->data = 0;
846 : : AccessWaitEvent(k_ev) = event;
847 : : }
848 : :
/*
 * Fill in a kevent that watches the postmaster's pid for exit, using an
 * EVFILT_PROC filter with NOTE_EXIT process notification.
 */
849 : : static inline void
850 : : WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
851 : : {
852 : : /* For now postmaster death can only be added, not removed. */
853 : : k_ev->ident = PostmasterPid;
854 : : k_ev->filter = EVFILT_PROC;
855 : : k_ev->flags = EV_ADD;
856 : : k_ev->fflags = NOTE_EXIT;
857 : : k_ev->data = 0;
858 : : AccessWaitEvent(k_ev) = event;
859 : : }
860 : :
/*
 * Fill in a kevent that detects latch wakeups by waiting for SIGURG with
 * an EVFILT_SIGNAL filter (see the kqueue notes in the file header).
 */
861 : : static inline void
862 : : WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
863 : : {
864 : : /* For now latch can only be added, not removed. */
865 : : k_ev->ident = SIGURG;
866 : : k_ev->filter = EVFILT_SIGNAL;
867 : : k_ev->flags = EV_ADD;
868 : : k_ev->fflags = 0;
869 : : k_ev->data = 0;
870 : : AccessWaitEvent(k_ev) = event;
871 : : }
872 : :
873 : : /*
874 : : * old_events is the previous event mask, used to compute what has changed.
875 : : */
876 : : static void
877 : : WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
878 : : {
879 : : int rc;
880 : : struct kevent k_ev[2];
881 : : int count = 0;
882 : : bool new_filt_read = false;
883 : : bool old_filt_read = false;
884 : : bool new_filt_write = false;
885 : : bool old_filt_write = false;
886 : :
/* Nothing to do if the event mask hasn't changed. */
887 : : if (old_events == event->events)
888 : : return;
889 : :
890 : : Assert(event->events != WL_LATCH_SET || set->latch != NULL);
891 : : Assert(event->events == WL_LATCH_SET ||
892 : : event->events == WL_POSTMASTER_DEATH ||
893 : : (event->events & (WL_SOCKET_READABLE |
894 : : WL_SOCKET_WRITEABLE |
895 : : WL_SOCKET_CLOSED)));
896 : :
897 : : if (event->events == WL_POSTMASTER_DEATH)
898 : : {
899 : : /*
900 : : * Unlike all the other implementations, we detect postmaster death
901 : : * using process notification instead of waiting on the postmaster
902 : : * alive pipe.
903 : : */
904 : : WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
905 : : }
906 : : else if (event->events == WL_LATCH_SET)
907 : : {
908 : : /* We detect latch wakeup using a signal event. */
909 : : WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
910 : : }
911 : : else
912 : : {
913 : : /*
914 : : * We need to compute the adds and deletes required to get from the
915 : : * old event mask to the new event mask, since kevent treats readable
916 : : * and writable as separate events.
917 : : */
918 : : if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
919 : : old_filt_read = true;
920 : : if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
921 : : new_filt_read = true;
922 : : if (old_events & WL_SOCKET_WRITEABLE)
923 : : old_filt_write = true;
924 : : if (event->events & WL_SOCKET_WRITEABLE)
925 : : new_filt_write = true;
926 : : if (old_filt_read && !new_filt_read)
927 : : WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
928 : : event);
929 : : else if (!old_filt_read && new_filt_read)
930 : : WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
931 : : event);
932 : : if (old_filt_write && !new_filt_write)
933 : : WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
934 : : event);
935 : : else if (!old_filt_write && new_filt_write)
936 : : WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
937 : : event);
938 : : }
939 : :
940 : : /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
941 : : if (count == 0)
942 : : return;
943 : :
944 : : Assert(count <= 2);
945 : :
/* Apply all the accumulated changes in a single kevent() call. */
946 : : rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
947 : :
948 : : /*
949 : : * When adding the postmaster's pid, we have to consider that it might
950 : : * already have exited and perhaps even been replaced by another process
951 : : * with the same pid. If so, we have to defer reporting this as an event
952 : : * until the next call to WaitEventSetWaitBlock().
953 : : */
954 : :
955 : : if (rc < 0)
956 : : {
957 : : if (event->events == WL_POSTMASTER_DEATH &&
958 : : (errno == ESRCH || errno == EACCES))
959 : : set->report_postmaster_not_running = true;
960 : : else
961 : : ereport(ERROR,
962 : : (errcode_for_socket_access(),
963 : : errmsg("%s() failed: %m",
964 : : "kevent")));
965 : : }
966 : : else if (event->events == WL_POSTMASTER_DEATH &&
967 : : PostmasterPid != getppid() &&
968 : : !PostmasterIsAlive())
969 : : {
970 : : /*
971 : : * The extra PostmasterIsAliveInternal() check prevents false alarms
972 : : * on systems that give a different value for getppid() while being
973 : : * traced by a debugger.
974 : : */
975 : : set->report_postmaster_not_running = true;
976 : : }
977 : : }
978 : :
979 : : #endif
980 : :
981 : : #if defined(WAIT_USE_WIN32)
/*
 * Update the Windows event handle associated with 'event'.  Handle slot
 * 0 holds the signal event, so this event's handle lives at position
 * pos + 1.  For sockets, (re)create a WSA event if necessary and select
 * the FD_* network events matching the WL_* mask.
 */
982 : : static void
983 : : WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
984 : : {
985 : : HANDLE *handle = &set->handles[event->pos + 1];
986 : :
987 : : if (event->events == WL_LATCH_SET)
988 : : {
989 : : Assert(set->latch != NULL);
990 : : *handle = set->latch->event;
991 : : }
992 : : else if (event->events == WL_POSTMASTER_DEATH)
993 : : {
994 : : *handle = PostmasterHandle;
995 : : }
996 : : else
997 : : {
998 : : int flags = FD_CLOSE; /* always check for errors/EOF */
999 : :
1000 : : if (event->events & WL_SOCKET_READABLE)
1001 : : flags |= FD_READ;
1002 : : if (event->events & WL_SOCKET_WRITEABLE)
1003 : : flags |= FD_WRITE;
1004 : : if (event->events & WL_SOCKET_CONNECTED)
1005 : : flags |= FD_CONNECT;
1006 : : if (event->events & WL_SOCKET_ACCEPT)
1007 : : flags |= FD_ACCEPT;
1008 : :
1009 : : if (*handle == WSA_INVALID_EVENT)
1010 : : {
1011 : : *handle = WSACreateEvent();
1012 : : if (*handle == WSA_INVALID_EVENT)
1013 : : elog(ERROR, "failed to create event for socket: error code %d",
1014 : : WSAGetLastError());
1015 : : }
1016 : : if (WSAEventSelect(event->fd, *handle, flags) != 0)
1017 : : elog(ERROR, "failed to set up event for socket: error code %d",
1018 : : WSAGetLastError());
1019 : :
1020 : : Assert(event->fd != PGINVALID_SOCKET);
1021 : : }
1022 : : }
1023 : : #endif
1024 : :
1025 : : /*
1026 : : * Wait for events added to the set to happen, or until the timeout is
1027 : : * reached. At most nevents occurred events are returned.
1028 : : *
1029 : : * If timeout = -1, block until an event occurs; if 0, check sockets for
1030 : : * readiness, but don't block; if > 0, block for at most timeout milliseconds.
1031 : : *
1032 : : * Returns the number of events occurred, or 0 if the timeout was reached.
1033 : : *
1034 : : * Returned events will have the fd, pos, user_data fields set to the
1035 : : * values associated with the registered event.
1036 : : */
1037 : : int
1038 : 1437255 : WaitEventSetWait(WaitEventSet *set, long timeout,
1039 : : WaitEvent *occurred_events, int nevents,
1040 : : uint32 wait_event_info)
1041 : : {
1042 : 1437255 : int returned_events = 0;
1043 : : instr_time start_time;
1044 : : instr_time cur_time;
1045 : 1437255 : long cur_timeout = -1;
1046 : :
1047 [ - + ]: 1437255 : Assert(nevents > 0);
1048 : :
1049 : : /*
1050 : : * Initialize timeout if requested. We must record the current time so
1051 : : * that we can determine the remaining timeout if interrupted.
1052 : : */
1053 [ + + ]: 1437255 : if (timeout >= 0)
1054 : : {
1055 : 230829 : INSTR_TIME_SET_CURRENT(start_time);
1056 [ + - - + ]: 230829 : Assert(timeout >= 0 && timeout <= INT_MAX);
1057 : 230829 : cur_timeout = timeout;
1058 : : }
1059 : : else
1060 : 1206426 : INSTR_TIME_SET_ZERO(start_time);
1061 : :
1062 : 1437255 : pgstat_report_wait_start(wait_event_info);
1063 : :
1064 : : #ifndef WIN32
/* Announce that we may be blocking, so signal senders wake us (see file header). */
1065 : 1437255 : waiting = true;
1066 : : #else
1067 : : /* Ensure that signals are serviced even if latch is already set */
1068 : : pgwin32_dispatch_queued_signals();
1069 : : #endif
1070 [ + + ]: 2814403 : while (returned_events == 0)
1071 : : {
1072 : : int rc;
1073 : :
1074 : : /*
1075 : : * Check if the latch is set already first. If so, we either exit
1076 : : * immediately or ask the kernel for further events available right
1077 : : * now without waiting, depending on how many events the caller wants.
1078 : : *
1079 : : * If someone sets the latch between this and the
1080 : : * WaitEventSetWaitBlock() below, the setter will write a byte to the
1081 : : * pipe (or signal us and the signal handler will do that), and the
1082 : : * readiness routine will return immediately.
1083 : : *
1084 : : * On unix, If there's a pending byte in the self pipe, we'll notice
1085 : : * whenever blocking. Only clearing the pipe in that case avoids
1086 : : * having to drain it every time WaitLatchOrSocket() is used. Should
1087 : : * the pipe-buffer fill up we're still ok, because the pipe is in
1088 : : * nonblocking mode. It's unlikely for that to happen, because the
1089 : : * self pipe isn't filled unless we're blocking (waiting = true), or
1090 : : * from inside a signal handler in latch_sigurg_handler().
1091 : : *
1092 : : * On windows, we'll also notice if there's a pending event for the
1093 : : * latch when blocking, but there's no danger of anything filling up,
1094 : : * as "Setting an event that is already set has no effect.".
1095 : : *
1096 : : * Note: we assume that the kernel calls involved in latch management
1097 : : * will provide adequate synchronization on machines with weak memory
1098 : : * ordering, so that we cannot miss seeing is_set if a notification
1099 : : * has already been queued.
1100 : : */
1101 [ + + + + ]: 1481080 : if (set->latch && !set->latch->is_set)
1102 : : {
1103 : : /* about to sleep on a latch */
1104 : 1377774 : set->latch->maybe_sleeping = true;
1105 : 1377774 : pg_memory_barrier();
1106 : : /* and recheck */
1107 : : }
1108 : :
1109 [ + + + + ]: 1481080 : if (set->latch && set->latch->is_set)
1110 : : {
1111 : 102934 : occurred_events->fd = PGINVALID_SOCKET;
1112 : 102934 : occurred_events->pos = set->latch_pos;
1113 : 102934 : occurred_events->user_data =
1114 : 102934 : set->events[set->latch_pos].user_data;
1115 : 102934 : occurred_events->events = WL_LATCH_SET;
1116 : 102934 : occurred_events++;
1117 : 102934 : returned_events++;
1118 : :
1119 : : /* could have been set above */
1120 : 102934 : set->latch->maybe_sleeping = false;
1121 : :
1122 [ + + ]: 102934 : if (returned_events == nevents)
1123 : 78345 : break; /* output buffer full already */
1124 : :
1125 : : /*
1126 : : * Even though we already have an event, we'll poll just once with
1127 : : * zero timeout to see what non-latch events we can fit into the
1128 : : * output buffer at the same time.
1129 : : */
1130 : 24589 : cur_timeout = 0;
1131 : 24589 : timeout = 0;
1132 : : }
1133 : :
1134 : : /*
1135 : : * Wait for events using the readiness primitive chosen at the top of
1136 : : * this file. If -1 is returned, a timeout has occurred, if 0 we have
1137 : : * to retry, everything >= 1 is the number of returned events.
1138 : : */
1139 : 1402735 : rc = WaitEventSetWaitBlock(set, cur_timeout,
1140 : : occurred_events, nevents - returned_events);
1141 : :
/* The wait (if any) is over; clear the sleep announcement flag. */
1142 [ + + ]: 1402711 : if (set->latch &&
1143 [ + + ]: 1402317 : set->latch->maybe_sleeping)
1144 : 1377728 : set->latch->maybe_sleeping = false;
1145 : :
1146 [ + + ]: 1402711 : if (rc == -1)
1147 : 25563 : break; /* timeout occurred */
1148 : : else
1149 : 1377148 : returned_events += rc;
1150 : :
1151 : : /* If we're not done, update cur_timeout for next iteration */
1152 [ + + + + ]: 1377148 : if (returned_events == 0 && timeout >= 0)
1153 : : {
1154 : 35848 : INSTR_TIME_SET_CURRENT(cur_time);
1155 : 35848 : INSTR_TIME_SUBTRACT(cur_time, start_time);
1156 : 35848 : cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
1157 [ - + ]: 35848 : if (cur_timeout <= 0)
235 heikki.linnakangas@i 1158 :LBC (3) : break;
1159 : : }
1160 : : }
1161 : : #ifndef WIN32
235 heikki.linnakangas@i 1162 :CBC 1437231 : waiting = false;
1163 : : #endif
1164 : :
1165 : 1437231 : pgstat_report_wait_end();
1166 : :
1167 : 1437231 : return returned_events;
1168 : : }
1169 : :
1170 : :
1171 : : #if defined(WAIT_USE_EPOLL)
1172 : :
1173 : : /*
1174 : : * Wait using linux's epoll_wait(2).
1175 : : *
1176 : : * This is the preferable wait method, as several readiness notifications are
1177 : : * delivered, without having to iterate through all of set->events. The return
1178 : : * epoll_event struct contain a pointer to our events, making association
1179 : : * easy.
1180 : : */
1181 : : static inline int
1182 : 1402735 : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1183 : : WaitEvent *occurred_events, int nevents)
1184 : : {
1185 : 1402735 : int returned_events = 0;
1186 : : int rc;
1187 : : WaitEvent *cur_event;
1188 : : struct epoll_event *cur_epoll_event;
1189 : :
1190 : : /* Sleep */
1191 : 1402735 : rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
1192 : 1402735 : Min(nevents, set->nevents_space), cur_timeout);
1193 : :
1194 : : /* Check return code */
1195 [ + + ]: 1402735 : if (rc < 0)
1196 : : {
1197 : : /* EINTR is okay, otherwise complain */
1198 [ - + ]: 34991 : if (errno != EINTR)
1199 : : {
/* ereport(ERROR) doesn't return, so reset 'waiting' here ourselves. */
235 heikki.linnakangas@i 1200 :UBC 0 : waiting = false;
1201 [ # # ]: 0 : ereport(ERROR,
1202 : : (errcode_for_socket_access(),
1203 : : errmsg("%s() failed: %m",
1204 : : "epoll_wait")));
1205 : : }
235 heikki.linnakangas@i 1206 :CBC 34991 : return 0;
1207 : : }
1208 [ + + ]: 1367744 : else if (rc == 0)
1209 : : {
1210 : : /* timeout exceeded */
1211 : 25563 : return -1;
1212 : : }
1213 : :
1214 : : /*
1215 : : * At least one event occurred, iterate over the returned epoll events
1216 : : * until they're either all processed, or we've returned all the events
1217 : : * the caller desired.
1218 : : */
1219 : 1342181 : for (cur_epoll_event = set->epoll_ret_events;
1220 [ + + + - ]: 2684376 : cur_epoll_event < (set->epoll_ret_events + rc) &&
1221 : : returned_events < nevents;
1222 : 1342195 : cur_epoll_event++)
1223 : : {
1224 : : /* epoll's data pointer is set to the associated WaitEvent */
1225 : 1342219 : cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
1226 : :
1227 : 1342219 : occurred_events->pos = cur_event->pos;
1228 : 1342219 : occurred_events->user_data = cur_event->user_data;
1229 : 1342219 : occurred_events->events = 0;
1230 : :
1231 [ + + ]: 1342219 : if (cur_event->events == WL_LATCH_SET &&
1232 [ + - ]: 1023564 : cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1233 : : {
1234 : : /* Drain the signalfd. */
1235 : 1023564 : drain();
1236 : :
1237 [ + - + + : 1023564 : if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
+ + ]
1238 : : {
1239 : 990564 : occurred_events->fd = PGINVALID_SOCKET;
1240 : 990564 : occurred_events->events = WL_LATCH_SET;
1241 : 990564 : occurred_events++;
1242 : 990564 : returned_events++;
1243 : : }
1244 : : }
1245 [ + + ]: 318655 : else if (cur_event->events == WL_POSTMASTER_DEATH &&
1246 [ + - ]: 24 : cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1247 : : {
1248 : : /*
1249 : : * We expect an EPOLLHUP when the remote end is closed, but
1250 : : * because we don't expect the pipe to become readable or to have
1251 : : * any errors either, treat those cases as postmaster death, too.
1252 : : *
1253 : : * Be paranoid about a spurious event signaling the postmaster as
1254 : : * being dead. There have been reports about that happening with
1255 : : * older primitives (select(2) to be specific), and a spurious
1256 : : * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1257 : : * cost much.
1258 : : */
1259 [ + - ]: 24 : if (!PostmasterIsAliveInternal())
1260 : : {
1261 [ + - ]: 24 : if (set->exit_on_postmaster_death)
1262 : 24 : proc_exit(1);
235 heikki.linnakangas@i 1263 :UBC 0 : occurred_events->fd = PGINVALID_SOCKET;
1264 : 0 : occurred_events->events = WL_POSTMASTER_DEATH;
1265 : 0 : occurred_events++;
1266 : 0 : returned_events++;
1267 : : }
1268 : : }
235 heikki.linnakangas@i 1269 [ + - ]:CBC 318631 : else if (cur_event->events & (WL_SOCKET_READABLE |
1270 : : WL_SOCKET_WRITEABLE |
1271 : : WL_SOCKET_CLOSED))
1272 : : {
1273 [ - + ]: 318631 : Assert(cur_event->fd != PGINVALID_SOCKET);
1274 : :
1275 [ + + ]: 318631 : if ((cur_event->events & WL_SOCKET_READABLE) &&
1276 [ + + ]: 303642 : (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
1277 : : {
1278 : : /* data available in socket, or EOF */
1279 : 290338 : occurred_events->events |= WL_SOCKET_READABLE;
1280 : : }
1281 : :
1282 [ + + ]: 318631 : if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1283 [ + + ]: 50203 : (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
1284 : : {
1285 : : /* writable, or EOF */
1286 : 49812 : occurred_events->events |= WL_SOCKET_WRITEABLE;
1287 : : }
1288 : :
1289 [ - + ]: 318631 : if ((cur_event->events & WL_SOCKET_CLOSED) &&
235 heikki.linnakangas@i 1290 [ # # ]:UBC 0 : (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
1291 : : {
1292 : : /* remote peer shut down, or error */
1293 : 0 : occurred_events->events |= WL_SOCKET_CLOSED;
1294 : : }
1295 : :
235 heikki.linnakangas@i 1296 [ + - ]:CBC 318631 : if (occurred_events->events != 0)
1297 : : {
1298 : 318631 : occurred_events->fd = cur_event->fd;
1299 : 318631 : occurred_events++;
1300 : 318631 : returned_events++;
1301 : : }
1302 : : }
1303 : : }
1304 : :
1305 : 1342157 : return returned_events;
1306 : : }
1307 : :
1308 : : #elif defined(WAIT_USE_KQUEUE)
1309 : :
1310 : : /*
1311 : : * Wait using kevent(2) on BSD-family systems and macOS.
1312 : : *
1313 : : * For now this mirrors the epoll code, but in future it could modify the fd
1314 : : * set in the same call to kevent as it uses for waiting instead of doing that
1315 : : * with separate system calls.
1316 : : */
1317 : : static int
1318 : : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1319 : : WaitEvent *occurred_events, int nevents)
1320 : : {
1321 : : int returned_events = 0;
1322 : : int rc;
1323 : : WaitEvent *cur_event;
1324 : : struct kevent *cur_kqueue_event;
1325 : : struct timespec timeout;
1326 : : struct timespec *timeout_p;
1327 : :
/* kevent() takes a timespec; NULL means block indefinitely. */
1328 : : if (cur_timeout < 0)
1329 : : timeout_p = NULL;
1330 : : else
1331 : : {
1332 : : timeout.tv_sec = cur_timeout / 1000;
1333 : : timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
1334 : : timeout_p = &timeout;
1335 : : }
1336 : :
1337 : : /*
1338 : : * Report postmaster events discovered by WaitEventAdjustKqueue() or an
1339 : : * earlier call to WaitEventSetWait().
1340 : : */
1341 : : if (unlikely(set->report_postmaster_not_running))
1342 : : {
1343 : : if (set->exit_on_postmaster_death)
1344 : : proc_exit(1);
1345 : : occurred_events->fd = PGINVALID_SOCKET;
1346 : : occurred_events->events = WL_POSTMASTER_DEATH;
1347 : : return 1;
1348 : : }
1349 : :
1350 : : /* Sleep */
1351 : : rc = kevent(set->kqueue_fd, NULL, 0,
1352 : : set->kqueue_ret_events,
1353 : : Min(nevents, set->nevents_space),
1354 : : timeout_p);
1355 : :
1356 : : /* Check return code */
1357 : : if (rc < 0)
1358 : : {
1359 : : /* EINTR is okay, otherwise complain */
1360 : : if (errno != EINTR)
1361 : : {
/* ereport(ERROR) doesn't return, so reset 'waiting' here ourselves. */
1362 : : waiting = false;
1363 : : ereport(ERROR,
1364 : : (errcode_for_socket_access(),
1365 : : errmsg("%s() failed: %m",
1366 : : "kevent")));
1367 : : }
1368 : : return 0;
1369 : : }
1370 : : else if (rc == 0)
1371 : : {
1372 : : /* timeout exceeded */
1373 : : return -1;
1374 : : }
1375 : :
1376 : : /*
1377 : : * At least one event occurred, iterate over the returned kqueue events
1378 : : * until they're either all processed, or we've returned all the events
1379 : : * the caller desired.
1380 : : */
1381 : : for (cur_kqueue_event = set->kqueue_ret_events;
1382 : : cur_kqueue_event < (set->kqueue_ret_events + rc) &&
1383 : : returned_events < nevents;
1384 : : cur_kqueue_event++)
1385 : : {
1386 : : /* kevent's udata points to the associated WaitEvent */
1387 : : cur_event = AccessWaitEvent(cur_kqueue_event);
1388 : :
1389 : : occurred_events->pos = cur_event->pos;
1390 : : occurred_events->user_data = cur_event->user_data;
1391 : : occurred_events->events = 0;
1392 : :
1393 : : if (cur_event->events == WL_LATCH_SET &&
1394 : : cur_kqueue_event->filter == EVFILT_SIGNAL)
1395 : : {
1396 : : if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1397 : : {
1398 : : occurred_events->fd = PGINVALID_SOCKET;
1399 : : occurred_events->events = WL_LATCH_SET;
1400 : : occurred_events++;
1401 : : returned_events++;
1402 : : }
1403 : : }
1404 : : else if (cur_event->events == WL_POSTMASTER_DEATH &&
1405 : : cur_kqueue_event->filter == EVFILT_PROC &&
1406 : : (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
1407 : : {
1408 : : /*
1409 : : * The kernel will tell this kqueue object only once about the
1410 : : * exit of the postmaster, so let's remember that for next time so
1411 : : * that we provide level-triggered semantics.
1412 : : */
1413 : : set->report_postmaster_not_running = true;
1414 : :
1415 : : if (set->exit_on_postmaster_death)
1416 : : proc_exit(1);
1417 : : occurred_events->fd = PGINVALID_SOCKET;
1418 : : occurred_events->events = WL_POSTMASTER_DEATH;
1419 : : occurred_events++;
1420 : : returned_events++;
1421 : : }
1422 : : else if (cur_event->events & (WL_SOCKET_READABLE |
1423 : : WL_SOCKET_WRITEABLE |
1424 : : WL_SOCKET_CLOSED))
1425 : : {
1426 : : Assert(cur_event->fd >= 0);
1427 : :
1428 : : if ((cur_event->events & WL_SOCKET_READABLE) &&
1429 : : (cur_kqueue_event->filter == EVFILT_READ))
1430 : : {
1431 : : /* readable, or EOF */
1432 : : occurred_events->events |= WL_SOCKET_READABLE;
1433 : : }
1434 : :
1435 : : if ((cur_event->events & WL_SOCKET_CLOSED) &&
1436 : : (cur_kqueue_event->filter == EVFILT_READ) &&
1437 : : (cur_kqueue_event->flags & EV_EOF))
1438 : : {
1439 : : /* the remote peer has shut down */
1440 : : occurred_events->events |= WL_SOCKET_CLOSED;
1441 : : }
1442 : :
1443 : : if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1444 : : (cur_kqueue_event->filter == EVFILT_WRITE))
1445 : : {
1446 : : /* writable, or EOF */
1447 : : occurred_events->events |= WL_SOCKET_WRITEABLE;
1448 : : }
1449 : :
1450 : : if (occurred_events->events != 0)
1451 : : {
1452 : : occurred_events->fd = cur_event->fd;
1453 : : occurred_events++;
1454 : : returned_events++;
1455 : : }
1456 : : }
1457 : : }
1458 : :
1459 : : return returned_events;
1460 : : }
1461 : :
1462 : : #elif defined(WAIT_USE_POLL)
1463 : :
1464 : : /*
1465 : : * Wait using poll(2).
1466 : : *
1467 : : * This allows to receive readiness notifications for several events at once,
1468 : : * but requires iterating through all of set->pollfds.
1469 : : */
1470 : : static inline int
1471 : : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1472 : : WaitEvent *occurred_events, int nevents)
1473 : : {
1474 : : int returned_events = 0;
1475 : : int rc;
1476 : : WaitEvent *cur_event;
1477 : : struct pollfd *cur_pollfd;
1478 : :
1479 : : /* Sleep */
1480 : : rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
1481 : :
1482 : : /* Check return code */
1483 : : if (rc < 0)
1484 : : {
1485 : : /* EINTR is okay, otherwise complain */
1486 : : if (errno != EINTR)
1487 : : {
/* ereport(ERROR) doesn't return, so reset 'waiting' here ourselves. */
1488 : : waiting = false;
1489 : : ereport(ERROR,
1490 : : (errcode_for_socket_access(),
1491 : : errmsg("%s() failed: %m",
1492 : : "poll")));
1493 : : }
1494 : : return 0;
1495 : : }
1496 : : else if (rc == 0)
1497 : : {
1498 : : /* timeout exceeded */
1499 : : return -1;
1500 : : }
1501 : :
1502 : : for (cur_event = set->events, cur_pollfd = set->pollfds;
1503 : : cur_event < (set->events + set->nevents) &&
1504 : : returned_events < nevents;
1505 : : cur_event++, cur_pollfd++)
1506 : : {
1507 : : /* no activity on this FD, skip */
1508 : : if (cur_pollfd->revents == 0)
1509 : : continue;
1510 : :
1511 : : occurred_events->pos = cur_event->pos;
1512 : : occurred_events->user_data = cur_event->user_data;
1513 : : occurred_events->events = 0;
1514 : :
1515 : : if (cur_event->events == WL_LATCH_SET &&
1516 : : (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1517 : : {
1518 : : /* There's data in the self-pipe, clear it. */
1519 : : drain();
1520 : :
1521 : : if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1522 : : {
1523 : : occurred_events->fd = PGINVALID_SOCKET;
1524 : : occurred_events->events = WL_LATCH_SET;
1525 : : occurred_events++;
1526 : : returned_events++;
1527 : : }
1528 : : }
1529 : : else if (cur_event->events == WL_POSTMASTER_DEATH &&
1530 : : (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1531 : : {
1532 : : /*
1533 : : * We expect an POLLHUP when the remote end is closed, but because
1534 : : * we don't expect the pipe to become readable or to have any
1535 : : * errors either, treat those cases as postmaster death, too.
1536 : : *
1537 : : * Be paranoid about a spurious event signaling the postmaster as
1538 : : * being dead. There have been reports about that happening with
1539 : : * older primitives (select(2) to be specific), and a spurious
1540 : : * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1541 : : * cost much.
1542 : : */
1543 : : if (!PostmasterIsAliveInternal())
1544 : : {
1545 : : if (set->exit_on_postmaster_death)
1546 : : proc_exit(1);
1547 : : occurred_events->fd = PGINVALID_SOCKET;
1548 : : occurred_events->events = WL_POSTMASTER_DEATH;
1549 : : occurred_events++;
1550 : : returned_events++;
1551 : : }
1552 : : }
1553 : : else if (cur_event->events & (WL_SOCKET_READABLE |
1554 : : WL_SOCKET_WRITEABLE |
1555 : : WL_SOCKET_CLOSED))
1556 : : {
/* Error conditions are reported in revents regardless of what we requested. */
1557 : : int errflags = POLLHUP | POLLERR | POLLNVAL;
1558 : :
1559 : : Assert(cur_event->fd >= PGINVALID_SOCKET);
1560 : :
1561 : : if ((cur_event->events & WL_SOCKET_READABLE) &&
1562 : : (cur_pollfd->revents & (POLLIN | errflags)))
1563 : : {
1564 : : /* data available in socket, or EOF */
1565 : : occurred_events->events |= WL_SOCKET_READABLE;
1566 : : }
1567 : :
1568 : : if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1569 : : (cur_pollfd->revents & (POLLOUT | errflags)))
1570 : : {
1571 : : /* writeable, or EOF */
1572 : : occurred_events->events |= WL_SOCKET_WRITEABLE;
1573 : : }
1574 : :
1575 : : #ifdef POLLRDHUP
1576 : : if ((cur_event->events & WL_SOCKET_CLOSED) &&
1577 : : (cur_pollfd->revents & (POLLRDHUP | errflags)))
1578 : : {
1579 : : /* remote peer closed, or error */
1580 : : occurred_events->events |= WL_SOCKET_CLOSED;
1581 : : }
1582 : : #endif
1583 : :
1584 : : if (occurred_events->events != 0)
1585 : : {
1586 : : occurred_events->fd = cur_event->fd;
1587 : : occurred_events++;
1588 : : returned_events++;
1589 : : }
1590 : : }
1591 : : }
1592 : : return returned_events;
1593 : : }
1594 : :
1595 : : #elif defined(WAIT_USE_WIN32)
1596 : :
1597 : : /*
1598 : : * Wait using Windows' WaitForMultipleObjects(). Each call only "consumes" one
1599 : : * event, so we keep calling until we've filled up our output buffer to match
1600 : : * the behavior of the other implementations.
1601 : : *
1602 : : * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
1603 : : */
1604 : : static inline int
1605 : : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1606 : : WaitEvent *occurred_events, int nevents)
1607 : : {
1608 : : int returned_events = 0;
1609 : : DWORD rc;
1610 : : WaitEvent *cur_event;
1611 : :
1612 : : /* Reset any wait events that need it */
1613 : : for (cur_event = set->events;
1614 : : cur_event < (set->events + set->nevents);
1615 : : cur_event++)
1616 : : {
1617 : : if (cur_event->reset)
1618 : : {
1619 : : WaitEventAdjustWin32(set, cur_event);
1620 : : cur_event->reset = false;
1621 : : }
1622 : :
1623 : : /*
1624 : : * We associate the socket with a new event handle for each
1625 : : * WaitEventSet. FD_CLOSE is only generated once if the other end
1626 : : * closes gracefully. Therefore we might miss the FD_CLOSE
1627 : : * notification, if it was delivered to another event after we stopped
1628 : : * waiting for it. Close that race by peeking for EOF after setting
1629 : : * up this handle to receive notifications, and before entering the
1630 : : * sleep.
1631 : : *
1632 : : * XXX If we had one event handle for the lifetime of a socket, we
1633 : : * wouldn't need this.
1634 : : */
1635 : : if (cur_event->events & WL_SOCKET_READABLE)
1636 : : {
1637 : : char c;
1638 : : WSABUF buf;
1639 : : DWORD received;
1640 : : DWORD flags;
1641 : :
1642 : : buf.buf = &c;
1643 : : buf.len = 1;
1644 : : flags = MSG_PEEK;
1645 : : if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
1646 : : {
1647 : : occurred_events->pos = cur_event->pos;
1648 : : occurred_events->user_data = cur_event->user_data;
1649 : : occurred_events->events = WL_SOCKET_READABLE;
1650 : : occurred_events->fd = cur_event->fd;
1651 : : return 1;
1652 : : }
1653 : : }
1654 : :
1655 : : /*
1656 : : * Windows does not guarantee to log an FD_WRITE network event
1657 : : * indicating that more data can be sent unless the previous send()
1658 : : * failed with WSAEWOULDBLOCK. While our caller might well have made
1659 : : * such a call, we cannot assume that here. Therefore, if waiting for
1660 : : * write-ready, force the issue by doing a dummy send(). If the dummy
1661 : : * send() succeeds, assume that the socket is in fact write-ready, and
1662 : : * return immediately. Also, if it fails with something other than
1663 : : * WSAEWOULDBLOCK, return a write-ready indication to let our caller
1664 : : * deal with the error condition.
1665 : : */
1666 : : if (cur_event->events & WL_SOCKET_WRITEABLE)
1667 : : {
1668 : : char c;
1669 : : WSABUF buf;
1670 : : DWORD sent;
1671 : : int r;
1672 : :
1673 : : buf.buf = &c;
1674 : : buf.len = 0;
1675 : :
1676 : : r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
1677 : : if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
1678 : : {
1679 : : occurred_events->pos = cur_event->pos;
1680 : : occurred_events->user_data = cur_event->user_data;
1681 : : occurred_events->events = WL_SOCKET_WRITEABLE;
1682 : : occurred_events->fd = cur_event->fd;
1683 : : return 1;
1684 : : }
1685 : : }
1686 : : }
1687 : :
1688 : : /*
1689 : : * Sleep.
1690 : : *
1691 : : * Need to wait for ->nevents + 1, because signal handle is in [0].
1692 : : */
1693 : : rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
1694 : : cur_timeout);
1695 : :
1696 : : /* Check return code */
1697 : : if (rc == WAIT_FAILED)
1698 : : elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
1699 : : GetLastError());
1700 : : else if (rc == WAIT_TIMEOUT)
1701 : : {
1702 : : /* timeout exceeded */
1703 : : return -1;
1704 : : }
1705 : :
1706 : : if (rc == WAIT_OBJECT_0)
1707 : : {
1708 : : /* Service newly-arrived signals */
1709 : : pgwin32_dispatch_queued_signals();
1710 : : return 0; /* retry */
1711 : : }
1712 : :
1713 : : /*
1714 : : * With an offset of one, due to the always present pgwin32_signal_event,
1715 : : * the handle offset directly corresponds to a wait event.
1716 : : */
1717 : : cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
1718 : :
1719 : : for (;;)
1720 : : {
1721 : : int next_pos;
1722 : : int count;
1723 : :
1724 : : occurred_events->pos = cur_event->pos;
1725 : : occurred_events->user_data = cur_event->user_data;
1726 : : occurred_events->events = 0;
1727 : :
1728 : : if (cur_event->events == WL_LATCH_SET)
1729 : : {
1730 : : /*
1731 : : * We cannot use set->latch->event to reset the fired event if we
1732 : : * aren't waiting on this latch now.
1733 : : */
1734 : : if (!ResetEvent(set->handles[cur_event->pos + 1]))
1735 : : elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
1736 : :
1737 : : if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1738 : : {
1739 : : occurred_events->fd = PGINVALID_SOCKET;
1740 : : occurred_events->events = WL_LATCH_SET;
1741 : : occurred_events++;
1742 : : returned_events++;
1743 : : }
1744 : : }
1745 : : else if (cur_event->events == WL_POSTMASTER_DEATH)
1746 : : {
1747 : : /*
1748 : : * Postmaster apparently died. Since the consequences of falsely
1749 : : * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
1750 : : * take the trouble to positively verify this with
1751 : : * PostmasterIsAlive(), even though there is no known reason to
1752 : : * think that the event could be falsely set on Windows.
1753 : : */
1754 : : if (!PostmasterIsAliveInternal())
1755 : : {
1756 : : if (set->exit_on_postmaster_death)
1757 : : proc_exit(1);
1758 : : occurred_events->fd = PGINVALID_SOCKET;
1759 : : occurred_events->events = WL_POSTMASTER_DEATH;
1760 : : occurred_events++;
1761 : : returned_events++;
1762 : : }
1763 : : }
1764 : : else if (cur_event->events & WL_SOCKET_MASK)
1765 : : {
1766 : : WSANETWORKEVENTS resEvents;
1767 : : HANDLE handle = set->handles[cur_event->pos + 1];
1768 : :
1769 : : Assert(cur_event->fd);
1770 : :
1771 : : occurred_events->fd = cur_event->fd;
1772 : :
1773 : : ZeroMemory(&resEvents, sizeof(resEvents));
1774 : : if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
1775 : : elog(ERROR, "failed to enumerate network events: error code %d",
1776 : : WSAGetLastError());
1777 : : if ((cur_event->events & WL_SOCKET_READABLE) &&
1778 : : (resEvents.lNetworkEvents & FD_READ))
1779 : : {
1780 : : /* data available in socket */
1781 : : occurred_events->events |= WL_SOCKET_READABLE;
1782 : :
1783 : : /*------
1784 : : * WaitForMultipleObjects doesn't guarantee that a read event
1785 : : * will be returned if the latch is set at the same time. Even
1786 : : * if it did, the caller might drop that event expecting it to
1787 : : * reoccur on next call. So, we must force the event to be
1788 : : * reset if this WaitEventSet is used again in order to avoid
1789 : : * an indefinite hang.
1790 : : *
1791 : : * Refer
1792 : : * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
1793 : : * for the behavior of socket events.
1794 : : *------
1795 : : */
1796 : : cur_event->reset = true;
1797 : : }
1798 : : if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1799 : : (resEvents.lNetworkEvents & FD_WRITE))
1800 : : {
1801 : : /* writeable */
1802 : : occurred_events->events |= WL_SOCKET_WRITEABLE;
1803 : : }
1804 : : if ((cur_event->events & WL_SOCKET_CONNECTED) &&
1805 : : (resEvents.lNetworkEvents & FD_CONNECT))
1806 : : {
1807 : : /* connected */
1808 : : occurred_events->events |= WL_SOCKET_CONNECTED;
1809 : : }
1810 : : if ((cur_event->events & WL_SOCKET_ACCEPT) &&
1811 : : (resEvents.lNetworkEvents & FD_ACCEPT))
1812 : : {
1813 : : /* incoming connection could be accepted */
1814 : : occurred_events->events |= WL_SOCKET_ACCEPT;
1815 : : }
1816 : : if (resEvents.lNetworkEvents & FD_CLOSE)
1817 : : {
1818 : : /* EOF/error, so signal all caller-requested socket flags */
1819 : : occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
1820 : : }
1821 : :
1822 : : if (occurred_events->events != 0)
1823 : : {
1824 : : occurred_events++;
1825 : : returned_events++;
1826 : : }
1827 : : }
1828 : :
1829 : : /* Is the output buffer full? */
1830 : : if (returned_events == nevents)
1831 : : break;
1832 : :
1833 : : /* Have we run out of possible events? */
1834 : : next_pos = cur_event->pos + 1;
1835 : : if (next_pos == set->nevents)
1836 : : break;
1837 : :
1838 : : /*
1839 : : * Poll the rest of the event handles in the array starting at
1840 : : * next_pos being careful to skip over the initial signal handle too.
1841 : : * This time we use a zero timeout.
1842 : : */
1843 : : count = set->nevents - next_pos;
1844 : : rc = WaitForMultipleObjects(count,
1845 : : set->handles + 1 + next_pos,
1846 : : false,
1847 : : 0);
1848 : :
1849 : : /*
1850 : : * We don't distinguish between errors and WAIT_TIMEOUT here because
1851 : : * we already have events to report.
1852 : : */
1853 : : if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
1854 : : break;
1855 : :
1856 : : /* We have another event to decode. */
1857 : : cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
1858 : : }
1859 : :
1860 : : return returned_events;
1861 : : }
1862 : : #endif
1863 : :
/*
 * Return whether the current build options can report WL_SOCKET_CLOSED.
 */
bool
WaitEventSetCanReportClosed(void)
{
	/*
	 * Detecting a remotely-closed socket requires epoll, kqueue, or a poll
	 * implementation that provides the POLLRDHUP extension; the remaining
	 * wait implementations have no way to observe it.
	 */
#if defined(WAIT_USE_EPOLL) || \
	defined(WAIT_USE_KQUEUE) || \
	(defined(WAIT_USE_POLL) && defined(POLLRDHUP))
	return true;
#else
	return false;
#endif
}
1878 : :
1879 : : /*
1880 : : * Get the number of wait events registered in a given WaitEventSet.
1881 : : */
1882 : : int
1883 : 347 : GetNumRegisteredWaitEvents(WaitEventSet *set)
1884 : : {
1885 : 347 : return set->nevents;
1886 : : }
1887 : :
1888 : : #if defined(WAIT_USE_SELF_PIPE)
1889 : :
1890 : : /*
1891 : : * SetLatch uses SIGURG to wake up the process waiting on the latch.
1892 : : *
1893 : : * Wake up WaitLatch, if we're waiting.
1894 : : */
1895 : : static void
1896 : : latch_sigurg_handler(SIGNAL_ARGS)
1897 : : {
1898 : : if (waiting)
1899 : : sendSelfPipeByte();
1900 : : }
1901 : :
1902 : : /* Send one byte to the self-pipe, to wake up WaitLatch */
1903 : : static void
1904 : : sendSelfPipeByte(void)
1905 : : {
1906 : : int rc;
1907 : : char dummy = 0;
1908 : :
1909 : : retry:
1910 : : rc = write(selfpipe_writefd, &dummy, 1);
1911 : : if (rc < 0)
1912 : : {
1913 : : /* If interrupted by signal, just retry */
1914 : : if (errno == EINTR)
1915 : : goto retry;
1916 : :
1917 : : /*
1918 : : * If the pipe is full, we don't need to retry, the data that's there
1919 : : * already is enough to wake up WaitLatch.
1920 : : */
1921 : : if (errno == EAGAIN || errno == EWOULDBLOCK)
1922 : : return;
1923 : :
1924 : : /*
1925 : : * Oops, the write() failed for some other reason. We might be in a
1926 : : * signal handler, so it's not safe to elog(). We have no choice but
1927 : : * silently ignore the error.
1928 : : */
1929 : : return;
1930 : : }
1931 : : }
1932 : :
1933 : : #endif
1934 : :
1935 : : #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
1936 : :
1937 : : /*
1938 : : * Read all available data from self-pipe or signalfd.
1939 : : *
1940 : : * Note: this is only called when waiting = true. If it fails and doesn't
1941 : : * return, it must reset that flag first (though ideally, this will never
1942 : : * happen).
1943 : : */
1944 : : static void
1945 : 1023564 : drain(void)
1946 : : {
1947 : : char buf[1024];
1948 : : int rc;
1949 : : int fd;
1950 : :
1951 : : #ifdef WAIT_USE_SELF_PIPE
1952 : : fd = selfpipe_readfd;
1953 : : #else
1954 : 1023564 : fd = signal_fd;
1955 : : #endif
1956 : :
1957 : : for (;;)
1958 : : {
1959 : 1023564 : rc = read(fd, buf, sizeof(buf));
1960 [ - + ]: 1023564 : if (rc < 0)
1961 : : {
235 heikki.linnakangas@i 1962 [ # # # # ]:UBC 0 : if (errno == EAGAIN || errno == EWOULDBLOCK)
1963 : : break; /* the descriptor is empty */
1964 [ # # ]: 0 : else if (errno == EINTR)
1965 : 0 : continue; /* retry */
1966 : : else
1967 : : {
1968 : 0 : waiting = false;
1969 : : #ifdef WAIT_USE_SELF_PIPE
1970 : : elog(ERROR, "read() on self-pipe failed: %m");
1971 : : #else
1972 [ # # ]: 0 : elog(ERROR, "read() on signalfd failed: %m");
1973 : : #endif
1974 : : }
1975 : : }
235 heikki.linnakangas@i 1976 [ - + ]:CBC 1023564 : else if (rc == 0)
1977 : : {
235 heikki.linnakangas@i 1978 :UBC 0 : waiting = false;
1979 : : #ifdef WAIT_USE_SELF_PIPE
1980 : : elog(ERROR, "unexpected EOF on self-pipe");
1981 : : #else
1982 [ # # ]: 0 : elog(ERROR, "unexpected EOF on signalfd");
1983 : : #endif
1984 : : }
235 heikki.linnakangas@i 1985 [ + - ]:CBC 1023564 : else if (rc < sizeof(buf))
1986 : : {
1987 : : /* we successfully drained the pipe; no need to read() again */
1988 : 1023564 : break;
1989 : : }
1990 : : /* else buffer wasn't big enough, so read again */
1991 : : }
1992 : 1023564 : }
1993 : :
1994 : : #endif
1995 : :
/*
 * ResourceOwner callback to release a WaitEventSet.
 *
 * Detaches the set from its owner and frees it.  NOTE(review): owner is
 * cleared before calling FreeWaitEventSet(), presumably so the free path
 * doesn't try to disassociate the set from the resource owner again —
 * confirm against FreeWaitEventSet's implementation.
 */
static void
ResOwnerReleaseWaitEventSet(Datum res)
{
	WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);

	/* Only sets currently registered with an owner should get here. */
	Assert(set->owner != NULL);
	set->owner = NULL;
	FreeWaitEventSet(set);
}
2005 : :
2006 : : #ifndef WIN32
2007 : : /*
2008 : : * Wake up my process if it's currently sleeping in WaitEventSetWaitBlock()
2009 : : *
2010 : : * NB: be sure to save and restore errno around it. (That's standard practice
2011 : : * in most signal handlers, of course, but we used to omit it in handlers that
2012 : : * only set a flag.) XXX
2013 : : *
2014 : : * NB: this function is called from critical sections and signal handlers so
2015 : : * throwing an error is not a good idea.
2016 : : *
2017 : : * On Windows, Latch uses SetEvent directly and this is not used.
2018 : : */
2019 : : void
2020 : 35250 : WakeupMyProc(void)
2021 : : {
2022 : : #if defined(WAIT_USE_SELF_PIPE)
2023 : : if (waiting)
2024 : : sendSelfPipeByte();
2025 : : #else
2026 [ + - ]: 35250 : if (waiting)
2027 : 35250 : kill(MyProcPid, SIGURG);
2028 : : #endif
2029 : 35250 : }
2030 : :
/* Similar to WakeupMyProc, but wake up another process */
void
WakeupOtherProc(int pid)
{
	/*
	 * Fire-and-forget: the return value is deliberately ignored, since
	 * there's nothing useful to do if the target process is already gone.
	 */
	(void) kill(pid, SIGURG);
}
2037 : : #endif
|