diff --git a/configure b/configure
index 9c946381f6b..99a9961f025 100755
--- a/configure
+++ b/configure
@@ -15802,6 +15802,18 @@ if test "x$ac_cv_func_CRYPTO_lock" = xyes; then :
#define HAVE_CRYPTO_LOCK 1
_ACEOF
+fi
+done
+
+ # Function introduced in OpenSSL 1.1.1.
+ for ac_func in X509_get_signature_info
+do :
+ ac_fn_c_check_func "$LINENO" "X509_get_signature_info" "ac_cv_func_X509_get_signature_info"
+if test "x$ac_cv_func_X509_get_signature_info" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_X509_GET_SIGNATURE_INFO 1
+_ACEOF
+
fi
done
@@ -16895,7 +16907,7 @@ $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
fi
-for ac_header in atomic.h copyfile.h execinfo.h getopt.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/event.h sys/ipc.h sys/prctl.h sys/procctl.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/signalfd.h sys/sockio.h sys/tas.h sys/uio.h sys/un.h termios.h ucred.h wctype.h
+for ac_header in atomic.h copyfile.h execinfo.h getopt.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/event.h sys/ipc.h sys/personality.h sys/prctl.h sys/procctl.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/signalfd.h sys/sockio.h sys/tas.h sys/uio.h sys/un.h termios.h ucred.h wctype.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
diff --git a/configure.ac b/configure.ac
index bcc5cf6c6ab..a50ca5710c4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1741,6 +1741,8 @@ if test "$with_ssl" = openssl ; then
# thread-safety. In 1.1.0, it's no longer required, and CRYPTO_lock()
# function was removed.
AC_CHECK_FUNCS([CRYPTO_lock])
+ # Function introduced in OpenSSL 1.1.1.
+ AC_CHECK_FUNCS([X509_get_signature_info])
AC_DEFINE([USE_OPENSSL], 1, [Define to 1 to build with OpenSSL support. (--with-ssl=openssl)])
elif test "$with_ssl" != no ; then
AC_MSG_ERROR([--with-ssl must specify openssl])
@@ -1904,6 +1906,7 @@ AC_CHECK_HEADERS(m4_normalize([
sys/epoll.h
sys/event.h
sys/ipc.h
+ sys/personality.h
sys/prctl.h
sys/procctl.h
sys/pstat.h
diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c
index 9abeca607d7..f7964b78173 100644
--- a/contrib/amcheck/verify_heapam.c
+++ b/contrib/amcheck/verify_heapam.c
@@ -1577,14 +1577,40 @@ check_tuple(HeapCheckContext *ctx)
static FullTransactionId
FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
{
- uint32 epoch;
+ uint64 nextfxid_i;
+ int32 diff;
+ FullTransactionId fxid;
+
+ Assert(TransactionIdIsNormal(ctx->next_xid));
+ Assert(FullTransactionIdIsNormal(ctx->next_fxid));
+ Assert(XidFromFullTransactionId(ctx->next_fxid) == ctx->next_xid);
if (!TransactionIdIsNormal(xid))
return FullTransactionIdFromEpochAndXid(0, xid);
- epoch = EpochFromFullTransactionId(ctx->next_fxid);
- if (xid > ctx->next_xid)
- epoch--;
- return FullTransactionIdFromEpochAndXid(epoch, xid);
+
+ nextfxid_i = U64FromFullTransactionId(ctx->next_fxid);
+
+ /* compute the 32bit modulo difference */
+ diff = (int32) (ctx->next_xid - xid);
+
+ /*
+ * In cases of corruption we might see a 32bit xid that is before epoch
+ * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+ * unsigned integers, without the modulo arithmetic of 32bit xid. There's
+ * no really nice way to deal with that, but it works ok enough to use
+ * FirstNormalFullTransactionId in that case, as a freshly initdb'd
+ * cluster already has a newer horizon.
+ */
+ if (diff > 0 && (nextfxid_i - FirstNormalTransactionId) < (int64) diff)
+ {
+ Assert(EpochFromFullTransactionId(ctx->next_fxid) == 0);
+ fxid = FirstNormalFullTransactionId;
+ }
+ else
+ fxid = FullTransactionIdFromU64(nextfxid_i - diff);
+
+ Assert(FullTransactionIdIsNormal(fxid));
+ return fxid;
}
/*
@@ -1600,8 +1626,8 @@ update_cached_xid_range(HeapCheckContext *ctx)
LWLockRelease(XidGenLock);
/* And compute alternate versions of the same */
- ctx->oldest_fxid = FullTransactionIdFromXidAndCtx(ctx->oldest_xid, ctx);
ctx->next_xid = XidFromFullTransactionId(ctx->next_fxid);
+ ctx->oldest_fxid = FullTransactionIdFromXidAndCtx(ctx->oldest_xid, ctx);
}
/*
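
The hunk above widens a possibly-wrapped 32-bit xid into a 64-bit xid relative to the cluster's next xid, clamping anything that would land before epoch 0. A minimal standalone sketch of the same arithmetic, using plain integer types instead of TransactionId/FullTransactionId and a hypothetical widen_xid() helper:

#include <assert.h>
#include <stdint.h>

#define FIRST_NORMAL_XID   ((uint64_t) 3)   /* FirstNormalTransactionId */

static uint64_t
widen_xid(uint32_t xid, uint32_t next_xid, uint64_t next_fxid)
{
    /* wraparound-aware 32-bit distance from xid back to next_xid */
    int32_t     diff = (int32_t) (next_xid - xid);

    assert((uint32_t) next_fxid == next_xid);

    /* xid would land before epoch 0: clamp to the first normal 64-bit xid */
    if (diff > 0 && next_fxid - FIRST_NORMAL_XID < (uint64_t) diff)
        return FIRST_NORMAL_XID;

    /* modular arithmetic makes this correct even when diff is negative */
    return next_fxid - (uint64_t) (int64_t) diff;
}
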
diff --git a/contrib/pageinspect/expected/brin.out b/contrib/pageinspect/expected/brin.out
index d19cdc3b957..e12fbeb4774 100644
--- a/contrib/pageinspect/expected/brin.out
+++ b/contrib/pageinspect/expected/brin.out
@@ -48,12 +48,14 @@ SELECT * FROM brin_page_items(get_raw_page('test1_a_idx', 2), 'test1_a_idx')
1 | 0 | 1 | f | f | f | {1 .. 1}
(1 row)
--- Failure for non-BRIN index.
+-- Mask DETAIL messages as these are not portable across architectures.
+\set VERBOSITY terse
+-- Failures for non-BRIN index.
CREATE INDEX test1_a_btree ON test1 (a);
SELECT brin_page_items(get_raw_page('test1_a_btree', 0), 'test1_a_btree');
ERROR: "test1_a_btree" is not a BRIN index
--- Mask DETAIL messages as these are not portable across architectures.
-\set VERBOSITY terse
+SELECT brin_page_items(get_raw_page('test1_a_btree', 0), 'test1_a_idx');
+ERROR: input page is not a valid BRIN page
-- Invalid special area size
SELECT brin_page_type(get_raw_page('test1', 0));
ERROR: input page is not a valid BRIN page
diff --git a/contrib/pageinspect/expected/gist.out b/contrib/pageinspect/expected/gist.out
index eec1fd91cb9..cae739219bd 100644
--- a/contrib/pageinspect/expected/gist.out
+++ b/contrib/pageinspect/expected/gist.out
@@ -56,14 +56,16 @@ SELECT itemoffset, ctid, itemlen FROM gist_page_items_bytea(get_raw_page('test_g
2 | (2,65535) | 40
(2 rows)
--- Failure with non-GiST index.
+-- Suppress the DETAIL message, to allow the tests to work across various
+-- page sizes and architectures.
+\set VERBOSITY terse
+-- Failures with non-GiST index.
CREATE INDEX test_gist_btree on test_gist(t);
SELECT gist_page_items(get_raw_page('test_gist_btree', 0), 'test_gist_btree');
ERROR: "test_gist_btree" is not a GiST index
+SELECT gist_page_items(get_raw_page('test_gist_btree', 0), 'test_gist_idx');
+ERROR: input page is not a valid GiST page
-- Failure with various modes.
--- Suppress the DETAIL message, to allow the tests to work across various
--- page sizes and architectures.
-\set VERBOSITY terse
-- invalid page size
SELECT gist_page_items_bytea('aaa'::bytea);
ERROR: invalid page size
diff --git a/contrib/pageinspect/gistfuncs.c b/contrib/pageinspect/gistfuncs.c
index d1c3c321f83..0ae8f7459c1 100644
--- a/contrib/pageinspect/gistfuncs.c
+++ b/contrib/pageinspect/gistfuncs.c
@@ -34,29 +34,20 @@ PG_FUNCTION_INFO_V1(gist_page_items_bytea);
#define ItemPointerGetDatum(X) PointerGetDatum(X)
-Datum
-gist_page_opaque_info(PG_FUNCTION_ARGS)
+static Page verify_gist_page(bytea *raw_page);
+
+/*
+ * Verify that the given bytea contains a GIST page or die in the attempt.
+ * A pointer to the page is returned.
+ */
+static Page
+verify_gist_page(bytea *raw_page)
{
- bytea *raw_page = PG_GETARG_BYTEA_P(0);
- TupleDesc tupdesc;
- Page page;
+ Page page = get_page_from_raw(raw_page);
GISTPageOpaque opaq;
- HeapTuple resultTuple;
- Datum values[4];
- bool nulls[4];
- Datum flags[16];
- int nflags = 0;
- uint16 flagbits;
-
- if (!superuser())
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use raw page functions")));
-
- page = get_page_from_raw(raw_page);
if (PageIsNew(page))
- PG_RETURN_NULL();
+ return page;
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
@@ -76,12 +67,38 @@ gist_page_opaque_info(PG_FUNCTION_ARGS)
GIST_PAGE_ID,
opaq->gist_page_id)));
+ return page;
+}
+
+Datum
+gist_page_opaque_info(PG_FUNCTION_ARGS)
+{
+ bytea *raw_page = PG_GETARG_BYTEA_P(0);
+ TupleDesc tupdesc;
+ Page page;
+ HeapTuple resultTuple;
+ Datum values[4];
+ bool nulls[4];
+ Datum flags[16];
+ int nflags = 0;
+ uint16 flagbits;
+
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to use raw page functions")));
+
+ page = verify_gist_page(raw_page);
+
+ if (PageIsNew(page))
+ PG_RETURN_NULL();
+
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
/* Convert the flags bitmask to an array of human-readable names */
- flagbits = opaq->flags;
+ flagbits = GistPageGetOpaque(page)->flags;
if (flagbits & F_LEAF)
flags[nflags++] = CStringGetTextDatum("leaf");
if (flagbits & F_DELETED)
@@ -103,7 +120,7 @@ gist_page_opaque_info(PG_FUNCTION_ARGS)
values[0] = LSNGetDatum(PageGetLSN(page));
values[1] = LSNGetDatum(GistPageGetNSN(page));
- values[2] = Int64GetDatum(opaq->rightlink);
+ values[2] = Int64GetDatum(GistPageGetOpaque(page)->rightlink);
values[3] = PointerGetDatum(construct_array(flags, nflags,
TEXTOID,
-1, false, TYPALIGN_INT));
@@ -124,7 +141,6 @@ gist_page_items_bytea(PG_FUNCTION_ARGS)
Tuplestorestate *tupstore;
MemoryContext oldcontext;
Page page;
- GISTPageOpaque opaq;
OffsetNumber offset;
OffsetNumber maxoff = InvalidOffsetNumber;
@@ -157,29 +173,11 @@ gist_page_items_bytea(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext);
- page = get_page_from_raw(raw_page);
+ page = verify_gist_page(raw_page);
if (PageIsNew(page))
PG_RETURN_NULL();
- /* verify the special space has the expected size */
- if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
- (int) PageGetSpecialSize(page))));
-
- opaq = (GISTPageOpaque) PageGetSpecialPointer(page);
- if (opaq->gist_page_id != GIST_PAGE_ID)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected %08x, got %08x.",
- GIST_PAGE_ID,
- opaq->gist_page_id)));
-
/* Avoid bogus PageGetMaxOffsetNumber() call with deleted pages */
if (GistPageIsDeleted(page))
elog(NOTICE, "page is deleted");
@@ -276,7 +274,7 @@ gist_page_items(PG_FUNCTION_ARGS)
errmsg("\"%s\" is not a %s index",
RelationGetRelationName(indexRel), "GiST")));
- page = get_page_from_raw(raw_page);
+ page = verify_gist_page(raw_page);
if (PageIsNew(page))
{
diff --git a/contrib/pageinspect/sql/brin.sql b/contrib/pageinspect/sql/brin.sql
index 45098c1ef5e..96b4645187e 100644
--- a/contrib/pageinspect/sql/brin.sql
+++ b/contrib/pageinspect/sql/brin.sql
@@ -15,12 +15,14 @@ SELECT * FROM brin_revmap_data(get_raw_page('test1_a_idx', 1)) LIMIT 5;
SELECT * FROM brin_page_items(get_raw_page('test1_a_idx', 2), 'test1_a_idx')
ORDER BY blknum, attnum LIMIT 5;
--- Failure for non-BRIN index.
+-- Mask DETAIL messages as these are not portable across architectures.
+\set VERBOSITY terse
+
+-- Failures for non-BRIN index.
CREATE INDEX test1_a_btree ON test1 (a);
SELECT brin_page_items(get_raw_page('test1_a_btree', 0), 'test1_a_btree');
+SELECT brin_page_items(get_raw_page('test1_a_btree', 0), 'test1_a_idx');
--- Mask DETAIL messages as these are not portable across architectures.
-\set VERBOSITY terse
-- Invalid special area size
SELECT brin_page_type(get_raw_page('test1', 0));
SELECT * FROM brin_metapage_info(get_raw_page('test1', 0));
diff --git a/contrib/pageinspect/sql/gist.sql b/contrib/pageinspect/sql/gist.sql
index ee46e09053e..963d5d40a3c 100644
--- a/contrib/pageinspect/sql/gist.sql
+++ b/contrib/pageinspect/sql/gist.sql
@@ -26,14 +26,16 @@ SELECT * FROM gist_page_items(get_raw_page('test_gist_idx', 1), 'test_gist_idx')
-- platform-dependent (endianess), so omit the actual key data from the output.
SELECT itemoffset, ctid, itemlen FROM gist_page_items_bytea(get_raw_page('test_gist_idx', 0));
--- Failure with non-GiST index.
+-- Suppress the DETAIL message, to allow the tests to work across various
+-- page sizes and architectures.
+\set VERBOSITY terse
+
+-- Failures with non-GiST index.
CREATE INDEX test_gist_btree on test_gist(t);
SELECT gist_page_items(get_raw_page('test_gist_btree', 0), 'test_gist_btree');
+SELECT gist_page_items(get_raw_page('test_gist_btree', 0), 'test_gist_idx');
-- Failure with various modes.
--- Suppress the DETAIL message, to allow the tests to work across various
--- page sizes and architectures.
-\set VERBOSITY terse
-- invalid page size
SELECT gist_page_items_bytea('aaa'::bytea);
SELECT gist_page_items('aaa'::bytea, 'test_gist_idx'::regclass);
diff --git a/contrib/pax_storage/src/test/regress/expected/alter_table.out b/contrib/pax_storage/src/test/regress/expected/alter_table.out
index fdc79c03c82..8d90e20a98d 100644
--- a/contrib/pax_storage/src/test/regress/expected/alter_table.out
+++ b/contrib/pax_storage/src/test/regress/expected/alter_table.out
@@ -2628,20 +2628,20 @@ View definition:
FROM at_view_1 v1;
explain (verbose, costs off) select * from at_view_2;
- QUERY PLAN
-----------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- Output: bt.id, bt.stuff, (to_json(ROW(bt.id, bt.stuff, NULL)))
+ Output: bt.id, bt.stuff, (to_json(ROW(bt.id, bt.stuff, 4)))
-> Seq Scan on public.at_base_table bt
- Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, NULL))
+ Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, 4))
Optimizer: Postgres query optimizer
Settings: constraint_exclusion=partition
(6 rows)
select * from at_view_2;
- id | stuff | j
-----+--------+----------------------------------------
- 23 | skidoo | {"id":23,"stuff":"skidoo","more":null}
+ id | stuff | j
+----+--------+-------------------------------------
+ 23 | skidoo | {"id":23,"stuff":"skidoo","more":4}
(1 row)
drop view at_view_2;
diff --git a/contrib/pg_trgm/expected/pg_word_trgm.out b/contrib/pg_trgm/expected/pg_word_trgm.out
index 9f0ca502a6d..4c6b49934b6 100644
--- a/contrib/pg_trgm/expected/pg_word_trgm.out
+++ b/contrib/pg_trgm/expected/pg_word_trgm.out
@@ -1048,3 +1048,9 @@ select t,word_similarity('Kabankala',t) as sml from test_trgm2 where t %> 'Kaban
Waikala | 0.3
(89 rows)
+-- test unsatisfiable pattern
+select * from test_trgm2 where t ~ '.*$x';
+ t
+---
+(0 rows)
+
diff --git a/contrib/pg_trgm/sql/pg_word_trgm.sql b/contrib/pg_trgm/sql/pg_word_trgm.sql
index d9fa1c55e5e..d2ada49133a 100644
--- a/contrib/pg_trgm/sql/pg_word_trgm.sql
+++ b/contrib/pg_trgm/sql/pg_word_trgm.sql
@@ -43,3 +43,6 @@ select t,word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <% t
select t,word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <% t order by sml desc, t;
select t,word_similarity('Baykal',t) as sml from test_trgm2 where t %> 'Baykal' order by sml desc, t;
select t,word_similarity('Kabankala',t) as sml from test_trgm2 where t %> 'Kabankala' order by sml desc, t;
+
+-- test unsatisfiable pattern
+select * from test_trgm2 where t ~ '.*$x';
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index 71e4ebee4e9..3485a725cde 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -1944,9 +1944,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
arcsCount;
HASH_SEQ_STATUS scan_status;
TrgmState *state;
- TrgmPackArcInfo *arcs,
- *p1,
- *p2;
+ TrgmPackArcInfo *arcs;
TrgmPackedArc *packedArcs;
TrgmPackedGraph *result;
int i,
@@ -2018,17 +2016,25 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
qsort(arcs, arcIndex, sizeof(TrgmPackArcInfo), packArcInfoCmp);
/* We could have duplicates because states were merged. Remove them. */
- /* p1 is probe point, p2 is last known non-duplicate. */
- p2 = arcs;
- for (p1 = arcs + 1; p1 < arcs + arcIndex; p1++)
+ if (arcIndex > 1)
{
- if (packArcInfoCmp(p1, p2) > 0)
+ /* p1 is probe point, p2 is last known non-duplicate. */
+ TrgmPackArcInfo *p1,
+ *p2;
+
+ p2 = arcs;
+ for (p1 = arcs + 1; p1 < arcs + arcIndex; p1++)
{
- p2++;
- *p2 = *p1;
+ if (packArcInfoCmp(p1, p2) > 0)
+ {
+ p2++;
+ *p2 = *p1;
+ }
}
+ arcsCount = (p2 - arcs) + 1;
}
- arcsCount = (p2 - arcs) + 1;
+ else
+ arcsCount = arcIndex;
/* Create packed representation */
result = (TrgmPackedGraph *)
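
For reference, a self-contained sketch of the same in-place deduplication of a sorted array, including the new guard on the element count; the element type and comparator are stand-ins, not the pg_trgm structures:

#include <stddef.h>

/* Remove adjacent duplicates from a sorted array, returning the new length.
 * The n > 1 guard mirrors the fix above: with an empty input, the old code
 * would have reported one element. */
static size_t
dedup_sorted(int *arr, size_t n, int (*cmp) (const int *, const int *))
{
    if (n > 1)
    {
        int        *last = arr;             /* last known non-duplicate */

        for (int *probe = arr + 1; probe < arr + n; probe++)
        {
            if (cmp(probe, last) > 0)       /* strictly greater: keep it */
                *(++last) = *probe;
        }
        n = (size_t) (last - arr) + 1;
    }
    return n;
}
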
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 67fde96a858..10700d6fd4a 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -9730,11 +9730,6 @@ WARNING: there is no transaction in progress
-- Change application_name of remote connection to special one
-- so that we can easily terminate the connection later.
ALTER SERVER loopback OPTIONS (application_name 'fdw_retry_check');
--- If debug_discard_caches is active, it results in
--- dropping remote connections after every transaction, making it
--- impossible to test termination meaningfully. So turn that off
--- for this test.
-SET debug_discard_caches = 0;
-- Make sure we have a remote connection.
SELECT 1 FROM ft1 LIMIT 1;
?column?
@@ -9743,13 +9738,12 @@ SELECT 1 FROM ft1 LIMIT 1;
(1 row)
-- Terminate the remote connection and wait for the termination to complete.
-SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+-- (If a cache flush happens, the remote connection might have already been
+-- dropped; so code this step in a way that doesn't fail if no connection.)
+DO $$ BEGIN
+PERFORM pg_terminate_backend(pid, 180000) FROM pg_stat_activity
WHERE application_name = 'fdw_retry_check';
- pg_terminate_backend
-----------------------
- t
-(1 row)
-
+END $$;
-- This query should detect the broken connection when starting new remote
-- transaction, reestablish new connection, and then succeed.
BEGIN;
@@ -9762,13 +9756,10 @@ SELECT 1 FROM ft1 LIMIT 1;
-- If we detect the broken connection when starting a new remote
-- subtransaction, we should fail instead of establishing a new connection.
-- Terminate the remote connection and wait for the termination to complete.
-SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+DO $$ BEGIN
+PERFORM pg_terminate_backend(pid, 180000) FROM pg_stat_activity
WHERE application_name = 'fdw_retry_check';
- pg_terminate_backend
-----------------------
- t
-(1 row)
-
+END $$;
SAVEPOINT s;
-- The text of the error might vary across platforms, so only show SQLSTATE.
\set VERBOSITY sqlstate
@@ -9776,7 +9767,6 @@ SELECT 1 FROM ft1 LIMIT 1; -- should fail
ERROR: 08006
\set VERBOSITY default
COMMIT;
-RESET debug_discard_caches;
-- =============================================================================
-- test connection invalidation cases and postgres_fdw_get_connections function
-- =============================================================================
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index f8c813d2175..793dd64811d 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -2912,18 +2912,16 @@ ROLLBACK;
-- so that we can easily terminate the connection later.
ALTER SERVER loopback OPTIONS (application_name 'fdw_retry_check');
--- If debug_discard_caches is active, it results in
--- dropping remote connections after every transaction, making it
--- impossible to test termination meaningfully. So turn that off
--- for this test.
-SET debug_discard_caches = 0;
-
-- Make sure we have a remote connection.
SELECT 1 FROM ft1 LIMIT 1;
-- Terminate the remote connection and wait for the termination to complete.
-SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+-- (If a cache flush happens, the remote connection might have already been
+-- dropped; so code this step in a way that doesn't fail if no connection.)
+DO $$ BEGIN
+PERFORM pg_terminate_backend(pid, 180000) FROM pg_stat_activity
WHERE application_name = 'fdw_retry_check';
+END $$;
-- This query should detect the broken connection when starting new remote
-- transaction, reestablish new connection, and then succeed.
@@ -2933,8 +2931,10 @@ SELECT 1 FROM ft1 LIMIT 1;
-- If we detect the broken connection when starting a new remote
-- subtransaction, we should fail instead of establishing a new connection.
-- Terminate the remote connection and wait for the termination to complete.
-SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+DO $$ BEGIN
+PERFORM pg_terminate_backend(pid, 180000) FROM pg_stat_activity
WHERE application_name = 'fdw_retry_check';
+END $$;
SAVEPOINT s;
-- The text of the error might vary across platforms, so only show SQLSTATE.
\set VERBOSITY sqlstate
@@ -2942,8 +2942,6 @@ SELECT 1 FROM ft1 LIMIT 1; -- should fail
\set VERBOSITY default
COMMIT;
-RESET debug_discard_caches;
-
-- =============================================================================
-- test connection invalidation cases and postgres_fdw_get_connections function
-- =============================================================================
diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile
index d10f9e5b398..58ae606bccf 100644
--- a/doc/src/sgml/Makefile
+++ b/doc/src/sgml/Makefile
@@ -44,11 +44,15 @@ endif
XMLINCLUDE = --path .
-ifndef XMLLINT
+ifdef XMLLINT
+XMLLINT := $(XMLLINT) --nonet
+else
XMLLINT = $(missing) xmllint
endif
-ifndef XSLTPROC
+ifdef XSLTPROC
+XSLTPROC := $(XSLTPROC) --nonet
+else
XSLTPROC = $(missing) xsltproc
endif
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml
index 0e89b768c5d..692b6fe2c43 100644
--- a/doc/src/sgml/datatype.sgml
+++ b/doc/src/sgml/datatype.sgml
@@ -1438,7 +1438,12 @@ SELECT b, char_length(b) FROM test2;
Example:
-SELECT '\xDEADBEEF';
+SET bytea_output = 'hex';
+
+SELECT '\xDEADBEEF'::bytea;
+ bytea
+------------
+ \xdeadbeef
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index e49d19716c1..78f4448a5bc 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -3951,9 +3951,15 @@ CREATE TABLE measurement_y2008m02 PARTITION OF measurement
As an alternative, it is sometimes more convenient to create the
- new table outside the partition structure, and make it a proper
+ new table outside the partition structure, and attach it as a
partition later. This allows new data to be loaded, checked, and
transformed prior to it appearing in the partitioned table.
+ Moreover, the ATTACH PARTITION operation requires
+ only SHARE UPDATE EXCLUSIVE lock on the
+ partitioned table, as opposed to the ACCESS
+ EXCLUSIVE lock that is required by CREATE TABLE
+ ... PARTITION OF, so it is more friendly to concurrent
+ operations on the partitioned table.
The CREATE TABLE ... LIKE option is helpful
to avoid tediously repeating the parent table's definition:
@@ -3973,11 +3979,6 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
-
- The ATTACH PARTITION command requires taking a
- SHARE UPDATE EXCLUSIVE lock on the partitioned table.
-
-
Before running the ATTACH PARTITION command, it is
recommended to create a CHECK constraint on the table to
diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml
index e1bac68604f..55ef6417749 100644
--- a/doc/src/sgml/docguide.sgml
+++ b/doc/src/sgml/docguide.sgml
@@ -136,6 +136,7 @@
This is a program for converting, among other things, XML to PDF.
+ It is needed only if you want to build the documentation in PDF format.
@@ -151,25 +152,13 @@
here.
-
- You can get away with not installing DocBook XML and the DocBook XSLT
- stylesheets locally, because the required files will be downloaded from the
- Internet and cached locally. This may in fact be the preferred solution if
- your operating system packages provide only an old version of these files,
- or if no packages are available at all.
- If you want to prevent any attempt to access the Internet while building
- the documentation, you need to pass the option --nonet
- to xmllint and xsltproc; see below
- for an example.
-
-
Installation on Fedora, RHEL, and Derivatives
To install the required packages, use:
-yum install docbook-dtds docbook-style-xsl fop libxslt
+yum install docbook-dtds docbook-style-xsl libxslt fop
@@ -180,7 +169,7 @@ yum install docbook-dtds docbook-style-xsl fop libxslt
To install the required packages with pkg, use:
-pkg install docbook-xml docbook-xsl fop libxslt
+pkg install docbook-xml docbook-xsl libxslt fop
@@ -199,7 +188,7 @@ pkg install docbook-xml docbook-xsl fop libxslt
available for Debian GNU/Linux.
To install, simply use:
-apt-get install docbook-xml docbook-xsl fop libxml2-utils xsltproc
+apt-get install docbook-xml docbook-xsl libxml2-utils xsltproc fop
@@ -208,21 +197,37 @@ apt-get install docbook-xml docbook-xsl fop libxml2-utils xsltproc
macOS
- On macOS, you can build the HTML and man documentation without installing
- anything extra. If you want to build PDFs or want to install a local copy
- of DocBook, you can get those from your preferred package manager.
+ If you use MacPorts, the following will get you set up:
+
+sudo port install docbook-xml docbook-xsl-nons libxslt fop
+
+ If you use Homebrew, use this:
+
+brew install docbook docbook-xsl libxslt fop
+
- If you use MacPorts, the following will get you set up:
+ The Homebrew-supplied programs require the following environment variable
+ to be set:
-sudo port install docbook-xml-4.5 docbook-xsl fop
+export XML_CATALOG_FILES=/usr/local/etc/xml/catalog
- If you use Homebrew, use this:
+ Without it, xsltproc will throw errors like this:
-brew install docbook docbook-xsl fop
+I/O error : Attempt to load network entity http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd
+postgres.sgml:21: warning: failed to load external entity "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd"
+...
+
+
+ While it is possible to use the Apple-provided versions
+ of xmllint and xsltproc
+ instead of those from MacPorts or Homebrew, you'll still need
+ to install the DocBook DTD and stylesheets, and set up a catalog
+ file that points to them.
+
@@ -253,12 +258,6 @@ checking for dbtoepub... dbtoepub
these programs, for example
./configure ... XMLLINT=/opt/local/bin/xmllint ...
-
- Also, if you want to ensure that xmllint
- and xsltproc will not perform any network access,
- you can do something like
-
-./configure ... XMLLINT="xmllint --nonet" XSLTPROC="xsltproc --nonet" ...
diff --git a/doc/src/sgml/images/Makefile b/doc/src/sgml/images/Makefile
index f9e356348b2..645519095d0 100644
--- a/doc/src/sgml/images/Makefile
+++ b/doc/src/sgml/images/Makefile
@@ -9,7 +9,7 @@ ALL_IMAGES = \
DITAA = ditaa
DOT = dot
-XSLTPROC = xsltproc
+XSLTPROC = xsltproc --nonet
all: $(ALL_IMAGES)
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 4a0b6dcfcd4..3a4920c4c35 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -661,12 +661,24 @@ Where column_reference_storage_directive is:
- Operations such as TRUNCATE which normally affect a table and all of its
+ Operations such as TRUNCATE
+ which normally affect a table and all of its
inheritance children will cascade to all partitions, but may also be
- performed on an individual partition. Note that dropping a partition
- with DROP TABLE requires taking an ACCESS
- EXCLUSIVE lock on the parent table.
+ performed on an individual partition.
+
+
+ Note that creating a partition using PARTITION OF
+ requires taking an ACCESS EXCLUSIVE lock on the
+ parent partitioned table. Likewise, dropping a partition
+ with DROP TABLE requires taking
+ an ACCESS EXCLUSIVE lock on the parent table.
+ It is possible to use ALTER
+ TABLE ATTACH/DETACH PARTITION to perform these
+ operations with a weaker lock, thus reducing interference with
+ concurrent operations on the partitioned table.
+
+
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 956f97e2537..4bf68d3fbd6 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -863,16 +863,6 @@ PostgreSQL documentation
and the two systems have different definitions of the collation used
to sort the partitioning column.
-
-
- It is best not to use parallelism when restoring from an archive made
- with this option, because pg_restore will
- not know exactly which partition(s) a given archive data item will
- load data into. This could result in inefficiency due to lock
- conflicts between parallel jobs, or perhaps even restore failures due
- to foreign key constraints being set up before all the relevant data
- is loaded.
-
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index a64d70bafc7..a6549d9dcbf 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -777,6 +777,13 @@ bringetbitmap(IndexScanDesc scan, Node **bmNodeP)
break;
}
}
+
+ /*
+ * If we found a scan key eliminating the range, no need to
+ * check additional ones.
+ */
+ if (!addrange)
+ break;
}
}
}
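
The added early exit just propagates a per-range verdict out of the nested key loop. As a generic sketch of that control flow (the key-matching test here is hypothetical):

#include <stdbool.h>

/* Evaluate scan keys per attribute; once any key eliminates the range,
 * skip both the remaining keys and the remaining attributes. */
static bool
range_matches(bool (*key_matches) (int attno, int key), int natts, int nkeys)
{
    bool        addrange = true;

    for (int attno = 0; attno < natts; attno++)
    {
        for (int key = 0; key < nkeys; key++)
        {
            if (!key_matches(attno, key))
            {
                addrange = false;
                break;
            }
        }

        /* same early exit as the hunk above */
        if (!addrange)
            break;
    }
    return addrange;
}
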
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index c113f274b81..35c9a9fdafe 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -52,6 +52,7 @@
#include "postmaster/bgworker_internals.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
+#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
@@ -3005,11 +3006,18 @@ vacuum_delay_point(void)
if (msec > VacuumCostDelay * 4)
msec = VacuumCostDelay * 4;
- (void) WaitLatch(MyLatch,
- WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
- msec,
- WAIT_EVENT_VACUUM_DELAY);
- ResetLatch(MyLatch);
+ pgstat_report_wait_start(WAIT_EVENT_VACUUM_DELAY);
+ pg_usleep(msec * 1000);
+ pgstat_report_wait_end();
+
+ /*
+ * We don't want to ignore postmaster death during very long vacuums
+ * with vacuum_cost_delay configured. We can't use the usual
+ * WaitLatch() approach here because we want microsecond-based sleep
+ * durations above.
+ */
+ if (IsUnderPostmaster && !PostmasterIsAlive())
+ exit(1);
VacuumCostBalance = 0;
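
The comment explains why WaitLatch() is avoided here: it cannot express microsecond sleeps. As a rough, generic illustration of the underlying concern rather than the PostgreSQL mechanism, a long delay can be split into short chunks with a liveness check between them; getppid() stands in for PostmasterIsAlive() here:

#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Sleep for total_usec, but bail out if our parent process goes away.
 * (PostgreSQL itself checks a postmaster-death pipe, not getppid().) */
static void
sleep_watching_parent(long total_usec, pid_t original_parent)
{
    const long  chunk = 100000;     /* re-check every 100ms */

    while (total_usec > 0)
    {
        long        this_sleep = (total_usec < chunk) ? total_usec : chunk;

        usleep((useconds_t) this_sleep);
        if (getppid() != original_parent)   /* reparented: parent is gone */
            exit(1);
        total_usec -= this_sleep;
    }
}
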
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 611fcf94a89..f36a46cc79f 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -43,6 +43,8 @@
#include "nodes/execnodes.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/optimizer.h"
#include "parser/parse_agg.h"
#include "parser/parse_coerce.h"
#include "parser/parse_oper.h"
@@ -3032,16 +3034,24 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
* aggregate's arguments (and FILTER clause if any) contain any calls to
* volatile functions. Otherwise, the difference between restarting and
* not restarting the aggregation would be user-visible.
+ *
+ * We also don't risk using moving aggregates when there are subplans in
+ * the arguments or FILTER clause. This is partly because
+ * contain_volatile_functions() doesn't look inside subplans; but there
+ * are other reasons why a subplan's output might be volatile. For
+ * example, syncscan mode can render the results nonrepeatable.
*/
if (!OidIsValid(aggform->aggminvtransfn))
use_ma_code = false; /* sine qua non */
else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY &&
- aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
+ aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
use_ma_code = true; /* decision forced by safety */
else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
use_ma_code = false; /* non-moving frame head */
else if (contain_volatile_functions((Node *) wfunc))
use_ma_code = false; /* avoid possible behavioral change */
+ else if (contain_subplans((Node *) wfunc))
+ use_ma_code = false; /* subplans might contain volatile functions */
else
use_ma_code = true; /* yes, let's use it */
if (use_ma_code)
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 4a2ddd5dff3..8f04c342d5a 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -2084,6 +2084,8 @@ SPI_result_code_string(int code)
return "SPI_OK_REL_REGISTER";
case SPI_OK_REL_UNREGISTER:
return "SPI_OK_REL_UNREGISTER";
+ case SPI_OK_TD_REGISTER:
+ return "SPI_OK_TD_REGISTER";
}
/* Unrecognized code ... return something useful ... */
sprintf(buf, "Unrecognized SPI code %d", code);
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index c48812f955a..e39952494e6 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -1308,7 +1308,7 @@ be_tls_get_peer_serial(Port *port, char *ptr, size_t len)
ptr[0] = '\0';
}
-#ifdef HAVE_X509_GET_SIGNATURE_NID
+#if defined(HAVE_X509_GET_SIGNATURE_NID) || defined(HAVE_X509_GET_SIGNATURE_INFO)
char *
be_tls_get_certificate_hash(Port *port, size_t *len)
{
@@ -1326,10 +1326,15 @@ be_tls_get_certificate_hash(Port *port, size_t *len)
/*
* Get the signature algorithm of the certificate to determine the hash
- * algorithm to use for the result.
+ * algorithm to use for the result. Prefer X509_get_signature_info(),
+ * introduced in OpenSSL 1.1.1, which can handle RSA-PSS signatures.
*/
+#if HAVE_X509_GET_SIGNATURE_INFO
+ if (!X509_get_signature_info(server_cert, &algo_nid, NULL, NULL, NULL))
+#else
if (!OBJ_find_sigid_algs(X509_get_signature_nid(server_cert),
&algo_nid, NULL))
+#endif
elog(ERROR, "could not determine server certificate signature algorithm");
/*
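
For context, a standalone sketch of deriving a certificate hash from the signature algorithm with either API. The cert_hash() name is hypothetical, and the MD5/SHA-1-to-SHA-256 upgrade follows RFC 5929 as PostgreSQL's channel-binding code does; the caller is assumed to pass an EVP_MAX_MD_SIZE buffer:

#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/x509.h>

/* Hash a certificate with the digest implied by its signature algorithm.
 * Returns 1 on success, 0 on failure. */
static int
cert_hash(X509 *cert, unsigned char *out, unsigned int *outlen)
{
    int         algo_nid = NID_undef;
    const EVP_MD *md;

#ifdef HAVE_X509_GET_SIGNATURE_INFO
    /* OpenSSL 1.1.1 or newer: also understands RSA-PSS signatures */
    if (!X509_get_signature_info(cert, &algo_nid, NULL, NULL, NULL))
        return 0;
#else
    if (!OBJ_find_sigid_algs(X509_get_signature_nid(cert), &algo_nid, NULL))
        return 0;
#endif

    /* RFC 5929 tls-server-end-point: MD5 and SHA-1 are upgraded to SHA-256 */
    if (algo_nid == NID_md5 || algo_nid == NID_sha1)
        algo_nid = NID_sha256;

    md = EVP_get_digestbynid(algo_nid);
    if (md == NULL)
        return 0;

    return X509_digest(cert, md, out, outlen);
}
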
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index dfe348c1f40..8e158d63dc1 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -2944,12 +2944,17 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
Assert(varattno == te->resno);
/*
- * In scenarios where columns have been added to a view
- * since the outer query was originally parsed, there can
- * be more items in the subquery tlist than the outer
- * query expects. We should ignore such extra column(s)
- * --- compare the behavior for composite-returning
- * functions, in the RTE_FUNCTION case below.
+ * In a just-parsed subquery RTE, rte->eref->colnames
+ * should always have exactly as many entries as the
+ * subquery has non-junk output columns. However, if the
+ * subquery RTE was created by expansion of a view,
+ * perhaps the subquery tlist could now have more entries
+ * than existed when the outer query was parsed. Such
+ * cases should now be prevented because ApplyRetrieveRule
+ * will extend the colnames list to match. But out of
+ * caution, we'll keep the code like this in the back
+ * branches: just ignore any columns that lack colnames
+ * entries.
*/
if (!aliasp_item)
break;
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 8f21a90f391..377055f6f84 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -5430,18 +5430,10 @@ internal_forkexec(int argc, char *argv[], Port *port)
/*
* Queue a waiter to signal when this child dies. The wait will be handled
- * automatically by an operating system thread pool.
- *
- * Note: use malloc instead of palloc, since it needs to be thread-safe.
- * Struct will be free():d from the callback function that runs on a
- * different thread.
+ * automatically by an operating system thread pool. The memory will be
+ * freed by a later call to waitpid().
*/
- childinfo = malloc(sizeof(win32_deadchild_waitinfo));
- if (!childinfo)
- ereport(FATAL,
- (errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory")));
-
+ childinfo = palloc(sizeof(win32_deadchild_waitinfo));
childinfo->procHandle = pi.hProcess;
childinfo->procId = pi.dwProcessId;
@@ -5455,7 +5447,7 @@ internal_forkexec(int argc, char *argv[], Port *port)
(errmsg_internal("could not register process for wait: error code %lu",
GetLastError())));
- /* Don't close pi.hProcess here - the wait thread needs access to it */
+ /* Don't close pi.hProcess here - waitpid() needs access to it */
CloseHandle(pi.hThread);
@@ -7288,36 +7280,21 @@ ShmemBackendArrayRemove(Backend *bn)
static pid_t
waitpid(pid_t pid, int *exitstatus, int options)
{
+ win32_deadchild_waitinfo *childinfo;
+ DWORD exitcode;
DWORD dwd;
ULONG_PTR key;
OVERLAPPED *ovl;
- /*
- * Check if there are any dead children. If there are, return the pid of
- * the first one that died.
- */
- if (GetQueuedCompletionStatus(win32ChildQueue, &dwd, &key, &ovl, 0))
+ /* Try to consume one win32_deadchild_waitinfo from the queue. */
+ if (!GetQueuedCompletionStatus(win32ChildQueue, &dwd, &key, &ovl, 0))
{
- *exitstatus = (int) key;
- return dwd;
+ errno = EAGAIN;
+ return -1;
}
- return -1;
-}
-
-/*
- * Note! Code below executes on a thread pool! All operations must
- * be thread safe! Note that elog() and friends must *not* be used.
- */
-static void WINAPI
-pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
-{
- win32_deadchild_waitinfo *childinfo = (win32_deadchild_waitinfo *) lpParameter;
- DWORD exitcode;
-
- if (TimerOrWaitFired)
- return; /* timeout. Should never happen, since we use
- * INFINITE as timeout value. */
+ childinfo = (win32_deadchild_waitinfo *) key;
+ pid = childinfo->procId;
/*
* Remove handle from wait - required even though it's set to wait only
@@ -7333,13 +7310,11 @@ pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
write_stderr("could not read exit code for process\n");
exitcode = 255;
}
-
- if (!PostQueuedCompletionStatus(win32ChildQueue, childinfo->procId, (ULONG_PTR) exitcode, NULL))
- write_stderr("could not post child completion status\n");
+ *exitstatus = exitcode;
/*
- * Handle is per-process, so we close it here instead of in the
- * originating thread
+	 * Close the process handle. Only after this point can the PID be
+ * recycled by the kernel.
*/
CloseHandle(childinfo->procHandle);
@@ -7347,9 +7322,36 @@ pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
* Free struct that was allocated before the call to
* RegisterWaitForSingleObject()
*/
- free(childinfo);
+ pfree(childinfo);
+
+ return pid;
+}
+
+/*
+ * Note! Code below executes on a thread pool! All operations must
+ * be thread safe! Note that elog() and friends must *not* be used.
+ */
+static void WINAPI
+pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
+{
+ /* Should never happen, since we use INFINITE as timeout value. */
+ if (TimerOrWaitFired)
+ return;
+
+ /*
+ * Post the win32_deadchild_waitinfo object for waitpid() to deal with. If
+ * that fails, we leak the object, but we also leak a whole process and
+ * get into an unrecoverable state, so there's not much point in worrying
+ * about that. We'd like to panic, but we can't use that infrastructure
+ * from this thread.
+ */
+ if (!PostQueuedCompletionStatus(win32ChildQueue,
+ 0,
+ (ULONG_PTR) lpParameter,
+ NULL))
+ write_stderr("could not post child completion status\n");
- /* Queue SIGCHLD signal */
+ /* Queue SIGCHLD signal. */
pg_queue_signal(SIGCHLD);
}
#endif /* WIN32 */
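
The restructuring above moves all real work out of the thread-pool callback: the callback only posts the win32_deadchild_waitinfo pointer through the completion port, and waitpid() dequeues it on the main thread. A stripped-down sketch of that hand-off pattern, with hypothetical names rather than the postmaster code:

#include <stdio.h>
#include <windows.h>

typedef struct
{
    DWORD       procId;
    HANDLE      procHandle;
} dead_child;

static HANDLE child_queue;

static void
init_child_queue(void)
{
    /* a completion port not associated with any file handle */
    child_queue = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
}

/* Runs on a system thread pool: do nothing but pass the pointer along. */
static void WINAPI
child_died_callback(PVOID param, BOOLEAN timedOut)
{
    (void) timedOut;            /* wait was registered with INFINITE */

    if (!PostQueuedCompletionStatus(child_queue, 0, (ULONG_PTR) param, NULL))
        fprintf(stderr, "could not post child completion status\n");
}

/* Called from the main thread: returns one queued entry, or NULL if none. */
static dead_child *
poll_dead_child(void)
{
    DWORD       nbytes;
    ULONG_PTR   key;
    OVERLAPPED *ovl;

    if (!GetQueuedCompletionStatus(child_queue, &nbytes, &key, &ovl, 0))
        return NULL;
    return (dead_child *) key;  /* caller closes procHandle and frees */
}
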
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 7a6323c3989..755d7ae6d2d 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -572,7 +572,7 @@ logicalmsg_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
TransactionId xid = XLogRecGetXid(r);
uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK;
RepOriginId origin_id = XLogRecGetOrigin(r);
- Snapshot snapshot;
+ Snapshot snapshot = NULL;
xl_logical_message *message;
if (info != XLOG_LOGICAL_MESSAGE)
@@ -602,7 +602,17 @@ logicalmsg_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
SnapBuildXactNeedsSkip(builder, buf->origptr)))
return;
- snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
+ /*
+ * If this is a non-transactional change, get the snapshot we're expected
+ * to use. We only get here when the snapshot is consistent, and the
+ * change is not meant to be skipped.
+ *
+ * For transactional changes we don't need a snapshot, we'll use the
+ * regular snapshot maintained by ReorderBuffer. We just leave it NULL.
+ */
+ if (!message->transactional)
+ snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
+
ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr,
message->transactional,
message->message, /* first part of message is
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 709365fc8c6..721fa652d25 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -821,6 +821,13 @@ ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid,
Assert(xid != InvalidTransactionId);
+ /*
+ * We don't expect snapshots for transactional changes - we'll use the
+ * snapshot derived later during apply (unless the change gets
+ * skipped).
+ */
+ Assert(!snapshot);
+
oldcontext = MemoryContextSwitchTo(rb->context);
change = ReorderBufferGetChange(rb);
@@ -839,6 +846,9 @@ ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid,
ReorderBufferTXN *txn = NULL;
volatile Snapshot snapshot_now = snapshot;
+ /* Non-transactional changes require a valid snapshot. */
+ Assert(snapshot_now);
+
if (xid != InvalidTransactionId)
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index ff9cf5d406d..df2ea94d468 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -260,6 +260,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
bool is_init)
{
PGOutputData *data = palloc0(sizeof(PGOutputData));
+ static bool publication_callback_registered = false;
/* Create our memory context for private allocations. */
data->context = AllocSetContextCreate(ctx->context,
@@ -323,9 +324,18 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
/* Init publication state. */
data->publications = NIL;
publications_valid = false;
- CacheRegisterSyscacheCallback(PUBLICATIONOID,
- publication_invalidation_cb,
- (Datum) 0);
+
+ /*
+ * Register callback for pg_publication if we didn't already do that
+ * during some previous call in this process.
+ */
+ if (!publication_callback_registered)
+ {
+ CacheRegisterSyscacheCallback(PUBLICATIONOID,
+ publication_invalidation_cb,
+ (Datum) 0);
+ publication_callback_registered = true;
+ }
/* Initialize relation schema cache. */
init_rel_sync_cache(CacheMemoryContext);
@@ -948,7 +958,9 @@ static void
init_rel_sync_cache(MemoryContext cachectx)
{
HASHCTL ctl;
+ static bool relation_callbacks_registered = false;
+ /* Nothing to do if hash table already exists */
if (RelationSyncCache != NULL)
return;
@@ -963,10 +975,16 @@ init_rel_sync_cache(MemoryContext cachectx)
Assert(RelationSyncCache != NULL);
+ /* No more to do if we already registered callbacks */
+ if (relation_callbacks_registered)
+ return;
+
CacheRegisterRelcacheCallback(rel_sync_cache_relation_cb, (Datum) 0);
CacheRegisterSyscacheCallback(PUBLICATIONRELMAP,
rel_sync_cache_publication_cb,
(Datum) 0);
+
+ relation_callbacks_registered = true;
}
/*
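
Both hunks apply the same small pattern: syscache and relcache callbacks cannot be unregistered, so registration is guarded by a process-lifetime static flag. In isolation (register_my_callbacks() is a hypothetical name, with the actual CacheRegister* calls elided):

#include <stdbool.h>

static void
register_my_callbacks(void)
{
    static bool callbacks_registered = false;

    /* Nothing to do if a previous call in this process already registered. */
    if (callbacks_registered)
        return;

    /* ... CacheRegisterSyscacheCallback() / CacheRegisterRelcacheCallback() ... */

    callbacks_registered = true;
}
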
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 2938edf1c6a..3ca8bfff2fb 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -28,6 +28,7 @@
#include "catalog/dependency.h"
#include "catalog/pg_type.h"
#include "commands/trigger.h"
+#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
@@ -403,6 +404,7 @@ rewriteRuleAction(Query *parsetree,
Query *sub_action;
Query **sub_action_ptr;
acquireLocksOnSubLinks_context context;
+ ListCell *lc;
context.for_execute = true;
@@ -441,6 +443,23 @@ rewriteRuleAction(Query *parsetree,
ChangeVarNodes(rule_qual,
PRS2_OLD_VARNO + rt_length, rt_index, 0);
+ /*
+ * Mark any subquery RTEs in the rule action as LATERAL if they contain
+ * Vars referring to the current query level (references to NEW/OLD).
+ * Those really are lateral references, but we've historically not
+ * required users to mark such subqueries with LATERAL explicitly. But
+ * the planner will complain if such Vars exist in a non-LATERAL subquery,
+ * so we have to fix things up here.
+ */
+ foreach(lc, sub_action->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+
+ if (rte->rtekind == RTE_SUBQUERY && !rte->lateral &&
+ contain_vars_of_level((Node *) rte->subquery, 1))
+ rte->lateral = true;
+ }
+
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
@@ -482,8 +501,6 @@ rewriteRuleAction(Query *parsetree,
*/
if (parsetree->hasSubLinks && !sub_action->hasSubLinks)
{
- ListCell *lc;
-
foreach(lc, parsetree->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
@@ -585,8 +602,6 @@ rewriteRuleAction(Query *parsetree,
*/
if (parsetree->cteList != NIL && sub_action->commandType != CMD_UTILITY)
{
- ListCell *lc;
-
/*
* Annoying implementation restriction: because CTEs are identified by
* name within a cteList, we can't merge a CTE from the original query
@@ -1787,6 +1802,7 @@ ApplyRetrieveRule(Query *parsetree,
RangeTblEntry *rte,
*subrte;
RowMarkClause *rc;
+ int numCols;
if (list_length(rule->actions) != 1)
elog(ERROR, "expected just one rule action");
@@ -1946,6 +1962,20 @@ ApplyRetrieveRule(Query *parsetree,
rte->updatedCols = NULL;
rte->extraUpdatedCols = NULL;
+ /*
+ * Since we allow CREATE OR REPLACE VIEW to add columns to a view, the
+ * rule_action might emit more columns than we expected when the current
+ * query was parsed. Various places expect rte->eref->colnames to be
+ * consistent with the non-junk output columns of the subquery, so patch
+ * things up if necessary by adding some dummy column names.
+ */
+ numCols = ExecCleanTargetListLength(rule_action->targetList);
+ while (list_length(rte->eref->colnames) < numCols)
+ {
+ rte->eref->colnames = lappend(rte->eref->colnames,
+ makeString(pstrdup("?column?")));
+ }
+
return parsetree;
}
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index 8b3d560cb20..06d930f9164 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -1170,13 +1170,12 @@ clauselist_apply_dependencies(PlannerInfo *root, List *clauses,
* Determines if the expression is compatible with functional dependencies
*
* Similar to dependency_is_compatible_clause, but doesn't enforce that the
- * expression is a simple Var. OTOH we check that there's at least one
- * statistics object matching the expression.
+ * expression is a simple Var. On success, the matching statistics
+ * expression is stored into *expr.
*/
static bool
dependency_is_compatible_expression(Node *clause, Index relid, List *statlist, Node **expr)
{
- List *vars;
ListCell *lc,
*lc2;
Node *clause_expr;
@@ -1324,29 +1323,8 @@ dependency_is_compatible_expression(Node *clause, Index relid, List *statlist, N
if (IsA(clause_expr, RelabelType))
clause_expr = (Node *) ((RelabelType *) clause_expr)->arg;
- vars = pull_var_clause(clause_expr, 0);
-
- foreach(lc, vars)
- {
- Var *var = (Var *) lfirst(lc);
-
- /* Ensure Var is from the correct relation */
- if (var->varno != relid)
- return false;
-
- /* We also better ensure the Var is from the current level */
- if (var->varlevelsup != 0)
- return false;
-
- /* Also ignore system attributes (we don't allow stats on those) */
- if (!AttrNumberIsForUserDefinedAttr(var->varattno))
- return false;
- }
-
/*
- * Check if we actually have a matching statistics for the expression.
- *
- * XXX Maybe this is an overkill. We'll eliminate the expressions later.
+ * Search for a matching statistics expression.
*/
foreach(lc, statlist)
{
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 3c7701e15b6..208874f793e 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -387,6 +387,9 @@ static inline void ProcArrayEndTransactionInternal(PGPROC *proc, TransactionId l
static void ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid);
static void MaintainLatestCompletedXid(TransactionId latestXid);
static void MaintainLatestCompletedXidRecovery(TransactionId latestXid);
+static void TransactionIdRetreatSafely(TransactionId *xid,
+ int retreat_by,
+ FullTransactionId rel);
static inline FullTransactionId FullXidRelativeTo(FullTransactionId rel,
TransactionId xid);
@@ -1995,17 +1998,35 @@ ComputeXidHorizons(ComputeXidHorizonsResult *h, bool updateGlobalVis)
* so guc.c should limit it to no more than the xidStopLimit threshold
* in varsup.c. Also note that we intentionally don't apply
* vacuum_defer_cleanup_age on standby servers.
+ *
+ * Need to use TransactionIdRetreatSafely() instead of open-coding the
+ * subtraction, to prevent creating an xid before
+ * FirstNormalTransactionId.
*/
- h->oldest_considered_running =
- TransactionIdRetreatedBy(h->oldest_considered_running,
- vacuum_defer_cleanup_age);
- h->shared_oldest_nonremovable =
- TransactionIdRetreatedBy(h->shared_oldest_nonremovable,
- vacuum_defer_cleanup_age);
- h->data_oldest_nonremovable =
- TransactionIdRetreatedBy(h->data_oldest_nonremovable,
- vacuum_defer_cleanup_age);
- /* defer doesn't apply to temp relations */
+ Assert(TransactionIdPrecedesOrEquals(h->oldest_considered_running,
+ h->shared_oldest_nonremovable));
+ Assert(TransactionIdPrecedesOrEquals(h->shared_oldest_nonremovable,
+ h->data_oldest_nonremovable));
+
+ if (vacuum_defer_cleanup_age > 0)
+ {
+ TransactionIdRetreatSafely(&h->oldest_considered_running,
+ vacuum_defer_cleanup_age,
+ h->latest_completed);
+ TransactionIdRetreatSafely(&h->shared_oldest_nonremovable,
+ vacuum_defer_cleanup_age,
+ h->latest_completed);
+ TransactionIdRetreatSafely(&h->data_oldest_nonremovable,
+ vacuum_defer_cleanup_age,
+ h->latest_completed);
+ /* defer doesn't apply to temp relations */
+
+
+ Assert(TransactionIdPrecedesOrEquals(h->oldest_considered_running,
+ h->shared_oldest_nonremovable));
+ Assert(TransactionIdPrecedesOrEquals(h->shared_oldest_nonremovable,
+ h->data_oldest_nonremovable));
+ }
}
/*
@@ -3329,8 +3350,10 @@ GetSnapshotData(Snapshot snapshot, DtxContext distributedTransactionContext)
oldestfxid = FullXidRelativeTo(latest_completed, oldestxid);
/* apply vacuum_defer_cleanup_age */
- def_vis_xid_data =
- TransactionIdRetreatedBy(globalxmin, vacuum_defer_cleanup_age);
+ def_vis_xid_data = globalxmin;
+ TransactionIdRetreatSafely(&def_vis_xid_data,
+ vacuum_defer_cleanup_age,
+ oldestfxid);
/* Check whether there's a replication slot requiring an older xmin. */
def_vis_xid_data =
@@ -5360,6 +5383,44 @@ GlobalVisCheckRemovableXid(Relation rel, TransactionId xid)
return GlobalVisTestIsRemovableXid(state, xid);
}
+/*
+ * Safely retreat *xid by retreat_by, storing the result back into *xid.
+ *
+ * Need to be careful to prevent *xid from retreating below
+ * FirstNormalTransactionId during epoch 0. This is important to prevent
+ * generating xids that cannot be converted to a FullTransactionId without
+ * wrapping around.
+ *
+ * If retreat_by would lead to an xid older than FirstNormalTransactionId,
+ * *xid is set to FirstNormalTransactionId instead.
+ */
+static void
+TransactionIdRetreatSafely(TransactionId *xid, int retreat_by, FullTransactionId rel)
+{
+ TransactionId original_xid = *xid;
+ FullTransactionId fxid;
+ uint64 fxid_i;
+
+ Assert(TransactionIdIsNormal(original_xid));
+ Assert(retreat_by >= 0); /* relevant GUCs are stored as ints */
+ AssertTransactionIdInAllowableRange(original_xid);
+
+ if (retreat_by == 0)
+ return;
+
+ fxid = FullXidRelativeTo(rel, original_xid);
+ fxid_i = U64FromFullTransactionId(fxid);
+
+ if ((fxid_i - FirstNormalTransactionId) <= retreat_by)
+ *xid = FirstNormalTransactionId;
+ else
+ {
+ *xid = TransactionIdRetreatedBy(original_xid, retreat_by);
+ Assert(TransactionIdIsNormal(*xid));
+ Assert(NormalTransactionIdPrecedes(*xid, original_xid));
+ }
+}
+
/*
* Convert a 32 bit transaction id into 64 bit transaction id, by assuming it
* is within MaxTransactionId / 2 of XidFromFullTransactionId(rel).
@@ -6681,4 +6742,4 @@ LoopBackendProc(BackendProcCallbackFunction func, void *args)
(*func)(proc, args);
}
LWLockRelease(ProcArrayLock);
-}
\ No newline at end of file
+}
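
Stripped of the TransactionId/FullTransactionId wrappers, the clamped retreat added above amounts to the following sketch; retreat_safely() and FIRST_NORMAL_XID are stand-ins, and the 64-bit form of the xid is taken as a parameter rather than computed with FullXidRelativeTo():

#include <stdint.h>

#define FIRST_NORMAL_XID   ((uint64_t) 3)   /* FirstNormalTransactionId */

static uint32_t
retreat_safely(uint32_t xid, uint32_t retreat_by, uint64_t fxid)
{
    if (retreat_by == 0)
        return xid;

    /* Retreating that far would cross below epoch 0: clamp instead. */
    if (fxid - FIRST_NORMAL_XID <= retreat_by)
        return (uint32_t) FIRST_NORMAL_XID;

    /* Plain modular subtraction, stepping over the special xids 0..2. */
    xid -= retreat_by;
    while (xid < FIRST_NORMAL_XID)
        xid--;
    return xid;
}
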
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index f5668bdb4ff..ec6f26a72ba 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1846,24 +1846,6 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
return snapshot;
}
- /* Maintain serializable global xmin info. */
- if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
- {
- Assert(PredXact->SxactGlobalXminCount == 0);
- PredXact->SxactGlobalXmin = snapshot->xmin;
- PredXact->SxactGlobalXminCount = 1;
- SerialSetActiveSerXmin(snapshot->xmin);
- }
- else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
- {
- Assert(PredXact->SxactGlobalXminCount > 0);
- PredXact->SxactGlobalXminCount++;
- }
- else
- {
- Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
- }
-
/* Initialize the structure. */
sxact->vxid = vxid;
sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
@@ -1900,6 +1882,19 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
SetPossibleUnsafeConflict(sxact, othersxact);
}
}
+
+ /*
+ * If we didn't find any possibly unsafe conflicts because every
+ * uncommitted writable transaction turned out to be doomed, then we
+ * can "opt out" immediately. See comments above the earlier check for
+ * PredXact->WritableSxactCount == 0.
+ */
+ if (SHMQueueEmpty(&sxact->possibleUnsafeConflicts))
+ {
+ ReleasePredXact(sxact);
+ LWLockRelease(SerializableXactHashLock);
+ return snapshot;
+ }
}
else
{
@@ -1908,6 +1903,24 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
(MaxBackends + max_prepared_xacts));
}
+ /* Maintain serializable global xmin info. */
+ if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
+ {
+ Assert(PredXact->SxactGlobalXminCount == 0);
+ PredXact->SxactGlobalXmin = snapshot->xmin;
+ PredXact->SxactGlobalXminCount = 1;
+ SerialSetActiveSerXmin(snapshot->xmin);
+ }
+ else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
+ {
+ Assert(PredXact->SxactGlobalXminCount > 0);
+ PredXact->SxactGlobalXminCount++;
+ }
+ else
+ {
+ Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
+ }
+
MySerializableXact = sxact;
MyXactDidWrite = false; /* haven't written anything yet */
@@ -3331,6 +3344,7 @@ SetNewSxactGlobalXmin(void)
void
ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
{
+ bool partiallyReleasing = false;
bool needToClear;
RWConflict conflict,
nextConflict,
@@ -3431,6 +3445,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
else
{
MySerializableXact->flags |= SXACT_FLAG_PARTIALLY_RELEASED;
+ partiallyReleasing = true;
/* ... and proceed to perform the partial release below. */
}
}
@@ -3681,9 +3696,15 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
* serializable transactions completes. We then find the "new oldest"
* xmin and purge any transactions which finished before this transaction
* was launched.
+ *
+ * For parallel queries in read-only transactions, it might run twice.
+ * We only release the reference on the first call.
*/
needToClear = false;
- if (TransactionIdEquals(MySerializableXact->xmin, PredXact->SxactGlobalXmin))
+ if ((partiallyReleasing ||
+ !SxactIsPartiallyReleased(MySerializableXact)) &&
+ TransactionIdEquals(MySerializableXact->xmin,
+ PredXact->SxactGlobalXmin))
{
Assert(PredXact->SxactGlobalXminCount > 0);
if (--(PredXact->SxactGlobalXminCount) == 0)
@@ -4839,10 +4860,14 @@ PreCommit_CheckForSerializationFailure(void)
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
- /* Check if someone else has already decided that we need to die */
- if (SxactIsDoomed(MySerializableXact))
+ /*
+ * Check if someone else has already decided that we need to die. Since
+ * we set our own DOOMED flag when partially releasing, ignore in that
+ * case.
+ */
+ if (SxactIsDoomed(MySerializableXact) &&
+ !SxactIsPartiallyReleased(MySerializableXact))
{
- Assert(!SxactIsPartiallyReleased(MySerializableXact));
LWLockRelease(SerializableXactHashLock);
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 7d58e28cd1e..a7a426c3034 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -5719,13 +5719,20 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
/*
* If any '0' specifiers are present, make sure we don't strip
- * those digits.
+ * those digits. But don't advance last_relevant beyond the last
+ * character of the Np->number string, which is a hazard if the
+ * number got shortened due to precision limitations.
*/
if (Np->last_relevant && Np->Num->zero_end > Np->out_pre_spaces)
{
+ int last_zero_pos;
char *last_zero;
- last_zero = Np->number + (Np->Num->zero_end - Np->out_pre_spaces);
+ /* note that Np->number cannot be zero-length here */
+ last_zero_pos = strlen(Np->number) - 1;
+ last_zero_pos = Min(last_zero_pos,
+ Np->Num->zero_end - Np->out_pre_spaces);
+ last_zero = Np->number + last_zero_pos;
if (Np->last_relevant < last_zero)
Np->last_relevant = last_zero;
}
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index b342c81f27b..f6a074aa7d0 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -656,6 +656,7 @@ report_json_context(JsonLexContext *lex)
line_start = lex->line_start;
context_start = line_start;
context_end = lex->token_terminator;
+ Assert(context_end >= context_start);
/* Advance until we are close enough to context_end */
while (context_end - context_start >= 50)
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index f737aa6fbde..bd9e5f9e243 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -797,7 +797,8 @@ translate(PG_FUNCTION_ARGS)
text *to = PG_GETARG_TEXT_PP(2);
text *result;
char *from_ptr,
- *to_ptr;
+ *to_ptr,
+ *to_end;
char *source,
*target;
int m,
@@ -819,6 +820,7 @@ translate(PG_FUNCTION_ARGS)
from_ptr = VARDATA_ANY(from);
tolen = VARSIZE_ANY_EXHDR(to);
to_ptr = VARDATA_ANY(to);
+ to_end = to_ptr + tolen;
/*
* The worst-case expansion is to substitute a max-length character for a
@@ -852,16 +854,16 @@ translate(PG_FUNCTION_ARGS)
}
if (i < fromlen)
{
- /* substitute */
+ /* substitute, or delete if no corresponding "to" character */
char *p = to_ptr;
for (i = 0; i < from_index; i++)
{
- p += pg_mblen(p);
- if (p >= (to_ptr + tolen))
+ if (p >= to_end)
break;
+ p += pg_mblen(p);
}
- if (p < (to_ptr + tolen))
+ if (p < to_end)
{
len = pg_mblen(p);
memcpy(target, p, len);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index bf663afb3c5..dbbb2a70a07 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -481,6 +481,8 @@ static void get_from_clause(Query *query, const char *prefix,
deparse_context *context);
static void get_from_clause_item(Node *jtnode, Query *query,
deparse_context *context);
+static void get_rte_alias(RangeTblEntry *rte, int varno, bool use_as,
+ deparse_context *context);
static void get_column_alias_list(deparse_columns *colinfo,
deparse_context *context);
static void get_from_clause_coldeflist(RangeTblFunction *rtfunc,
@@ -6661,12 +6663,14 @@ get_insert_query_def(Query *query, deparse_context *context,
context->indentLevel += PRETTYINDENT_STD;
appendStringInfoChar(buf, ' ');
}
- appendStringInfo(buf, "INSERT INTO %s ",
+ appendStringInfo(buf, "INSERT INTO %s",
generate_relation_name(rte->relid, NIL));
- /* INSERT requires AS keyword for target alias */
- if (rte->alias != NULL)
- appendStringInfo(buf, "AS %s ",
- quote_identifier(rte->alias->aliasname));
+
+ /* Print the relation alias, if needed; INSERT requires explicit AS */
+ get_rte_alias(rte, query->resultRelation, true, context);
+
+ /* always want a space here */
+ appendStringInfoChar(buf, ' ');
/*
* Add the insert-column-names list. Any indirection decoration needed on
@@ -6848,9 +6852,10 @@ get_update_query_def(Query *query, deparse_context *context,
appendStringInfo(buf, "UPDATE %s%s",
only_marker(rte),
generate_relation_name(rte->relid, NIL));
- if (rte->alias != NULL)
- appendStringInfo(buf, " %s",
- quote_identifier(rte->alias->aliasname));
+
+ /* Print the relation alias, if needed */
+ get_rte_alias(rte, query->resultRelation, false, context);
+
appendStringInfoString(buf, " SET ");
/* Deparse targetlist */
@@ -7056,9 +7061,9 @@ get_delete_query_def(Query *query, deparse_context *context,
appendStringInfo(buf, "DELETE FROM %s%s",
only_marker(rte),
generate_relation_name(rte->relid, NIL));
- if (rte->alias != NULL)
- appendStringInfo(buf, " %s",
- quote_identifier(rte->alias->aliasname));
+
+ /* Print the relation alias, if needed */
+ get_rte_alias(rte, query->resultRelation, false, context);
/* Add the USING clause if given */
get_from_clause(query, " USING ", context);
@@ -11231,10 +11236,8 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
{
int varno = ((RangeTblRef *) jtnode)->rtindex;
RangeTblEntry *rte = rt_fetch(varno, query->rtable);
- char *refname = get_rtable_name(varno, context);
deparse_columns *colinfo = deparse_columns_fetch(varno, dpns);
RangeTblFunction *rtfunc1 = NULL;
- bool printalias;
if (rte->lateral)
appendStringInfoString(buf, "LATERAL ");
@@ -11382,54 +11385,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
}
/* Print the relation alias, if needed */
- printalias = false;
- if (rte->alias != NULL)
- {
- /* Always print alias if user provided one */
- printalias = true;
- }
- else if (colinfo->printaliases)
- {
- /* Always print alias if we need to print column aliases */
- printalias = true;
- }
- else if (rte->rtekind == RTE_RELATION)
- {
- /*
- * No need to print alias if it's same as relation name (this
- * would normally be the case, but not if set_rtable_names had to
- * resolve a conflict).
- */
- if (strcmp(refname, get_relation_name(rte->relid)) != 0)
- printalias = true;
- }
- else if (rte->rtekind == RTE_FUNCTION || rte->rtekind == RTE_TABLEFUNCTION)
- {
- /*
- * For a function RTE, always print alias. This covers possible
- * renaming of the function and/or instability of the
- * FigureColname rules for things that aren't simple functions.
- * Note we'd need to force it anyway for the columndef list case.
- */
- printalias = true;
- }
- else if (rte->rtekind == RTE_VALUES)
- {
- /* Alias is syntactically required for VALUES */
- printalias = true;
- }
- else if (rte->rtekind == RTE_CTE)
- {
- /*
- * No need to print alias if it's same as CTE name (this would
- * normally be the case, but not if set_rtable_names had to
- * resolve a conflict).
- */
- if (strcmp(refname, rte->ctename) != 0)
- printalias = true;
- }
- if (printalias)
- appendStringInfo(buf, " %s", quote_identifier(refname));
+ get_rte_alias(rte, varno, false, context);
/* Print the column definitions or aliases, if needed */
if (rtfunc1 && rtfunc1->funccolnames != NIL)
@@ -11567,6 +11523,73 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
(int) nodeTag(jtnode));
}
+/*
+ * get_rte_alias - print the relation's alias, if needed
+ *
+ * If printed, the alias is preceded by a space, or by " AS " if use_as is true.
+ */
+static void
+get_rte_alias(RangeTblEntry *rte, int varno, bool use_as,
+ deparse_context *context)
+{
+ deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces);
+ char *refname = get_rtable_name(varno, context);
+ deparse_columns *colinfo = deparse_columns_fetch(varno, dpns);
+ bool printalias = false;
+
+ if (rte->alias != NULL)
+ {
+ /* Always print alias if user provided one */
+ printalias = true;
+ }
+ else if (colinfo->printaliases)
+ {
+ /* Always print alias if we need to print column aliases */
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_RELATION)
+ {
+ /*
+ * No need to print alias if it's same as relation name (this would
+ * normally be the case, but not if set_rtable_names had to resolve a
+ * conflict).
+ */
+ if (strcmp(refname, get_relation_name(rte->relid)) != 0)
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_FUNCTION)
+ {
+ /*
+ * For a function RTE, always print alias. This covers possible
+ * renaming of the function and/or instability of the FigureColname
+ * rules for things that aren't simple functions. Note we'd need to
+ * force it anyway for the columndef list case.
+ */
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_SUBQUERY ||
+ rte->rtekind == RTE_VALUES)
+ {
+ /* Alias is syntactically required for SUBQUERY and VALUES */
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_CTE)
+ {
+ /*
+ * No need to print alias if it's same as CTE name (this would
+ * normally be the case, but not if set_rtable_names had to resolve a
+ * conflict).
+ */
+ if (strcmp(refname, rte->ctename) != 0)
+ printalias = true;
+ }
+
+ if (printalias)
+ appendStringInfo(context->buf, "%s%s",
+ use_as ? " AS " : " ",
+ quote_identifier(refname));
+}
+
/*
* get_column_alias_list - print column alias list for an RTE
*
diff --git a/src/bin/pg_amcheck/t/004_verify_heapam.pl b/src/bin/pg_amcheck/t/004_verify_heapam.pl
index b603efad929..4cadb837730 100644
--- a/src/bin/pg_amcheck/t/004_verify_heapam.pl
+++ b/src/bin/pg_amcheck/t/004_verify_heapam.pl
@@ -217,7 +217,7 @@ sub write_tuple
my $relpath = "$pgdata/$rel";
# Insert data and freeze public.test
-use constant ROWCOUNT => 16;
+use constant ROWCOUNT => 17;
$node->safe_psql(
'postgres', qq(
INSERT INTO public.test (a, b, c)
@@ -296,7 +296,7 @@ sub write_tuple
$node->start;
# Ok, Xids and page layout look ok. We can run corruption tests.
-plan tests => 19;
+plan tests => 20;
# Check that pg_amcheck runs against the uncorrupted table without error.
$node->command_ok(
@@ -379,23 +379,24 @@ sub header
elsif ($offnum == 3)
{
# Corruptly set xmin < datfrozenxid, further back, noting circularity
- # of xid comparison. For a new cluster with epoch = 0, the corrupt
- # xmin will be interpreted as in the future
- $tup->{t_xmin} = 4026531839;
+ # of xid comparison.
+ my $xmin = 4026531839;
+ $tup->{t_xmin} = $xmin;
$tup->{t_infomask} &= ~HEAP_XMIN_COMMITTED;
$tup->{t_infomask} &= ~HEAP_XMIN_INVALID;
push @expected,
- qr/${$header}xmin 4026531839 equals or exceeds next valid transaction ID 0:\d+/;
+ qr/${$header}xmin ${xmin} precedes oldest valid transaction ID 0:\d+/;
}
elsif ($offnum == 4)
{
# Corruptly set xmax < relminmxid;
- $tup->{t_xmax} = 4026531839;
+ my $xmax = 4026531839;
+ $tup->{t_xmax} = $xmax;
$tup->{t_infomask} &= ~HEAP_XMAX_INVALID;
push @expected,
- qr/${$header}xmax 4026531839 equals or exceeds next valid transaction ID 0:\d+/;
+ qr/${$header}xmax ${xmax} precedes oldest valid transaction ID 0:\d+/;
}
elsif ($offnum == 5)
{
@@ -503,7 +504,7 @@ sub header
push @expected,
qr/${header}multitransaction ID 4 equals or exceeds next valid multitransaction ID 1/;
}
- elsif ($offnum == 15) # Last offnum must equal ROWCOUNT
+ elsif ($offnum == 15)
{
# Set both HEAP_XMAX_COMMITTED and HEAP_XMAX_IS_MULTI
$tup->{t_infomask} |= HEAP_XMAX_COMMITTED;
@@ -513,6 +514,17 @@ sub header
push @expected,
qr/${header}multitransaction ID 4000000000 precedes relation minimum multitransaction ID threshold 1/;
}
+ elsif ($offnum == 16) # Last offnum must equal ROWCOUNT
+ {
+ # Corruptly set xmin > next_xid to be in the future.
+ my $xmin = 123456;
+ $tup->{t_xmin} = $xmin;
+ $tup->{t_infomask} &= ~HEAP_XMIN_COMMITTED;
+ $tup->{t_infomask} &= ~HEAP_XMIN_INVALID;
+
+ push @expected,
+ qr/${$header}xmin ${xmin} equals or exceeds next valid transaction ID 0:\d+/;
+ }
write_tuple($file, $offset, $tup);
}
close($file)
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index cc733cb7be1..26d62e823cf 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -466,6 +466,10 @@ start_postmaster(void)
fflush(stdout);
fflush(stderr);
+#ifdef EXEC_BACKEND
+ pg_disable_aslr();
+#endif
+
pm_pid = fork();
if (pm_pid < 0)
{
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 4a8f4937605..8a2095b21a3 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -8298,7 +8298,7 @@ getPartitioningInfo(Archive *fout)
tbinfo = findTableByOid(tabrelid);
if (tbinfo == NULL)
fatal("failed sanity check, table OID %u appearing in pg_partitioned_table not found",
- tabrelid);
+ tabrelid);
tbinfo->unsafe_partitions = true;
}
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index e0dfadcf414..c87bc3dd007 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -481,7 +481,8 @@ typedef struct
pg_time_usec_t txn_begin; /* used for measuring schedule lag times */
pg_time_usec_t stmt_begin; /* used for measuring statement latencies */
- bool prepared[MAX_SCRIPTS]; /* whether client prepared the script */
+ /* whether client prepared each command of each script */
+ bool **prepared;
/* per client collected stats */
int64 cnt; /* client transaction count, for -t */
@@ -573,7 +574,8 @@ static const char *QUERYMODE[] = {"simple", "extended", "prepared"};
* argv Command arguments, the first of which is the command or SQL
* string itself. For SQL commands, after post-processing
* argv[0] is the same as 'lines' with variables substituted.
- * varprefix SQL commands terminated with \gset or \aset have this set
+ * prepname The name that this command is prepared under, in prepare mode
+ * varprefix SQL commands terminated with \gset or \aset have this set
* to a non NULL value. If nonempty, it's used to prefix the
* variable name that receives the value.
* aset do gset on all possible queries of a combined query (\;).
@@ -588,6 +590,7 @@ typedef struct Command
MetaCommand meta;
int argc;
char *argv[MAX_ARGS];
+ char *prepname;
char *varprefix;
PgBenchExpr *expr;
SimpleStats stats;
@@ -2836,13 +2839,9 @@ runShellCommand(CState *st, char *variable, char **argv, int argc)
return true;
}
-#define MAX_PREPARE_NAME 32
-static void
-preparedStatementName(char *buffer, int file, int state)
-{
- sprintf(buffer, "P%d_%d", file, state);
-}
-
+/*
+ * Report that the client is aborting while processing SQL commands.
+ */
static void
commandFailed(CState *st, const char *cmd, const char *message)
{
@@ -2869,6 +2868,87 @@ chooseScript(TState *thread)
return i - 1;
}
+/*
+ * Prepare the SQL command from st->use_file at command_num.
+ */
+static void
+prepareCommand(CState *st, int command_num)
+{
+ Command *command = sql_script[st->use_file].commands[command_num];
+
+ /* No prepare for non-SQL commands */
+ if (command->type != SQL_COMMAND)
+ return;
+
+ /*
+ * If not already done, allocate space for 'prepared' flags: one boolean
+ * for each command of each script.
+ */
+ if (!st->prepared)
+ {
+ st->prepared = pg_malloc(sizeof(bool *) * num_scripts);
+ for (int i = 0; i < num_scripts; i++)
+ {
+ ParsedScript *script = &sql_script[i];
+ int numcmds;
+
+ for (numcmds = 0; script->commands[numcmds] != NULL; numcmds++)
+ ;
+ st->prepared[i] = pg_malloc0(sizeof(bool) * numcmds);
+ }
+ }
+
+ if (!st->prepared[st->use_file][command_num])
+ {
+ PGresult *res;
+
+ pg_log_debug("client %d preparing %s", st->id, command->prepname);
+ res = PQprepare(st->con, command->prepname,
+ command->argv[0], command->argc - 1, NULL);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pg_log_error("%s", PQerrorMessage(st->con));
+ PQclear(res);
+ st->prepared[st->use_file][command_num] = true;
+ }
+}
+
+/*
+ * Prepare all the commands in the script that come after the \startpipeline
+ * that's at position st->command, and the first \endpipeline we find.
+ *
+ * This sets the ->prepared flag for each relevant command as well as the
+ * \startpipeline itself, but doesn't move the st->command counter.
+ */
+static void
+prepareCommandsInPipeline(CState *st)
+{
+ int j;
+ Command **commands = sql_script[st->use_file].commands;
+
+ Assert(commands[st->command]->type == META_COMMAND &&
+ commands[st->command]->meta == META_STARTPIPELINE);
+
+ /*
+ * We set the 'prepared' flag on the \startpipeline itself so that next
+ * time we can skip this step cheaply, even though the \startpipeline
+ * command itself is never actually prepared.
+ */
+ if (st->prepared &&
+ st->prepared[st->use_file][st->command])
+ return;
+
+ for (j = st->command + 1; commands[j] != NULL; j++)
+ {
+ if (commands[j]->type == META_COMMAND &&
+ commands[j]->meta == META_ENDPIPELINE)
+ break;
+
+ prepareCommand(st, j);
+ }
+
+ st->prepared[st->use_file][st->command] = true;
+}
+
/* Send a SQL command, using the chosen querymode */
static bool
sendCommand(CState *st, Command *command)
@@ -2899,50 +2979,13 @@ sendCommand(CState *st, Command *command)
}
else if (querymode == QUERY_PREPARED)
{
- char name[MAX_PREPARE_NAME];
const char *params[MAX_ARGS];
- if (!st->prepared[st->use_file])
- {
- int j;
- Command **commands = sql_script[st->use_file].commands;
-
- for (j = 0; commands[j] != NULL; j++)
- {
- PGresult *res;
- char name[MAX_PREPARE_NAME];
-
- if (commands[j]->type != SQL_COMMAND)
- continue;
- preparedStatementName(name, st->use_file, j);
- if (PQpipelineStatus(st->con) == PQ_PIPELINE_OFF)
- {
- res = PQprepare(st->con, name,
- commands[j]->argv[0], commands[j]->argc - 1, NULL);
- if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pg_log_error("%s", PQerrorMessage(st->con));
- PQclear(res);
- }
- else
- {
- /*
- * In pipeline mode, we use asynchronous functions. If a
- * server-side error occurs, it will be processed later
- * among the other results.
- */
- if (!PQsendPrepare(st->con, name,
- commands[j]->argv[0], commands[j]->argc - 1, NULL))
- pg_log_error("%s", PQerrorMessage(st->con));
- }
- }
- st->prepared[st->use_file] = true;
- }
-
+ prepareCommand(st, st->command);
getQueryParams(st, command, params);
- preparedStatementName(name, st->use_file, st->command);
- pg_log_debug("client %d sending %s", st->id, name);
- r = PQsendQueryPrepared(st->con, name, command->argc - 1,
+ pg_log_debug("client %d sending %s", st->id, command->prepname);
+ r = PQsendQueryPrepared(st->con, command->prepname, command->argc - 1,
params, NULL, NULL, 0);
}
else /* unknown sql mode */
@@ -3202,7 +3245,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
thread->conn_duration += now - start;
/* Reset session-local state */
- memset(st->prepared, 0, sizeof(st->prepared));
+ pg_free(st->prepared);
+ st->prepared = NULL;
}
/* record transaction start time */
@@ -3777,6 +3821,16 @@ executeMetaCommand(CState *st, pg_time_usec_t *now)
return CSTATE_ABORTED;
}
+ /*
+ * If we're in prepared-query mode, we need to prepare all the
+ * commands that are inside the pipeline before we actually start the
+ * pipeline itself. This solves the problem that running BEGIN
+ * ISOLATION LEVEL SERIALIZABLE in a pipeline would fail due to a
+ * snapshot having been acquired by the prepare within the pipeline.
+ */
+ if (querymode == QUERY_PREPARED)
+ prepareCommandsInPipeline(st);
+
if (PQpipelineStatus(st->con) != PQ_PIPELINE_OFF)
{
commandFailed(st, "startpipeline", "already in pipeline mode");
@@ -4818,6 +4872,7 @@ create_sql_command(PQExpBuffer buf, const char *source)
my_command->varprefix = NULL; /* allocated later, if needed */
my_command->expr = NULL;
initSimpleStats(&my_command->stats);
+ my_command->prepname = NULL; /* set later, if needed */
return my_command;
}
@@ -4849,6 +4904,7 @@ static void
postprocess_sql_command(Command *my_command)
{
char buffer[128];
+ static int prepnum = 0;
Assert(my_command->type == SQL_COMMAND);
@@ -4857,15 +4913,17 @@ postprocess_sql_command(Command *my_command)
buffer[strcspn(buffer, "\n\r")] = '\0';
my_command->first_line = pg_strdup(buffer);
- /* parse query if necessary */
+ /* Parse query and generate prepared statement name, if necessary */
switch (querymode)
{
case QUERY_SIMPLE:
my_command->argv[0] = my_command->lines.data;
my_command->argc++;
break;
- case QUERY_EXTENDED:
case QUERY_PREPARED:
+ my_command->prepname = psprintf("P_%d", prepnum++);
+ /* fall through */
+ case QUERY_EXTENDED:
if (!parseQuery(my_command))
exit(1);
break;
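
For illustration, a standalone libpq sketch (not part of the patch; the connection
string, the statement name "p0" and the queries are made up) of the ordering the
pgbench change enforces: prepare while still outside the pipeline, then start the
pipeline, so that no PREPARE message runs after a serializable BEGIN inside it.

/* Hypothetical standalone program, for illustration only. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");
    PGresult   *res;
    const char *params[1] = {"1"};

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "%s", PQerrorMessage(conn));
        return 1;
    }

    /* Prepare first, in normal (non-pipeline) mode. */
    res = PQprepare(conn, "p0", "SELECT $1::int", 1, NULL);
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "%s", PQerrorMessage(conn));
    PQclear(res);

    /* Only now start the pipeline; BEGIN can acquire its snapshot safely. */
    if (PQenterPipelineMode(conn) != 1)
    {
        fprintf(stderr, "could not enter pipeline mode\n");
        return 1;
    }
    PQsendQueryParams(conn, "BEGIN ISOLATION LEVEL SERIALIZABLE",
                      0, NULL, NULL, NULL, NULL, 0);
    PQsendQueryPrepared(conn, "p0", 1, params, NULL, NULL, 0);
    PQsendQueryParams(conn, "COMMIT", 0, NULL, NULL, NULL, NULL, 0);
    PQpipelineSync(conn);

    /* Drain all results up to the sync marker. */
    for (;;)
    {
        PGresult   *r = PQgetResult(conn);

        if (r == NULL)
            continue;           /* NULL separates per-query result sets */
        if (PQresultStatus(r) == PGRES_PIPELINE_SYNC)
        {
            PQclear(r);
            break;
        }
        PQclear(r);
    }

    PQexitPipelineMode(conn);
    PQfinish(conn);
    return 0;
}
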
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 282ccc24aeb..76ecd9efeba 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -841,6 +841,26 @@
}
});
+# Working \startpipeline in prepared query mode with serializable
+$node->pgbench(
+ '-c4 -j2 -t 10 -n -M prepared',
+ 0,
+ [
+ qr{type: .*/001_pgbench_pipeline_serializable},
+ qr{actually processed: (\d+)/\1}
+ ],
+ [],
+ 'working \startpipeline with serializable',
+ {
+ '001_pgbench_pipeline_serializable' => q{
+-- test startpipeline with serializable
+\startpipeline
+BEGIN ISOLATION LEVEL SERIALIZABLE;
+} . "select 1;\n" x 10 . q{
+END;
+\endpipeline
+}
+ });
# trigger many expression errors
my @errors = (
diff --git a/src/common/exec.c b/src/common/exec.c
index 5159b616a39..dbac0598be0 100644
--- a/src/common/exec.c
+++ b/src/common/exec.c
@@ -27,6 +27,14 @@
#include "common/mdb_locale.h"
+#ifdef EXEC_BACKEND
+#if defined(HAVE_SYS_PERSONALITY_H)
+#include <sys/personality.h>
+#elif defined(HAVE_SYS_PROCCTL_H)
+#include <sys/procctl.h>
+#endif
+#endif
+
/* Inhibit mingw CRT's auto-globbing of command line arguments */
#if defined(WIN32) && !defined(_MSC_VER)
extern int _CRT_glob = 0; /* 0 turns off globbing; 1 turns it on */
@@ -477,6 +485,31 @@ set_pglocale_pgservice(const char *argv0, const char *app)
}
}
+#ifdef EXEC_BACKEND
+/*
+ * For the benefit of PostgreSQL developers testing EXEC_BACKEND on Unix
+ * systems (code paths normally exercised only on Windows), provide a way to
+ * disable address space layout randomization, if we know how on this platform.
+ * Otherwise, backends may fail to attach to shared memory at the fixed address
+ * chosen by the postmaster. (See also the macOS-specific hack in
+ * sysv_shmem.c.)
+ */
+int
+pg_disable_aslr(void)
+{
+#if defined(HAVE_SYS_PERSONALITY_H)
+ return personality(ADDR_NO_RANDOMIZE);
+#elif defined(HAVE_SYS_PROCCTL_H) && defined(PROC_ASLR_FORCE_DISABLE)
+ int data = PROC_ASLR_FORCE_DISABLE;
+
+ return procctl(P_PID, 0, PROC_ASLR_CTL, &data);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+#endif
+
#ifdef WIN32
/*
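
For illustration, a minimal standalone sketch, assuming a Linux system where
<sys/personality.h> is available, of the same ADDR_NO_RANDOMIZE technique that
pg_disable_aslr() uses above; the program and its messages are made up.

/* Disable ASLR for this process and anything it forks or execs (sketch). */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/personality.h>

int
main(void)
{
    if (personality(ADDR_NO_RANDOMIZE) == -1)
    {
        perror("personality");
        return 1;
    }
    printf("ASLR disabled; exec'd children inherit the setting\n");
    return 0;
}
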
diff --git a/src/common/jsonapi.c b/src/common/jsonapi.c
index ade13aed3a4..3d0fbfa7be1 100644
--- a/src/common/jsonapi.c
+++ b/src/common/jsonapi.c
@@ -675,6 +675,14 @@ json_lex(JsonLexContext *lex)
/*
* The next token in the input stream is known to be a string; lex it.
+ *
+ * If lex->strval isn't NULL, fill it with the decoded string.
+ * Set lex->token_terminator to the end of the decoded input, and in
+ * success cases, transfer its previous value to lex->prev_token_terminator.
+ * Return JSON_SUCCESS or an error code.
+ *
+ * Note: be careful that all error exits advance lex->token_terminator
+ * to the point after the character we detected the error on.
*/
static inline JsonParseErrorType
json_lex_string(JsonLexContext *lex)
@@ -683,6 +691,19 @@ json_lex_string(JsonLexContext *lex)
int len;
int hi_surrogate = -1;
+ /* Convenience macros for error exits */
+#define FAIL_AT_CHAR_START(code) \
+ do { \
+ lex->token_terminator = s; \
+ return code; \
+ } while (0)
+#define FAIL_AT_CHAR_END(code) \
+ do { \
+ lex->token_terminator = \
+ s + pg_encoding_mblen_bounded(lex->input_encoding, s); \
+ return code; \
+ } while (0)
+
if (lex->strval != NULL)
resetStringInfo(lex->strval);
@@ -695,18 +716,14 @@ json_lex_string(JsonLexContext *lex)
len++;
/* Premature end of the string. */
if (len >= lex->input_length)
- {
- lex->token_terminator = s;
- return JSON_INVALID_TOKEN;
- }
+ FAIL_AT_CHAR_START(JSON_INVALID_TOKEN);
else if (*s == '"')
break;
else if ((unsigned char) *s < 32)
{
/* Per RFC4627, these characters MUST be escaped. */
/* Since *s isn't printable, exclude it from the context string */
- lex->token_terminator = s;
- return JSON_ESCAPING_REQUIRED;
+ FAIL_AT_CHAR_START(JSON_ESCAPING_REQUIRED);
}
else if (*s == '\\')
{
@@ -714,10 +731,7 @@ json_lex_string(JsonLexContext *lex)
s++;
len++;
if (len >= lex->input_length)
- {
- lex->token_terminator = s;
- return JSON_INVALID_TOKEN;
- }
+ FAIL_AT_CHAR_START(JSON_INVALID_TOKEN);
else if (*s == 'u')
{
int i;
@@ -728,10 +742,7 @@ json_lex_string(JsonLexContext *lex)
s++;
len++;
if (len >= lex->input_length)
- {
- lex->token_terminator = s;
- return JSON_INVALID_TOKEN;
- }
+ FAIL_AT_CHAR_START(JSON_INVALID_TOKEN);
else if (*s >= '0' && *s <= '9')
ch = (ch * 16) + (*s - '0');
else if (*s >= 'a' && *s <= 'f')
@@ -739,10 +750,7 @@ json_lex_string(JsonLexContext *lex)
else if (*s >= 'A' && *s <= 'F')
ch = (ch * 16) + (*s - 'A') + 10;
else
- {
- lex->token_terminator = s + pg_encoding_mblen_bounded(lex->input_encoding, s);
- return JSON_UNICODE_ESCAPE_FORMAT;
- }
+ FAIL_AT_CHAR_END(JSON_UNICODE_ESCAPE_FORMAT);
}
if (lex->strval != NULL)
{
@@ -752,20 +760,20 @@ json_lex_string(JsonLexContext *lex)
if (is_utf16_surrogate_first(ch))
{
if (hi_surrogate != -1)
- return JSON_UNICODE_HIGH_SURROGATE;
+ FAIL_AT_CHAR_END(JSON_UNICODE_HIGH_SURROGATE);
hi_surrogate = ch;
continue;
}
else if (is_utf16_surrogate_second(ch))
{
if (hi_surrogate == -1)
- return JSON_UNICODE_LOW_SURROGATE;
+ FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
ch = surrogate_pair_to_codepoint(hi_surrogate, ch);
hi_surrogate = -1;
}
if (hi_surrogate != -1)
- return JSON_UNICODE_LOW_SURROGATE;
+ FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
/*
* Reject invalid cases. We can't have a value above
@@ -775,7 +783,7 @@ json_lex_string(JsonLexContext *lex)
if (ch == 0)
{
/* We can't allow this, since our TEXT type doesn't */
- return JSON_UNICODE_CODE_POINT_ZERO;
+ FAIL_AT_CHAR_END(JSON_UNICODE_CODE_POINT_ZERO);
}
/*
@@ -812,14 +820,14 @@ json_lex_string(JsonLexContext *lex)
appendStringInfoChar(lex->strval, (char) ch);
}
else
- return JSON_UNICODE_HIGH_ESCAPE;
+ FAIL_AT_CHAR_END(JSON_UNICODE_HIGH_ESCAPE);
#endif /* FRONTEND */
}
}
else if (lex->strval != NULL)
{
if (hi_surrogate != -1)
- return JSON_UNICODE_LOW_SURROGATE;
+ FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
switch (*s)
{
@@ -844,10 +852,14 @@ json_lex_string(JsonLexContext *lex)
appendStringInfoChar(lex->strval, '\t');
break;
default:
- /* Not a valid string escape, so signal error. */
+
+ /*
+ * Not a valid string escape, so signal error. We
+ * adjust token_start so that just the escape sequence
+ * is reported, not the whole string.
+ */
lex->token_start = s;
- lex->token_terminator = s + pg_encoding_mblen_bounded(lex->input_encoding, s);
- return JSON_ESCAPING_INVALID;
+ FAIL_AT_CHAR_END(JSON_ESCAPING_INVALID);
}
}
else if (strchr("\"\\/bfnrt", *s) == NULL)
@@ -860,15 +872,14 @@ json_lex_string(JsonLexContext *lex)
* shown it's not a performance win.
*/
lex->token_start = s;
- lex->token_terminator = s + pg_encoding_mblen_bounded(lex->input_encoding, s);
- return JSON_ESCAPING_INVALID;
+ FAIL_AT_CHAR_END(JSON_ESCAPING_INVALID);
}
}
else if (lex->strval != NULL)
{
if (hi_surrogate != -1)
- return JSON_UNICODE_LOW_SURROGATE;
+ FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
appendStringInfoChar(lex->strval, *s);
}
@@ -876,12 +887,18 @@ json_lex_string(JsonLexContext *lex)
}
if (hi_surrogate != -1)
+ {
+ lex->token_terminator = s + 1;
return JSON_UNICODE_LOW_SURROGATE;
+ }
/* Hooray, we found the end of the string! */
lex->prev_token_terminator = lex->token_terminator;
lex->token_terminator = s + 1;
return JSON_SUCCESS;
+
+#undef FAIL_AT_CHAR_START
+#undef FAIL_AT_CHAR_END
}
/*
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index de4883f4f69..8d2258aea44 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -307,7 +307,7 @@ extern void be_tls_get_peer_serial(Port *port, char *ptr, size_t len);
* This is not supported with old versions of OpenSSL that don't have
* the X509_get_signature_nid() function.
*/
-#if defined(USE_OPENSSL) && defined(HAVE_X509_GET_SIGNATURE_NID)
+#if defined(USE_OPENSSL) && (defined(HAVE_X509_GET_SIGNATURE_NID) || defined(HAVE_X509_GET_SIGNATURE_INFO))
#define HAVE_BE_TLS_GET_CERTIFICATE_HASH
extern char *be_tls_get_certificate_hash(Port *port, size_t *len);
#endif
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index 54de6844f58..2a06438113b 100644
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
@@ -699,6 +699,9 @@
/* Define to 1 if you have the <sys/ipc.h> header file. */
#undef HAVE_SYS_IPC_H
+/* Define to 1 if you have the <sys/personality.h> header file. */
+#undef HAVE_SYS_PERSONALITY_H
+
/* Define to 1 if you have the <sys/prctl.h> header file. */
#undef HAVE_SYS_PRCTL_H
@@ -804,6 +807,9 @@
/* Define to 1 if you have the `writev' function. */
#undef HAVE_WRITEV
+/* Define to 1 if you have the `X509_get_signature_info' function. */
+#undef HAVE_X509_GET_SIGNATURE_INFO
+
/* Define to 1 if you have the `X509_get_signature_nid' function. */
#undef HAVE_X509_GET_SIGNATURE_NID
diff --git a/src/include/port.h b/src/include/port.h
index 4179df36ebc..5aecc883f63 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -141,6 +141,11 @@ extern char *pipe_read_line(char *cmd, char *line, int maxsize);
#define PG_VERSIONSTR "postgres (Apache Cloudberry) " PG_VERSION "\n"
#define PG_BACKEND_VERSIONSTR "postgres (Apache Cloudberry) " PG_VERSION "\n"
+#ifdef EXEC_BACKEND
+/* Disable ASLR before exec, for developer builds only (in exec.c) */
+extern int pg_disable_aslr(void);
+#endif
+
#if defined(WIN32) || defined(__CYGWIN__)
#define EXE ".exe"
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 7f3dfd462a6..46e8540004e 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -3323,17 +3323,22 @@ PQconnectPoll(PGconn *conn)
conn->status = CONNECTION_MADE;
return PGRES_POLLING_WRITING;
}
- else if (pollres == PGRES_POLLING_FAILED &&
- conn->gssencmode[0] == 'p')
+ else if (pollres == PGRES_POLLING_FAILED)
{
- /*
- * We failed, but we can retry on "prefer". Have to drop
- * the current connection to do so, though.
- */
- conn->try_gss = false;
- need_new_connection = true;
- goto keep_going;
+ if (conn->gssencmode[0] == 'p')
+ {
+ /*
+ * We failed, but we can retry on "prefer". Have to
+ * drop the current connection to do so, though.
+ */
+ conn->try_gss = false;
+ need_new_connection = true;
+ goto keep_going;
+ }
+ /* Else it's a hard failure */
+ goto error_return;
}
+ /* Else, return POLLING_READING or POLLING_WRITING status */
return pollres;
#else /* !ENABLE_GSS */
/* unreachable */
diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c
index c0988e10a30..d75a823b880 100644
--- a/src/interfaces/libpq/fe-secure-openssl.c
+++ b/src/interfaces/libpq/fe-secure-openssl.c
@@ -378,7 +378,7 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
return n;
}
-#ifdef HAVE_X509_GET_SIGNATURE_NID
+#if defined(HAVE_X509_GET_SIGNATURE_NID) || defined(HAVE_X509_GET_SIGNATURE_INFO)
char *
pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len)
{
@@ -398,10 +398,15 @@ pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len)
/*
* Get the signature algorithm of the certificate to determine the hash
- * algorithm to use for the result.
+ * algorithm to use for the result. Prefer X509_get_signature_info(),
+ * introduced in OpenSSL 1.1.1, which can handle RSA-PSS signatures.
*/
+#if HAVE_X509_GET_SIGNATURE_INFO
+ if (!X509_get_signature_info(peer_cert, &algo_nid, NULL, NULL, NULL))
+#else
if (!OBJ_find_sigid_algs(X509_get_signature_nid(peer_cert),
&algo_nid, NULL))
+#endif
{
appendPQExpBufferStr(&conn->errorMessage,
libpq_gettext("could not determine server certificate signature algorithm\n"));
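
For illustration, a reduced sketch (not the libpq code itself; the helper name
cert_signature_digest_nid is hypothetical) of how the signature digest NID is
obtained on OpenSSL 1.1.1 and later versus older releases, which is the
distinction the HAVE_X509_GET_SIGNATURE_INFO configure test encodes.

#include <openssl/x509.h>
#include <openssl/objects.h>

int
cert_signature_digest_nid(X509 *cert)
{
    int         algo_nid = NID_undef;

#ifdef HAVE_X509_GET_SIGNATURE_INFO
    /* Understands RSA-PSS, where the signature NID alone is ambiguous. */
    if (!X509_get_signature_info(cert, &algo_nid, NULL, NULL, NULL))
        return NID_undef;
#else
    if (!OBJ_find_sigid_algs(X509_get_signature_nid(cert), &algo_nid, NULL))
        return NID_undef;
#endif
    return algo_nid;
}
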
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 0cbd611bd98..27321d262cf 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -869,7 +869,7 @@ extern ssize_t pgtls_write(PGconn *conn, const void *ptr, size_t len);
* This is not supported with old versions of OpenSSL that don't have
* the X509_get_signature_nid() function.
*/
-#if defined(USE_OPENSSL) && defined(HAVE_X509_GET_SIGNATURE_NID)
+#if defined(USE_OPENSSL) && (defined(HAVE_X509_GET_SIGNATURE_NID) || defined(HAVE_X509_GET_SIGNATURE_INFO))
#define HAVE_PGTLS_GET_PEER_CERTIFICATE_HASH
extern char *pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len);
#endif
diff --git a/src/port/win32stat.c b/src/port/win32stat.c
index 426e01f0efa..36c3b171f40 100644
--- a/src/port/win32stat.c
+++ b/src/port/win32stat.c
@@ -289,39 +289,66 @@ int
_pgfstat64(int fileno, struct stat *buf)
{
HANDLE hFile = (HANDLE) _get_osfhandle(fileno);
- BY_HANDLE_FILE_INFORMATION fiData;
+ DWORD fileType = FILE_TYPE_UNKNOWN;
+ DWORD lastError;
+ unsigned short st_mode;
- if (hFile == INVALID_HANDLE_VALUE || buf == NULL)
+ /*
+ * When stdin, stdout, and stderr aren't associated with a stream the
+ * special value -2 is returned:
+ * https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/get-osfhandle
+ */
+ if (hFile == INVALID_HANDLE_VALUE || hFile == (HANDLE) -2 || buf == NULL)
{
errno = EINVAL;
return -1;
}
+ fileType = GetFileType(hFile);
+ lastError = GetLastError();
+
/*
- * Check if the fileno is a data stream. If so, unless it has been
- * redirected to a file, getting information through its HANDLE will fail,
- * so emulate its stat information in the most appropriate way and return
- * it instead.
+ * Invoke GetLastError in order to distinguish between a "valid" return of
+ * FILE_TYPE_UNKNOWN and its return due to a calling error. In case of
+ * success, GetLastError returns NO_ERROR.
*/
- if ((fileno == _fileno(stdin) ||
- fileno == _fileno(stdout) ||
- fileno == _fileno(stderr)) &&
- !GetFileInformationByHandle(hFile, &fiData))
+ if (fileType == FILE_TYPE_UNKNOWN && lastError != NO_ERROR)
{
- memset(buf, 0, sizeof(*buf));
- buf->st_mode = _S_IFCHR;
- buf->st_dev = fileno;
- buf->st_rdev = fileno;
- buf->st_nlink = 1;
- return 0;
+ _dosmaperr(lastError);
+ return -1;
}
- /*
- * Since we already have a file handle there is no need to check for
- * ERROR_DELETE_PENDING.
- */
+ switch (fileType)
+ {
+ /* The specified file is a disk file */
+ case FILE_TYPE_DISK:
+ return fileinfo_to_stat(hFile, buf);
+
+ /*
+ * The specified file is a socket, a named pipe, or an anonymous
+ * pipe.
+ */
+ case FILE_TYPE_PIPE:
+ st_mode = _S_IFIFO;
+ break;
+ /* The specified file is a character file */
+ case FILE_TYPE_CHAR:
+ st_mode = _S_IFCHR;
+ break;
+ /* Unused flag and unknown file type */
+ case FILE_TYPE_REMOTE:
+ case FILE_TYPE_UNKNOWN:
+ default:
+ errno = EINVAL;
+ return -1;
+ }
- return fileinfo_to_stat(hFile, buf);
+ memset(buf, 0, sizeof(*buf));
+ buf->st_mode = st_mode;
+ buf->st_dev = fileno;
+ buf->st_rdev = fileno;
+ buf->st_nlink = 1;
+ return 0;
}
#endif /* WIN32 */
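
For illustration, a Windows-only sketch (not part of the patch) of the handle
classification the rewritten _pgfstat64() performs, including the GetLastError()
check that distinguishes a genuine FILE_TYPE_UNKNOWN from a GetFileType() failure.

#include <windows.h>
#include <stdio.h>

int
main(void)
{
    HANDLE      h = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD       type = GetFileType(h);
    DWORD       err = GetLastError();

    if (type == FILE_TYPE_UNKNOWN && err != NO_ERROR)
    {
        printf("GetFileType failed: error %lu\n", (unsigned long) err);
        return 1;
    }

    switch (type)
    {
        case FILE_TYPE_DISK:
            printf("disk file\n");
            break;
        case FILE_TYPE_PIPE:
            printf("pipe or socket\n");
            break;
        case FILE_TYPE_CHAR:
            printf("character device (e.g. console)\n");
            break;
        default:
            printf("unknown file type\n");
            break;
    }
    return 0;
}
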
diff --git a/src/test/isolation/expected/serializable-parallel-2.out b/src/test/isolation/expected/serializable-parallel-2.out
index 92753ccf39f..904fdd90806 100644
--- a/src/test/isolation/expected/serializable-parallel-2.out
+++ b/src/test/isolation/expected/serializable-parallel-2.out
@@ -1,50 +1,23 @@
Parsed test spec with 2 sessions
starting permutation: s1r s2r1 s1c s2r2 s2c
-step s1r: SELECT * FROM foo;
- a
---
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
-10
-(10 rows)
+step s1r: SELECT COUNT(*) FROM foo;
+count
+-----
+ 100
+(1 row)
-step s2r1: SELECT * FROM foo;
- a
---
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
-10
-(10 rows)
+step s2r1: SELECT COUNT(*) FROM foo;
+count
+-----
+ 100
+(1 row)
step s1c: COMMIT;
-step s2r2: SELECT * FROM foo;
- a
---
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
-10
-(10 rows)
+step s2r2: SELECT COUNT(*) FROM foo;
+count
+-----
+ 100
+(1 row)
step s2c: COMMIT;
diff --git a/src/test/isolation/expected/serializable-parallel-3.out b/src/test/isolation/expected/serializable-parallel-3.out
new file mode 100644
index 00000000000..654276a3856
--- /dev/null
+++ b/src/test/isolation/expected/serializable-parallel-3.out
@@ -0,0 +1,97 @@
+Parsed test spec with 4 sessions
+
+starting permutation: s1r s3r s2r1 s4r1 s1c s2r2 s3c s4r2 s4c s2c
+step s1r: SELECT * FROM foo;
+ a
+--
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+(10 rows)
+
+step s3r: SELECT * FROM foo;
+ a
+--
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+(10 rows)
+
+step s2r1: SELECT * FROM foo;
+ a
+--
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+(10 rows)
+
+step s4r1: SELECT * FROM foo;
+ a
+--
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+(10 rows)
+
+step s1c: COMMIT;
+step s2r2: SELECT * FROM foo;
+ a
+--
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+(10 rows)
+
+step s3c: COMMIT;
+step s4r2: SELECT * FROM foo;
+ a
+--
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+(10 rows)
+
+step s4c: COMMIT;
+step s2c: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 9122028e15d..3f12f923c02 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -146,6 +146,7 @@ test: plpgsql-toast
test: truncate-conflict
#test: serializable-parallel
#test: serializable-parallel-2
+#test: serializable-parallel-3
#test: prepared-transactions
diff --git a/src/test/isolation/specs/serializable-parallel-2.spec b/src/test/isolation/specs/serializable-parallel-2.spec
index f3941f78631..c975d96d772 100644
--- a/src/test/isolation/specs/serializable-parallel-2.spec
+++ b/src/test/isolation/specs/serializable-parallel-2.spec
@@ -3,7 +3,8 @@
setup
{
- CREATE TABLE foo AS SELECT generate_series(1, 10)::int a;
+ CREATE TABLE foo AS SELECT generate_series(1, 100)::int a;
+ CREATE INDEX ON foo(a);
ALTER TABLE foo SET (parallel_workers = 2);
}
@@ -14,7 +15,7 @@ teardown
session s1
setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
-step s1r { SELECT * FROM foo; }
+step s1r { SELECT COUNT(*) FROM foo; }
step s1c { COMMIT; }
session s2
@@ -22,9 +23,12 @@ setup {
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0;
+ SET min_parallel_index_scan_size = 0;
+ SET parallel_leader_participation = off;
+ SET enable_seqscan = off;
}
-step s2r1 { SELECT * FROM foo; }
-step s2r2 { SELECT * FROM foo; }
+step s2r1 { SELECT COUNT(*) FROM foo; }
+step s2r2 { SELECT COUNT(*) FROM foo; }
step s2c { COMMIT; }
permutation s1r s2r1 s1c s2r2 s2c
diff --git a/src/test/isolation/specs/serializable-parallel-3.spec b/src/test/isolation/specs/serializable-parallel-3.spec
new file mode 100644
index 00000000000..c27298c24ff
--- /dev/null
+++ b/src/test/isolation/specs/serializable-parallel-3.spec
@@ -0,0 +1,47 @@
+# Exercise the case where a read-only serializable transaction has
+# SXACT_FLAG_RO_SAFE set in a parallel query. This variant is like
+# two copies of #2 running at the same time, and exercises the case
+# where another transaction has the same xmin, and it is the oldest.
+
+setup
+{
+ CREATE TABLE foo AS SELECT generate_series(1, 10)::int a;
+ ALTER TABLE foo SET (parallel_workers = 2);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s1r { SELECT * FROM foo; }
+step s1c { COMMIT; }
+
+session s2
+setup {
+ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ }
+step s2r1 { SELECT * FROM foo; }
+step s2r2 { SELECT * FROM foo; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s3r { SELECT * FROM foo; }
+step s3c { COMMIT; }
+
+session s4
+setup {
+ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ }
+step s4r1 { SELECT * FROM foo; }
+step s4r2 { SELECT * FROM foo; }
+step s4c { COMMIT; }
+
+permutation s1r s3r s2r1 s4r1 s1c s2r2 s3c s4r2 s4c s2c
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index 0a310ccb15a..c2f588deeb8 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -113,7 +113,8 @@
"-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key",
"-CAcreateserial", "-out", "$slapd_certs/server.crt";
-system_or_bail $slapd, '-f', $slapd_conf, '-h', "$ldap_url $ldaps_url";
+# -s0 prevents log messages ending up in syslog
+system_or_bail $slapd, '-f', $slapd_conf, '-s0', '-h', "$ldap_url $ldaps_url";
END
{
diff --git a/src/test/modules/unsafe_tests/Makefile b/src/test/modules/unsafe_tests/Makefile
index 3ecf5fcfc5b..1d989007bd5 100644
--- a/src/test/modules/unsafe_tests/Makefile
+++ b/src/test/modules/unsafe_tests/Makefile
@@ -2,6 +2,9 @@
REGRESS = rolenames alter_system_table
+# the whole point of these tests is to not run installcheck
+NO_INSTALLCHECK = 1
+
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
diff --git a/src/test/modules/unsafe_tests/README b/src/test/modules/unsafe_tests/README
index a7e5b2a04f5..d9dbd038b95 100644
--- a/src/test/modules/unsafe_tests/README
+++ b/src/test/modules/unsafe_tests/README
@@ -1,6 +1,6 @@
This directory doesn't actually contain any extension module.
-What it is is a home for regression tests that we don't want to run
+Instead it is a home for regression tests that we don't want to run
during "make installcheck" because they could have side-effects that
seem undesirable for a production installation.
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index d29bcc0da6d..cbc578e3586 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -2644,26 +2644,55 @@ View definition:
FROM at_view_1 v1;
explain (verbose, costs off) select * from at_view_2;
- QUERY PLAN
-----------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- Output: bt.id, bt.stuff, (to_json(ROW(bt.id, bt.stuff, NULL)))
+ Output: bt.id, bt.stuff, (to_json(ROW(bt.id, bt.stuff, 4)))
-> Seq Scan on public.at_base_table bt
- Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, NULL))
+ Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, 4))
Optimizer: Postgres query optimizer
Settings: constraint_exclusion=partition
(6 rows)
select * from at_view_2;
- id | stuff | j
-----+--------+----------------------------------------
- 23 | skidoo | {"id":23,"stuff":"skidoo","more":null}
+ id | stuff | j
+----+--------+-------------------------------------
+ 23 | skidoo | {"id":23,"stuff":"skidoo","more":4}
(1 row)
drop view at_view_2;
drop view at_view_1;
drop table at_base_table;
--- check adding a column not iself requiring a rewrite, together with
+-- related case (bug #17811)
+begin;
+create temp table t1 as select * from int8_tbl;
+create temp view v1 as select 1::int8 as q1;
+create temp view v2 as select * from v1;
+create or replace temp view v1 with (security_barrier = true)
+ as select * from t1;
+create temp table log (q1 int8, q2 int8);
+create rule v1_upd_rule as on update to v1
+ do also insert into log values (new.*);
+update v2 set q1 = q1 + 1 where q1 = 123;
+select * from t1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 124 | 456
+ 124 | 4567890123456789
+(5 rows)
+
+select * from log;
+ q1 | q2
+-----+------------------
+ 124 | 456
+ 124 | 4567890123456789
+(2 rows)
+
+rollback;
+-- check adding a column not itself requiring a rewrite, together with
-- a column requiring a default (bug #16038)
-- ensure that rewrites aren't silently optimized away, removing the
-- value of the test
diff --git a/src/test/regress/expected/json_encoding.out b/src/test/regress/expected/json_encoding.out
index f343f74fe18..fa41b401030 100644
--- a/src/test/regress/expected/json_encoding.out
+++ b/src/test/regress/expected/json_encoding.out
@@ -56,19 +56,19 @@ select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8;
select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
ERROR: invalid input syntax for type json
DETAIL: Unicode high surrogate must not follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d...
select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
ERROR: invalid input syntax for type json
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
ERROR: invalid input syntax for type json
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83dX...
select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
ERROR: invalid input syntax for type json
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
--handling of simple unicode escapes
select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
correct_in_utf8
@@ -121,7 +121,7 @@ select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
ERROR: unsupported Unicode escape sequence
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "null \u0000...
select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
not_an_escape
--------------------
@@ -159,7 +159,7 @@ ERROR: unsupported Unicode escape sequence
LINE 1: SELECT '"\u0000"'::jsonb;
^
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: ...
+CONTEXT: JSON data, line 1: "\u0000...
-- use octet_length here so we don't get an odd unicode char in the
-- output
SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK
@@ -180,25 +180,25 @@ ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a';
^
DETAIL: Unicode high surrogate must not follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d...
SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a';
^
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ud83dX" }' -> 'a';
^
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83dX...
SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ude04X" }' -> 'a';
^
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
-- handling of simple unicode escapes
SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
correct_in_utf8
@@ -223,7 +223,7 @@ ERROR: unsupported Unicode escape sequence
LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
^
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "null \u0000...
SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape;
not_an_escape
------------------------------
@@ -253,7 +253,7 @@ ERROR: unsupported Unicode escape sequence
LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fai...
^
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "null \u0000...
SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
not_an_escape
--------------------
diff --git a/src/test/regress/expected/json_encoding_1.out b/src/test/regress/expected/json_encoding_1.out
index e2fc131b0fa..938f8e24aaf 100644
--- a/src/test/regress/expected/json_encoding_1.out
+++ b/src/test/regress/expected/json_encoding_1.out
@@ -52,19 +52,19 @@ ERROR: conversion between UTF8 and SQL_ASCII is not supported
select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
ERROR: invalid input syntax for type json
DETAIL: Unicode high surrogate must not follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d...
select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
ERROR: invalid input syntax for type json
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
ERROR: invalid input syntax for type json
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83dX...
select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
ERROR: invalid input syntax for type json
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
--handling of simple unicode escapes
select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
correct_in_utf8
@@ -113,7 +113,7 @@ select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
ERROR: unsupported Unicode escape sequence
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "null \u0000...
select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
not_an_escape
--------------------
@@ -151,7 +151,7 @@ ERROR: unsupported Unicode escape sequence
LINE 1: SELECT '"\u0000"'::jsonb;
^
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: ...
+CONTEXT: JSON data, line 1: "\u0000...
-- use octet_length here so we don't get an odd unicode char in the
-- output
SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK
@@ -168,25 +168,25 @@ ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a';
^
DETAIL: Unicode high surrogate must not follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d...
SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a';
^
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ud83dX" }' -> 'a';
^
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ud83dX...
SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
ERROR: invalid input syntax for type json
LINE 1: SELECT jsonb '{ "a": "\ude04X" }' -> 'a';
^
DETAIL: Unicode low surrogate must follow a high surrogate.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "\ude04...
-- handling of simple unicode escapes
SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
ERROR: conversion between UTF8 and SQL_ASCII is not supported
@@ -209,7 +209,7 @@ ERROR: unsupported Unicode escape sequence
LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
^
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "null \u0000...
SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape;
not_an_escape
------------------------------
@@ -237,7 +237,7 @@ ERROR: unsupported Unicode escape sequence
LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fai...
^
DETAIL: \u0000 cannot be converted to text.
-CONTEXT: JSON data, line 1: { "a":...
+CONTEXT: JSON data, line 1: { "a": "null \u0000...
SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
not_an_escape
--------------------
diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out
index bea33181bee..2f2818e001c 100644
--- a/src/test/regress/expected/numeric.out
+++ b/src/test/regress/expected/numeric.out
@@ -1929,6 +1929,12 @@ SELECT to_char('100'::numeric, 'FM999');
100
(1 row)
+SELECT to_char('12345678901'::float8, 'FM9999999999D9999900000000000000000');
+ to_char
+-----------------
+ ##########.####
+(1 row)
+
-- Check parsing of literal text in a format string
SELECT to_char('100'::numeric, 'foo999');
to_char
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index a2922a0a9ec..77fbb582cab 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -3171,28 +3171,21 @@ select * from rules_log;
(16 rows)
create rule r4 as on delete to rules_src do notify rules_src_deletion;
-\d+ rules_src
- Table "public.rules_src"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- f1 | integer | | | | plain | |
- f2 | integer | | | 0 | plain | |
-Rules:
- r1 AS
- ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT)
- r2 AS
- ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text)
- r3 AS
- ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT)
- r4 AS
- ON DELETE TO rules_src DO
- NOTIFY rules_src_deletion
-
--
-- Ensure an aliased target relation for insert is correctly deparsed.
--
create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2;
create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1;
+--
+-- Check deparse disambiguation of INSERT/UPDATE/DELETE targets.
+--
+create rule r7 as on delete to rules_src do instead
+ with wins as (insert into int4_tbl as trgt values (0) returning *),
+ wupd as (update int4_tbl trgt set f1 = f1+1 returning *),
+ wdel as (delete from int4_tbl trgt where f1 = 0 returning *)
+ insert into rules_log AS trgt select old.* from wins, wupd, wdel
+ returning trgt.f1, trgt.f2;
+-- check display of all rules added above
\d+ rules_src
Table "public.rules_src"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
@@ -3217,6 +3210,26 @@ Rules:
r6 AS
ON UPDATE TO rules_src DO INSTEAD UPDATE rules_log trgt SET tag = 'updated'::text
WHERE trgt.f1 = new.f1
+ r7 AS
+ ON DELETE TO rules_src DO INSTEAD WITH wins AS (
+ INSERT INTO int4_tbl AS trgt_1 (f1)
+ VALUES (0)
+ RETURNING trgt_1.f1
+ ), wupd AS (
+ UPDATE int4_tbl trgt_1 SET f1 = trgt_1.f1 + 1
+ RETURNING trgt_1.f1
+ ), wdel AS (
+ DELETE FROM int4_tbl trgt_1
+ WHERE trgt_1.f1 = 0
+ RETURNING trgt_1.f1
+ )
+ INSERT INTO rules_log AS trgt (f1, f2) SELECT old.f1,
+ old.f2
+ FROM wins,
+ wupd,
+ wdel
+ RETURNING trgt.f1,
+ trgt.f2
--
-- Also check multiassignment deparsing.
@@ -3243,6 +3256,31 @@ Rules:
drop table rule_t1, rule_dest;
--
+-- Test implicit LATERAL references to old/new in rules
+--
+CREATE TABLE rule_t1(a int, b text DEFAULT 'xxx', c int);
+CREATE VIEW rule_v1 AS SELECT * FROM rule_t1;
+CREATE RULE v1_ins AS ON INSERT TO rule_v1
+ DO ALSO INSERT INTO rule_t1
+ SELECT * FROM (SELECT a + 10 FROM rule_t1 WHERE a = NEW.a) tt;
+CREATE RULE v1_upd AS ON UPDATE TO rule_v1
+ DO ALSO UPDATE rule_t1 t
+ SET c = tt.a * 10
+ FROM (SELECT a FROM rule_t1 WHERE a = OLD.a) tt WHERE t.a = tt.a;
+INSERT INTO rule_v1 VALUES (1, 'a'), (2, 'b');
+UPDATE rule_v1 SET b = upper(b);
+SELECT * FROM rule_t1;
+ a | b | c
+----+-----+-----
+ 1 | A | 10
+ 2 | B | 20
+ 11 | XXX | 110
+ 12 | XXX | 120
+(4 rows)
+
+DROP TABLE rule_t1 CASCADE;
+NOTICE: drop cascades to view rule_v1
+--
-- check alter rename rule
--
CREATE TABLE rule_t1 (a INT);
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 1745ca9ca68..d1485df7063 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -2256,6 +2256,12 @@ SELECT translate('12345', '14', 'ax');
a23x5
(1 row)
+SELECT translate('12345', '134', 'a');
+ translate
+-----------
+ a25
+(1 row)
+
SELECT ascii('x');
ascii
-------
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index c479142222e..c9921aaf5ce 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1625,6 +1625,10 @@ spawn_process(const char *cmdline)
if (logfile)
fflush(logfile);
+#ifdef EXEC_BACKEND
+ pg_disable_aslr();
+#endif
+
pid = fork();
if (pid == -1)
{
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index 7ddf9f898a8..9da0e5603ea 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -1671,7 +1671,25 @@ drop view at_view_2;
drop view at_view_1;
drop table at_base_table;
--- check adding a column not iself requiring a rewrite, together with
+-- related case (bug #17811)
+begin;
+create temp table t1 as select * from int8_tbl;
+create temp view v1 as select 1::int8 as q1;
+create temp view v2 as select * from v1;
+create or replace temp view v1 with (security_barrier = true)
+ as select * from t1;
+
+create temp table log (q1 int8, q2 int8);
+create rule v1_upd_rule as on update to v1
+ do also insert into log values (new.*);
+
+update v2 set q1 = q1 + 1 where q1 = 123;
+
+select * from t1;
+select * from log;
+rollback;
+
+-- check adding a column not itself requiring a rewrite, together with
-- a column requiring a default (bug #16038)
-- ensure that rewrites aren't silently optimized away, removing the
diff --git a/src/test/regress/sql/numeric.sql b/src/test/regress/sql/numeric.sql
index 9233c666d4b..56294da5ae9 100644
--- a/src/test/regress/sql/numeric.sql
+++ b/src/test/regress/sql/numeric.sql
@@ -979,6 +979,7 @@ FROM v;
SELECT to_char('100'::numeric, 'FM999.9');
SELECT to_char('100'::numeric, 'FM999.');
SELECT to_char('100'::numeric, 'FM999');
+SELECT to_char('12345678901'::float8, 'FM9999999999D9999900000000000000000');
-- Check parsing of literal text in a format string
SELECT to_char('100'::numeric, 'foo999');
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index 7b0cd28720c..90b37af2097 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -1034,13 +1034,24 @@ insert into rules_src values(22,23), (33,default);
select * from rules_src;
select * from rules_log;
create rule r4 as on delete to rules_src do notify rules_src_deletion;
-\d+ rules_src
--
-- Ensure an aliased target relation for insert is correctly deparsed.
--
create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2;
create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1;
+
+--
+-- Check deparse disambiguation of INSERT/UPDATE/DELETE targets.
+--
+create rule r7 as on delete to rules_src do instead
+ with wins as (insert into int4_tbl as trgt values (0) returning *),
+ wupd as (update int4_tbl trgt set f1 = f1+1 returning *),
+ wdel as (delete from int4_tbl trgt where f1 = 0 returning *)
+ insert into rules_log AS trgt select old.* from wins, wupd, wdel
+ returning trgt.f1, trgt.f2;
+
+-- check display of all rules added above
\d+ rules_src
--
@@ -1054,6 +1065,23 @@ create rule rr as on update to rule_t1 do instead UPDATE rule_dest trgt
\d+ rule_t1
drop table rule_t1, rule_dest;
+--
+-- Test implicit LATERAL references to old/new in rules
+--
+CREATE TABLE rule_t1(a int, b text DEFAULT 'xxx', c int);
+CREATE VIEW rule_v1 AS SELECT * FROM rule_t1;
+CREATE RULE v1_ins AS ON INSERT TO rule_v1
+ DO ALSO INSERT INTO rule_t1
+ SELECT * FROM (SELECT a + 10 FROM rule_t1 WHERE a = NEW.a) tt;
+CREATE RULE v1_upd AS ON UPDATE TO rule_v1
+ DO ALSO UPDATE rule_t1 t
+ SET c = tt.a * 10
+ FROM (SELECT a FROM rule_t1 WHERE a = OLD.a) tt WHERE t.a = tt.a;
+INSERT INTO rule_v1 VALUES (1, 'a'), (2, 'b');
+UPDATE rule_v1 SET b = upper(b);
+SELECT * FROM rule_t1;
+DROP TABLE rule_t1 CASCADE;
+
--
-- check alter rename rule
--
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index c53727f68d3..3c438a304ac 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -770,6 +770,7 @@ SELECT ltrim('zzzytrim', 'xyz');
SELECT translate('', '14', 'ax');
SELECT translate('12345', '14', 'ax');
+SELECT translate('12345', '134', 'a');
SELECT ascii('x');
SELECT ascii('');
diff --git a/src/test/singlenode_regress/expected/alter_table.out b/src/test/singlenode_regress/expected/alter_table.out
index 16d6768bbed..f28310d8bce 100644
--- a/src/test/singlenode_regress/expected/alter_table.out
+++ b/src/test/singlenode_regress/expected/alter_table.out
@@ -2575,18 +2575,18 @@ View definition:
FROM at_view_1 v1;
explain (verbose, costs off) select * from at_view_2;
-                           QUERY PLAN                           
------------------------------------------------------------------
+                         QUERY PLAN                          
+--------------------------------------------------------------
  Seq Scan on public.at_base_table bt
-   Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, NULL))
+   Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, 4))
  Settings: constraint_exclusion = 'partition'
  Optimizer: Postgres query optimizer
 (4 rows)
select * from at_view_2;
- id | stuff  |                   j                    
-----+--------+----------------------------------------
- 23 | skidoo | {"id":23,"stuff":"skidoo","more":null}
+ id | stuff  |                  j                  
+----+--------+-------------------------------------
+ 23 | skidoo | {"id":23,"stuff":"skidoo","more":4}
(1 row)
drop view at_view_2;
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index e7af4162759..2b9a77878a4 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -398,6 +398,7 @@ sub GenerateFiles
HAVE_SYS_EPOLL_H => undef,
HAVE_SYS_EVENT_H => undef,
HAVE_SYS_IPC_H => undef,
+ HAVE_SYS_PERSONALITY_H => undef,
HAVE_SYS_PRCTL_H => undef,
HAVE_SYS_PROCCTL_H => undef,
HAVE_SYS_PSTAT_H => undef,
@@ -432,6 +433,7 @@ sub GenerateFiles
HAVE_WCTYPE_H => 1,
HAVE_WRITEV => undef,
HAVE_X509_GET_SIGNATURE_NID => 1,
+ HAVE_X509_GET_SIGNATURE_INFO => undef,
HAVE_X86_64_POPCNTQ => undef,
HAVE__BOOL => undef,
HAVE__BUILTIN_BSWAP16 => undef,
@@ -547,7 +549,14 @@ sub GenerateFiles
 
 		my ($digit1, $digit2, $digit3) = $self->GetOpenSSLVersion();
 
-		# More symbols are needed with OpenSSL 1.1.0 and above.
+		# Symbols needed with OpenSSL 1.1.1 and above.
+		if (   ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
+			|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '1'))
+		{
+			$define{HAVE_X509_GET_SIGNATURE_INFO} = 1;
+		}
+
+		# Symbols needed with OpenSSL 1.1.0 and above.
 		if (   ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
 			|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '0'))
 		{
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index d4e7d69df2e..885249d08a3 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -163,9 +163,7 @@ sub installcheck_internal
"--bindir=../../../$Config/psql",
"--schedule=${schedule}_schedule",
"--max-concurrent-tests=20",
- "--make-testtablespace-dir",
- "--encoding=SQL_ASCII",
- "--no-locale");
+ "--make-testtablespace-dir");
push(@args, $maxconn) if $maxconn;
push(@args, @EXTRA_REGRESS_OPTS);
system(@args);