From e27ce3861914e89ad43aafb36e5fb96f13cd2bc2 Mon Sep 17 00:00:00 2001
From: a-masterov <72613290+a-masterov@users.noreply.github.com>
Date: Tue, 11 Jun 2024 13:07:51 +0200
Subject: [PATCH] Add testing for extensions (#7818)

## Problem

We need automated tests of the extensions shipped with Neon to detect possible
problems.

## Summary of changes

A new neon-test-extensions image is added, along with workflow changes that run
tests for the shipped extensions. Currently, the regression tests shipped with
the extensions themselves are used. Some extensions (rum, timescaledb, rdkit,
postgis, pgx_ulid, pgtap, pg_tiktoken, pg_jsonschema, pg_graphql, kq_imcx,
wal2json_2_5) are excluded due to known problems or the absence of internal
tests.

---------

Co-authored-by: Alexander Bayandin
Co-authored-by: Heikki Linnakangas
---
 .dockerignore                             |   1 +
 .github/workflows/build_and_test.yml      |  29 ++-
 Dockerfile.compute-node                   |  63 +++++
 docker-compose/compute_wrapper/Dockerfile |   7 +-
 .../var/db/postgres/specs/spec.json       |  12 +-
 docker-compose/docker-compose.yml         |  12 +-
 docker-compose/docker_compose_test.sh     |  83 +++++--
 docker-compose/run-tests.sh               |  15 ++
 patches/pg_anon.patch                     | 223 ++++++++++++++++++
 patches/pg_cron.patch                     |  19 ++
 patches/pg_hintplan.patch                 |  39 +++
 11 files changed, 478 insertions(+), 25 deletions(-)
 create mode 100644 docker-compose/run-tests.sh
 create mode 100644 patches/pg_anon.patch
 create mode 100644 patches/pg_cron.patch
 create mode 100644 patches/pg_hintplan.patch

diff --git a/.dockerignore b/.dockerignore
index 1258532db8..eead727994 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -8,6 +8,7 @@
 !scripts/combine_control_files.py
 !scripts/ninstall.sh
 !vm-cgconfig.conf
+!docker-compose/run-tests.sh
 
 # Directories
 !.cargo/
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index b9caf76060..79a0a77638 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -858,6 +858,26 @@ jobs:
           cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max
           tags: |
             neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }}
+
+      - name: Build neon extensions test image
+        if: matrix.version == 'v16'
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+ build-args: | + GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} + PG_VERSION=${{ matrix.version }} + BUILD_TAG=${{ needs.tag.outputs.build-tag }} + TAG=${{ needs.build-build-tools-image.outputs.image-tag }} + provenance: false + push: true + pull: true + file: Dockerfile.compute-node + target: neon-pg-ext-test + cache-from: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }} + cache-to: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max + tags: | + neondatabase/neon-test-extensions-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}-${{ matrix.arch }} - name: Build compute-tools image # compute-tools are Postgres independent, so build it only once @@ -902,6 +922,13 @@ jobs: neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + - name: Create multi-arch neon-test-extensions image + if: matrix.version == 'v16' + run: | + docker buildx imagetools create -t neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ + neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ + neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + - name: Create multi-arch compute-tools image if: matrix.version == 'v16' run: | @@ -1020,7 +1047,7 @@ jobs: exit 1 fi - - name: Verify docker-compose example + - name: Verify docker-compose example and test extensions timeout-minutes: 20 run: env TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 90b8868b43..a86fdd0bc3 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -928,6 +928,69 @@ RUN rm -r /usr/local/pgsql/include # if they were to be used by other libraries. 
RUN rm /usr/local/pgsql/lib/lib*.a + +######################################################################################### +# +# Layer neon-pg-ext-test +# +######################################################################################### + +FROM neon-pg-ext-build AS neon-pg-ext-test +ARG PG_VERSION +RUN mkdir /ext-src + +#COPY --from=postgis-build /postgis.tar.gz /ext-src/ +#COPY --from=postgis-build /sfcgal/* /usr +COPY --from=plv8-build /plv8.tar.gz /ext-src/ +COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/ +COPY --from=unit-pg-build /postgresql-unit.tar.gz /ext-src/ +COPY --from=vector-pg-build /pgvector.tar.gz /ext-src/ +COPY --from=vector-pg-build /pgvector.patch /ext-src/ +COPY --from=pgjwt-pg-build /pgjwt.tar.gz /ext-src +#COPY --from=pg-jsonschema-pg-build /home/nonroot/pg_jsonschema.tar.gz /ext-src +#COPY --from=pg-graphql-pg-build /home/nonroot/pg_graphql.tar.gz /ext-src +#COPY --from=pg-tiktoken-pg-build /home/nonroot/pg_tiktoken.tar.gz /ext-src +COPY --from=hypopg-pg-build /hypopg.tar.gz /ext-src +COPY --from=pg-hashids-pg-build /pg_hashids.tar.gz /ext-src +#COPY --from=rum-pg-build /rum.tar.gz /ext-src +#COPY --from=pgtap-pg-build /pgtap.tar.gz /ext-src +COPY --from=ip4r-pg-build /ip4r.tar.gz /ext-src +COPY --from=prefix-pg-build /prefix.tar.gz /ext-src +COPY --from=hll-pg-build /hll.tar.gz /ext-src +COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src +#COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src +COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src +COPY patches/pg_hintplan.patch /ext-src +#COPY --from=kq-imcx-pg-build /kq_imcx.tar.gz /ext-src +COPY --from=pg-cron-pg-build /pg_cron.tar.gz /ext-src +COPY patches/pg_cron.patch /ext-src +#COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src +COPY --from=rdkit-pg-build /rdkit.tar.gz /ext-src +COPY --from=pg-uuidv7-pg-build /pg_uuidv7.tar.gz /ext-src +COPY --from=pg-roaringbitmap-pg-build /pg_roaringbitmap.tar.gz /ext-src +COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src +#COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src +#COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src +COPY --from=pg-anon-pg-build /pg_anon.tar.gz /ext-src +COPY patches/pg_anon.patch /ext-src +COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src +COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src +RUN cd /ext-src/ && for f in *.tar.gz; \ + do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \ + rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \ + || exit 1; rm -f $f; done +RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch +# cmake is required for the h3 test +RUN apt-get update && apt-get install -y cmake +RUN patch -p1 < /ext-src/pg_hintplan.patch +COPY --chmod=755 docker-compose/run-tests.sh /run-tests.sh +RUN patch -p1 /dev/null && pwd )" -COMPOSE_FILE=$SCRIPT_DIR/docker-compose.yml - +COMPOSE_FILE='docker-compose.yml' +cd $(dirname $0) +docker compose -f $COMPOSE_FILE COMPUTE_CONTAINER_NAME=docker-compose-compute-1 -SQL="CREATE TABLE t(key int primary key, value text); insert into t values(1,1); select * from t;" -PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -c '$SQL' postgres" +TEST_CONTAINER_NAME=docker-compose-neon-test-extensions-1 +PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres" +: ${http_proxy:=} +: ${https_proxy:=} +export http_proxy https_proxy cleanup() { echo "show container information" @@ -25,34 +31,71 @@ cleanup() { docker compose -f $COMPOSE_FILE down } -echo "clean up 
containers if exists" -cleanup - for pg_version in 14 15 16; do - echo "start containers (pg_version=$pg_version)." - PG_VERSION=$pg_version docker compose -f $COMPOSE_FILE up --build -d + echo "clean up containers if exists" + cleanup + PG_TEST_VERSION=$(($pg_version < 16 ? 16 : $pg_version)) + PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose -f $COMPOSE_FILE up --build -d echo "wait until the compute is ready. timeout after 60s. " cnt=0 - while sleep 1; do + while sleep 3; do # check timeout - cnt=`expr $cnt + 1` + cnt=`expr $cnt + 3` if [ $cnt -gt 60 ]; then echo "timeout before the compute is ready." cleanup exit 1 fi - - # check if the compute is ready - set +o pipefail - result=`docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep "accepting connections" | wc -l` - set -o pipefail - if [ $result -eq 1 ]; then + if docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then echo "OK. The compute is ready to connect." echo "execute simple queries." docker exec $COMPUTE_CONTAINER_NAME /bin/bash -c "psql $PSQL_OPTION" - cleanup break fi done + + if [ $pg_version -ge 16 ] + then + echo Enabling trust connection + docker exec $COMPUTE_CONTAINER_NAME bash -c "sed -i '\$d' /var/db/postgres/compute/pg_hba.conf && echo -e 'host\t all\t all\t all\t trust' >> /var/db/postgres/compute/pg_hba.conf && psql $PSQL_OPTION -c 'select pg_reload_conf()' " + echo Adding postgres role + docker exec $COMPUTE_CONTAINER_NAME psql $PSQL_OPTION -c "CREATE ROLE postgres SUPERUSER LOGIN" + # This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail + # It cannot be moved to Dockerfile now because the database directory is created after the start of the container + echo Adding dummy config + docker exec $COMPUTE_CONTAINER_NAME touch /var/db/postgres/compute/compute_ctl_temp_override.conf + # This block is required for the pg_anon extension test. + # The test assumes that it is running on the same host with the postgres engine. 
+ # In our case it's not true, that's why we are copying files to the compute node + TMPDIR=$(mktemp -d) + docker cp $TEST_CONTAINER_NAME:/ext-src/pg_anon-src/data $TMPDIR/data + echo -e '1\t too \t many \t tabs' > $TMPDIR/data/bad.csv + docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/tmp/tmp_anon_alternate_data + rm -rf $TMPDIR + TMPDIR=$(mktemp -d) + # The following block does the same for the pg_hintplan test + docker cp $TEST_CONTAINER_NAME:/ext-src/pg_hint_plan-src/data $TMPDIR/data + docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/ext-src/pg_hint_plan-src/ + rm -rf $TMPDIR + # We are running tests now + if docker exec -e SKIP=rum-src,timescaledb-src,rdkit-src,postgis-src,pgx_ulid-src,pgtap-src,pg_tiktoken-src,pg_jsonschema-src,pg_graphql-src,kq_imcx-src,wal2json_2_5-src \ + $TEST_CONTAINER_NAME /run-tests.sh | tee testout.txt + then + cleanup + else + FAILED=$(tail -1 testout.txt) + for d in $FAILED + do + mkdir $d + docker cp $TEST_CONTAINER_NAME:/ext-src/$d/regression.diffs $d || true + docker cp $TEST_CONTAINER_NAME:/ext-src/$d/regression.out $d || true + cat $d/regression.out $d/regression.diffs || true + done + rm -rf $FAILED + cleanup + exit 1 + fi + fi + cleanup done diff --git a/docker-compose/run-tests.sh b/docker-compose/run-tests.sh new file mode 100644 index 0000000000..c05fc159aa --- /dev/null +++ b/docker-compose/run-tests.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -x + +cd /ext-src +FAILED= +LIST=$((echo ${SKIP} | sed 's/,/\n/g'; ls -d *-src) | sort | uniq -u) +for d in ${LIST} +do + [ -d ${d} ] || continue + psql -c "select 1" >/dev/null || break + make -C ${d} installcheck || FAILED="${d} ${FAILED}" +done +[ -z "${FAILED}" ] && exit 0 +echo ${FAILED} +exit 1 \ No newline at end of file diff --git a/patches/pg_anon.patch b/patches/pg_anon.patch new file mode 100644 index 0000000000..15dfd3c5a0 --- /dev/null +++ b/patches/pg_anon.patch @@ -0,0 +1,223 @@ +commit 7dd414ee75f2875cffb1d6ba474df1f135a6fc6f +Author: Alexey Masterov +Date: Fri May 31 06:34:26 2024 +0000 + + These alternative expected files were added to consider the neon features + +diff --git a/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out b/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out +new file mode 100644 +index 0000000..2539cfd +--- /dev/null ++++ b/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out +@@ -0,0 +1,101 @@ ++BEGIN; ++CREATE EXTENSION anon CASCADE; ++NOTICE: installing required extension "pgcrypto" ++SELECT anon.init(); ++ init ++------ ++ t ++(1 row) ++ ++CREATE ROLE mallory_the_masked_user; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS 'MASKED'; ++CREATE TABLE t1(i INT); ++ALTER TABLE t1 ADD COLUMN t TEXT; ++SECURITY LABEL FOR anon ON COLUMN t1.t ++IS 'MASKED WITH VALUE NULL'; ++INSERT INTO t1 VALUES (1,'test'); ++-- ++-- We're checking the owner's permissions ++-- ++-- see ++-- https://postgresql-anonymizer.readthedocs.io/en/latest/SECURITY/#permissions ++-- ++SET ROLE mallory_the_masked_user; ++SELECT anon.pseudo_first_name(0) IS NOT NULL; ++ ?column? 
++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.init(); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.anonymize_table('t1'); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++SAVEPOINT fail_start_engine; ++SELECT anon.start_dynamic_masking(); ++ERROR: Only supersusers can start the dynamic masking engine. ++CONTEXT: PL/pgSQL function anon.start_dynamic_masking(boolean) line 18 at RAISE ++ROLLBACK TO fail_start_engine; ++RESET ROLE; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++SET ROLE mallory_the_masked_user; ++SELECT * FROM mask.t1; ++ i | t ++---+--- ++ 1 | ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ SELECT * FROM public.t1; ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++SAVEPOINT fail_stop_engine; ++SELECT anon.stop_dynamic_masking(); ++ERROR: Only supersusers can stop the dynamic masking engine. ++CONTEXT: PL/pgSQL function anon.stop_dynamic_masking() line 18 at RAISE ++ROLLBACK TO fail_stop_engine; ++RESET ROLE; ++SELECT anon.stop_dynamic_masking(); ++NOTICE: The previous priviledges of 'mallory_the_masked_user' are not restored. You need to grant them manually. ++ stop_dynamic_masking ++---------------------- ++ t ++(1 row) ++ ++SET ROLE mallory_the_masked_user; ++SELECT COUNT(*)=1 FROM anon.pg_masking_rules; ++ ?column? ++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++SAVEPOINT fail_seclabel_on_role; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS NULL; ++ERROR: permission denied ++DETAIL: The current user must have the CREATEROLE attribute. ++ROLLBACK TO fail_seclabel_on_role; ++ROLLBACK; +diff --git a/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out b/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out +new file mode 100644 +index 0000000..8b090fe +--- /dev/null ++++ b/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out +@@ -0,0 +1,104 @@ ++BEGIN; ++CREATE EXTENSION anon CASCADE; ++NOTICE: installing required extension "pgcrypto" ++SELECT anon.init(); ++ init ++------ ++ t ++(1 row) ++ ++CREATE ROLE oscar_the_owner; ++ALTER DATABASE :DBNAME OWNER TO oscar_the_owner; ++CREATE ROLE mallory_the_masked_user; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS 'MASKED'; ++-- ++-- We're checking the owner's permissions ++-- ++-- see ++-- https://postgresql-anonymizer.readthedocs.io/en/latest/SECURITY/#permissions ++-- ++SET ROLE oscar_the_owner; ++SELECT anon.pseudo_first_name(0) IS NOT NULL; ++ ?column? 
++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.init(); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++CREATE TABLE t1(i INT); ++ALTER TABLE t1 ADD COLUMN t TEXT; ++SECURITY LABEL FOR anon ON COLUMN t1.t ++IS 'MASKED WITH VALUE NULL'; ++INSERT INTO t1 VALUES (1,'test'); ++SELECT anon.anonymize_table('t1'); ++ anonymize_table ++----------------- ++ t ++(1 row) ++ ++SELECT * FROM t1; ++ i | t ++---+--- ++ 1 | ++(1 row) ++ ++UPDATE t1 SET t='test' WHERE i=1; ++-- SHOULD FAIL ++SAVEPOINT fail_start_engine; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++ROLLBACK TO fail_start_engine; ++RESET ROLE; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++SET ROLE oscar_the_owner; ++SELECT * FROM t1; ++ i | t ++---+------ ++ 1 | test ++(1 row) ++ ++--SELECT * FROM mask.t1; ++-- SHOULD FAIL ++SAVEPOINT fail_stop_engine; ++SELECT anon.stop_dynamic_masking(); ++ERROR: permission denied for schema mask ++CONTEXT: SQL statement "DROP VIEW mask.t1;" ++PL/pgSQL function anon.mask_drop_view(oid) line 3 at EXECUTE ++SQL statement "SELECT anon.mask_drop_view(oid) ++ FROM pg_catalog.pg_class ++ WHERE relnamespace=quote_ident(pg_catalog.current_setting('anon.sourceschema'))::REGNAMESPACE ++ AND relkind IN ('r','p','f')" ++PL/pgSQL function anon.stop_dynamic_masking() line 22 at PERFORM ++ROLLBACK TO fail_stop_engine; ++RESET ROLE; ++SELECT anon.stop_dynamic_masking(); ++NOTICE: The previous priviledges of 'mallory_the_masked_user' are not restored. You need to grant them manually. ++ stop_dynamic_masking ++---------------------- ++ t ++(1 row) ++ ++SET ROLE oscar_the_owner; ++-- SHOULD FAIL ++SAVEPOINT fail_seclabel_on_role; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS NULL; ++ERROR: permission denied ++DETAIL: The current user must have the CREATEROLE attribute. 
++ROLLBACK TO fail_seclabel_on_role; ++ROLLBACK; diff --git a/patches/pg_cron.patch b/patches/pg_cron.patch new file mode 100644 index 0000000000..c2b648c20c --- /dev/null +++ b/patches/pg_cron.patch @@ -0,0 +1,19 @@ +commit b3ea51ee158f113f2f82d0b97c12c54343c9a695 (HEAD -> master) +Author: Alexey Masterov +Date: Fri Jun 7 19:23:42 2024 +0000 + + Disable REGRESS_OPTIONS causing initdb + +diff --git a/ext-src/pg_cron-src/Makefile b/ext-src/pg_cron-src/Makefile +index 053314c..fbd5fb5 100644 +--- a/ext-src/pg_cron-src/Makefile ++++ b/ext-src/pg_cron-src/Makefile +@@ -5,7 +5,7 @@ EXTENSION = pg_cron + DATA_built = $(EXTENSION)--1.0.sql + DATA = $(wildcard $(EXTENSION)--*--*.sql) + +-REGRESS_OPTS =--temp-config=./pg_cron.conf --temp-instance=./tmp_check ++#REGRESS_OPTS =--temp-config=./pg_cron.conf --temp-instance=./tmp_check + REGRESS = pg_cron-test + + # compilation configuration diff --git a/patches/pg_hintplan.patch b/patches/pg_hintplan.patch new file mode 100644 index 0000000000..61a5ecbb90 --- /dev/null +++ b/patches/pg_hintplan.patch @@ -0,0 +1,39 @@ +commit f7925d4d1406c0f0229e3c691c94b69e381899b1 (HEAD -> master) +Author: Alexey Masterov +Date: Thu Jun 6 08:02:42 2024 +0000 + + Patch expected files to consider Neon's log messages + +diff --git a/ext-src/pg_hint_plan-src/expected/ut-A.out b/ext-src/pg_hint_plan-src/expected/ut-A.out +index da723b8..f8d0102 100644 +--- a/ext-src/pg_hint_plan-src/expected/ut-A.out ++++ b/ext-src/pg_hint_plan-src/expected/ut-A.out +@@ -9,13 +9,16 @@ SET search_path TO public; + ---- + -- No.A-1-1-3 + CREATE EXTENSION pg_hint_plan; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + -- No.A-1-2-3 + DROP EXTENSION pg_hint_plan; + -- No.A-1-1-4 + CREATE SCHEMA other_schema; + CREATE EXTENSION pg_hint_plan SCHEMA other_schema; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + ERROR: extension "pg_hint_plan" must be installed in schema "hint_plan" + CREATE EXTENSION pg_hint_plan; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + DROP SCHEMA other_schema; + ---- + ---- No. A-5-1 comment pattern +diff --git a/ext-src/pg_hint_plan-src/expected/ut-fdw.out b/ext-src/pg_hint_plan-src/expected/ut-fdw.out +index d372459..6282afe 100644 +--- a/ext-src/pg_hint_plan-src/expected/ut-fdw.out ++++ b/ext-src/pg_hint_plan-src/expected/ut-fdw.out +@@ -7,6 +7,7 @@ SET pg_hint_plan.debug_print TO on; + SET client_min_messages TO LOG; + SET pg_hint_plan.enable_hint TO on; + CREATE EXTENSION file_fdw; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/file_fdw + CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw; + CREATE USER MAPPING FOR PUBLIC SERVER file_server; + CREATE FOREIGN TABLE ft1 (id int, val int) SERVER file_server OPTIONS (format 'csv', filename :'filename');
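
Usage sketch: the CI workflow drives everything through docker_compose_test.sh,
but the same entry points can be used locally. This is a minimal example under
assumptions -- <build-tag> is a placeholder for whatever compute-node and
neon-test-extensions image tag you have actually built or pulled, and the SKIP
list is shortened here for illustration (the workflow uses the full exclusion
list shown in docker_compose_test.sh above):

    # Run the whole docker-compose check, including the extension regression
    # tests, the same way the "Verify docker-compose example and test
    # extensions" workflow step does:
    env TAG=<build-tag> ./docker-compose/docker_compose_test.sh

    # Or, with the compose stack already running, drive the test container
    # directly; run-tests.sh runs `make installcheck` in every /ext-src/*-src
    # directory that is not listed in SKIP:
    docker exec -e SKIP=rum-src,timescaledb-src \
        docker-compose-neon-test-extensions-1 /run-tests.sh

On failure, docker_compose_test.sh copies regression.diffs and regression.out
out of the test container for each failed extension and prints them, which is
usually the first place to look.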