Mirror of https://github.com/neondatabase/neon.git

Compare commits: zip_ext...mx_offset_
4 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 10774a5132 | |
| | bb18c9958a | |
| | e764fbf9f9 | |
| | d37f7a0dd2 | |
@@ -21,5 +21,4 @@
!workspace_hack/
!neon_local/
!scripts/ninstall.sh
!scripts/combine_control_files.py
!vm-cgconfig.conf
.github/workflows/build_and_test.yml (vendored, 40 changed lines)
@@ -917,9 +917,9 @@ jobs:
run: rm -rf ~/.ecr

upload-postgres-extensions-to-s3:
# if: |
# (github.ref_name == 'main' || github.ref_name == 'release') &&
# github.event_name != 'workflow_dispatch'
if: |
(github.ref_name == 'main' || github.ref_name == 'release') &&
github.event_name != 'workflow_dispatch'
runs-on: ${{ github.ref_name == 'release' && fromJSON('["self-hosted", "prod", "x64"]') || fromJSON('["self-hosted", "gen3", "small"]') }}
needs: [ tag, promote-images ]
strategy:
@@ -929,7 +929,7 @@ jobs:

env:
# While on transition period we extract public extensions from compute-node image and custom extensions from extensions image.
# Later all the extensions will be moved to extensions image. (unless that is too slow)
# Later all the extensions will be moved to extensions image.
EXTENSIONS_IMAGE: ${{ github.ref_name == 'release' && '093970136003' || '369495373322'}}.dkr.ecr.eu-central-1.amazonaws.com/extensions-${{ matrix.version }}:latest
COMPUTE_NODE_IMAGE: ${{ github.ref_name == 'release' && '093970136003' || '369495373322'}}.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:latest
AWS_ACCESS_KEY_ID: ${{ github.ref_name == 'release' && secrets.AWS_ACCESS_KEY_PROD || secrets.AWS_ACCESS_KEY_DEV }}
@@ -956,23 +956,33 @@ jobs:

- name: Extract postgres-extensions from container
run: |
rm -rf ./extensions ./control_files # Just in case
mkdir extensions
mkdir control_files
rm -rf ./extensions-to-upload ./custom-extensions # Just in case

# TODO: Delete Neon extensitons (they always present on compute-node image)
# rm -rf ./extensions/share/extension/neon*
# rm -rf ./extensions/lib/neon*
# In compute image we have a bit different directory layout
mkdir -p extensions-to-upload/share
docker cp ${{ steps.create-container.outputs.CID }}:/usr/local/share/extension ./extensions-to-upload/share/extension
docker cp ${{ steps.create-container.outputs.CID }}:/usr/local/lib ./extensions-to-upload/lib

docker cp ${{ steps.create-container.outputs.EID }}:/ ./
# sh ./scripts/combine_control_files.sh
echo '{ "enabled_extensions": { "123454321": [ "anon" ], "public": [ "embedding" ] }, "control_data": { "embedding": "comment = 'hnsw index' \ndefault_version = '0.1.0' \nmodule_pathname = '$libdir/embedding' \nrelocatable = true \ntrusted = true", "anon": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false \nmodule_pathname = '$libdir/anon' \ntrusted = true \n" } }' > ext_index.json
# Delete Neon extensitons (they always present on compute-node image)
rm -rf ./extensions-to-upload/share/extension/neon*
rm -rf ./extensions-to-upload/lib/neon*

# Delete leftovers from the extension build step
rm -rf ./extensions-to-upload/lib/pgxs
rm -rf ./extensions-to-upload/lib/pkgconfig

docker cp ${{ steps.create-container.outputs.EID }}:/extensions ./custom-extensions
for EXT_NAME in $(ls ./custom-extensions); do
mkdir -p ./extensions-to-upload/${EXT_NAME}/share

mv ./custom-extensions/${EXT_NAME}/share/extension ./extensions-to-upload/${EXT_NAME}/share/extension
mv ./custom-extensions/${EXT_NAME}/lib ./extensions-to-upload/${EXT_NAME}/lib
done

- name: Upload postgres-extensions to S3
run: |
for BUCKET in $(echo ${S3_BUCKETS}); do
aws s3 cp --recursive --only-show-errors ./extensions s3://${BUCKET}/${{ needs.tag.outputs.build-tag }}/${{ matrix.version }}
aws s3 cp --only-show-errors ./ext_index.json s3://${BUCKET}/${{ needs.tag.outputs.build-tag }}/${{ matrix.version }}
aws s3 cp --recursive --only-show-errors ./extensions-to-upload s3://${BUCKET}/${{ needs.tag.outputs.build-tag }}/${{ matrix.version }}
done

- name: Cleanup
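The ext_index.json document written by the single echo in the extract step above is hard to read inline. Pretty-printed, with whitespace added and the content otherwise copied verbatim from that command, it is:

```json
{
  "enabled_extensions": {
    "123454321": ["anon"],
    "public": ["embedding"]
  },
  "control_data": {
    "embedding": "comment = 'hnsw index' \ndefault_version = '0.1.0' \nmodule_pathname = '$libdir/embedding' \nrelocatable = true \ntrusted = true",
    "anon": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false \nmodule_pathname = '$libdir/anon' \ntrusted = true \n"
  }
}
```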
@@ -530,15 +530,9 @@ ENV PATH "/usr/local/pgsql/bin/:$PATH"
RUN wget https://github.com/neondatabase/pg_embedding/archive/2465f831ea1f8d49c1d74f8959adb7fc277d70cd.tar.gz -O pg_embedding.tar.gz && \
echo "047af2b1f664a1e6e37867bd4eeaf5934fa27d6ba3d6c4461efa388ddf7cd1d5 pg_embedding.tar.gz" | sha256sum --check && \
mkdir pg_embedding-src && cd pg_embedding-src && tar xvzf ../pg_embedding.tar.gz --strip-components=1 -C . && \
find /usr/local/pgsql -type f | sort > /before_embedding.txt && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/embedding.control &&\
find /usr/local/pgsql -type f | sort > /after_embedding.txt &&\
/bin/bash -c 'for from in $(comm -13 /before_embedding.txt /after_embedding.txt); do to=/extensions/embedding/${from:17} && mkdir -p $(dirname ${to}) && cp -a ${from} ${to}; done' && \
tar -zcvf /extensions/embedding.tar.gz /extensions/embedding && \
mkdir -p /control_files &&\
cp /usr/local/pgsql/share/extension/embedding.control /control_files/embedding.control
echo 'trusted = true' >> /usr/local/pgsql/share/extension/embedding.control

#########################################################################################
#
@@ -558,15 +552,7 @@ RUN wget https://gitlab.com/dalibo/postgresql_anonymizer/-/archive/1.1.0/postgre
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
echo 'trusted = true' >> /usr/local/pgsql/share/extension/anon.control && \
find /usr/local/pgsql -type f | sort > /after.txt && \
/bin/bash -c 'for from in $(comm -13 /before.txt /after.txt); do to=/extensions/anon/${from:17} && mkdir -p $(dirname ${to}) && cp -a ${from} ${to}; done' && \
mkdir /control_files &&\
tar -zcvf /extensions/anon.tar.gz /extensions/anon && \
mkdir -p /control_files &&\
cp /usr/local/pgsql/share/extension/anon.control /control_files/anon.control

# # TODO: Delete leftovers from the extension build step
# rm -rf ./extensions/lib/pgxs
# rm -rf ./extensions/lib/pkgconfig
/bin/bash -c 'for from in $(comm -13 /before.txt /after.txt); do to=/extensions/anon/${from:17} && mkdir -p $(dirname ${to}) && cp -a ${from} ${to}; done'

#########################################################################################
#
@@ -755,24 +741,19 @@ RUN rm /usr/local/pgsql/lib/lib*.a

#########################################################################################
#
# Extension only
# Extenstion only
#
#########################################################################################
FROM scratch AS postgres-extensions
# After the transition this layer will include all extensitons.
# As for now, it's only for new custom ones
#
# extensions
COPY --from=pg-anon-pg-build /extensions/anon.tar.gz /extensions/anon.tar.gz
COPY --from=pg-anon-pg-build /control_files/anon.control /control_files/anon.control

COPY --from=pg-embedding-pg-build /extensions/embedding.tar.gz /extensions/embedding.tar.gz
COPY --from=pg-embedding-pg-build /control_files/embedding.control /control_files/embedding.control

FROM python:3.11-slim-bookworm AS alekpython
COPY scripts/combine_control_files.py combine_control_files.py
COPY --from=postgres-extensions /control_files /control_files
CMD [ "python3", "./combine_control_files.py"]
# # Default extensions
# COPY --from=postgres-cleanup-layer /usr/local/pgsql/share/extension /usr/local/pgsql/share/extension
# COPY --from=postgres-cleanup-layer /usr/local/pgsql/lib /usr/local/pgsql/lib
# Custom extensions
COPY --from=pg-anon-pg-build /extensions/anon/lib/ /extensions/anon/lib
COPY --from=pg-anon-pg-build /extensions/anon/share/extension /extensions/anon/share/extension

#########################################################################################
#
@@ -57,9 +57,9 @@ pub fn slru_may_delete_clogsegment(segpage: u32, cutoff_page: u32) -> bool {
// Multixact utils

pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize {
((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) as u16
% pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE
* pg_constants::MULTIXACT_MEMBERGROUP_SIZE) as usize
((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32)
% pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE as u32
* pg_constants::MULTIXACT_MEMBERGROUP_SIZE as u32) as usize
}

pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 {
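For context on the mx_offset_to_flags_offset change above: the old expression narrows the member-group quotient to u16 before taking the modulo, so it silently wraps once xid / MULTIXACT_MEMBERS_PER_MEMBERGROUP exceeds 65535, while the new expression keeps the whole computation in u32. Below is a minimal standalone sketch of the difference; the constant values (4 members per group, 409 groups per page, 20-byte groups) are an assumption taken from stock PostgreSQL with 8 kB pages, not neon's actual pg_constants.

```rust
// Hypothetical stand-in constants (stock PostgreSQL, BLCKSZ = 8192);
// the real values live in neon's pg_constants module.
const MEMBERS_PER_MEMBERGROUP: u32 = 4;
const MEMBERGROUPS_PER_PAGE: u32 = 409;
const MEMBERGROUP_SIZE: u32 = 20;

// Old shape: the quotient is narrowed to u16 before the modulo,
// so it wraps once xid / MEMBERS_PER_MEMBERGROUP no longer fits in 16 bits.
fn flags_offset_old(xid: u32) -> usize {
    ((xid / MEMBERS_PER_MEMBERGROUP) as u16
        % MEMBERGROUPS_PER_PAGE as u16
        * MEMBERGROUP_SIZE as u16) as usize
}

// New shape: everything stays in u32, as in the updated function.
fn flags_offset_new(xid: u32) -> usize {
    ((xid / MEMBERS_PER_MEMBERGROUP)
        % MEMBERGROUPS_PER_PAGE
        * MEMBERGROUP_SIZE) as usize
}

fn main() {
    let xid: u32 = 1_000_000; // quotient 250_000 no longer fits in u16
    println!("old = {}, new = {}", flags_offset_old(xid), flags_offset_new(xid));
    assert_ne!(flags_offset_old(xid), flags_offset_new(xid));
}
```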
@@ -149,7 +149,7 @@ hnsw_check_available_memory(Size requested)
struct sysinfo si;
Size total;
if (sysinfo(&si) < 0)
elog(ERROR, "Failed to get amount of RAM: %n");
elog(ERROR, "Failed to get amount of RAM: %m");

total = si.totalram*si.mem_unit;
if ((Size)NBuffers*BLCKSZ + requested >= total)
@@ -580,6 +580,7 @@ l2_distance(PG_FUNCTION_ARGS)
errmsg("different array dimensions %d and %d", a_dim, b_dim)));
}

#pragma clang loop vectorize(enable)
for (int i = 0; i < a_dim; i++)
{
diff = ax[i] - bx[i];

@@ -223,6 +223,7 @@ dist_t fstdistfunc_scalar(const coord_t *x, const coord_t *y, size_t n)
{
dist_t distance = 0.0;

#pragma clang loop vectorize(enable)
for (size_t i = 0; i < n; i++)
{
dist_t diff = x[i] - y[i];
@@ -1,20 +0,0 @@
import json
import os

with open("SOMETHING", "w") as f:
    f.write("SOMETHING")

# enable custom extensions for specific tenants
enabled_extensions = {"123454321": ["anon"], "public": ["embedding"]}

control_data = {}
os.chdir("control_files")
for control_file in os.listdir("."):
    ext_name = control_file.replace(".control", "")
    with open(control_file, "r") as f:
        control_data[ext_name] = f.read()

all_data = {"enabled_extensions": enabled_extensions, "control_data": control_data}

with open("ext_index.json", "w") as f:
    json.dump(all_data, f)
@@ -1,3 +1,7 @@
import random
import threading
from threading import Thread

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
from fixtures.utils import query_scalar
@@ -15,11 +19,17 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
endpoint = env.endpoints.create_start("test_multixact")

log.info("postgres is running on 'test_multixact' branch")

n_records = 100
n_threads = 5
n_iters = 1000
n_restarts = 10

cur = endpoint.connect().cursor()
cur.execute(
"""
CREATE TABLE t1(i int primary key);
INSERT INTO t1 select * from generate_series(1, 100);
f"""
CREATE TABLE t1(pk int primary key, val integer);
INSERT INTO t1 values (generate_series(1, {n_records}), 0);
"""
)

@@ -28,26 +38,32 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
)

# Lock entries using parallel connections in a round-robin fashion.
nclients = 20
connections = []
for i in range(nclients):
# Do not turn on autocommit. We want to hold the key-share locks.
def do_updates():
conn = endpoint.connect(autocommit=False)
connections.append(conn)
for i in range(n_iters):
pk = random.randrange(1, n_records)
conn.cursor().execute(f"update t1 set val=val+1 where pk={pk}")
conn.cursor().execute("select * from t1 for key share")
conn.commit()
conn.close()

# On each iteration, we commit the previous transaction on a connection,
# and issue antoher select. Each SELECT generates a new multixact that
# includes the new XID, and the XIDs of all the other parallel transactions.
# This generates enough traffic on both multixact offsets and members SLRUs
# to cross page boundaries.
for i in range(5000):
conn = connections[i % nclients]
conn.commit()
conn.cursor().execute("select * from t1 for key share")
for iter in range(n_restarts):
threads: List[threading.Thread] = []
for i in range(n_threads):
threads.append(threading.Thread(target=do_updates, args=(), daemon=False))
threads[-1].start()

# We have multixacts now. We can close the connections.
for c in connections:
c.close()
for thread in threads:
thread.join()

# Restart endpoint
endpoint.stop()
endpoint.start()

conn = endpoint.connect()
cur = conn.cursor()
cur.execute("select count(*) from t1")
assert cur.fetchone() == (n_records,)

# force wal flush
cur.execute("checkpoint")
@@ -74,6 +90,3 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):

# Check that we restored pg_controlfile correctly
assert next_multixact_id_new == next_multixact_id

# Check that we can restore the content of the datadir correctly
check_restored_datadir_content(test_output_dir, env, endpoint)