mirror of https://github.com/neondatabase/neon.git (synced 2026-01-03 19:42:55 +00:00)

Rename zenith to neon in python tests (#1871)
@@ -1,14 +1,14 @@
-## Zenith test runner
+## Neon test runner

 This directory contains integration tests.

 Prerequisites:
 - Correctly configured Python, see [`/docs/sourcetree.md`](/docs/sourcetree.md#using-python)
-- Zenith and Postgres binaries
+- Neon and Postgres binaries
   - See the root [README.md](/README.md) for build directions
   - Tests can be run from the git tree; or see the environment variables
     below to run from other directories.
-- The zenith git repo, including the postgres submodule
+- The neon git repo, including the postgres submodule
   (for some tests, e.g. `pg_regress`)
 - Some tests (involving storage nodes coordination) require etcd installed. Follow
   [`the guide`](https://etcd.io/docs/v3.5/install/) to obtain it.
@@ -51,8 +51,8 @@ Useful environment variables:
 should go.
 `TEST_SHARED_FIXTURES`: Try to re-use a single pageserver for all the tests.
 `ZENITH_PAGESERVER_OVERRIDES`: add a `;`-separated set of configs that will be passed as
-`--pageserver-config-override=${value}` parameter values when zenith cli is invoked
-`RUST_LOG`: logging configuration to pass into Zenith CLI
+`--pageserver-config-override=${value}` parameter values when neon_local cli is invoked
+`RUST_LOG`: logging configuration to pass into Neon CLI

 Let stdout, stderr and `INFO` log messages go to the terminal instead of capturing them:
 `./scripts/pytest -s --log-cli-level=INFO ...`
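For illustration, the variables above might be combined like this when invoking the suite (a sketch, not part of the commit; the override values are copied from the tenant-config test further down and are arbitrary here, and `...` stands for a test selection as in the example above):

```bash
# Hypothetical invocation: two ;-separated pageserver config overrides,
# verbose Rust logging, and log capture disabled.
ZENITH_PAGESERVER_OVERRIDES="page_cache_size=444;wait_lsn_timeout='111 s'" \
RUST_LOG=debug \
./scripts/pytest -s --log-cli-level=INFO ...
```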
@@ -65,32 +65,32 @@ Exit after the first test failure:

 ### Writing a test

-Every test needs a Zenith Environment, or ZenithEnv to operate in. A Zenith Environment
+Every test needs a Neon Environment, or NeonEnv to operate in. A Neon Environment
 is like a little cloud-in-a-box, and consists of a Pageserver, 0-N Safekeepers, and
 compute Postgres nodes. The connections between them can be configured to use JWT
 authentication tokens, and some other configuration options can be tweaked too.

-The easiest way to get access to a Zenith Environment is by using the `zenith_simple_env`
+The easiest way to get access to a Neon Environment is by using the `neon_simple_env`
 fixture. The 'simple' env may be shared across multiple tests, so don't shut down the nodes
 or make other destructive changes in that environment. Also don't assume that
 there are no tenants or branches or data in the cluster. For convenience, there is a
 branch called `empty`, though. The convention is to create a test-specific branch of
 that and load any test data there, instead of the 'main' branch.

-For more complicated cases, you can build a custom Zenith Environment, with the `zenith_env`
+For more complicated cases, you can build a custom Neon Environment, with the `neon_env`
 fixture:

 ```python
-def test_foobar(zenith_env_builder: ZenithEnvBuilder):
+def test_foobar(neon_env_builder: NeonEnvBuilder):
     # Prescribe the environment.
     # We want to have 3 safekeeper nodes, and use JWT authentication in the
     # connections to the page server
-    zenith_env_builder.num_safekeepers = 3
-    zenith_env_builder.set_pageserver_auth(True)
+    neon_env_builder.num_safekeepers = 3
+    neon_env_builder.set_pageserver_auth(True)

     # Now create the environment. This initializes the repository, and starts
     # up the page server and the safekeepers
-    env = zenith_env_builder.init_start()
+    env = neon_env_builder.init_start()

     # Run the test
     ...
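To make the `neon_simple_env` convention concrete, a minimal test in this style could look like the following sketch (illustrative only, not part of the commit; the test name, table, and row count are arbitrary, and every call mirrors a pattern used by the tests in this diff):

```python
from fixtures.neon_fixtures import NeonEnv


def test_example(neon_simple_env: NeonEnv):
    env = neon_simple_env

    # Branch off the shared `empty` branch and load data there,
    # never into the shared 'main' branch.
    env.neon_cli.create_branch('test_example', 'empty')
    pg = env.postgres.create_start('test_example')

    cur = pg.connect().cursor()
    cur.execute('CREATE TABLE t (x int)')
    cur.execute('INSERT INTO t SELECT generate_series(1, 100)')
    cur.execute('SELECT count(*) FROM t')
    assert cur.fetchone() == (100, )
```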
@@ -3,18 +3,18 @@ from contextlib import closing
 import psycopg2.extras
 import pytest
 from fixtures.log_helper import log
-from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserverApiException
+from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, NeonPageserverApiException


 #
 # Create ancestor branches off the main branch.
 #
-def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init_start()
+def test_ancestor_branch(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init_start()

     # Override defaults, 1M gc_horizon and 4M checkpoint_distance.
     # Extend compaction_period and gc_period to disable background compaction and gc.
-    tenant, _ = env.zenith_cli.create_tenant(
+    tenant, _ = env.neon_cli.create_tenant(
         conf={
             'gc_period': '10 m',
             'gc_horizon': '1048576',
@@ -48,7 +48,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
     log.info(f'LSN after 100k rows: {lsn_100}')

     # Create branch1.
-    env.zenith_cli.create_branch('branch1', 'main', tenant_id=tenant, ancestor_start_lsn=lsn_100)
+    env.neon_cli.create_branch('branch1', 'main', tenant_id=tenant, ancestor_start_lsn=lsn_100)
     pg_branch1 = env.postgres.create_start('branch1', tenant_id=tenant)
     log.info("postgres is running on 'branch1' branch")

@@ -72,7 +72,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
     log.info(f'LSN after 200k rows: {lsn_200}')

     # Create branch2.
-    env.zenith_cli.create_branch('branch2', 'branch1', tenant_id=tenant, ancestor_start_lsn=lsn_200)
+    env.neon_cli.create_branch('branch2', 'branch1', tenant_id=tenant, ancestor_start_lsn=lsn_200)
     pg_branch2 = env.postgres.create_start('branch2', tenant_id=tenant)
     log.info("postgres is running on 'branch2' branch")
     branch2_cur = pg_branch2.connect().cursor()
@@ -110,15 +110,14 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
     assert branch2_cur.fetchone() == (300000, )


-def test_ancestor_branch_detach(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_ancestor_branch_detach(neon_simple_env: NeonEnv):
+    env = neon_simple_env

-    parent_timeline_id = env.zenith_cli.create_branch("test_ancestor_branch_detach_parent", "empty")
+    parent_timeline_id = env.neon_cli.create_branch("test_ancestor_branch_detach_parent", "empty")

-    env.zenith_cli.create_branch("test_ancestor_branch_detach_branch1",
+    env.neon_cli.create_branch("test_ancestor_branch_detach_branch1",
                                  "test_ancestor_branch_detach_parent")

     ps_http = env.pageserver.http_client()
-    with pytest.raises(ZenithPageserverApiException,
-                       match="Failed to detach inmem tenant timeline"):
+    with pytest.raises(NeonPageserverApiException, match="Failed to detach inmem tenant timeline"):
         ps_http.timeline_detach(env.initial_tenant, parent_timeline_id)
@@ -1,14 +1,14 @@
 from contextlib import closing
 from typing import Iterator
 from uuid import UUID, uuid4
-from fixtures.zenith_fixtures import ZenithEnvBuilder, ZenithPageserverApiException
+from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserverApiException
 from requests.exceptions import HTTPError
 import pytest


-def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):
-    zenith_env_builder.pageserver_auth_enabled = True
-    env = zenith_env_builder.init_start()
+def test_pageserver_auth(neon_env_builder: NeonEnvBuilder):
+    neon_env_builder.pageserver_auth_enabled = True
+    env = neon_env_builder.init_start()

     ps = env.pageserver

@@ -25,7 +25,7 @@ def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):
     ps.safe_psql("set FOO", password=tenant_token)
     ps.safe_psql("set FOO", password=management_token)

-    new_timeline_id = env.zenith_cli.create_branch('test_pageserver_auth',
+    new_timeline_id = env.neon_cli.create_branch('test_pageserver_auth',
                                                    tenant_id=env.initial_tenant)

     # tenant can create branches
@@ -36,7 +36,7 @@ def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):
                                                ancestor_timeline_id=new_timeline_id)

     # fail to create branch using token with different tenant_id
-    with pytest.raises(ZenithPageserverApiException,
+    with pytest.raises(NeonPageserverApiException,
                        match='Forbidden: Tenant id mismatch. Permission denied'):
         invalid_tenant_http_client.timeline_create(tenant_id=env.initial_tenant,
                                                    ancestor_timeline_id=new_timeline_id)
@@ -46,21 +46,21 @@ def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):

     # fail to create tenant using tenant token
     with pytest.raises(
-            ZenithPageserverApiException,
+            NeonPageserverApiException,
             match='Forbidden: Attempt to access management api with tenant scope. Permission denied'
     ):
         tenant_http_client.tenant_create()


 @pytest.mark.parametrize('with_safekeepers', [False, True])
-def test_compute_auth_to_pageserver(zenith_env_builder: ZenithEnvBuilder, with_safekeepers: bool):
-    zenith_env_builder.pageserver_auth_enabled = True
+def test_compute_auth_to_pageserver(neon_env_builder: NeonEnvBuilder, with_safekeepers: bool):
+    neon_env_builder.pageserver_auth_enabled = True
     if with_safekeepers:
-        zenith_env_builder.num_safekeepers = 3
-    env = zenith_env_builder.init_start()
+        neon_env_builder.num_safekeepers = 3
+    env = neon_env_builder.init_start()

     branch = f'test_compute_auth_to_pageserver{with_safekeepers}'
-    env.zenith_cli.create_branch(branch)
+    env.neon_cli.create_branch(branch)
     pg = env.postgres.create_start(branch)

     with closing(pg.connect()) as conn:
@@ -1,15 +1,15 @@
 from contextlib import closing, contextmanager
 import psycopg2.extras
 import pytest
-from fixtures.zenith_fixtures import PgProtocol, ZenithEnvBuilder
+from fixtures.neon_fixtures import PgProtocol, NeonEnvBuilder
 from fixtures.log_helper import log
 import os
 import time
 import asyncpg
-from fixtures.zenith_fixtures import Postgres
+from fixtures.neon_fixtures import Postgres
 import threading

-pytest_plugins = ("fixtures.zenith_fixtures")
+pytest_plugins = ("fixtures.neon_fixtures")


 @contextmanager
@@ -26,7 +26,7 @@ def check_backpressure(pg: Postgres, stop_event: threading.Event, polling_interv
     log.info("checks started")

     with pg_cur(pg) as cur:
-        cur.execute("CREATE EXTENSION neon")  # TODO move it to zenith_fixtures?
+        cur.execute("CREATE EXTENSION neon")  # TODO move it to neon_fixtures?

         cur.execute("select pg_size_bytes(current_setting('max_replication_write_lag'))")
         res = cur.fetchone()
@@ -93,10 +93,10 @@ def check_backpressure(pg: Postgres, stop_event: threading.Event, polling_interv


 @pytest.mark.skip("See https://github.com/neondatabase/neon/issues/1587")
-def test_backpressure_received_lsn_lag(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init_start()
+def test_backpressure_received_lsn_lag(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init_start()
     # Create a branch for us
-    env.zenith_cli.create_branch('test_backpressure')
+    env.neon_cli.create_branch('test_backpressure')

     pg = env.postgres.create_start('test_backpressure',
                                    config_lines=['max_replication_write_lag=30MB'])
@@ -1,7 +1,7 @@
 import pytest
 from contextlib import closing

-from fixtures.zenith_fixtures import ZenithEnv
+from fixtures.neon_fixtures import NeonEnv
 from fixtures.log_helper import log


@@ -9,9 +9,9 @@ from fixtures.log_helper import log
 # Test error handling, if the 'basebackup' command fails in the middle
 # of building the tar archive.
 #
-def test_basebackup_error(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch("test_basebackup_error", "empty")
+def test_basebackup_error(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch("test_basebackup_error", "empty")

     # Introduce failpoint
     env.pageserver.safe_psql(f"failpoints basebackup-before-control-file=return")
@@ -5,26 +5,26 @@ import psycopg2.extras
 import pytest
 from fixtures.log_helper import log
 from fixtures.utils import print_gc_result
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder


 #
 # Create a couple of branches off the main branch, at a historical point in time.
 #
-def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
+def test_branch_behind(neon_env_builder: NeonEnvBuilder):

     # Use safekeeper in this test to avoid a subtle race condition.
     # Without safekeeper, walreceiver reconnection can stuck
     # because of IO deadlock.
     #
-    # See https://github.com/zenithdb/zenith/issues/1068
-    zenith_env_builder.num_safekeepers = 1
+    # See https://github.com/neondatabase/neon/issues/1068
+    neon_env_builder.num_safekeepers = 1
     # Disable pitr, because here we want to test branch creation after GC
-    zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
-    env = zenith_env_builder.init_start()
+    neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
+    env = neon_env_builder.init_start()

     # Branch at the point where only 100 rows were inserted
-    env.zenith_cli.create_branch('test_branch_behind')
+    env.neon_cli.create_branch('test_branch_behind')
     pgmain = env.postgres.create_start('test_branch_behind')
     log.info("postgres is running on 'test_branch_behind' branch")

@@ -61,7 +61,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
     log.info(f'LSN after 200100 rows: {lsn_b}')

     # Branch at the point where only 100 rows were inserted
-    env.zenith_cli.create_branch('test_branch_behind_hundred',
+    env.neon_cli.create_branch('test_branch_behind_hundred',
                                  'test_branch_behind',
                                  ancestor_start_lsn=lsn_a)

@@ -78,7 +78,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
     log.info(f'LSN after 400100 rows: {lsn_c}')

     # Branch at the point where only 200100 rows were inserted
-    env.zenith_cli.create_branch('test_branch_behind_more',
+    env.neon_cli.create_branch('test_branch_behind_more',
                                  'test_branch_behind',
                                  ancestor_start_lsn=lsn_b)

@@ -104,7 +104,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
     # Check bad lsn's for branching

     # branch at segment boundary
-    env.zenith_cli.create_branch('test_branch_segment_boundary',
+    env.neon_cli.create_branch('test_branch_segment_boundary',
                                  'test_branch_behind',
                                  ancestor_start_lsn="0/3000000")
     pg = env.postgres.create_start('test_branch_segment_boundary')
@@ -114,11 +114,11 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):

     # branch at pre-initdb lsn
     with pytest.raises(Exception, match="invalid branch start lsn"):
-        env.zenith_cli.create_branch('test_branch_preinitdb', ancestor_start_lsn="0/42")
+        env.neon_cli.create_branch('test_branch_preinitdb', ancestor_start_lsn="0/42")

     # branch at pre-ancestor lsn
     with pytest.raises(Exception, match="less than timeline ancestor lsn"):
-        env.zenith_cli.create_branch('test_branch_preinitdb',
+        env.neon_cli.create_branch('test_branch_preinitdb',
                                      'test_branch_behind',
                                      ancestor_start_lsn="0/42")

@@ -132,7 +132,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):

     with pytest.raises(Exception, match="invalid branch start lsn"):
         # this gced_lsn is pretty random, so if gc is disabled this woudln't fail
-        env.zenith_cli.create_branch('test_branch_create_fail',
+        env.neon_cli.create_branch('test_branch_create_fail',
                                      'test_branch_behind',
                                      ancestor_start_lsn=gced_lsn)

@@ -1,22 +1,22 @@
 import pytest
 import concurrent.futures
 from contextlib import closing
-from fixtures.zenith_fixtures import ZenithEnvBuilder, ZenithEnv
+from fixtures.neon_fixtures import NeonEnvBuilder, NeonEnv
 from fixtures.log_helper import log
 import os


 # Test restarting page server, while safekeeper and compute node keep
 # running.
-def test_broken_timeline(zenith_env_builder: ZenithEnvBuilder):
+def test_broken_timeline(neon_env_builder: NeonEnvBuilder):
     # One safekeeper is enough for this test.
-    zenith_env_builder.num_safekeepers = 3
-    env = zenith_env_builder.init_start()
+    neon_env_builder.num_safekeepers = 3
+    env = neon_env_builder.init_start()

     tenant_timelines = []

     for n in range(4):
-        tenant_id_uuid, timeline_id_uuid = env.zenith_cli.create_tenant()
+        tenant_id_uuid, timeline_id_uuid = env.neon_cli.create_tenant()
         tenant_id = tenant_id_uuid.hex
         timeline_id = timeline_id_uuid.hex

@@ -81,14 +81,14 @@ def test_broken_timeline(zenith_env_builder: ZenithEnvBuilder):
         log.info(f'compute startup failed as expected: {err}')


-def test_create_multiple_timelines_parallel(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_create_multiple_timelines_parallel(neon_simple_env: NeonEnv):
+    env = neon_simple_env

-    tenant_id, _ = env.zenith_cli.create_tenant()
+    tenant_id, _ = env.neon_cli.create_tenant()

     with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
         futures = [
-            executor.submit(env.zenith_cli.create_timeline,
+            executor.submit(env.neon_cli.create_timeline,
                             f"test-create-multiple-timelines-{i}",
                             tenant_id) for i in range(4)
         ]
@@ -96,20 +96,20 @@ def test_create_multiple_timelines_parallel(zenith_simple_env: ZenithEnv):
         future.result()


-def test_fix_broken_timelines_on_startup(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_fix_broken_timelines_on_startup(neon_simple_env: NeonEnv):
+    env = neon_simple_env

-    tenant_id, _ = env.zenith_cli.create_tenant()
+    tenant_id, _ = env.neon_cli.create_tenant()

     # Introduce failpoint when creating a new timeline
     env.pageserver.safe_psql(f"failpoints before-checkpoint-new-timeline=return")
     with pytest.raises(Exception, match="before-checkpoint-new-timeline"):
-        _ = env.zenith_cli.create_timeline("test_fix_broken_timelines", tenant_id)
+        _ = env.neon_cli.create_timeline("test_fix_broken_timelines", tenant_id)

     # Restart the page server
-    env.zenith_cli.pageserver_stop(immediate=True)
-    env.zenith_cli.pageserver_start()
+    env.neon_cli.pageserver_stop(immediate=True)
+    env.neon_cli.pageserver_start()

     # Check that the "broken" timeline is not loaded
-    timelines = env.zenith_cli.list_timelines(tenant_id)
+    timelines = env.neon_cli.list_timelines(tenant_id)
     assert len(timelines) == 1
@@ -3,16 +3,16 @@ import os

 from contextlib import closing

-from fixtures.zenith_fixtures import ZenithEnv
+from fixtures.neon_fixtures import NeonEnv
 from fixtures.log_helper import log


 #
 # Test compute node start after clog truncation
 #
-def test_clog_truncate(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch('test_clog_truncate', 'empty')
+def test_clog_truncate(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch('test_clog_truncate', 'empty')

     # set aggressive autovacuum to make sure that truncation will happen
     config = [
@@ -62,7 +62,7 @@ def test_clog_truncate(zenith_simple_env: ZenithEnv):

     # create new branch after clog truncation and start a compute node on it
     log.info(f'create branch at lsn_after_truncation {lsn_after_truncation}')
-    env.zenith_cli.create_branch('test_clog_truncate_new',
+    env.neon_cli.create_branch('test_clog_truncate_new',
                                  'test_clog_truncate',
                                  ancestor_start_lsn=lsn_after_truncation)
     pg2 = env.postgres.create_start('test_clog_truncate_new')
@@ -1,15 +1,15 @@
 from contextlib import closing

-from fixtures.zenith_fixtures import ZenithEnv
+from fixtures.neon_fixtures import NeonEnv
 from fixtures.log_helper import log


 #
 # Test starting Postgres with custom options
 #
-def test_config(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch("test_config", "empty")
+def test_config(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch("test_config", "empty")

     # change config
     pg = env.postgres.create_start('test_config', config_lines=['log_min_messages=debug1'])
@@ -2,16 +2,16 @@ import os
 import pathlib

 from contextlib import closing
-from fixtures.zenith_fixtures import ZenithEnv, check_restored_datadir_content
+from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
 from fixtures.log_helper import log


 #
 # Test CREATE DATABASE when there have been relmapper changes
 #
-def test_createdb(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch('test_createdb', 'empty')
+def test_createdb(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch('test_createdb', 'empty')

     pg = env.postgres.create_start('test_createdb')
     log.info("postgres is running on 'test_createdb' branch")
@@ -27,7 +27,7 @@ def test_createdb(zenith_simple_env: ZenithEnv):
         lsn = cur.fetchone()[0]

     # Create a branch
-    env.zenith_cli.create_branch('test_createdb2', 'test_createdb', ancestor_start_lsn=lsn)
+    env.neon_cli.create_branch('test_createdb2', 'test_createdb', ancestor_start_lsn=lsn)
     pg2 = env.postgres.create_start('test_createdb2')

     # Test that you can connect to the new database on both branches
@@ -40,16 +40,16 @@ def test_createdb(zenith_simple_env: ZenithEnv):
                         ('foodb', ))
             res = cur.fetchone()
             # check that dbsize equals sum of all relation sizes, excluding shared ones
-            # This is how we define dbsize in zenith for now
+            # This is how we define dbsize in neon for now
             assert res[0] == res[1]


 #
 # Test DROP DATABASE
 #
-def test_dropdb(zenith_simple_env: ZenithEnv, test_output_dir):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch('test_dropdb', 'empty')
+def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
+    env = neon_simple_env
+    env.neon_cli.create_branch('test_dropdb', 'empty')
     pg = env.postgres.create_start('test_dropdb')
     log.info("postgres is running on 'test_dropdb' branch")

@@ -73,12 +73,12 @@ def test_dropdb(zenith_simple_env: ZenithEnv, test_output_dir):
         lsn_after_drop = cur.fetchone()[0]

     # Create two branches before and after database drop.
-    env.zenith_cli.create_branch('test_before_dropdb',
+    env.neon_cli.create_branch('test_before_dropdb',
                                  'test_dropdb',
                                  ancestor_start_lsn=lsn_before_drop)
     pg_before = env.postgres.create_start('test_before_dropdb')

-    env.zenith_cli.create_branch('test_after_dropdb',
+    env.neon_cli.create_branch('test_after_dropdb',
                                  'test_dropdb',
                                  ancestor_start_lsn=lsn_after_drop)
     pg_after = env.postgres.create_start('test_after_dropdb')
@@ -1,15 +1,15 @@
 from contextlib import closing

-from fixtures.zenith_fixtures import ZenithEnv
+from fixtures.neon_fixtures import NeonEnv
 from fixtures.log_helper import log


 #
 # Test CREATE USER to check shared catalog restore
 #
-def test_createuser(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch('test_createuser', 'empty')
+def test_createuser(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch('test_createuser', 'empty')
     pg = env.postgres.create_start('test_createuser')
     log.info("postgres is running on 'test_createuser' branch")
@@ -24,7 +24,7 @@ def test_createuser(zenith_simple_env: ZenithEnv):
         lsn = cur.fetchone()[0]

     # Create a branch
-    env.zenith_cli.create_branch('test_createuser2', 'test_createuser', ancestor_start_lsn=lsn)
+    env.neon_cli.create_branch('test_createuser2', 'test_createuser', ancestor_start_lsn=lsn)
     pg2 = env.postgres.create_start('test_createuser2')

     # Test that you can connect to new branch as a new user
@@ -1,7 +1,7 @@
 import asyncio
 import random

-from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, Postgres
+from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres
 from fixtures.log_helper import log

 # Test configuration
@@ -27,7 +27,7 @@ async def update_table(pg: Postgres):


 # Perform aggressive GC with 0 horizon
-async def gc(env: ZenithEnv, timeline: str):
+async def gc(env: NeonEnv, timeline: str):
     psconn = await env.pageserver.connect_async()

     while updates_performed < updates_to_perform:
@@ -35,7 +35,7 @@ async def gc(env: ZenithEnv, timeline: str):


 # At the same time, run UPDATEs and GC
-async def update_and_gc(env: ZenithEnv, pg: Postgres, timeline: str):
+async def update_and_gc(env: NeonEnv, pg: Postgres, timeline: str):
     workers = []
     for worker_id in range(num_connections):
         workers.append(asyncio.create_task(update_table(pg)))
@@ -48,14 +48,14 @@ async def update_and_gc(env: ZenithEnv, pg: Postgres, timeline: str):
 #
 # Aggressively force GC, while running queries.
 #
-# (repro for https://github.com/zenithdb/zenith/issues/1047)
+# (repro for https://github.com/neondatabase/neon/issues/1047)
 #
-def test_gc_aggressive(zenith_env_builder: ZenithEnvBuilder):
+def test_gc_aggressive(neon_env_builder: NeonEnvBuilder):

     # Disable pitr, because here we want to test branch creation after GC
-    zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
-    env = zenith_env_builder.init_start()
-    env.zenith_cli.create_branch("test_gc_aggressive", "main")
+    neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
+    env = neon_env_builder.init_start()
+    env.neon_cli.create_branch("test_gc_aggressive", "main")
     pg = env.postgres.create_start('test_gc_aggressive')
     log.info('postgres is running on test_gc_aggressive branch')

@@ -4,7 +4,7 @@ import math
 from uuid import UUID
 import psycopg2.extras
 import psycopg2.errors
-from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, Postgres
+from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres
 from fixtures.log_helper import log
 import time

@@ -12,11 +12,11 @@ import time
 #
 # Test pageserver get_lsn_by_timestamp API
 #
-def test_lsn_mapping(zenith_env_builder: ZenithEnvBuilder):
-    zenith_env_builder.num_safekeepers = 1
-    env = zenith_env_builder.init_start()
+def test_lsn_mapping(neon_env_builder: NeonEnvBuilder):
+    neon_env_builder.num_safekeepers = 1
+    env = neon_env_builder.init_start()

-    new_timeline_id = env.zenith_cli.create_branch('test_lsn_mapping')
+    new_timeline_id = env.neon_cli.create_branch('test_lsn_mapping')
     pgmain = env.postgres.create_start("test_lsn_mapping")
     log.info("postgres is running on 'test_lsn_mapping' branch")

@@ -1,4 +1,4 @@
-from fixtures.zenith_fixtures import ZenithEnv, check_restored_datadir_content
+from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
 from fixtures.log_helper import log


@@ -8,9 +8,9 @@ from fixtures.log_helper import log
 # it only checks next_multixact_id field in restored pg_control,
 # since we don't have functions to check multixact internals.
 #
-def test_multixact(zenith_simple_env: ZenithEnv, test_output_dir):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch('test_multixact', 'empty')
+def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
+    env = neon_simple_env
+    env.neon_cli.create_branch('test_multixact', 'empty')
     pg = env.postgres.create_start('test_multixact')

     log.info("postgres is running on 'test_multixact' branch")
@@ -60,7 +60,7 @@ def test_multixact(zenith_simple_env: ZenithEnv, test_output_dir):
     assert int(next_multixact_id) > int(next_multixact_id_old)

     # Branch at this point
-    env.zenith_cli.create_branch('test_multixact_new', 'test_multixact', ancestor_start_lsn=lsn)
+    env.neon_cli.create_branch('test_multixact_new', 'test_multixact', ancestor_start_lsn=lsn)
     pg_new = env.postgres.create_start('test_multixact_new')

     log.info("postgres is running on 'test_multixact_new' branch")
@@ -1,12 +1,12 @@
 import uuid
 import requests

-from fixtures.zenith_fixtures import DEFAULT_BRANCH_NAME, ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient
+from fixtures.neon_fixtures import DEFAULT_BRANCH_NAME, NeonEnv, NeonEnvBuilder, NeonPageserverHttpClient
 from typing import cast


-def helper_compare_timeline_list(pageserver_http_client: ZenithPageserverHttpClient,
-                                 env: ZenithEnv,
+def helper_compare_timeline_list(pageserver_http_client: NeonPageserverHttpClient,
+                                 env: NeonEnv,
                                  initial_tenant: uuid.UUID):
     """
     Compare timelines list returned by CLI and directly via API.
@@ -17,65 +17,65 @@ def helper_compare_timeline_list(pageserver_http_client: ZenithPageserverHttpCli
         map(lambda t: cast(str, t['timeline_id']),
             pageserver_http_client.timeline_list(initial_tenant)))

-    timelines_cli = env.zenith_cli.list_timelines()
-    assert timelines_cli == env.zenith_cli.list_timelines(initial_tenant)
+    timelines_cli = env.neon_cli.list_timelines()
+    assert timelines_cli == env.neon_cli.list_timelines(initial_tenant)

     cli_timeline_ids = sorted([timeline_id for (_, timeline_id) in timelines_cli])
     assert timelines_api == cli_timeline_ids


-def test_cli_timeline_list(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_cli_timeline_list(neon_simple_env: NeonEnv):
+    env = neon_simple_env
     pageserver_http_client = env.pageserver.http_client()

     # Initial sanity check
     helper_compare_timeline_list(pageserver_http_client, env, env.initial_tenant)

     # Create a branch for us
-    main_timeline_id = env.zenith_cli.create_branch('test_cli_branch_list_main')
+    main_timeline_id = env.neon_cli.create_branch('test_cli_branch_list_main')
     helper_compare_timeline_list(pageserver_http_client, env, env.initial_tenant)

     # Create a nested branch
-    nested_timeline_id = env.zenith_cli.create_branch('test_cli_branch_list_nested',
+    nested_timeline_id = env.neon_cli.create_branch('test_cli_branch_list_nested',
                                                       'test_cli_branch_list_main')
     helper_compare_timeline_list(pageserver_http_client, env, env.initial_tenant)

     # Check that all new branches are visible via CLI
-    timelines_cli = [timeline_id for (_, timeline_id) in env.zenith_cli.list_timelines()]
+    timelines_cli = [timeline_id for (_, timeline_id) in env.neon_cli.list_timelines()]

     assert main_timeline_id.hex in timelines_cli
     assert nested_timeline_id.hex in timelines_cli


-def helper_compare_tenant_list(pageserver_http_client: ZenithPageserverHttpClient, env: ZenithEnv):
+def helper_compare_tenant_list(pageserver_http_client: NeonPageserverHttpClient, env: NeonEnv):
     tenants = pageserver_http_client.tenant_list()
     tenants_api = sorted(map(lambda t: cast(str, t['id']), tenants))

-    res = env.zenith_cli.list_tenants()
+    res = env.neon_cli.list_tenants()
     tenants_cli = sorted(map(lambda t: t.split()[0], res.stdout.splitlines()))

     assert tenants_api == tenants_cli


-def test_cli_tenant_list(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_cli_tenant_list(neon_simple_env: NeonEnv):
+    env = neon_simple_env
     pageserver_http_client = env.pageserver.http_client()
     # Initial sanity check
     helper_compare_tenant_list(pageserver_http_client, env)

     # Create new tenant
-    tenant1, _ = env.zenith_cli.create_tenant()
+    tenant1, _ = env.neon_cli.create_tenant()

     # check tenant1 appeared
     helper_compare_tenant_list(pageserver_http_client, env)

     # Create new tenant
-    tenant2, _ = env.zenith_cli.create_tenant()
+    tenant2, _ = env.neon_cli.create_tenant()

     # check tenant2 appeared
     helper_compare_tenant_list(pageserver_http_client, env)

-    res = env.zenith_cli.list_tenants()
+    res = env.neon_cli.list_tenants()
     tenants = sorted(map(lambda t: t.split()[0], res.stdout.splitlines()))

     assert env.initial_tenant.hex in tenants
@@ -83,18 +83,18 @@ def test_cli_tenant_list(zenith_simple_env: ZenithEnv):
     assert tenant2.hex in tenants


-def test_cli_tenant_create(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    tenant_id, _ = env.zenith_cli.create_tenant()
-    timelines = env.zenith_cli.list_timelines(tenant_id)
+def test_cli_tenant_create(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    tenant_id, _ = env.neon_cli.create_tenant()
+    timelines = env.neon_cli.list_timelines(tenant_id)

     # an initial timeline should be created upon tenant creation
     assert len(timelines) == 1
     assert timelines[0][0] == DEFAULT_BRANCH_NAME


-def test_cli_ipv4_listeners(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init_start()
+def test_cli_ipv4_listeners(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init_start()

     # Connect to sk port on v4 loopback
     res = requests.get(f'http://127.0.0.1:{env.safekeepers[0].port.http}/v1/status')
@@ -108,17 +108,17 @@ def test_cli_ipv4_listeners(zenith_env_builder: ZenithEnvBuilder):
     # assert res.ok


-def test_cli_start_stop(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init_start()
+def test_cli_start_stop(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init_start()

     # Stop default ps/sk
-    env.zenith_cli.pageserver_stop()
-    env.zenith_cli.safekeeper_stop()
+    env.neon_cli.pageserver_stop()
+    env.neon_cli.safekeeper_stop()

     # Default start
-    res = env.zenith_cli.raw_cli(["start"])
+    res = env.neon_cli.raw_cli(["start"])
     res.check_returncode()

     # Default stop
-    res = env.zenith_cli.raw_cli(["stop"])
+    res = env.neon_cli.raw_cli(["stop"])
     res.check_returncode()
@@ -1,12 +1,12 @@
 import time

-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder


 # Test restarting page server, while safekeeper and compute node keep
 # running.
-def test_next_xid(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init_start()
+def test_next_xid(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init_start()

     pg = env.postgres.create_start('main')

@@ -1,9 +1,9 @@
 from fixtures.log_helper import log
-from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient
+from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, NeonPageserverHttpClient


-def check_tenant(env: ZenithEnv, pageserver_http: ZenithPageserverHttpClient):
-    tenant_id, timeline_id = env.zenith_cli.create_tenant()
+def check_tenant(env: NeonEnv, pageserver_http: NeonPageserverHttpClient):
+    tenant_id, timeline_id = env.neon_cli.create_tenant()
     pg = env.postgres.create_start('main', tenant_id=tenant_id)
     # we rely upon autocommit after each statement
     res_1 = pg.safe_psql_many(queries=[
@@ -26,7 +26,7 @@ def check_tenant(env: ZenithEnv, pageserver_http: ZenithPageserverHttpClient):
     pageserver_http.timeline_detach(tenant_id, timeline_id)


-def test_normal_work(zenith_env_builder: ZenithEnvBuilder):
+def test_normal_work(neon_env_builder: NeonEnvBuilder):
     """
     Basic test:
     * create new tenant with a timeline
@@ -40,7 +40,7 @@ def test_normal_work(zenith_env_builder: ZenithEnvBuilder):
     Repeat check for several tenants/timelines.
     """

-    env = zenith_env_builder.init_start()
+    env = neon_env_builder.init_start()
     pageserver_http = env.pageserver.http_client()

     for _ in range(3):
@@ -1,4 +1,4 @@
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.log_helper import log
 from fixtures.utils import print_gc_result
 import psycopg2.extras
@@ -14,11 +14,11 @@ import psycopg2.extras
 # just a hint that the page hasn't been modified since that LSN, and the page
 # server should return the latest page version regardless of the LSN.
 #
-def test_old_request_lsn(zenith_env_builder: ZenithEnvBuilder):
+def test_old_request_lsn(neon_env_builder: NeonEnvBuilder):
     # Disable pitr, because here we want to test branch creation after GC
-    zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
-    env = zenith_env_builder.init_start()
-    env.zenith_cli.create_branch("test_old_request_lsn", "main")
+    neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
+    env = neon_env_builder.init_start()
+    env.neon_cli.create_branch("test_old_request_lsn", "main")
     pg = env.postgres.create_start('test_old_request_lsn')
     log.info('postgres is running on test_old_request_lsn branch')

@@ -2,26 +2,26 @@ from typing import Optional
 from uuid import uuid4, UUID
 import pytest
 from fixtures.utils import lsn_from_hex
-from fixtures.zenith_fixtures import (
+from fixtures.neon_fixtures import (
     DEFAULT_BRANCH_NAME,
-    ZenithEnv,
-    ZenithEnvBuilder,
-    ZenithPageserverHttpClient,
-    ZenithPageserverApiException,
+    NeonEnv,
+    NeonEnvBuilder,
+    NeonPageserverHttpClient,
+    NeonPageserverApiException,
     wait_until,
 )


 # test that we cannot override node id
-def test_pageserver_init_node_id(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init()
+def test_pageserver_init_node_id(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init()
     with pytest.raises(
             Exception,
             match="node id can only be set during pageserver init and cannot be overridden"):
         env.pageserver.start(overrides=['--pageserver-config-override=id=10'])


-def check_client(client: ZenithPageserverHttpClient, initial_tenant: UUID):
+def check_client(client: NeonPageserverHttpClient, initial_tenant: UUID):
     client.check_status()

     # check initial tenant is there
@@ -57,11 +57,11 @@ def check_client(client: ZenithPageserverHttpClient, initial_tenant: UUID):
     assert local_timeline_details['timeline_state'] == 'Loaded'


-def test_pageserver_http_get_wal_receiver_not_found(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_pageserver_http_get_wal_receiver_not_found(neon_simple_env: NeonEnv):
+    env = neon_simple_env
     client = env.pageserver.http_client()

-    tenant_id, timeline_id = env.zenith_cli.create_tenant()
+    tenant_id, timeline_id = env.neon_cli.create_tenant()

     empty_response = client.wal_receiver_get(tenant_id, timeline_id)

@@ -70,11 +70,11 @@ def test_pageserver_http_get_wal_receiver_not_found(zenith_simple_env: ZenithEnv
     assert empty_response.get('last_received_msg_ts') is None, 'Should not be able to connect to WAL streaming without PG compute node running'


-def test_pageserver_http_get_wal_receiver_success(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_pageserver_http_get_wal_receiver_success(neon_simple_env: NeonEnv):
+    env = neon_simple_env
     client = env.pageserver.http_client()

-    tenant_id, timeline_id = env.zenith_cli.create_tenant()
+    tenant_id, timeline_id = env.neon_cli.create_tenant()
     pg = env.postgres.create_start(DEFAULT_BRANCH_NAME, tenant_id=tenant_id)

     def expect_updated_msg_lsn(prev_msg_lsn: Optional[int]) -> int:
@@ -107,15 +107,15 @@ def test_pageserver_http_get_wal_receiver_success(zenith_simple_env: ZenithEnv):
     wait_until(number_of_iterations=5, interval=1, func=lambda: expect_updated_msg_lsn(lsn))


-def test_pageserver_http_api_client(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
+def test_pageserver_http_api_client(neon_simple_env: NeonEnv):
+    env = neon_simple_env
     client = env.pageserver.http_client()
     check_client(client, env.initial_tenant)


-def test_pageserver_http_api_client_auth_enabled(zenith_env_builder: ZenithEnvBuilder):
-    zenith_env_builder.pageserver_auth_enabled = True
-    env = zenith_env_builder.init_start()
+def test_pageserver_http_api_client_auth_enabled(neon_env_builder: NeonEnvBuilder):
+    neon_env_builder.pageserver_auth_enabled = True
+    env = neon_env_builder.init_start()

     management_token = env.auth_keys.generate_management_token()

@@ -1,15 +1,15 @@
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder


 # Test safekeeper sync and pageserver catch up
 # while initial compute node is down and pageserver is lagging behind safekeepers.
 # Ensure that basebackup after restart of all components is correct
 # and new compute node contains all data.
-def test_pageserver_catchup_while_compute_down(zenith_env_builder: ZenithEnvBuilder):
-    zenith_env_builder.num_safekeepers = 3
-    env = zenith_env_builder.init_start()
+def test_pageserver_catchup_while_compute_down(neon_env_builder: NeonEnvBuilder):
+    neon_env_builder.num_safekeepers = 3
+    env = neon_env_builder.init_start()

-    env.zenith_cli.create_branch('test_pageserver_catchup_while_compute_down')
+    env.neon_cli.create_branch('test_pageserver_catchup_while_compute_down')
     # Make shared_buffers large to ensure we won't query pageserver while it is down.
     pg = env.postgres.create_start('test_pageserver_catchup_while_compute_down',
                                    config_lines=['shared_buffers=512MB'])
@@ -1,13 +1,13 @@
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.log_helper import log


 # Test restarting page server, while safekeeper and compute node keep
 # running.
-def test_pageserver_restart(zenith_env_builder: ZenithEnvBuilder):
-    env = zenith_env_builder.init_start()
+def test_pageserver_restart(neon_env_builder: NeonEnvBuilder):
+    env = neon_env_builder.init_start()

-    env.zenith_cli.create_branch('test_pageserver_restart')
+    env.neon_cli.create_branch('test_pageserver_restart')
     pg = env.postgres.create_start('test_pageserver_restart')

     pg_conn = pg.connect()
@@ -1,6 +1,6 @@
 from io import BytesIO
 import asyncio
-from fixtures.zenith_fixtures import ZenithEnv, Postgres
+from fixtures.neon_fixtures import NeonEnv, Postgres
 from fixtures.log_helper import log


@@ -38,9 +38,9 @@ async def parallel_load_same_table(pg: Postgres, n_parallel: int):


 # Load data into one table with COPY TO from 5 parallel connections
-def test_parallel_copy(zenith_simple_env: ZenithEnv, n_parallel=5):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch("test_parallel_copy", "empty")
+def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
+    env = neon_simple_env
+    env.neon_cli.create_branch("test_parallel_copy", "empty")
     pg = env.postgres.create_start('test_parallel_copy')
     log.info("postgres is running on 'test_parallel_copy' branch")

@@ -5,20 +5,20 @@ import psycopg2.extras
 import pytest
 from fixtures.log_helper import log
 from fixtures.utils import print_gc_result
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder


 #
 # Check pitr_interval GC behavior.
 # Insert some data, run GC and create a branch in the past.
 #
-def test_pitr_gc(zenith_env_builder: ZenithEnvBuilder):
+def test_pitr_gc(neon_env_builder: NeonEnvBuilder):

-    zenith_env_builder.num_safekeepers = 1
+    neon_env_builder.num_safekeepers = 1
     # Set pitr interval such that we need to keep the data
-    zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '1 day', gc_horizon = 0}"
+    neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '1 day', gc_horizon = 0}"

-    env = zenith_env_builder.init_start()
+    env = neon_env_builder.init_start()
     pgmain = env.postgres.create_start('main')
     log.info("postgres is running on 'main' branch")

@@ -62,7 +62,7 @@ def test_pitr_gc(zenith_env_builder: ZenithEnvBuilder):

     # Branch at the point where only 100 rows were inserted
     # It must have been preserved by PITR setting
-    env.zenith_cli.create_branch('test_pitr_gc_hundred', 'main', ancestor_start_lsn=lsn_a)
+    env.neon_cli.create_branch('test_pitr_gc_hundred', 'main', ancestor_start_lsn=lsn_a)

     pg_hundred = env.postgres.create_start('test_pitr_gc_hundred')

@@ -1,12 +1,12 @@
 from contextlib import closing

-from fixtures.zenith_fixtures import ZenithEnv
+from fixtures.neon_fixtures import NeonEnv
 from fixtures.log_helper import log

 from psycopg2.errors import UndefinedTable
 from psycopg2.errors import IoError

-pytest_plugins = ("fixtures.zenith_fixtures")
+pytest_plugins = ("fixtures.neon_fixtures")

 extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]

@@ -14,9 +14,9 @@ extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]
 #
 # Validation of reading different page versions
 #
-def test_read_validation(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch("test_read_validation", "empty")
+def test_read_validation(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch("test_read_validation", "empty")

     pg = env.postgres.create_start("test_read_validation")
     log.info("postgres is running on 'test_read_validation' branch")
@@ -125,9 +125,9 @@ def test_read_validation(zenith_simple_env: ZenithEnv):
         log.info("Caught an expected failure: {}".format(e))


-def test_read_validation_neg(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch("test_read_validation_neg", "empty")
+def test_read_validation_neg(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch("test_read_validation_neg", "empty")

     pg = env.postgres.create_start("test_read_validation_neg")
     log.info("postgres is running on 'test_read_validation_neg' branch")
@@ -1,6 +1,6 @@
 import pytest
 from fixtures.log_helper import log
-from fixtures.zenith_fixtures import ZenithEnv
+from fixtures.neon_fixtures import NeonEnv


 #
@@ -9,9 +9,9 @@ from fixtures.zenith_fixtures import ZenithEnv
 # This is very similar to the 'test_branch_behind' test, but instead of
 # creating branches, creates read-only nodes.
 #
-def test_readonly_node(zenith_simple_env: ZenithEnv):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch('test_readonly_node', 'empty')
+def test_readonly_node(neon_simple_env: NeonEnv):
+    env = neon_simple_env
+    env.neon_cli.create_branch('test_readonly_node', 'empty')
     pgmain = env.postgres.create_start('test_readonly_node')
     log.info("postgres is running on 'test_readonly_node' branch")

@@ -4,28 +4,28 @@ import psycopg2.extras
 import json
 from ast import Assert
 from contextlib import closing
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.log_helper import log


 #
 # Test pageserver recovery after crash
 #
-def test_pageserver_recovery(zenith_env_builder: ZenithEnvBuilder):
-    zenith_env_builder.num_safekeepers = 1
+def test_pageserver_recovery(neon_env_builder: NeonEnvBuilder):
+    neon_env_builder.num_safekeepers = 1
     # Override default checkpointer settings to run it more often
-    zenith_env_builder.pageserver_config_override = "tenant_config={checkpoint_distance = 1048576}"
+    neon_env_builder.pageserver_config_override = "tenant_config={checkpoint_distance = 1048576}"

-    env = zenith_env_builder.init()
+    env = neon_env_builder.init()

     # Check if failpoints enables. Otherwise the test doesn't make sense
-    f = env.zenith_cli.pageserver_enabled_features()
+    f = env.neon_cli.pageserver_enabled_features()

     assert "failpoints" in f["features"], "Build pageserver with --features=failpoints option to run this test"
-    zenith_env_builder.start()
+    neon_env_builder.start()

     # Create a branch for us
-    env.zenith_cli.create_branch("test_pageserver_recovery", "main")
+    env.neon_cli.create_branch("test_pageserver_recovery", "main")

     pg = env.postgres.create_start('test_pageserver_recovery')
     log.info("postgres is running on 'test_pageserver_recovery' branch")
@@ -6,7 +6,7 @@ from contextlib import closing
 from pathlib import Path
 import time
 from uuid import UUID
-from fixtures.zenith_fixtures import ZenithEnvBuilder, assert_local, wait_until, wait_for_last_record_lsn, wait_for_upload
+from fixtures.neon_fixtures import NeonEnvBuilder, assert_local, wait_until, wait_for_last_record_lsn, wait_for_upload
 from fixtures.log_helper import log
 from fixtures.utils import lsn_from_hex, lsn_to_hex
 import pytest
@@ -30,12 +30,12 @@ import pytest
 #
 # The tests are done for all types of remote storage pageserver supports.
 @pytest.mark.parametrize('storage_type', ['local_fs', 'mock_s3'])
-def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder, storage_type: str):
-    # zenith_env_builder.rust_log_override = 'debug'
+def test_remote_storage_backup_and_restore(neon_env_builder: NeonEnvBuilder, storage_type: str):
+    # neon_env_builder.rust_log_override = 'debug'
     if storage_type == 'local_fs':
-        zenith_env_builder.enable_local_fs_remote_storage()
+        neon_env_builder.enable_local_fs_remote_storage()
     elif storage_type == 'mock_s3':
-        zenith_env_builder.enable_s3_mock_remote_storage('test_remote_storage_backup_and_restore')
+        neon_env_builder.enable_s3_mock_remote_storage('test_remote_storage_backup_and_restore')
     else:
         raise RuntimeError(f'Unknown storage type: {storage_type}')

@@ -43,7 +43,7 @@ def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder,
     data_secret = 'very secret secret'

     ##### First start, insert secret data and upload it to the remote storage
-    env = zenith_env_builder.init_start()
+    env = neon_env_builder.init_start()
     pg = env.postgres.create_start('main')

     client = env.pageserver.http_client()
@@ -1,7 +1,7 @@
 import pytest

 from contextlib import closing
-from fixtures.zenith_fixtures import ZenithEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.log_helper import log


@@ -9,13 +9,13 @@ from fixtures.log_helper import log
 # Test restarting and recreating a postgres instance
 #
 @pytest.mark.parametrize('with_safekeepers', [False, True])
-def test_restart_compute(zenith_env_builder: ZenithEnvBuilder, with_safekeepers: bool):
-    zenith_env_builder.pageserver_auth_enabled = True
+def test_restart_compute(neon_env_builder: NeonEnvBuilder, with_safekeepers: bool):
+    neon_env_builder.pageserver_auth_enabled = True
     if with_safekeepers:
-        zenith_env_builder.num_safekeepers = 3
-    env = zenith_env_builder.init_start()
+        neon_env_builder.num_safekeepers = 3
+    env = neon_env_builder.init_start()

-    env.zenith_cli.create_branch('test_restart_compute')
+    env.neon_cli.create_branch('test_restart_compute')
     pg = env.postgres.create_start('test_restart_compute')
     log.info("postgres is running on 'test_restart_compute' branch")

@@ -1,4 +1,4 @@
-from fixtures.zenith_fixtures import ZenithEnv, check_restored_datadir_content
+from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
 from fixtures.log_helper import log


@@ -6,11 +6,11 @@ from fixtures.log_helper import log
 #
 # The pg_subxact SLRU is not preserved on restarts, and doesn't need to be
 # maintained in the pageserver, so subtransactions are not very exciting for
-# Zenith. They are included in the commit record though and updated in the
+# Neon. They are included in the commit record though and updated in the
 # CLOG.
-def test_subxacts(zenith_simple_env: ZenithEnv, test_output_dir):
-    env = zenith_simple_env
-    env.zenith_cli.create_branch("test_subxacts", "empty")
+def test_subxacts(neon_simple_env: NeonEnv, test_output_dir):
+    env = neon_simple_env
+    env.neon_cli.create_branch("test_subxacts", "empty")
     pg = env.postgres.create_start('test_subxacts')

     log.info("postgres is running on 'test_subxacts' branch")
@@ -3,25 +3,25 @@ from contextlib import closing
import pytest
import psycopg2.extras

from fixtures.zenith_fixtures import ZenithEnvBuilder
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.log_helper import log


def test_tenant_config(zenith_env_builder: ZenithEnvBuilder):
def test_tenant_config(neon_env_builder: NeonEnvBuilder):
# set some non-default global config
zenith_env_builder.pageserver_config_override = '''
neon_env_builder.pageserver_config_override = '''
page_cache_size=444;
wait_lsn_timeout='111 s';
tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}'''

env = zenith_env_builder.init_start()
env = neon_env_builder.init_start()
"""Test per tenant configuration"""
tenant, _ = env.zenith_cli.create_tenant(conf={
tenant, _ = env.neon_cli.create_tenant(conf={
'checkpoint_distance': '20000',
'gc_period': '30sec',
})

env.zenith_cli.create_timeline(f'test_tenant_conf', tenant_id=tenant)
env.neon_cli.create_timeline(f'test_tenant_conf', tenant_id=tenant)
pg = env.postgres.create_start(
"test_tenant_conf",
"main",
@@ -66,7 +66,7 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}'''
}.items())

# update the config and ensure that it has changed
env.zenith_cli.config_tenant(tenant_id=tenant,
env.neon_cli.config_tenant(tenant_id=tenant,
conf={
'checkpoint_distance': '15000',
'gc_period': '80sec',

@@ -10,7 +10,7 @@ from typing import Optional
import signal
import pytest

from fixtures.zenith_fixtures import PgProtocol, PortDistributor, Postgres, ZenithEnvBuilder, Etcd, ZenithPageserverHttpClient, assert_local, wait_until, wait_for_last_record_lsn, wait_for_upload, zenith_binpath, pg_distrib_dir
from fixtures.neon_fixtures import PgProtocol, PortDistributor, Postgres, NeonEnvBuilder, Etcd, NeonPageserverHttpClient, assert_local, wait_until, wait_for_last_record_lsn, wait_for_upload, neon_binpath, pg_distrib_dir
from fixtures.utils import lsn_from_hex


@@ -26,7 +26,7 @@ def new_pageserver_helper(new_pageserver_dir: pathlib.Path,
http_port: int,
broker: Optional[Etcd]):
"""
cannot use ZenithPageserver yet because it depends on zenith cli
cannot use NeonPageserver yet because it depends on neon cli
which currently lacks support for multiple pageservers
"""
cmd = [
@@ -106,21 +106,21 @@ def load(pg: Postgres, stop_event: threading.Event, load_ok_event: threading.Eve
"needs to replace callmemaybe call with better idea how to migrate timelines between pageservers"
)
@pytest.mark.parametrize('with_load', ['with_load', 'without_load'])
def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
def test_tenant_relocation(neon_env_builder: NeonEnvBuilder,
port_distributor: PortDistributor,
with_load: str):
zenith_env_builder.enable_local_fs_remote_storage()
neon_env_builder.enable_local_fs_remote_storage()

env = zenith_env_builder.init_start()
env = neon_env_builder.init_start()

# create folder for remote storage mock
remote_storage_mock_path = env.repo_dir / 'local_fs_remote_storage'

tenant, _ = env.zenith_cli.create_tenant(UUID("74ee8b079a0e437eb0afea7d26a07209"))
tenant, _ = env.neon_cli.create_tenant(UUID("74ee8b079a0e437eb0afea7d26a07209"))
log.info("tenant to relocate %s", tenant)

# attach does not download ancestor branches (should it?), just use root branch for now
env.zenith_cli.create_root_branch('test_tenant_relocation', tenant_id=tenant)
env.neon_cli.create_root_branch('test_tenant_relocation', tenant_id=tenant)

tenant_pg = env.postgres.create_start(branch_name='test_tenant_relocation',
node_name='test_tenant_relocation',
@@ -177,16 +177,16 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
new_pageserver_pg_port = port_distributor.get_port()
new_pageserver_http_port = port_distributor.get_port()
log.info("new pageserver ports pg %s http %s", new_pageserver_pg_port, new_pageserver_http_port)
pageserver_bin = pathlib.Path(zenith_binpath) / 'pageserver'
pageserver_bin = pathlib.Path(neon_binpath) / 'pageserver'

new_pageserver_http = ZenithPageserverHttpClient(port=new_pageserver_http_port, auth_token=None)
new_pageserver_http = NeonPageserverHttpClient(port=new_pageserver_http_port, auth_token=None)

with new_pageserver_helper(new_pageserver_dir,
pageserver_bin,
remote_storage_mock_path,
new_pageserver_pg_port,
new_pageserver_http_port,
zenith_env_builder.broker):
neon_env_builder.broker):

# call to attach timeline to new pageserver
new_pageserver_http.timeline_attach(tenant, timeline)
@@ -215,7 +215,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,

tenant_pg.stop()

# rewrite zenith cli config to use new pageserver for basebackup to start new compute
# rewrite neon cli config to use new pageserver for basebackup to start new compute
cli_config_lines = (env.repo_dir / 'config').read_text().splitlines()
cli_config_lines[-2] = f"listen_http_addr = 'localhost:{new_pageserver_http_port}'"
cli_config_lines[-1] = f"listen_pg_addr = 'localhost:{new_pageserver_pg_port}'"
@@ -258,7 +258,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,

assert not os.path.exists(timeline_to_detach_local_path), f'After detach, local timeline dir {timeline_to_detach_local_path} should be removed'

# bring old pageserver back for clean shutdown via zenith cli
# bring old pageserver back for clean shutdown via neon cli
# new pageserver will be shut down by the context manager
cli_config_lines = (env.repo_dir / 'config').read_text().splitlines()
cli_config_lines[-2] = f"listen_http_addr = 'localhost:{env.pageserver.service_port.http}'"

@@ -3,25 +3,25 @@ from datetime import datetime
import os
import pytest

from fixtures.zenith_fixtures import ZenithEnvBuilder
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.log_helper import log
from fixtures.metrics import parse_metrics
from fixtures.utils import lsn_to_hex


@pytest.mark.parametrize('with_safekeepers', [False, True])
def test_tenants_normal_work(zenith_env_builder: ZenithEnvBuilder, with_safekeepers: bool):
def test_tenants_normal_work(neon_env_builder: NeonEnvBuilder, with_safekeepers: bool):
if with_safekeepers:
zenith_env_builder.num_safekeepers = 3
neon_env_builder.num_safekeepers = 3

env = zenith_env_builder.init_start()
env = neon_env_builder.init_start()
"""Tests tenants with and without wal acceptors"""
tenant_1, _ = env.zenith_cli.create_tenant()
tenant_2, _ = env.zenith_cli.create_tenant()
tenant_1, _ = env.neon_cli.create_tenant()
tenant_2, _ = env.neon_cli.create_tenant()

env.zenith_cli.create_timeline(f'test_tenants_normal_work_with_safekeepers{with_safekeepers}',
env.neon_cli.create_timeline(f'test_tenants_normal_work_with_safekeepers{with_safekeepers}',
tenant_id=tenant_1)
env.zenith_cli.create_timeline(f'test_tenants_normal_work_with_safekeepers{with_safekeepers}',
env.neon_cli.create_timeline(f'test_tenants_normal_work_with_safekeepers{with_safekeepers}',
tenant_id=tenant_2)

pg_tenant1 = env.postgres.create_start(
@@ -44,15 +44,15 @@ def test_tenants_normal_work(zenith_env_builder: ZenithEnvBuilder, with_safekeep
assert cur.fetchone() == (5000050000, )


def test_metrics_normal_work(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 3
def test_metrics_normal_work(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3

env = zenith_env_builder.init_start()
tenant_1, _ = env.zenith_cli.create_tenant()
tenant_2, _ = env.zenith_cli.create_tenant()
env = neon_env_builder.init_start()
tenant_1, _ = env.neon_cli.create_tenant()
tenant_2, _ = env.neon_cli.create_tenant()

timeline_1 = env.zenith_cli.create_timeline('test_metrics_normal_work', tenant_id=tenant_1)
timeline_2 = env.zenith_cli.create_timeline('test_metrics_normal_work', tenant_id=tenant_2)
timeline_1 = env.neon_cli.create_timeline('test_metrics_normal_work', tenant_id=tenant_1)
timeline_2 = env.neon_cli.create_timeline('test_metrics_normal_work', tenant_id=tenant_2)

pg_tenant1 = env.postgres.create_start('test_metrics_normal_work', tenant_id=tenant_1)
pg_tenant2 = env.postgres.create_start('test_metrics_normal_work', tenant_id=tenant_2)
@@ -72,7 +72,7 @@ def test_metrics_normal_work(zenith_env_builder: ZenithEnvBuilder):
collected_metrics[f'safekeeper{sk.id}'] = sk.http_client().get_metrics_str()

for name in collected_metrics:
basepath = os.path.join(zenith_env_builder.repo_dir, f'{name}.metrics')
basepath = os.path.join(neon_env_builder.repo_dir, f'{name}.metrics')

with open(basepath, 'w') as stdout_f:
print(collected_metrics[name], file=stdout_f, flush=True)

@@ -12,11 +12,11 @@ from uuid import UUID

import pytest

from fixtures.zenith_fixtures import ZenithEnvBuilder, ZenithEnv, Postgres, wait_for_last_record_lsn, wait_for_upload
from fixtures.neon_fixtures import NeonEnvBuilder, NeonEnv, Postgres, wait_for_last_record_lsn, wait_for_upload
from fixtures.utils import lsn_from_hex


async def tenant_workload(env: ZenithEnv, pg: Postgres):
async def tenant_workload(env: NeonEnv, pg: Postgres):
pageserver_conn = await env.pageserver.connect_async()

pg_conn = await pg.connect_async()
@@ -35,7 +35,7 @@ async def tenant_workload(env: ZenithEnv, pg: Postgres):
assert res == i * 1000


async def all_tenants_workload(env: ZenithEnv, tenants_pgs):
async def all_tenants_workload(env: NeonEnv, tenants_pgs):
workers = []
for tenant, pg in tenants_pgs:
worker = tenant_workload(env, pg)
@@ -46,28 +46,28 @@ async def all_tenants_workload(env: ZenithEnv, tenants_pgs):


@pytest.mark.parametrize('storage_type', ['local_fs', 'mock_s3'])
def test_tenants_many(zenith_env_builder: ZenithEnvBuilder, storage_type: str):
def test_tenants_many(neon_env_builder: NeonEnvBuilder, storage_type: str):

if storage_type == 'local_fs':
zenith_env_builder.enable_local_fs_remote_storage()
neon_env_builder.enable_local_fs_remote_storage()
elif storage_type == 'mock_s3':
zenith_env_builder.enable_s3_mock_remote_storage('test_remote_storage_backup_and_restore')
neon_env_builder.enable_s3_mock_remote_storage('test_remote_storage_backup_and_restore')
else:
raise RuntimeError(f'Unknown storage type: {storage_type}')

zenith_env_builder.enable_local_fs_remote_storage()
neon_env_builder.enable_local_fs_remote_storage()

env = zenith_env_builder.init_start()
env = neon_env_builder.init_start()

tenants_pgs = []

for i in range(1, 5):
# Use a tiny checkpoint distance, to create a lot of layers quickly
tenant, _ = env.zenith_cli.create_tenant(
tenant, _ = env.neon_cli.create_tenant(
conf={
'checkpoint_distance': '5000000',
})
env.zenith_cli.create_timeline(f'test_tenants_many', tenant_id=tenant)
env.neon_cli.create_timeline(f'test_tenants_many', tenant_id=tenant)

pg = env.postgres.create_start(
f'test_tenants_many',

@@ -1,15 +1,15 @@
from contextlib import closing
import psycopg2.extras
import psycopg2.errors
from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, Postgres, assert_local
from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres, assert_local
from fixtures.log_helper import log
import time


def test_timeline_size(zenith_simple_env: ZenithEnv):
env = zenith_simple_env
def test_timeline_size(neon_simple_env: NeonEnv):
env = neon_simple_env
# Branch at the point where only 100 rows were inserted
new_timeline_id = env.zenith_cli.create_branch('test_timeline_size', 'empty')
new_timeline_id = env.neon_cli.create_branch('test_timeline_size', 'empty')

client = env.pageserver.http_client()
timeline_details = assert_local(client, env.initial_tenant, new_timeline_id)
@@ -69,9 +69,9 @@ def wait_for_pageserver_catchup(pgmain: Postgres, polling_interval=1, timeout=60
time.sleep(polling_interval)


def test_timeline_size_quota(zenith_env_builder: ZenithEnvBuilder):
env = zenith_env_builder.init_start()
new_timeline_id = env.zenith_cli.create_branch('test_timeline_size_quota')
def test_timeline_size_quota(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
new_timeline_id = env.neon_cli.create_branch('test_timeline_size_quota')

client = env.pageserver.http_client()
res = assert_local(client, env.initial_tenant, new_timeline_id)
@@ -86,7 +86,7 @@ def test_timeline_size_quota(zenith_env_builder: ZenithEnvBuilder):

with closing(pgmain.connect()) as conn:
with conn.cursor() as cur:
cur.execute("CREATE EXTENSION neon") # TODO move it to zenith_fixtures?
cur.execute("CREATE EXTENSION neon") # TODO move it to neon_fixtures?

cur.execute("CREATE TABLE foo (t text)")


@@ -1,15 +1,15 @@
import os

from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv
from fixtures.log_helper import log


#
# Test branching, when a transaction is in prepared state
#
def test_twophase(zenith_simple_env: ZenithEnv):
env = zenith_simple_env
env.zenith_cli.create_branch("test_twophase", "empty")
def test_twophase(neon_simple_env: NeonEnv):
env = neon_simple_env
env.neon_cli.create_branch("test_twophase", "empty")
pg = env.postgres.create_start('test_twophase', config_lines=['max_prepared_transactions=5'])
log.info("postgres is running on 'test_twophase' branch")

@@ -55,7 +55,7 @@ def test_twophase(zenith_simple_env: ZenithEnv):
assert len(twophase_files) == 2

# Create a branch with the transaction in prepared state
env.zenith_cli.create_branch("test_twophase_prepared", "test_twophase")
env.neon_cli.create_branch("test_twophase_prepared", "test_twophase")

# Start compute on the new branch
pg2 = env.postgres.create_start(

@@ -1,4 +1,4 @@
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv
from fixtures.log_helper import log


@@ -6,10 +6,10 @@ from fixtures.log_helper import log
# Test that the VM bit is cleared correctly at a HEAP_DELETE and
# HEAP_UPDATE record.
#
def test_vm_bit_clear(zenith_simple_env: ZenithEnv):
env = zenith_simple_env
def test_vm_bit_clear(neon_simple_env: NeonEnv):
env = neon_simple_env

env.zenith_cli.create_branch("test_vm_bit_clear", "empty")
env.neon_cli.create_branch("test_vm_bit_clear", "empty")
pg = env.postgres.create_start('test_vm_bit_clear')

log.info("postgres is running on 'test_vm_bit_clear' branch")
@@ -33,7 +33,7 @@ def test_vm_bit_clear(zenith_simple_env: ZenithEnv):
cur.execute('UPDATE vmtest_update SET id = 5000 WHERE id = 1')

# Branch at this point, to test that later
env.zenith_cli.create_branch("test_vm_bit_clear_new", "test_vm_bit_clear")
env.neon_cli.create_branch("test_vm_bit_clear_new", "test_vm_bit_clear")

# Clear the buffer cache, to force the VM page to be re-fetched from
# the page server

@@ -12,7 +12,7 @@ from contextlib import closing
from dataclasses import dataclass, field
from multiprocessing import Process, Value
from pathlib import Path
from fixtures.zenith_fixtures import PgBin, Etcd, Postgres, RemoteStorageUsers, Safekeeper, ZenithEnv, ZenithEnvBuilder, PortDistributor, SafekeeperPort, zenith_binpath, PgProtocol
from fixtures.neon_fixtures import PgBin, Etcd, Postgres, RemoteStorageUsers, Safekeeper, NeonEnv, NeonEnvBuilder, PortDistributor, SafekeeperPort, neon_binpath, PgProtocol
from fixtures.utils import get_dir_size, lsn_to_hex, mkdir_if_needed, lsn_from_hex
from fixtures.log_helper import log
from typing import List, Optional, Any
@@ -29,9 +29,9 @@ class TimelineMetrics:

# Run page server and multiple acceptors, and multiple compute nodes running
# against different timelines.
def test_many_timelines(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init_start()
def test_many_timelines(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()

n_timelines = 3

@@ -39,15 +39,15 @@ def test_many_timelines(zenith_env_builder: ZenithEnvBuilder):
"test_safekeepers_many_timelines_{}".format(tlin) for tlin in range(n_timelines)
]
# pageserver, safekeeper operate timelines via their ids (can be represented in hex as 'ad50847381e248feaac9876cc71ae418')
# that's not really human readable, so the branch names are introduced in Zenith CLI.
# Zenith CLI stores its branch <-> timeline mapping in its internals,
# that's not really human readable, so the branch names are introduced in Neon CLI.
# Neon CLI stores its branch <-> timeline mapping in its internals,
# but we need this to collect metrics from other servers, related to the timeline.
branch_names_to_timeline_ids = {}

# start postgres on each timeline
pgs = []
for branch_name in branch_names:
new_timeline_id = env.zenith_cli.create_branch(branch_name)
new_timeline_id = env.neon_cli.create_branch(branch_name)
pgs.append(env.postgres.create_start(branch_name))
branch_names_to_timeline_ids[branch_name] = new_timeline_id

@@ -93,14 +93,14 @@ def test_many_timelines(zenith_env_builder: ZenithEnvBuilder):
# the compute node, which only happens after a consensus of safekeepers
# has confirmed the transaction. We assume majority consensus here.
assert (2 * sum(m.last_record_lsn <= lsn
for lsn in m.flush_lsns) > zenith_env_builder.num_safekeepers), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}"
for lsn in m.flush_lsns) > neon_env_builder.num_safekeepers), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}"
assert (2 * sum(m.last_record_lsn <= lsn
for lsn in m.commit_lsns) > zenith_env_builder.num_safekeepers), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}"
for lsn in m.commit_lsns) > neon_env_builder.num_safekeepers), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}"
timeline_metrics.append(m)
log.info(f"{message}: {timeline_metrics}")
return timeline_metrics

# TODO: https://github.com/zenithdb/zenith/issues/809
# TODO: https://github.com/neondatabase/neon/issues/809
# collect_metrics("before CREATE TABLE")

# Do everything in different loops to have actions on different timelines
@@ -168,15 +168,15 @@ def test_many_timelines(zenith_env_builder: ZenithEnvBuilder):
# Check that dead minority doesn't prevent the commits: execute insert n_inserts
# times, with fault_probability chance of getting a wal acceptor down or up
# along the way. 2 of 3 are always alive, so the work keeps going.
def test_restarts(zenith_env_builder: ZenithEnvBuilder):
def test_restarts(neon_env_builder: NeonEnvBuilder):
fault_probability = 0.01
n_inserts = 1000
n_acceptors = 3

zenith_env_builder.num_safekeepers = n_acceptors
env = zenith_env_builder.init_start()
neon_env_builder.num_safekeepers = n_acceptors
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_safekeepers_restarts')
env.neon_cli.create_branch('test_safekeepers_restarts')
pg = env.postgres.create_start('test_safekeepers_restarts')

# we rely upon autocommit after each statement
@@ -209,11 +209,11 @@ def delayed_safekeeper_start(wa):


# When majority of acceptors is offline, commits are expected to be frozen
def test_unavailability(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 2
env = zenith_env_builder.init_start()
def test_unavailability(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 2
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_safekeepers_unavailability')
env.neon_cli.create_branch('test_safekeepers_unavailability')
pg = env.postgres.create_start('test_safekeepers_unavailability')

# we rely upon autocommit after each statement
@@ -279,12 +279,12 @@ def stop_value():


# do inserts while concurrently getting up/down subsets of acceptors
def test_race_conditions(zenith_env_builder: ZenithEnvBuilder, stop_value):
def test_race_conditions(neon_env_builder: NeonEnvBuilder, stop_value):

zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init_start()
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_safekeepers_race_conditions')
env.neon_cli.create_branch('test_safekeepers_race_conditions')
pg = env.postgres.create_start('test_safekeepers_race_conditions')

# we rely upon autocommit after each statement
@@ -308,16 +308,16 @@ def test_race_conditions(zenith_env_builder: ZenithEnvBuilder, stop_value):


# Test that safekeepers push their info to the broker and learn peer status from it
def test_broker(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 3
zenith_env_builder.enable_local_fs_remote_storage()
env = zenith_env_builder.init_start()
def test_broker(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
neon_env_builder.enable_local_fs_remote_storage()
env = neon_env_builder.init_start()

env.zenith_cli.create_branch("test_broker", "main")
env.neon_cli.create_branch("test_broker", "main")
pg = env.postgres.create_start('test_broker')
pg.safe_psql("CREATE TABLE t(key int primary key, value text)")

# learn zenith timeline from compute
# learn neon timeline from compute
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]

@@ -349,13 +349,13 @@ def test_broker(zenith_env_builder: ZenithEnvBuilder):


# Test that old WAL consumed by peers and pageserver is removed from safekeepers.
def test_wal_removal(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 2
def test_wal_removal(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 2
# to advance remote_consistent_lsn
zenith_env_builder.enable_local_fs_remote_storage()
env = zenith_env_builder.init_start()
neon_env_builder.enable_local_fs_remote_storage()
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_safekeepers_wal_removal')
env.neon_cli.create_branch('test_safekeepers_wal_removal')
pg = env.postgres.create_start('test_safekeepers_wal_removal')

with closing(pg.connect()) as conn:
@@ -412,22 +412,22 @@ def wait_segment_offload(tenant_id, timeline_id, live_sk, seg_end):


@pytest.mark.parametrize('storage_type', ['mock_s3', 'local_fs'])
def test_wal_backup(zenith_env_builder: ZenithEnvBuilder, storage_type: str):
zenith_env_builder.num_safekeepers = 3
def test_wal_backup(neon_env_builder: NeonEnvBuilder, storage_type: str):
neon_env_builder.num_safekeepers = 3
if storage_type == 'local_fs':
zenith_env_builder.enable_local_fs_remote_storage()
neon_env_builder.enable_local_fs_remote_storage()
elif storage_type == 'mock_s3':
zenith_env_builder.enable_s3_mock_remote_storage('test_safekeepers_wal_backup')
neon_env_builder.enable_s3_mock_remote_storage('test_safekeepers_wal_backup')
else:
raise RuntimeError(f'Unknown storage type: {storage_type}')
zenith_env_builder.remote_storage_users = RemoteStorageUsers.SAFEKEEPER
neon_env_builder.remote_storage_users = RemoteStorageUsers.SAFEKEEPER

env = zenith_env_builder.init_start()
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_safekeepers_wal_backup')
env.neon_cli.create_branch('test_safekeepers_wal_backup')
pg = env.postgres.create_start('test_safekeepers_wal_backup')

# learn zenith timeline from compute
# learn neon timeline from compute
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]

@@ -460,7 +460,7 @@ def test_wal_backup(zenith_env_builder: ZenithEnvBuilder, storage_type: str):


class ProposerPostgres(PgProtocol):
"""Object for running postgres without ZenithEnv"""
"""Object for running postgres without NeonEnv"""
def __init__(self,
pgdata_dir: str,
pg_bin,
@@ -542,14 +542,14 @@ class ProposerPostgres(PgProtocol):


# insert wal in all safekeepers and run sync on proposer
def test_sync_safekeepers(zenith_env_builder: ZenithEnvBuilder,
def test_sync_safekeepers(neon_env_builder: NeonEnvBuilder,
pg_bin: PgBin,
port_distributor: PortDistributor):

# We don't really need the full environment for this test, just the
# safekeepers would be enough.
zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init_start()
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()

timeline_id = uuid.uuid4()
tenant_id = uuid.uuid4()
@@ -596,17 +596,17 @@ def test_sync_safekeepers(zenith_env_builder: ZenithEnvBuilder,
assert all(lsn_after_sync == lsn for lsn in lsn_after_append)


def test_timeline_status(zenith_env_builder: ZenithEnvBuilder):
env = zenith_env_builder.init_start()
def test_timeline_status(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_timeline_status')
env.neon_cli.create_branch('test_timeline_status')
pg = env.postgres.create_start('test_timeline_status')

wa = env.safekeepers[0]
wa_http_cli = wa.http_client()
wa_http_cli.check_status()

# learn zenith timeline from compute
# learn neon timeline from compute
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]

@@ -642,7 +642,7 @@ class SafekeeperEnv:
peer_port=self.port_distributor.get_port())
self.pg_bin = pg_bin
self.num_safekeepers = num_safekeepers
self.bin_safekeeper = os.path.join(str(zenith_binpath), 'safekeeper')
self.bin_safekeeper = os.path.join(str(neon_binpath), 'safekeeper')
self.safekeepers: Optional[List[subprocess.CompletedProcess[Any]]] = None
self.postgres: Optional[ProposerPostgres] = None
self.tenant_id: Optional[uuid.UUID] = None
@@ -753,8 +753,8 @@ def test_safekeeper_without_pageserver(test_output_dir: str,
assert res == 5050


def test_replace_safekeeper(zenith_env_builder: ZenithEnvBuilder):
def safekeepers_guc(env: ZenithEnv, sk_names: List[int]) -> str:
def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder):
def safekeepers_guc(env: NeonEnv, sk_names: List[int]) -> str:
return ','.join([f'localhost:{sk.port.pg}' for sk in env.safekeepers if sk.id in sk_names])

def execute_payload(pg: Postgres):
@@ -781,9 +781,9 @@ def test_replace_safekeeper(zenith_env_builder: ZenithEnvBuilder):
except Exception as e:
log.info(f"Safekeeper {sk.id} status error: {e}")

zenith_env_builder.num_safekeepers = 4
env = zenith_env_builder.init_start()
env.zenith_cli.create_branch('test_replace_safekeeper')
neon_env_builder.num_safekeepers = 4
env = neon_env_builder.init_start()
env.neon_cli.create_branch('test_replace_safekeeper')

log.info("Use only first 3 safekeepers")
env.safekeepers[3].stop()
@@ -792,7 +792,7 @@ def test_replace_safekeeper(zenith_env_builder: ZenithEnvBuilder):
pg.adjust_for_safekeepers(safekeepers_guc(env, active_safekeepers))
pg.start()

# learn zenith timeline from compute
# learn neon timeline from compute
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]

@@ -844,7 +844,7 @@ def test_replace_safekeeper(zenith_env_builder: ZenithEnvBuilder):
# We have `wal_keep_size=0`, so postgres should trim WAL once it's broadcasted
# to all safekeepers. This test checks that compute WAL can fit into small number
# of WAL segments.
def test_wal_deleted_after_broadcast(zenith_env_builder: ZenithEnvBuilder):
def test_wal_deleted_after_broadcast(neon_env_builder: NeonEnvBuilder):
# used to calculate delta in collect_stats
last_lsn = .0

@@ -866,10 +866,10 @@ def test_wal_deleted_after_broadcast(zenith_env_builder: ZenithEnvBuilder):
def generate_wal(cur):
cur.execute("INSERT INTO t SELECT generate_series(1,300000), 'payload'")

zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init_start()
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_wal_deleted_after_broadcast')
env.neon_cli.create_branch('test_wal_deleted_after_broadcast')
# Adjust checkpoint config to prevent keeping old WAL segments
pg = env.postgres.create_start(
'test_wal_deleted_after_broadcast',
@@ -894,18 +894,18 @@ def test_wal_deleted_after_broadcast(zenith_env_builder: ZenithEnvBuilder):
assert wal_size_after_checkpoint < 16 * 2.5


def test_delete_force(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 1
env = zenith_env_builder.init_start()
def test_delete_force(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 1
env = neon_env_builder.init_start()

# Create two tenants: one will be deleted, other should be preserved.
tenant_id = env.initial_tenant.hex
timeline_id_1 = env.zenith_cli.create_branch('br1').hex # Active, delete explicitly
timeline_id_2 = env.zenith_cli.create_branch('br2').hex # Inactive, delete explicitly
timeline_id_3 = env.zenith_cli.create_branch('br3').hex # Active, delete with the tenant
timeline_id_4 = env.zenith_cli.create_branch('br4').hex # Inactive, delete with the tenant
timeline_id_1 = env.neon_cli.create_branch('br1').hex # Active, delete explicitly
timeline_id_2 = env.neon_cli.create_branch('br2').hex # Inactive, delete explicitly
timeline_id_3 = env.neon_cli.create_branch('br3').hex # Active, delete with the tenant
timeline_id_4 = env.neon_cli.create_branch('br4').hex # Inactive, delete with the tenant

tenant_id_other_uuid, timeline_id_other_uuid = env.zenith_cli.create_tenant()
tenant_id_other_uuid, timeline_id_other_uuid = env.neon_cli.create_tenant()
tenant_id_other = tenant_id_other_uuid.hex
timeline_id_other = timeline_id_other_uuid.hex


@@ -4,7 +4,7 @@ import asyncpg
import random
import time

from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, Postgres, Safekeeper
from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres, Safekeeper
from fixtures.log_helper import getLogger
from fixtures.utils import lsn_from_hex, lsn_to_hex
from typing import List
@@ -136,7 +136,7 @@ async def wait_for_lsn(safekeeper: Safekeeper,
# On each iteration 1 acceptor is stopped, and 2 others should allow
# background workers to execute transactions. In the end, state should remain
# consistent.
async def run_restarts_under_load(env: ZenithEnv,
async def run_restarts_under_load(env: NeonEnv,
pg: Postgres,
acceptors: List[Safekeeper],
n_workers=10,
@@ -202,11 +202,11 @@ async def run_restarts_under_load(env: ZenithEnv,


# Restart acceptors one by one, while executing and validating bank transactions
def test_restarts_under_load(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init_start()
def test_restarts_under_load(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_safekeepers_restarts_under_load')
env.neon_cli.create_branch('test_safekeepers_restarts_under_load')
# Enable backpressure with 1MB maximal lag, because we don't want to block on `wait_for_lsn()` for too long
pg = env.postgres.create_start('test_safekeepers_restarts_under_load',
config_lines=['max_replication_write_lag=1MB'])
@@ -217,11 +217,11 @@ def test_restarts_under_load(zenith_env_builder: ZenithEnvBuilder):
# Restart acceptors one by one and test that everything is working as expected
# when checkpoints are triggered frequently by max_wal_size=32MB. Because we have
# wal_keep_size=0, there will be aggressive WAL segments recycling.
def test_restarts_frequent_checkpoints(zenith_env_builder: ZenithEnvBuilder):
zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init_start()
def test_restarts_frequent_checkpoints(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()

env.zenith_cli.create_branch('test_restarts_frequent_checkpoints')
env.neon_cli.create_branch('test_restarts_frequent_checkpoints')
# Enable backpressure with 1MB maximal lag, because we don't want to block on `wait_for_lsn()` for too long
pg = env.postgres.create_start('test_restarts_frequent_checkpoints',
config_lines=[

@@ -1,7 +1,7 @@
import os
import subprocess

from fixtures.zenith_fixtures import (ZenithEnvBuilder,
from fixtures.neon_fixtures import (NeonEnvBuilder,
VanillaPostgres,
PortDistributor,
PgBin,
@@ -11,16 +11,16 @@ from fixtures.zenith_fixtures import (ZenithEnvBuilder,
from fixtures.log_helper import log


def test_wal_restore(zenith_env_builder: ZenithEnvBuilder,
def test_wal_restore(neon_env_builder: NeonEnvBuilder,
pg_bin: PgBin,
test_output_dir,
port_distributor: PortDistributor):
env = zenith_env_builder.init_start()
env.zenith_cli.create_branch("test_wal_restore")
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_wal_restore")
pg = env.postgres.create_start('test_wal_restore')
pg.safe_psql("create table t as select generate_series(1,300000)")
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
env.zenith_cli.pageserver_stop()
env.neon_cli.pageserver_stop()
port = port_distributor.get_port()
data_dir = os.path.join(test_output_dir, 'pgsql.restored')
with VanillaPostgres(data_dir, PgBin(test_output_dir), port) as restored:

@@ -1,16 +1,16 @@
import os
import pytest
from fixtures.utils import mkdir_if_needed
from fixtures.zenith_fixtures import ZenithEnv, base_dir, pg_distrib_dir
from fixtures.neon_fixtures import NeonEnv, base_dir, pg_distrib_dir


# The isolation tests run for a long time, especially in debug mode,
# so use a larger-than-default timeout.
@pytest.mark.timeout(1800)
def test_isolation(zenith_simple_env: ZenithEnv, test_output_dir, pg_bin, capsys):
env = zenith_simple_env
def test_isolation(neon_simple_env: NeonEnv, test_output_dir, pg_bin, capsys):
env = neon_simple_env

env.zenith_cli.create_branch("test_isolation", "empty")
env.neon_cli.create_branch("test_isolation", "empty")
# Connect to postgres and create a database called "regression".
# isolation tests use prepared transactions, so enable them
pg = env.postgres.create_start('test_isolation', config_lines=['max_prepared_transactions=100'])

@@ -1,19 +1,19 @@
import os

from fixtures.utils import mkdir_if_needed
from fixtures.zenith_fixtures import (ZenithEnv,
from fixtures.neon_fixtures import (NeonEnv,
check_restored_datadir_content,
base_dir,
pg_distrib_dir)
from fixtures.log_helper import log


def test_zenith_regress(zenith_simple_env: ZenithEnv, test_output_dir, pg_bin, capsys):
env = zenith_simple_env
def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir, pg_bin, capsys):
env = neon_simple_env

env.zenith_cli.create_branch("test_zenith_regress", "empty")
env.neon_cli.create_branch("test_neon_regress", "empty")
# Connect to postgres and create a database called "regression".
pg = env.postgres.create_start('test_zenith_regress')
pg = env.postgres.create_start('test_neon_regress')
pg.safe_psql('CREATE DATABASE regression')

# Create some local directories for pg_regress to run in.
@@ -22,9 +22,9 @@ def test_zenith_regress(zenith_simple_env: ZenithEnv, test_output_dir, pg_bin, c
mkdir_if_needed(os.path.join(runpath, 'testtablespace'))

# Compute all the file locations that pg_regress will need.
# This test runs zenith specific tests
# This test runs neon specific tests
build_path = os.path.join(pg_distrib_dir, 'build/src/test/regress')
src_path = os.path.join(base_dir, 'test_runner/zenith_regress')
src_path = os.path.join(base_dir, 'test_runner/neon_regress')
bindir = os.path.join(pg_distrib_dir, 'bin')
schedule = os.path.join(src_path, 'parallel_schedule')
pg_regress = os.path.join(build_path, 'pg_regress')
@@ -1,16 +1,16 @@
import os
import pytest
from fixtures.utils import mkdir_if_needed
from fixtures.zenith_fixtures import ZenithEnv, check_restored_datadir_content, base_dir, pg_distrib_dir
from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content, base_dir, pg_distrib_dir


# The pg_regress tests run for a long time, especially in debug mode,
# so use a larger-than-default timeout.
@pytest.mark.timeout(1800)
def test_pg_regress(zenith_simple_env: ZenithEnv, test_output_dir: str, pg_bin, capsys):
env = zenith_simple_env
def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: str, pg_bin, capsys):
env = neon_simple_env

env.zenith_cli.create_branch("test_pg_regress", "empty")
env.neon_cli.create_branch("test_pg_regress", "empty")
# Connect to postgres and create a database called "regression".
pg = env.postgres.create_start('test_pg_regress')
pg.safe_psql('CREATE DATABASE regression')

@@ -1,5 +1,5 @@
pytest_plugins = (
"fixtures.zenith_fixtures",
"fixtures.neon_fixtures",
"fixtures.benchmark_fixture",
"fixtures.compare_fixtures",
"fixtures.slow",

@@ -25,9 +25,9 @@ To use, declare the 'zenbenchmark' fixture in the test function. Run the
benchmark, and then record the result by calling zenbenchmark.record. For example:

import timeit
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv

def test_mybench(zenith_simple_env: env, zenbenchmark):
def test_mybench(neon_simple_env: env, zenbenchmark):

# Initialize the test
...
@@ -142,7 +142,7 @@ class MetricReport(str, enum.Enum): # str is a hack to make it json serializabl
LOWER_IS_BETTER = 'lower_is_better'


class ZenithBenchmarker:
class NeonBenchmarker:
"""
An object for recording benchmark results. This is created for each test
function by the zenbenchmark fixture
@@ -163,7 +163,7 @@ class ZenithBenchmarker:
Record a benchmark result.
"""
# just to namespace the value
name = f"zenith_benchmarker_{metric_name}"
name = f"neon_benchmarker_{metric_name}"
self.property_recorder(
name,
{
@@ -289,12 +289,12 @@ class ZenithBenchmarker:


@pytest.fixture(scope="function")
def zenbenchmark(record_property) -> Iterator[ZenithBenchmarker]:
def zenbenchmark(record_property) -> Iterator[NeonBenchmarker]:
"""
This is a python decorator for benchmark fixtures. It contains functions for
recording measurements, and prints them out at the end.
"""
benchmarker = ZenithBenchmarker(record_property)
benchmarker = NeonBenchmarker(record_property)
yield benchmarker



@@ -2,8 +2,8 @@ import pytest
from contextlib import contextmanager
from abc import ABC, abstractmethod

from fixtures.zenith_fixtures import PgBin, PgProtocol, VanillaPostgres, RemotePostgres, ZenithEnv
from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
from fixtures.neon_fixtures import PgBin, PgProtocol, VanillaPostgres, RemotePostgres, NeonEnv
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker

# Type-related stuff
from typing import Iterator
@@ -12,7 +12,7 @@ from typing import Iterator
class PgCompare(ABC):
"""Common interface of all postgres implementations, useful for benchmarks.

This class is a helper class for the zenith_with_baseline fixture. See its documentation
This class is a helper class for the neon_with_baseline fixture. See its documentation
for more details.
"""
@property
@@ -26,7 +26,7 @@ class PgCompare(ABC):
pass

@property
def zenbenchmark(self) -> ZenithBenchmarker:
def zenbenchmark(self) -> NeonBenchmarker:
pass

@abstractmethod
@@ -52,19 +52,19 @@ class PgCompare(ABC):
pass


class ZenithCompare(PgCompare):
"""PgCompare interface for the zenith stack."""
class NeonCompare(PgCompare):
"""PgCompare interface for the neon stack."""
def __init__(self,
zenbenchmark: ZenithBenchmarker,
zenith_simple_env: ZenithEnv,
zenbenchmark: NeonBenchmarker,
neon_simple_env: NeonEnv,
pg_bin: PgBin,
branch_name):
self.env = zenith_simple_env
self.env = neon_simple_env
self._zenbenchmark = zenbenchmark
self._pg_bin = pg_bin

# We only use one branch and one timeline
self.env.zenith_cli.create_branch(branch_name, 'empty')
self.env.neon_cli.create_branch(branch_name, 'empty')
self._pg = self.env.postgres.create_start(branch_name)
self.timeline = self.pg.safe_psql("SHOW neon.timeline_id")[0][0]

@@ -221,9 +221,9 @@ class RemoteCompare(PgCompare):


@pytest.fixture(scope='function')
def zenith_compare(request, zenbenchmark, pg_bin, zenith_simple_env) -> ZenithCompare:
def neon_compare(request, zenbenchmark, pg_bin, neon_simple_env) -> NeonCompare:
branch_name = request.node.name
return ZenithCompare(zenbenchmark, zenith_simple_env, pg_bin, branch_name)
return NeonCompare(zenbenchmark, neon_simple_env, pg_bin, branch_name)


@pytest.fixture(scope='function')
@@ -236,13 +236,13 @@ def remote_compare(zenbenchmark, remote_pg) -> RemoteCompare:
return RemoteCompare(zenbenchmark, remote_pg)


@pytest.fixture(params=["vanilla_compare", "zenith_compare"], ids=["vanilla", "zenith"])
def zenith_with_baseline(request) -> PgCompare:
"""Parameterized fixture that helps compare zenith against vanilla postgres.
@pytest.fixture(params=["vanilla_compare", "neon_compare"], ids=["vanilla", "neon"])
def neon_with_baseline(request) -> PgCompare:
"""Parameterized fixture that helps compare neon against vanilla postgres.

A test that uses this fixture turns into a parameterized test that runs against:
1. A vanilla postgres instance
2. A simple zenith env (see zenith_simple_env)
2. A simple neon env (see neon_simple_env)
3. Possibly other postgres protocol implementations.

The main goal of this fixture is to make it easier for people to read and write
@@ -254,7 +254,7 @@ def zenith_with_baseline(request) -> PgCompare:
of that.

If a test requires some one-off special implementation-specific logic, use of
isinstance(zenith_with_baseline, ZenithCompare) is encouraged. Though if that
isinstance(neon_with_baseline, NeonCompare) is encouraged. Though if that
implementation-specific logic is widely useful across multiple tests, it might
make sense to add methods to the PgCompare class.
"""

@@ -81,7 +81,7 @@ def pytest_addoption(parser):

# These are set in pytest_configure()
base_dir = ""
zenith_binpath = ""
neon_binpath = ""
pg_distrib_dir = ""
top_output_dir = ""

@@ -100,7 +100,7 @@ def check_interferring_processes(config):
# result of the test.
# NOTE this shows as an internal pytest error, there might be a better way
raise Exception(
'Found interfering processes running. Stop all Zenith pageservers, nodes, safekeepers, as well as stand-alone Postgres.'
'Found interfering processes running. Stop all Neon pageservers, nodes, safekeepers, as well as stand-alone Postgres.'
)


@@ -146,25 +146,25 @@ def pytest_configure(config):
raise Exception('postgres not found at "{}"'.format(pg_distrib_dir))

if os.getenv("REMOTE_ENV"):
# we are in remote env and do not have zenith binaries locally
# we are in remote env and do not have neon binaries locally
# this is the case for benchmarks run on self-hosted runner
return
# Find the zenith binaries.
global zenith_binpath
env_zenith_bin = os.environ.get('ZENITH_BIN')
if env_zenith_bin:
zenith_binpath = env_zenith_bin
# Find the neon binaries.
global neon_binpath
env_neon_bin = os.environ.get('ZENITH_BIN')
if env_neon_bin:
neon_binpath = env_neon_bin
else:
zenith_binpath = os.path.join(base_dir, 'target/debug')
log.info(f'zenith_binpath is {zenith_binpath}')
if not os.path.exists(os.path.join(zenith_binpath, 'pageserver')):
raise Exception('zenith binaries not found at "{}"'.format(zenith_binpath))
neon_binpath = os.path.join(base_dir, 'target/debug')
log.info(f'neon_binpath is {neon_binpath}')
if not os.path.exists(os.path.join(neon_binpath, 'pageserver')):
raise Exception('neon binaries not found at "{}"'.format(neon_binpath))


def profiling_supported():
"""Return True if the pageserver was compiled with the 'profiling' feature
"""
bin_pageserver = os.path.join(str(zenith_binpath), 'pageserver')
bin_pageserver = os.path.join(str(neon_binpath), 'pageserver')
res = subprocess.run([bin_pageserver, '--version'],
check=True,
universal_newlines=True,
@@ -223,7 +223,7 @@ def can_bind(host: str, port: int) -> bool:
# TODO: The pageserver and safekeepers don't use SO_REUSEADDR at the
# moment. If that changes, we should start using SO_REUSEADDR here
# too, to allow reusing ports more quickly.
# See https://github.com/zenithdb/zenith/issues/801
# See https://github.com/neondatabase/neon/issues/801
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

try:
@@ -479,12 +479,12 @@ class RemoteStorageUsers(Flag):
SAFEKEEPER = auto()


class ZenithEnvBuilder:
class NeonEnvBuilder:
"""
Builder object to create a Zenith runtime environment
Builder object to create a Neon runtime environment

You should use the `zenith_env_builder` or `zenith_simple_env` pytest
fixture to create the ZenithEnv object. That way, the repository is
You should use the `neon_env_builder` or `neon_simple_env` pytest
fixture to create the NeonEnv object. That way, the repository is
created in the right directory, based on the test name, and it's properly
cleaned up after the test has finished.
"""
@@ -511,18 +511,18 @@ class ZenithEnvBuilder:
self.num_safekeepers = num_safekeepers
self.pageserver_auth_enabled = pageserver_auth_enabled
self.default_branch_name = default_branch_name
self.env: Optional[ZenithEnv] = None
self.env: Optional[NeonEnv] = None

def init(self) -> ZenithEnv:
def init(self) -> NeonEnv:
# Cannot create more than one environment from one builder
assert self.env is None, "environment already initialized"
self.env = ZenithEnv(self)
self.env = NeonEnv(self)
return self.env

def start(self):
self.env.start()

def init_start(self) -> ZenithEnv:
def init_start(self) -> NeonEnv:
env = self.init()
self.start()
return env
@@ -571,12 +571,12 @@ class ZenithEnvBuilder:
self.env.pageserver.stop(immediate=True)


class ZenithEnv:
class NeonEnv:
"""
An object representing the Zenith runtime environment. It consists of
An object representing the Neon runtime environment. It consists of
the page server, 0-N safekeepers, and the compute nodes.

ZenithEnv contains functions for stopping/starting nodes in the
NeonEnv contains functions for stopping/starting nodes in the
environment, checking their status, creating tenants, connecting to the
nodes, creating and destroying compute nodes, etc. The page server and
the safekeepers are considered fixed in the environment, you cannot
@@ -584,7 +584,7 @@ class ZenithEnv:
likely change in the future, as we start supporting multiple page
servers and adding/removing safekeepers on the fly).

Some notable functions and fields in ZenithEnv:
Some notable functions and fields in NeonEnv:

postgres - A factory object for creating postgres compute nodes.

@@ -598,24 +598,24 @@ class ZenithEnv:

initial_tenant - tenant ID of the initial tenant created in the repository

zenith_cli - can be used to run the 'zenith' CLI tool
neon_cli - can be used to run the 'neon' CLI tool

create_tenant() - initializes a new tenant in the page server, returns
the tenant id
"""
def __init__(self, config: ZenithEnvBuilder):
def __init__(self, config: NeonEnvBuilder):
self.repo_dir = config.repo_dir
self.rust_log_override = config.rust_log_override
self.port_distributor = config.port_distributor
self.s3_mock_server = config.mock_s3_server
self.zenith_cli = ZenithCli(env=self)
self.neon_cli = NeonCli(env=self)
self.postgres = PostgresFactory(self)
self.safekeepers: List[Safekeeper] = []
self.broker = config.broker
self.remote_storage = config.remote_storage
self.remote_storage_users = config.remote_storage_users

# generate initial tenant ID here instead of letting 'zenith init' generate it,
# generate initial tenant ID here instead of letting 'neon init' generate it,
# so that we don't need to dig it out of the config file afterwards.
self.initial_tenant = uuid.uuid4()

@@ -645,8 +645,8 @@ class ZenithEnv:
|
||||
auth_type = '{pageserver_auth_type}'
|
||||
""")
|
||||
|
||||
# Create a corresponding ZenithPageserver object
|
||||
self.pageserver = ZenithPageserver(self,
|
||||
# Create a corresponding NeonPageserver object
|
||||
self.pageserver = NeonPageserver(self,
|
||||
port=pageserver_port,
|
||||
config_override=config.pageserver_config_override)
|
||||
|
||||
@@ -672,7 +672,7 @@ class ZenithEnv:
|
||||
self.safekeepers.append(safekeeper)
|
||||
|
||||
log.info(f"Config: {toml}")
|
||||
self.zenith_cli.init(toml)
|
||||
self.neon_cli.init(toml)
|
||||
|
||||
def start(self):
|
||||
# Start up broker, pageserver and all safekeepers
|
||||
@@ -697,10 +697,10 @@ class ZenithEnv:
|
||||
def _shared_simple_env(request: Any,
|
||||
port_distributor: PortDistributor,
|
||||
mock_s3_server: MockS3Server,
|
||||
default_broker: Etcd) -> Iterator[ZenithEnv]:
|
||||
default_broker: Etcd) -> Iterator[NeonEnv]:
|
||||
"""
|
||||
# Internal fixture backing the `zenith_simple_env` fixture. If TEST_SHARED_FIXTURES
|
||||
is set, this is shared by all tests using `zenith_simple_env`.
|
||||
# Internal fixture backing the `neon_simple_env` fixture. If TEST_SHARED_FIXTURES
|
||||
is set, this is shared by all tests using `neon_simple_env`.
|
||||
"""
|
||||
|
||||
if os.environ.get('TEST_SHARED_FIXTURES') is None:
|
||||
@@ -711,23 +711,23 @@ def _shared_simple_env(request: Any,
|
||||
repo_dir = os.path.join(str(top_output_dir), "shared_repo")
|
||||
shutil.rmtree(repo_dir, ignore_errors=True)
|
||||
|
||||
with ZenithEnvBuilder(Path(repo_dir), port_distributor, default_broker,
|
||||
with NeonEnvBuilder(Path(repo_dir), port_distributor, default_broker,
|
||||
mock_s3_server) as builder:
|
||||
env = builder.init_start()
|
||||
|
||||
# For convenience in tests, create a branch from the freshly-initialized cluster.
|
||||
env.zenith_cli.create_branch('empty', ancestor_branch_name=DEFAULT_BRANCH_NAME)
|
||||
env.neon_cli.create_branch('empty', ancestor_branch_name=DEFAULT_BRANCH_NAME)
|
||||
|
||||
yield env
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def zenith_simple_env(_shared_simple_env: ZenithEnv) -> Iterator[ZenithEnv]:
|
||||
def neon_simple_env(_shared_simple_env: NeonEnv) -> Iterator[NeonEnv]:
|
||||
"""
|
||||
Simple Zenith environment, with no authentication and no safekeepers.
|
||||
Simple Neon environment, with no authentication and no safekeepers.
|
||||
|
||||
If TEST_SHARED_FIXTURES environment variable is set, we reuse the same
|
||||
environment for all tests that use 'zenith_simple_env', keeping the
|
||||
environment for all tests that use 'neon_simple_env', keeping the
|
||||
page server and safekeepers running. Any compute nodes are stopped after
|
||||
each the test, however.
|
||||
"""

@@ -737,17 +737,17 @@ def zenith_simple_env(_shared_simple_env: ZenithEnv) -> Iterator[ZenithEnv]:


@pytest.fixture(scope='function')
def zenith_env_builder(test_output_dir,
def neon_env_builder(test_output_dir,
                     port_distributor: PortDistributor,
                     mock_s3_server: MockS3Server,
                     default_broker: Etcd) -> Iterator[ZenithEnvBuilder]:
                     default_broker: Etcd) -> Iterator[NeonEnvBuilder]:
    """
    Fixture to create a Zenith environment for test.
    Fixture to create a Neon environment for test.

    To use, define 'zenith_env_builder' fixture in your test to get access to the
    To use, define 'neon_env_builder' fixture in your test to get access to the
    builder object. Set properties on it to describe the environment.
    Finally, initialize and start up the environment by calling
    zenith_env_builder.init_start().
    neon_env_builder.init_start().

    After the initialization, you can launch compute nodes by calling
    the functions in the 'env.postgres' factory object, stop/start the

@@ -758,16 +758,16 @@ def zenith_env_builder(test_output_dir,
    repo_dir = os.path.join(test_output_dir, "repo")

    # Return the builder to the caller
    with ZenithEnvBuilder(Path(repo_dir), port_distributor, default_broker,
    with NeonEnvBuilder(Path(repo_dir), port_distributor, default_broker,
                          mock_s3_server) as builder:
        yield builder
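
Putting the builder fixture to work looks roughly like this; a sketch using only names introduced by this commit (the branch name is made up):

```python
def test_custom_env(neon_env_builder: NeonEnvBuilder):
    # Describe the environment first, then initialize and start it
    neon_env_builder.num_safekeepers = 1
    env = neon_env_builder.init_start()

    # Launch a compute node via the 'env.postgres' factory
    env.neon_cli.create_branch('test_custom_env')
    pg = env.postgres.create_start('test_custom_env')
    pg.safe_psql('select 1')
```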


class ZenithPageserverApiException(Exception):
class NeonPageserverApiException(Exception):
    pass


class ZenithPageserverHttpClient(requests.Session):
class NeonPageserverHttpClient(requests.Session):
    def __init__(self, port: int, auth_token: Optional[str] = None):
        super().__init__()
        self.port = port

@@ -784,7 +784,7 @@ class ZenithPageserverHttpClient(requests.Session):
                msg = res.json()['msg']
            except:
                msg = ''
            raise ZenithPageserverApiException(msg) from e
            raise NeonPageserverApiException(msg) from e

    def check_status(self):
        self.get(f"http://localhost:{self.port}/v1/status").raise_for_status()

@@ -891,12 +891,12 @@ TIMELINE_DATA_EXTRACTOR = re.compile(r"\s(?P<branch_name>[^\s]+)\s\[(?P<timeline
                                     re.MULTILINE)


class ZenithCli:
class NeonCli:
    """
    A typed wrapper around the `zenith` CLI tool.
    A typed wrapper around the `neon` CLI tool.
    Supports main commands via typed methods and a way to run arbitrary commands directly via CLI.
    """
    def __init__(self, env: ZenithEnv):
    def __init__(self, env: NeonEnv):
        self.env = env
        pass
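
The wrapper offers both styles; a sketch contrasting a typed helper with a raw invocation (the branch name is made up, and `timeline list` is assumed to be a valid `neon_local` subcommand, as the `list_timelines` docstring below suggests):

```python
# Typed helper: parses 'neon timeline create' output into a UUID
timeline_id = env.neon_cli.create_timeline('my_branch')

# Raw invocation: returns subprocess.CompletedProcess with stdout/stderr
res = env.neon_cli.raw_cli(['timeline', 'list'])
assert res.stderr == ''
```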

@@ -982,7 +982,7 @@ class ZenithCli:
        created_timeline_id = matches.group('timeline_id')

        if created_timeline_id is None:
            raise Exception('could not find timeline id after `zenith timeline create` invocation')
            raise Exception('could not find timeline id after `neon timeline create` invocation')
        else:
            return uuid.UUID(created_timeline_id)

@@ -1014,13 +1014,13 @@ class ZenithCli:
        created_timeline_id = matches.group('timeline_id')

        if created_timeline_id is None:
            raise Exception('could not find timeline id after `zenith timeline create` invocation')
            raise Exception('could not find timeline id after `neon timeline create` invocation')
        else:
            return uuid.UUID(created_timeline_id)

    def list_timelines(self, tenant_id: Optional[uuid.UUID] = None) -> List[Tuple[str, str]]:
        """
        Returns a list of (branch_name, timeline_id) tuples out of parsed `zenith timeline list` CLI output.
        Returns a list of (branch_name, timeline_id) tuples out of parsed `neon timeline list` CLI output.
        """

        # (L) main [b49f7954224a0ad25cc0013ea107b54b]

@@ -1053,7 +1053,7 @@ class ZenithCli:
        return res

    def pageserver_enabled_features(self) -> Any:
        bin_pageserver = os.path.join(str(zenith_binpath), 'pageserver')
        bin_pageserver = os.path.join(str(neon_binpath), 'pageserver')
        args = [bin_pageserver, '--enabled-features']
        log.info('Running command "{}"'.format(' '.join(args)))

@@ -1173,22 +1173,22 @@ class ZenithCli:
                extra_env_vars: Optional[Dict[str, str]] = None,
                check_return_code=True) -> 'subprocess.CompletedProcess[str]':
        """
        Run "zenith" with the specified arguments.
        Run "neon" with the specified arguments.

        Arguments must be in list form, e.g. ['pg', 'create']

        Return both stdout and stderr, which can be accessed as

        >>> result = env.zenith_cli.raw_cli(...)
        >>> result = env.neon_cli.raw_cli(...)
        >>> assert result.stderr == ""
        >>> log.info(result.stdout)
        """

        assert type(arguments) == list

        bin_zenith = os.path.join(str(zenith_binpath), 'neon_local')
        bin_neon = os.path.join(str(neon_binpath), 'neon_local')

        args = [bin_zenith] + arguments
        args = [bin_neon] + arguments
        log.info('Running command "{}"'.format(' '.join(args)))
        log.info(f'Running in "{self.env.repo_dir}"')

@@ -1231,20 +1231,20 @@ class ZenithCli:
        return res


class ZenithPageserver(PgProtocol):
class NeonPageserver(PgProtocol):
    """
    An object representing a running pageserver.

    Initializes the repository via `zenith init`.
    Initializes the repository via `neon init`.
    """
    def __init__(self, env: ZenithEnv, port: PageserverPort, config_override: Optional[str] = None):
    def __init__(self, env: NeonEnv, port: PageserverPort, config_override: Optional[str] = None):
        super().__init__(host='localhost', port=port.pg, user='cloud_admin')
        self.env = env
        self.running = False
        self.service_port = port
        self.config_override = config_override

    def start(self, overrides=()) -> 'ZenithPageserver':
    def start(self, overrides=()) -> 'NeonPageserver':
        """
        Start the page server.
        `overrides` allows adding some config to this pageserver start.

@@ -1252,17 +1252,17 @@ class ZenithPageserver(PgProtocol):
        """
        assert self.running == False

        self.env.zenith_cli.pageserver_start(overrides=overrides)
        self.env.neon_cli.pageserver_start(overrides=overrides)
        self.running = True
        return self

    def stop(self, immediate=False) -> 'ZenithPageserver':
    def stop(self, immediate=False) -> 'NeonPageserver':
        """
        Stop the page server.
        Returns self.
        """
        if self.running:
            self.env.zenith_cli.pageserver_stop(immediate)
            self.env.neon_cli.pageserver_stop(immediate)
            self.running = False
        return self
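
Because `start` and `stop` both return `self` and track `running`, bouncing the pageserver in a test is a one-liner; a sketch with assumed context (`env` from one of the fixtures above):

```python
# Graceful stop, then start again and verify the HTTP API answers
env.pageserver.stop().start()
env.pageserver.http_client().check_status()

# Immediate (non-graceful) stop, as __exit__ does
env.pageserver.stop(immediate=True)
```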

@@ -1272,8 +1272,8 @@ class ZenithPageserver(PgProtocol):
    def __exit__(self, exc_type, exc, tb):
        self.stop(True)

    def http_client(self, auth_token: Optional[str] = None) -> ZenithPageserverHttpClient:
        return ZenithPageserverHttpClient(
    def http_client(self, auth_token: Optional[str] = None) -> NeonPageserverHttpClient:
        return NeonPageserverHttpClient(
            port=self.service_port.http,
            auth_token=auth_token,
        )

@@ -1453,7 +1453,7 @@ def remote_pg(test_output_dir: str) -> Iterator[RemotePostgres]:
    yield remote_pg


class ZenithProxy(PgProtocol):
class NeonProxy(PgProtocol):
    def __init__(self, port: int):
        super().__init__(host="127.0.0.1",
                         user="proxy_user",

@@ -1469,7 +1469,7 @@ class ZenithProxy(PgProtocol):
        assert self._popen is None

        # Start proxy
        bin_proxy = os.path.join(str(zenith_binpath), 'proxy')
        bin_proxy = os.path.join(str(neon_binpath), 'proxy')
        args = [bin_proxy]
        args.extend(["--http", f"{self.host}:{self.http_port}"])
        args.extend(["--proxy", f"{self.host}:{self.port}"])

@@ -1493,20 +1493,20 @@ class ZenithProxy(PgProtocol):


@pytest.fixture(scope='function')
def static_proxy(vanilla_pg) -> Iterator[ZenithProxy]:
    """Zenith proxy that routes directly to vanilla postgres."""
def static_proxy(vanilla_pg) -> Iterator[NeonProxy]:
    """Neon proxy that routes directly to vanilla postgres."""
    vanilla_pg.start()
    vanilla_pg.safe_psql("create user proxy_auth with password 'pytest1' superuser")
    vanilla_pg.safe_psql("create user proxy_user with password 'pytest2'")

    with ZenithProxy(4432) as proxy:
    with NeonProxy(4432) as proxy:
        proxy.start_static()
        yield proxy
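
A consumer of this fixture then talks ordinary Postgres through port 4432; a sketch that assumes `safe_psql` forwards connection keyword arguments (the password matches the `proxy_user` created above):

```python
def test_proxy_select(static_proxy: NeonProxy):
    # proxy_user/'pytest2' were created in the vanilla Postgres above
    row = static_proxy.safe_psql('select 42', password='pytest2')[0]
    assert row[0] == 42
```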


class Postgres(PgProtocol):
    """ An object representing a running postgres daemon. """
    def __init__(self, env: ZenithEnv, tenant_id: uuid.UUID, port: int):
    def __init__(self, env: NeonEnv, tenant_id: uuid.UUID, port: int):
        super().__init__(host='localhost', port=port, user='cloud_admin', dbname='postgres')
        self.env = env
        self.running = False

@@ -1532,7 +1532,7 @@ class Postgres(PgProtocol):
        config_lines = []

        self.node_name = node_name or f'{branch_name}_pg_node'
        self.env.zenith_cli.pg_create(branch_name,
        self.env.neon_cli.pg_create(branch_name,
                                    node_name=self.node_name,
                                    tenant_id=self.tenant_id,
                                    lsn=lsn,

@@ -1560,7 +1560,7 @@ class Postgres(PgProtocol):

        log.info(f"Starting postgres node {self.node_name}")

        run_result = self.env.zenith_cli.pg_start(self.node_name,
        run_result = self.env.neon_cli.pg_start(self.node_name,
                                                tenant_id=self.tenant_id,
                                                port=self.port)
        self.running = True

@@ -1630,7 +1630,7 @@ class Postgres(PgProtocol):

        if self.running:
            assert self.node_name is not None
            self.env.zenith_cli.pg_stop(self.node_name, self.tenant_id)
            self.env.neon_cli.pg_stop(self.node_name, self.tenant_id)
            self.running = False

        return self

@@ -1642,7 +1642,7 @@ class Postgres(PgProtocol):
        """

        assert self.node_name is not None
        self.env.zenith_cli.pg_stop(self.node_name, self.tenant_id, True)
        self.env.neon_cli.pg_stop(self.node_name, self.tenant_id, True)
        self.node_name = None
        self.running = False

@@ -1679,7 +1679,7 @@ class Postgres(PgProtocol):

class PostgresFactory:
    """ An object representing multiple running postgres daemons. """
    def __init__(self, env: ZenithEnv):
    def __init__(self, env: NeonEnv):
        self.env = env
        self.num_instances = 0
        self.instances: List[Postgres] = []

@@ -1750,7 +1750,7 @@ class SafekeeperPort:
@dataclass
class Safekeeper:
    """ An object representing a running safekeeper daemon. """
    env: ZenithEnv
    env: NeonEnv
    port: SafekeeperPort
    id: int
    auth_token: Optional[str] = None

@@ -1758,7 +1758,7 @@ class Safekeeper:

    def start(self) -> 'Safekeeper':
        assert self.running == False
        self.env.zenith_cli.safekeeper_start(self.id)
        self.env.neon_cli.safekeeper_start(self.id)
        self.running = True
        # wait for the wal acceptor to start by checking its status
        started_at = time.time()

@@ -1778,7 +1778,7 @@ class Safekeeper:

    def stop(self, immediate=False) -> 'Safekeeper':
        log.info('Stopping safekeeper {}'.format(self.id))
        self.env.zenith_cli.safekeeper_stop(self.id, immediate)
        self.env.neon_cli.safekeeper_stop(self.id, immediate)
        self.running = False
        return self

@@ -1966,7 +1966,7 @@ def get_test_output_dir(request: Any) -> str:

# This is autouse, so the test output directory always gets created, even
# if a test doesn't put anything there. It also solves a problem with the
# zenith_simple_env fixture: if TEST_SHARED_FIXTURES is not set, it
# neon_simple_env fixture: if TEST_SHARED_FIXTURES is not set, it
# creates the repo in the test output directory. But it cannot depend on
# 'test_output_dir' fixture, because when TEST_SHARED_FIXTURES is not set,
# it has 'session' scope and cannot access fixtures with 'function'

@@ -2044,7 +2044,7 @@ def list_files_to_compare(pgdata_dir: str):


# pg is the existing and running compute node that we want to compare with a basebackup
def check_restored_datadir_content(test_output_dir: str, env: ZenithEnv, pg: Postgres):
def check_restored_datadir_content(test_output_dir: str, env: NeonEnv, pg: Postgres):

    # Get the timeline ID. We need it for the 'basebackup' command
    with closing(pg.connect()) as conn:

@@ -2134,7 +2134,7 @@ def wait_until(number_of_iterations: int, interval: int, func):
    raise Exception("timed out while waiting for %s" % func) from last_exception


def assert_local(pageserver_http_client: ZenithPageserverHttpClient,
def assert_local(pageserver_http_client: NeonPageserverHttpClient,
                 tenant: uuid.UUID,
                 timeline: uuid.UUID):
    timeline_detail = pageserver_http_client.timeline_detail(tenant, timeline)

@@ -2142,7 +2142,7 @@ def assert_local(pageserver_http_client: ZenithPageserverHttpClient,
    return timeline_detail


def remote_consistent_lsn(pageserver_http_client: ZenithPageserverHttpClient,
def remote_consistent_lsn(pageserver_http_client: NeonPageserverHttpClient,
                          tenant: uuid.UUID,
                          timeline: uuid.UUID) -> int:
    detail = pageserver_http_client.timeline_detail(tenant, timeline)

@@ -2158,7 +2158,7 @@ def remote_consistent_lsn(pageserver_http_client: ZenithPageserverHttpClient,
    return lsn_from_hex(lsn_str)


def wait_for_upload(pageserver_http_client: ZenithPageserverHttpClient,
def wait_for_upload(pageserver_http_client: NeonPageserverHttpClient,
                    tenant: uuid.UUID,
                    timeline: uuid.UUID,
                    lsn: int):

@@ -2174,7 +2174,7 @@ def wait_for_upload(pageserver_http_client: ZenithPageserverHttpClient,
                                 lsn_to_hex(lsn), lsn_to_hex(current_lsn)))


def last_record_lsn(pageserver_http_client: ZenithPageserverHttpClient,
def last_record_lsn(pageserver_http_client: NeonPageserverHttpClient,
                    tenant: uuid.UUID,
                    timeline: uuid.UUID) -> int:
    detail = pageserver_http_client.timeline_detail(tenant, timeline)

@@ -2184,7 +2184,7 @@ def last_record_lsn(pageserver_http_client: ZenithPageserverHttpClient,
    return lsn_from_hex(lsn_str)


def wait_for_last_record_lsn(pageserver_http_client: ZenithPageserverHttpClient,
def wait_for_last_record_lsn(pageserver_http_client: NeonPageserverHttpClient,
                             tenant: uuid.UUID,
                             timeline: uuid.UUID,
                             lsn: int):
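
Together these helpers support the usual "write locally, then wait for remote storage to catch up" pattern; a sketch under the renamed API (the helper name is hypothetical):

```python
import uuid


def wait_until_uploaded(env: NeonEnv, tenant: uuid.UUID, timeline: uuid.UUID):
    client = env.pageserver.http_client()
    # Everything the pageserver has ingested locally so far...
    lsn = last_record_lsn(client, tenant, timeline)
    # ...must reach remote storage before we proceed
    wait_for_upload(client, tenant, timeline, lsn)
```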

@@ -1,7 +1,7 @@
To add a new SQL test

- add sql script to run to zenith_regress/sql/testname.sql
- add expected output to zenith_regress/expected/testname.out
- add sql script to run to neon_regress/sql/testname.sql
- add expected output to neon_regress/expected/testname.out
- add testname to parallel_schedule

That's it.

@@ -4,7 +4,7 @@
# number of connections needed to run the tests.
# ----------

test: zenith-cid
test: zenith-rel-truncate
test: zenith-clog
test: zenith-vacuum-full
test: neon-cid
test: neon-rel-truncate
test: neon-clog
test: neon-vacuum-full

@@ -1,8 +1,8 @@
from contextlib import closing
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv
from fixtures.log_helper import log
from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare


#

@@ -15,8 +15,8 @@ from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
# 3. Disk space used
# 4. Peak memory usage
#
def test_bulk_insert(zenith_with_baseline: PgCompare):
    env = zenith_with_baseline
def test_bulk_insert(neon_with_baseline: PgCompare):
    env = neon_with_baseline

    with closing(env.pg.connect()) as conn:
        with conn.cursor() as cur:

@@ -2,7 +2,7 @@ import timeit
from fixtures.benchmark_fixture import MetricReport
import pytest

from fixtures.zenith_fixtures import ZenithEnvBuilder
from fixtures.neon_fixtures import NeonEnvBuilder

# Run bulk tenant creation test.
#

@@ -14,20 +14,20 @@ from fixtures.zenith_fixtures import ZenithEnvBuilder

@pytest.mark.parametrize('tenants_count', [1, 5, 10])
def test_bulk_tenant_create(
    zenith_env_builder: ZenithEnvBuilder,
    neon_env_builder: NeonEnvBuilder,
    tenants_count: int,
    zenbenchmark,
):
    zenith_env_builder.num_safekeepers = 3
    env = zenith_env_builder.init_start()
    neon_env_builder.num_safekeepers = 3
    env = neon_env_builder.init_start()

    time_slices = []

    for i in range(tenants_count):
        start = timeit.default_timer()

        tenant, _ = env.zenith_cli.create_tenant()
        env.zenith_cli.create_timeline(f'test_bulk_tenant_create_{tenants_count}_{i}',
        tenant, _ = env.neon_cli.create_tenant()
        env.neon_cli.create_timeline(f'test_bulk_tenant_create_{tenants_count}_{i}',
                                     tenant_id=tenant)

        # FIXME: We used to start new safekeepers here. Did that make sense? Should we do it now?

@@ -1,8 +1,8 @@
from contextlib import closing
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv
from fixtures.log_helper import log
from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare
from io import BufferedReader, RawIOBase
from itertools import repeat

@@ -41,8 +41,8 @@ def copy_test_data(rows: int):
#
# COPY performance tests.
#
def test_copy(zenith_with_baseline: PgCompare):
    env = zenith_with_baseline
def test_copy(neon_with_baseline: PgCompare):
    env = neon_with_baseline

    # Get the timeline ID of our branch. We need it for the pageserver 'checkpoint' command
    with closing(env.pg.connect()) as conn:

@@ -1,8 +1,8 @@
import os
from contextlib import closing
from fixtures.benchmark_fixture import MetricReport
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare
from fixtures.log_helper import log


@@ -11,8 +11,8 @@ from fixtures.log_helper import log
# As of this writing, we duplicate those giant WAL records for each page,
# which makes the delta layer about 32x larger than it needs to be.
#
def test_gist_buffering_build(zenith_with_baseline: PgCompare):
    env = zenith_with_baseline
def test_gist_buffering_build(neon_with_baseline: PgCompare):
    env = neon_with_baseline

    with closing(env.pg.connect()) as conn:
        with conn.cursor() as cur:

@@ -8,7 +8,7 @@ from pytest_lazyfixture import lazy_fixture  # type: ignore
    "env",
    [
        # The test is too slow to run in CI, but fast enough to run with remote tests
        pytest.param(lazy_fixture("zenith_compare"), id="zenith", marks=pytest.mark.slow),
        pytest.param(lazy_fixture("neon_compare"), id="neon", marks=pytest.mark.slow),
        pytest.param(lazy_fixture("vanilla_compare"), id="vanilla", marks=pytest.mark.slow),
        pytest.param(lazy_fixture("remote_compare"), id="remote", marks=pytest.mark.remote_cluster),
    ])

@@ -8,7 +8,7 @@ from pytest_lazyfixture import lazy_fixture  # type: ignore
    "env",
    [
        # The test is too slow to run in CI, but fast enough to run with remote tests
        pytest.param(lazy_fixture("zenith_compare"), id="zenith", marks=pytest.mark.slow),
        pytest.param(lazy_fixture("neon_compare"), id="neon", marks=pytest.mark.slow),
        pytest.param(lazy_fixture("vanilla_compare"), id="vanilla", marks=pytest.mark.slow),
        pytest.param(lazy_fixture("remote_compare"), id="remote", marks=pytest.mark.remote_cluster),
    ])

@@ -1,10 +1,10 @@
from io import BytesIO
import asyncio
import asyncpg
from fixtures.zenith_fixtures import ZenithEnv, Postgres, PgProtocol
from fixtures.neon_fixtures import NeonEnv, Postgres, PgProtocol
from fixtures.log_helper import log
from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare


async def repeat_bytes(buf, repetitions: int):

@@ -36,9 +36,9 @@ async def parallel_load_different_tables(pg: PgProtocol, n_parallel: int):


# Load 5 different tables in parallel with COPY TO
def test_parallel_copy_different_tables(zenith_with_baseline: PgCompare, n_parallel=5):
def test_parallel_copy_different_tables(neon_with_baseline: PgCompare, n_parallel=5):

    env = zenith_with_baseline
    env = neon_with_baseline
    conn = env.pg.connect()
    cur = conn.cursor()

@@ -65,8 +65,8 @@ async def parallel_load_same_table(pg: PgProtocol, n_parallel: int):


# Load data into one table with COPY TO from 5 parallel connections
def test_parallel_copy_same_table(zenith_with_baseline: PgCompare, n_parallel=5):
    env = zenith_with_baseline
def test_parallel_copy_same_table(neon_with_baseline: PgCompare, n_parallel=5):
    env = neon_with_baseline
    conn = env.pg.connect()
    cur = conn.cursor()


@@ -1,8 +1,8 @@
from contextlib import closing
from fixtures.zenith_fixtures import PgBin, VanillaPostgres, ZenithEnv, profiling_supported
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.neon_fixtures import PgBin, VanillaPostgres, NeonEnv, profiling_supported
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare

from fixtures.benchmark_fixture import PgBenchRunResult, MetricReport, ZenithBenchmarker
from fixtures.benchmark_fixture import PgBenchRunResult, MetricReport, NeonBenchmarker
from fixtures.log_helper import log

from pathlib import Path

@@ -99,11 +99,11 @@ def get_scales_matrix():
    return list(map(int, scales.split(",")))


# Run the pgbench tests against vanilla Postgres and zenith
# Run the pgbench tests against vanilla Postgres and neon
@pytest.mark.parametrize("scale", get_scales_matrix())
@pytest.mark.parametrize("duration", get_durations_matrix())
def test_pgbench(zenith_with_baseline: PgCompare, scale: int, duration: int):
    run_test_pgbench(zenith_with_baseline, scale, duration)
def test_pgbench(neon_with_baseline: PgCompare, scale: int, duration: int):
    run_test_pgbench(neon_with_baseline, scale, duration)


# Run the pgbench tests, and generate a flamegraph from it

@@ -114,18 +114,18 @@ def test_pgbench(zenith_with_baseline: PgCompare, scale: int, duration: int):
# can see how much overhead the profiling adds.
@pytest.mark.parametrize("scale", get_scales_matrix())
@pytest.mark.parametrize("duration", get_durations_matrix())
def test_pgbench_flamegraph(zenbenchmark, pg_bin, zenith_env_builder, scale: int, duration: int):
    zenith_env_builder.num_safekeepers = 1
    zenith_env_builder.pageserver_config_override = '''
def test_pgbench_flamegraph(zenbenchmark, pg_bin, neon_env_builder, scale: int, duration: int):
    neon_env_builder.num_safekeepers = 1
    neon_env_builder.pageserver_config_override = '''
profiling="page_requests"
'''
    if not profiling_supported():
        pytest.skip("pageserver was built without 'profiling' feature")

    env = zenith_env_builder.init_start()
    env.zenith_cli.create_branch("empty", "main")
    env = neon_env_builder.init_start()
    env.neon_cli.create_branch("empty", "main")

    run_test_pgbench(ZenithCompare(zenbenchmark, env, pg_bin, "pgbench"), scale, duration)
    run_test_pgbench(NeonCompare(zenbenchmark, env, pg_bin, "pgbench"), scale, duration)


# Run the pgbench tests against an existing Postgres cluster

@@ -1,8 +1,8 @@
import os
from contextlib import closing
from fixtures.benchmark_fixture import MetricReport
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare
from fixtures.log_helper import log

import psycopg2.extras

@@ -16,14 +16,14 @@ import time
# A naive pageserver implementation would create a full image layer for each
# dirty segment, leading to write_amplification = segment_size / page_size,
# when compared to vanilla postgres. With segment_size = 10MB, that's 1250.
def test_random_writes(zenith_with_baseline: PgCompare):
    env = zenith_with_baseline
def test_random_writes(neon_with_baseline: PgCompare):
    env = neon_with_baseline

    # Number of rows in the test database. 1M rows runs quickly, but implies
    # a small effective_checkpoint_distance, which makes the test less realistic.
    # Using a 300 TB database would imply a 250 MB effective_checkpoint_distance,
    # but it will take a very long time to run. From what I've seen so far,
    # increasing n_rows doesn't have an impact on the (zenith_runtime / vanilla_runtime)
    # increasing n_rows doesn't have an impact on the (neon_runtime / vanilla_runtime)
    # performance ratio.
    n_rows = 1 * 1000 * 1000  # around 36 MB table

@@ -65,7 +65,7 @@ def test_random_writes(zenith_with_baseline: PgCompare):
    env.zenbenchmark.record("table_size", table_size, 'bytes', MetricReport.TEST_PARAM)

    # Decide how much to write, based on knowledge of pageserver implementation.
    # Avoiding segment collisions maximizes (zenith_runtime / vanilla_runtime).
    # Avoiding segment collisions maximizes (neon_runtime / vanilla_runtime).
    segment_size = 10 * 1024 * 1024
    n_segments = table_size // segment_size
    n_writes = load_factor * n_segments // 3

@@ -2,9 +2,9 @@
#
from contextlib import closing
from dataclasses import dataclass
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv
from fixtures.log_helper import log
from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import PgCompare
import pytest

@@ -20,8 +20,8 @@ import pytest
    pytest.param(10000000, 1, 0),
    pytest.param(10000000, 1, 4)
])
def test_seqscans(zenith_with_baseline: PgCompare, rows: int, iters: int, workers: int):
    env = zenith_with_baseline
def test_seqscans(neon_with_baseline: PgCompare, rows: int, iters: int, workers: int):
    env = neon_with_baseline

    with closing(env.pg.connect()) as conn:
        with conn.cursor() as cur:

@@ -1,17 +1,17 @@
import pytest
from contextlib import closing
from fixtures.zenith_fixtures import ZenithEnvBuilder
from fixtures.benchmark_fixture import ZenithBenchmarker
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.benchmark_fixture import NeonBenchmarker


# This test sometimes runs for longer than the global 5 minute timeout.
@pytest.mark.timeout(600)
def test_startup(zenith_env_builder: ZenithEnvBuilder, zenbenchmark: ZenithBenchmarker):
    zenith_env_builder.num_safekeepers = 3
    env = zenith_env_builder.init_start()
def test_startup(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchmarker):
    neon_env_builder.num_safekeepers = 3
    env = neon_env_builder.init_start()

    # Start
    env.zenith_cli.create_branch('test_startup')
    env.neon_cli.create_branch('test_startup')
    with zenbenchmark.record_duration("startup_time"):
        pg = env.postgres.create_start('test_startup')
        pg.safe_psql("select 1;")

@@ -13,13 +13,13 @@
import os
from contextlib import closing
from fixtures.benchmark_fixture import MetricReport
from fixtures.zenith_fixtures import ZenithEnv
from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare, VanillaCompare, NeonCompare
from fixtures.log_helper import log


def test_write_amplification(zenith_with_baseline: PgCompare):
    env = zenith_with_baseline
def test_write_amplification(neon_with_baseline: PgCompare):
    env = neon_with_baseline

    with closing(env.pg.connect()) as conn:
        with conn.cursor() as cur:

@@ -1,7 +1,7 @@
import pytest
import os

from fixtures.zenith_fixtures import ZenithEnv
from fixtures.neon_fixtures import NeonEnv
from fixtures.log_helper import log
"""
Use this test to see what happens when tests fail.

@@ -18,10 +18,10 @@ run_broken = pytest.mark.skipif(os.environ.get('RUN_BROKEN') is None,


@run_broken
def test_broken(zenith_simple_env: ZenithEnv, pg_bin):
    env = zenith_simple_env
def test_broken(neon_simple_env: NeonEnv, pg_bin):
    env = neon_simple_env

    env.zenith_cli.create_branch("test_broken", "empty")
    env.neon_cli.create_branch("test_broken", "empty")
    env.postgres.create_start("test_broken")
    log.info('postgres is running')