Do not create timeline along with tenant

This commit is contained in:
Kirill Bulatov
2022-03-03 19:21:35 +02:00
committed by Kirill Bulatov
parent a5e10c4f64
commit dd74c66ef0
12 changed files with 97 additions and 113 deletions

View File

@@ -9,7 +9,7 @@ use anyhow::{bail, Context};
use nix::errno::Errno;
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use pageserver::http::models::{TenantCreateRequest, TenantCreateResponse, TimelineCreateRequest};
use pageserver::http::models::{TenantCreateRequest, TimelineCreateRequest};
use pageserver::timelines::TimelineInfo;
use postgres::{Config, NoTls};
use reqwest::blocking::{Client, RequestBuilder, Response};
@@ -336,20 +336,19 @@ impl PageServerNode {
.json()?)
}
pub fn tenant_create(
&self,
new_tenant_id: Option<ZTenantId>,
initial_timeline_id: Option<ZTimelineId>,
) -> Result<TenantCreateResponse> {
Ok(self
pub fn tenant_create(&self, new_tenant_id: Option<ZTenantId>) -> anyhow::Result<ZTenantId> {
let tenant_id_string = self
.http_request(Method::POST, format!("{}/tenant", self.http_base_url))
.json(&TenantCreateRequest {
new_tenant_id,
initial_timeline_id,
})
.json(&TenantCreateRequest { new_tenant_id })
.send()?
.error_from_body()?
.json()?)
.json::<String>()?;
tenant_id_string.parse().with_context(|| {
format!(
"Failed to parse tenant creation response as tenant id: {}",
tenant_id_string
)
})
}
pub fn timeline_list(&self, tenant_id: &ZTenantId) -> Result<Vec<TimelineInfo>> {

View File

@@ -43,7 +43,7 @@ fn main() -> Result<()> {
Arg::new("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
.help("Initialize pageserver service: creates an initial config, tenant and timeline, if specified"),
)
.arg(
Arg::new("workdir")

View File

@@ -21,17 +21,6 @@ pub struct TenantCreateRequest {
#[serde(default)]
#[serde(with = "opt_display_serde")]
pub new_tenant_id: Option<ZTenantId>,
#[serde(default)]
#[serde(with = "opt_display_serde")]
pub initial_timeline_id: Option<ZTimelineId>,
}
#[derive(Deserialize, Serialize)]
pub struct TenantCreateResponse {
#[serde(with = "hex")]
pub tenant_id: ZTenantId,
#[serde(with = "hex")]
pub timeline_id: ZTimelineId,
}
#[derive(Serialize)]

View File

@@ -218,23 +218,14 @@ paths:
new_tenant_id:
type: string
format: hex
initial_timeline_id:
type: string
format: hex
responses:
"201":
description: CREATED
content:
application/json:
schema:
type: object
properties:
tenant_id:
type: string
format: hex
timeline_id:
type: string
format: hex
type: string
format: hex
"400":
description: Malformed tenant create request
content:

View File

@@ -20,7 +20,6 @@ use zenith_utils::zid::{HexZTimelineId, ZTimelineId};
use super::models::StatusResponse;
use super::models::TenantCreateRequest;
use super::models::TenantCreateResponse;
use super::models::TimelineCreateRequest;
use crate::repository::RepositoryTimeline;
use crate::timelines::TimelineInfo;
@@ -215,20 +214,17 @@ async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Bo
let request_data: TenantCreateRequest = json_request(&mut request).await?;
let initial_timeline_id = tokio::task::spawn_blocking(move || {
let _enter = info_span!("tenant_create", tenant = ?request_data.new_tenant_id, initial_timeline = ?request_data.initial_timeline_id).entered();
tenant_mgr::create_repository_for_tenant(
get_config(&request),
request_data.new_tenant_id,
request_data.initial_timeline_id,
).map(|new_ids| TenantCreateResponse {
tenant_id: new_ids.tenant_id,
timeline_id: new_ids.timeline_id,
})
let new_tenant_id = tokio::task::spawn_blocking(move || {
let _enter = info_span!("tenant_create", tenant = ?request_data.new_tenant_id).entered();
// TODO kb this has to be changed to create tenant only
tenant_mgr::create_repository_for_tenant(get_config(&request), request_data.new_tenant_id)
})
.await
.map_err(ApiError::from_err)??;
Ok(json_response(StatusCode::CREATED, initial_timeline_id)?)
Ok(json_response(
StatusCode::CREATED,
new_tenant_id.to_string(),
)?)
}
async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {

View File

@@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize};
use std::collections::{hash_map, HashMap};
use std::fmt;
use std::sync::{Arc, Mutex, MutexGuard};
use zenith_utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId};
use zenith_utils::zid::{ZTenantId, ZTimelineId};
lazy_static! {
static ref TENANTS: Mutex<HashMap<ZTenantId, Tenant>> = Mutex::new(HashMap::new());
@@ -180,15 +180,13 @@ pub fn shutdown_all_tenants() {
pub fn create_repository_for_tenant(
conf: &'static PageServerConf,
new_tenant_id: Option<ZTenantId>,
initial_timeline_id: Option<ZTimelineId>,
) -> Result<ZTenantTimelineId> {
let tenant_id = new_tenant_id.unwrap_or_else(ZTenantId::generate);
let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenant_id));
let (initial_timeline_id, repo) =
timelines::create_repo(conf, tenant_id, initial_timeline_id, wal_redo_manager)?;
) -> Result<ZTenantId> {
let new_tenant_id = new_tenant_id.unwrap_or_else(ZTenantId::generate);
let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, new_tenant_id));
let repo = timelines::create_repo(conf, new_tenant_id, wal_redo_manager)?;
match access_tenants().entry(tenant_id) {
hash_map::Entry::Occupied(_) => bail!("tenant {} already exists", tenant_id),
match access_tenants().entry(new_tenant_id) {
hash_map::Entry::Occupied(_) => bail!("tenant {} already exists", new_tenant_id),
hash_map::Entry::Vacant(v) => {
v.insert(Tenant {
state: TenantState::Idle,
@@ -197,7 +195,7 @@ pub fn create_repository_for_tenant(
}
}
Ok(ZTenantTimelineId::new(tenant_id, initial_timeline_id))
Ok(new_tenant_id)
}
pub fn get_tenant_state(tenantid: ZTenantId) -> Option<TenantState> {

View File

@@ -17,9 +17,9 @@ use zenith_utils::lsn::Lsn;
use zenith_utils::zid::{opt_display_serde, ZTenantId, ZTimelineId};
use zenith_utils::{crashsafe_dir, logging};
use crate::walredo::WalRedoManager;
use crate::{config::PageServerConf, repository::Repository};
use crate::{import_datadir, LOG_FILE_NAME};
use crate::{layered_repository::LayeredRepository, walredo::WalRedoManager};
use crate::{repository::RepositoryTimeline, tenant_mgr};
use crate::{repository::Timeline, CheckpointConfig};
@@ -179,8 +179,13 @@ pub fn init_pageserver(
if let Some(tenant_id) = create_tenant {
println!("initializing tenantid {}", tenant_id);
create_repo(conf, tenant_id, initial_timeline_id, dummy_redo_mgr)
.context("failed to create repo")?;
let repo = create_repo(conf, tenant_id, dummy_redo_mgr).context("failed to create repo")?;
let new_timeline_id = initial_timeline_id.unwrap_or_else(ZTimelineId::generate);
bootstrap_timeline(conf, tenant_id, new_timeline_id, repo.as_ref())
.context("failed to create initial timeline")?;
println!("initial timeline {} created", new_timeline_id)
} else if initial_timeline_id.is_some() {
println!("Ignoring initial timeline parameter, due to no tenant id to create given");
}
crashsafe_dir::create_dir_all(conf.tenants_path())?;
@@ -191,9 +196,8 @@ pub fn init_pageserver(
pub fn create_repo(
conf: &'static PageServerConf,
tenant_id: ZTenantId,
init_timeline_id: Option<ZTimelineId>,
wal_redo_manager: Arc<dyn WalRedoManager + Send + Sync>,
) -> Result<(ZTimelineId, Arc<dyn Repository>)> {
) -> Result<Arc<dyn Repository>> {
let repo_dir = conf.tenant_path(&tenant_id);
if repo_dir.exists() {
bail!("repo for {} already exists", tenant_id)
@@ -207,25 +211,12 @@ pub fn create_repo(
info!("created directory structure in {}", repo_dir.display());
// create a new timeline directory
let timeline_id = init_timeline_id.unwrap_or_else(ZTimelineId::generate);
let timelinedir = conf.timeline_path(&timeline_id, &tenant_id);
crashsafe_dir::create_dir(&timelinedir)?;
let repo = Arc::new(crate::layered_repository::LayeredRepository::new(
Ok(Arc::new(LayeredRepository::new(
conf,
wal_redo_manager,
tenant_id,
conf.remote_storage_config.is_some(),
));
// Load data into pageserver
// TODO To implement zenith import we need to
// move data loading out of create_repo()
bootstrap_timeline(conf, tenant_id, timeline_id, repo.as_ref())?;
Ok((timeline_id, repo))
)))
}
// Returns checkpoint LSN from controlfile

View File

@@ -23,9 +23,18 @@ def check_client(client: ZenithPageserverHttpClient, initial_tenant: UUID):
client.tenant_create(tenant_id)
assert tenant_id.hex in {t['id'] for t in client.tenant_list()}
# check its timelines
timelines = client.timeline_list(tenant_id)
assert len(timelines) == 0, "initial tenant should not have any timelines"
# create timeline
timeline_id = uuid4()
client.timeline_create(tenant_id=tenant_id, timeline_id=timeline_id)
timelines = client.timeline_list(tenant_id)
assert len(timelines) > 0
# check it is there
assert timeline_id.hex in {b['timeline_id'] for b in client.timeline_list(tenant_id)}
for timeline in timelines:
timeline_id_str = str(timeline['timeline_id'])
timeline_details = client.timeline_detail(tenant_id=tenant_id,
@@ -34,13 +43,6 @@ def check_client(client: ZenithPageserverHttpClient, initial_tenant: UUID):
assert timeline_details['tenant_id'] == tenant_id.hex
assert timeline_details['timeline_id'] == timeline_id_str
# create timeline
timeline_id = uuid4()
client.timeline_create(tenant_id=tenant_id, timeline_id=timeline_id)
# check it is there
assert timeline_id.hex in {b['timeline_id'] for b in client.timeline_list(tenant_id)}
def test_pageserver_http_api_client(zenith_simple_env: ZenithEnv):
env = zenith_simple_env

View File

@@ -15,10 +15,10 @@ def test_tenants_normal_work(zenith_env_builder: ZenithEnvBuilder, with_wal_acce
tenant_1 = env.zenith_cli.create_tenant()
tenant_2 = env.zenith_cli.create_tenant()
env.zenith_cli.create_branch(f'test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}',
tenant_id=tenant_1)
env.zenith_cli.create_branch(f'test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}',
tenant_id=tenant_2)
env.zenith_cli.create_timeline(
f'test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}', tenant_id=tenant_1)
env.zenith_cli.create_timeline(
f'test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}', tenant_id=tenant_2)
pg_tenant1 = env.postgres.create_start(
f'test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}',

View File

@@ -57,6 +57,7 @@ Fn = TypeVar('Fn', bound=Callable[..., Any])
DEFAULT_OUTPUT_DIR = 'test_output'
DEFAULT_POSTGRES_DIR = 'tmp_install'
DEFAULT_BRANCH_NAME = 'main'
BASE_PORT = 15000
WORKER_PORT_NUM = 100
@@ -424,7 +425,7 @@ class ZenithEnvBuilder:
num_safekeepers: int = 0,
pageserver_auth_enabled: bool = False,
rust_log_override: Optional[str] = None,
default_branch_name='main'):
default_branch_name=DEFAULT_BRANCH_NAME):
self.repo_dir = repo_dir
self.rust_log_override = rust_log_override
self.port_distributor = port_distributor
@@ -547,7 +548,6 @@ class ZenithEnv:
self.rust_log_override = config.rust_log_override
self.port_distributor = config.port_distributor
self.s3_mock_server = config.s3_mock_server
self.default_branch_name = config.default_branch_name
self.zenith_cli = ZenithCli(env=self)
self.postgres = PostgresFactory(self)
self.safekeepers: List[Safekeeper] = []
@@ -639,7 +639,7 @@ def _shared_simple_env(request: Any, port_distributor) -> Iterator[ZenithEnv]:
env = builder.init_start()
# For convenience in tests, create a branch from the freshly-initialized cluster.
env.zenith_cli.create_branch("empty")
env.zenith_cli.create_branch('empty', ancestor_branch_name=DEFAULT_BRANCH_NAME)
yield env
@@ -750,20 +750,17 @@ class ZenithPageserverHttpClient(requests.Session):
assert isinstance(res_json, list)
return res_json
def tenant_create(self,
tenant_id: Optional[uuid.UUID] = None,
new_timeline_id: Optional[uuid.UUID] = None) -> Dict[Any, Any]:
def tenant_create(self, tenant_id: Optional[uuid.UUID] = None) -> uuid.UUID:
res = self.post(
f"http://localhost:{self.port}/v1/tenant",
json={
'new_tenant_id': tenant_id.hex if tenant_id else None,
'initial_timeline_id': new_timeline_id.hex if new_timeline_id else None,
},
)
self.verbose_error(res)
res_json = res.json()
assert isinstance(res_json, dict)
return res_json
new_tenant_id = res.json()
assert isinstance(new_tenant_id, str)
return uuid.UUID(new_tenant_id)
def timeline_list(self, tenant_id: uuid.UUID) -> List[Dict[Any, Any]]:
res = self.get(f"http://localhost:{self.port}/v1/tenant/{tenant_id.hex}/timeline")
@@ -834,8 +831,36 @@ class ZenithCli:
res.check_returncode()
return res
def create_timeline(self,
new_branch_name: str,
tenant_id: Optional[uuid.UUID] = None) -> uuid.UUID:
cmd = [
'timeline',
'create',
'--branch-name',
new_branch_name,
'--tenant-id',
(tenant_id or self.env.initial_tenant).hex,
]
res = self.raw_cli(cmd)
res.check_returncode()
create_timeline_id_extractor = re.compile(r"^Created timeline '(?P<timeline_id>[^']+)'",
re.MULTILINE)
matches = create_timeline_id_extractor.search(res.stdout)
created_timeline_id = None
if matches is not None:
created_timeline_id = matches.group('timeline_id')
if created_timeline_id is None:
raise Exception('could not find timeline id after `zenith timeline create` invocation')
else:
return uuid.UUID(created_timeline_id)
def create_branch(self,
new_branch_name: str,
new_branch_name: str = DEFAULT_BRANCH_NAME,
ancestor_branch_name: Optional[str] = None,
tenant_id: Optional[uuid.UUID] = None,
ancestor_start_lsn: Optional[str] = None) -> uuid.UUID:
@@ -846,9 +871,9 @@ class ZenithCli:
new_branch_name,
'--tenant-id',
(tenant_id or self.env.initial_tenant).hex,
'--ancestor-branch-name',
ancestor_branch_name or self.env.default_branch_name,
]
if ancestor_branch_name is not None:
cmd.extend(['--ancestor-branch-name', ancestor_branch_name])
if ancestor_start_lsn is not None:
cmd.extend(['--ancestor-start-lsn', ancestor_start_lsn])

View File

@@ -31,7 +31,7 @@ def test_bulk_tenant_create(
start = timeit.default_timer()
tenant = env.zenith_cli.create_tenant()
env.zenith_cli.create_branch(
env.zenith_cli.create_timeline(
f'test_bulk_tenant_create_{tenants_count}_{i}_{use_wal_acceptors}', tenant_id=tenant)
# FIXME: We used to start new safekeepers here. Did that make sense? Should we do it now?

View File

@@ -528,17 +528,10 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> Re
}
Some(("create", create_match)) => {
let initial_tenant_id = parse_tenant_id(create_match)?;
let initial_timeline_id_argument = parse_timeline_id(create_match)?;
let new_ds =
pageserver.tenant_create(initial_tenant_id, initial_timeline_id_argument)?;
env.register_branch_mapping(
DEFAULT_BRANCH_NAME.to_owned(),
new_ds.tenant_id,
new_ds.timeline_id,
)?;
let new_tenant_id = pageserver.tenant_create(initial_tenant_id)?;
println!(
"tenant {} successfully created on the pageserver, initial timeline: '{}'",
new_ds.tenant_id, new_ds.timeline_id
"tenant {} successfully created on the pageserver",
new_tenant_id
);
}
Some((sub_name, _)) => bail!("Unexpected tenant subcommand '{}'", sub_name),