import React, { useState, useEffect } from 'react';
import ReactDOM from 'react-dom';
import Loader from "react-loader-spinner";

// Shows one server (primary or standby): its data directory, status, and a
// ready-made psql connection line.
function ServerStatus(props) {
  const { datadir, status, port } = props.server;
  return (
    <div>
      <h4>{datadir === 'primary' ? 'Primary' : datadir}</h4>
      <p>status: {status}</p>
      <p>
        to connect: <code>psql -h {window.location.hostname} -p {port} -U zenith postgres</code>
      </p>
    </div>
  );
}
function StandbyList(props) {
  const bucketSummary = props.bucketSummary;
  const standbys = props.standbys;
  const maxwalpos = bucketSummary.maxwal ? walpos_to_int(bucketSummary.maxwal) : 0;
  const [walposInput, setWalposInput] = useState({ src: 'text', value: '0/0' });

  // Find the earliest base image. A standby can only be created at an LSN that
  // is covered by a base image plus the sliced WAL that follows it.
  const minwalpos = bucketSummary.nonrelimages
    ? bucketSummary.nonrelimages.reduce((minpos, imgpos_str) => {
        const imgpos = walpos_to_int(imgpos_str);
        return (minpos === 0 || imgpos < minpos) ? imgpos : minpos;
      }, 0)
    : 0;
  const can_create_standby = minwalpos > 0 && maxwalpos > 0 && maxwalpos >= minwalpos;
  let walpos_valid = true;

  function create_standby() {
    const formdata = new FormData();
    formdata.append("walpos", walposStr);
    props.startOperation('Creating new standby at ' + walposStr + '...',
      fetch("/api/create_standby", { method: 'POST', body: formdata }));
  }

  function destroy_standby(datadir) {
    const formdata = new FormData();
    formdata.append("datadir", datadir);
    props.startOperation('Destroying ' + datadir + '...',
      fetch("/api/destroy_server", { method: 'POST', body: formdata }));
  }

  const handleSliderChange = (event) => {
    setWalposInput({ src: 'slider', value: event.target.value });
  };
  const handleWalposChange = (event) => {
    setWalposInput({ src: 'text', value: event.target.value });
  };

  // The target LSN can be set either by typing it or by dragging the slider;
  // derive the other representation from whichever control was touched last.
  let sliderValue;
  let walposStr;
  if (walposInput.src === 'text') {
    const walpos = walpos_to_int(walposInput.value);
    walpos_valid = walpos >= minwalpos && walpos <= maxwalpos;
    sliderValue = Math.round((walpos - minwalpos) / (maxwalpos - minwalpos) * 100);
    walposStr = walposInput.value;
  } else {
    const slider = walposInput.value;
    const new_walpos = minwalpos + slider / 100 * (maxwalpos - minwalpos);
    sliderValue = slider;
    walposStr = int_to_walpos(Math.round(new_walpos));
    walpos_valid = true;
  }

  let standbystatus = '';
  if (standbys) {
    standbystatus = standbys.length > 0
      ? standbys.map((server) => (
          <React.Fragment key={server.datadir}>
            <ServerStatus server={server} />
            <button onClick={() => destroy_standby(server.datadir)}>Destroy</button>
          </React.Fragment>
        ))
      : "no standby servers";
  }

  return (
    <div>
      <h3>Standbys</h3>
      <button onClick={create_standby} disabled={!can_create_standby || !walpos_valid}>
        Create standby
      </button>
      {' at LSN '}
      <input type="text" value={walposStr} onChange={handleWalposChange} />
      <input type="range" min="0" max="100" value={sliderValue} onChange={handleSliderChange} />
      {standbystatus}
    </div>
  );
}
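// Worked example of the slider <-> LSN mapping above (illustrative values):
// with minwalpos = 0x1000000 and maxwalpos = 0x5000000, slider position 25
// maps to 0x1000000 + 0.25 * 0x4000000 = 0x2000000, i.e. LSN '0/2000000';
// conversely, typing '0/2000000' puts the slider at 25.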
function ServerList(props) {
  const primary = props.serverStatus ? props.serverStatus.primary : null;
  const standbys = props.serverStatus ? props.serverStatus.standbys : [];
  const bucketSummary = props.bucketSummary;

  function destroy_primary() {
    const formdata = new FormData();
    formdata.append("datadir", 'primary');
    props.startOperation('Destroying primary...',
      fetch("/api/destroy_server", { method: 'POST', body: formdata }));
  }

  function restore_primary() {
    props.startOperation('Restoring primary...',
      fetch("/api/restore_primary", { method: 'POST' }));
  }

  let primarystatus;
  if (primary) {
    primarystatus = (
      <div>
        <ServerStatus server={primary} />
        <button onClick={destroy_primary}>Destroy</button>
      </div>
    );
  } else {
    primarystatus = (
      <div>
        no primary server
        <button onClick={restore_primary}>Restore primary</button>
      </div>
    );
  }

  return (
    <>
      {primarystatus}
      <StandbyList standbys={standbys} bucketSummary={bucketSummary}
                   startOperation={props.startOperation} />
      <p>
        Should we list the WAL safekeeper nodes here? Or are they part of the
        Storage? Or not visible to users at all?
      </p>
    </>
  );
}

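// Sketch of the /api/server_status response shape that ServerList and
// ServerStatus assume (field names inferred from the code, values illustrative):
//
//   {
//     "primary":  { "datadir": "primary", "status": "running", "port": 5432 },
//     "standbys": [ { "datadir": "standby1", "status": "running", "port": 5433 } ]
//   }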
function BucketSummary(props) {
  const bucketSummary = props.bucketSummary;
  const startOperation = props.startOperation;

  function slicedice() {
    startOperation('Slicing sequential WAL to per-relation WAL...',
      fetch("/api/slicedice", { method: 'POST' }));
  }

  if (!bucketSummary.nonrelimages) {
    return <>loading...</>;
  }
  return (
    <div>
      <p>Base images at the following WAL positions:</p>
      <ul>
        {bucketSummary.nonrelimages.map((img) => (
          <li key={img}>{img}</li>
        ))}
      </ul>
      <p>Sliced WAL is available up to {bucketSummary.maxwal}</p>
      <p>Raw WAL is available up to {bucketSummary.maxseqwal}</p>
      <button onClick={slicedice}>Slice WAL</button>
      <p>
        Currently, the slicing or "sharding" of the WAL must be triggered
        manually, by clicking the button above.
        TODO: make it a continuous process that runs in the WAL safekeepers,
        in the Page Servers, or as a standalone service.
      </p>
    </div>
  );
}

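// Sketch of the /api/bucket_summary response shape assumed above (LSNs in
// PostgreSQL's hi/lo hex format; field names from the code, values illustrative):
//
//   {
//     "nonrelimages": ["0/1000000", "0/3000000"],   // base image LSNs
//     "maxwal":       "0/5000000",                  // end of sliced WAL
//     "maxseqwal":    "0/6000000"                   // end of raw, sequential WAL
//   }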
// Animated spinner shown while an operation is in progress. The spinner type
// and dimensions here are illustrative defaults.
function ProgressIndicator() {
  return <Loader type="ThreeDots" color="#888888" height={30} width={60} />;
}

// A WAL position ("LSN") is printed by PostgreSQL as two 32-bit hex halves
// separated by a slash, e.g. '16/B374D848'. The full 64-bit value is
// hi * 2^32 + lo, so the high half must be scaled, not just added.
function walpos_to_int(walpos) {
  const [hi, lo] = walpos.split('/');
  return parseInt(hi, 16) * 0x100000000 + parseInt(lo, 16);
}

function int_to_walpos(x) {
  return (Math.floor(x / 0x100000000).toString(16) + '/' +
          (x % 0x100000000).toString(16)).toUpperCase();
}

function OperationStatus(props) {
  const lastOperation = props.lastOperation;
  const inProgress = props.inProgress;
  const operationResult = props.operationResult;

  if (!lastOperation) {
    return '';
  }
  return (
    <div>
      <h3>Last operation:</h3>
      <p>
        {lastOperation} {(!inProgress && lastOperation) ? 'done!' : ''}
      </p>
      {inProgress ? <ProgressIndicator /> : <pre>{operationResult}</pre>}
    </div>
  );
}
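// Worked example of the LSN conversion helpers:
//   walpos_to_int('16/B374D848') = 0x16 * 0x100000000 + 0xB374D848
//                                = 94489280512 + 3010779208 = 97500059720
//   int_to_walpos(97500059720)  = '16/B374D848'
// Values beyond 2^53 would lose precision in JavaScript numbers; that is
// acceptable for a demo, but BigInt would be the safe choice in production.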
function ActionButtons(props) {
  const startOperation = props.startOperation;

  function reset_demo() {
    startOperation('resetting everything...',
      fetch("/api/reset_demo", { method: 'POST' }));
  }
  function init_primary() {
    startOperation('Initializing new primary...',
      fetch("/api/init_primary", { method: 'POST' }));
  }
  function zenith_push() {
    startOperation('Pushing new base image...',
      fetch("/api/zenith_push", { method: 'POST' }));
  }

  return (
    <div>
      <button onClick={reset_demo}>RESET DEMO</button>
      <p>
        RESET DEMO deletes everything in the storage bucket, and stops and
        destroys all servers. This resets the whole demo environment to its
        initial state.
      </p>
      <button onClick={init_primary}>Init Primary</button>
      <p>
        Init Primary runs initdb to create a new primary server. Click this
        after resetting the demo.
      </p>
      <button onClick={zenith_push}>Push Base Image</button>
      <p>
        Push Base Image stops the primary, copies its current state to the
        storage bucket as a new base backup, and restarts the primary.
        TODO: this should be handled by a continuous background process,
        probably running in the storage nodes, and without having to shut
        down the cluster, of course.
      </p>
    </div>
  );
}

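// The FormData-POST pattern above recurs in several components. A small
// helper like this hypothetical postAction() could factor it out (a sketch
// only; the components above still build their requests inline):
function postAction(url, fields = {}) {
  const formdata = new FormData();
  for (const [key, value] of Object.entries(fields)) {
    formdata.append(key, value);
  }
  return fetch(url, { method: 'POST', body: formdata });
}
// Example: startOperation('Destroying primary...',
//   postAction("/api/destroy_server", { datadir: 'primary' }));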
function Sidenav(props) {
  const toPage = (page) => (event) => {
    event.preventDefault();
    props.switchPage(page);
  };
  return (
    <nav>
      <h3>Menu</h3>
      <a href="#" onClick={toPage('servers')}>Servers</a>
      <a href="#" onClick={toPage('storage')}>Storage</a>
      <a href="#" onClick={toPage('snapshots')}>Snapshots</a>
      <a href="#" onClick={toPage('demo')}>Demo</a>
      <a href="#" onClick={toPage('import')}>Import / Export</a>
      <a href="#" onClick={toPage('jobs')}>Jobs</a>
    </nav>
  );
}
function App() {
  const [page, setPage] = useState('servers');
  const [serverStatus, setServerStatus] = useState({});
  const [bucketSummary, setBucketSummary] = useState({});
  const [lastOperation, setLastOperation] = useState('');
  const [inProgress, setInProgress] = useState(false);
  const [operationResult, setOperationResult] = useState('');

  useEffect(() => { reloadStatus(); }, []);

  // Remember what was started, show the spinner until the fetch promise
  // resolves, then display the response text as the result.
  function startOperation(operation, promise) {
    promise.then(result => result.text()).then(resultText => {
      operationFinished(resultText);
    });
    setLastOperation(operation);
    setInProgress(true);
    setOperationResult('');
  }

  function operationFinished(result) {
    setInProgress(false);
    setOperationResult(result);
    reloadStatus();
  }

  function clearOperation() {
    setLastOperation('');
    setInProgress(false);
    setOperationResult('');
  }

  function reloadStatus() {
    fetch('/api/server_status').then(res => res.json()).then(data => {
      setServerStatus(data);
    });
    fetch('/api/bucket_summary').then(res => res.json()).then(data => {
      setBucketSummary(data);
    });
  }

  const content = () => {
    if (page === 'servers') {
      return (
        <>
          <h2>Server status</h2>
          <ServerList serverStatus={serverStatus} bucketSummary={bucketSummary}
                      startOperation={startOperation} />
        </>
      );
    }
    if (page === 'storage') {
      return (
        <>
          <h2>Storage bucket status</h2>
          <BucketSummary bucketSummary={bucketSummary} startOperation={startOperation} />
        </>
      );
    }
    if (page === 'snapshots') {
      return (
        <>
          <h2>Snapshots</h2>
          <p>
            In Zenith, snapshots are just specific points (LSNs) in the WAL
            history, with a label. A snapshot prevents garbage-collecting old
            data that is still needed to reconstruct the database at that LSN.
          </p>
          <p>TODO:</p>
          <ul>
            <li>List existing snapshots</li>
            <li>Create a new snapshot manually, from the current state or from a given LSN</li>
            <li>Drill into the WAL stream to see what has happened; provide tools for e.g. finding the point where a table was dropped</li>
            <li>Create snapshots automatically based on events in the WAL, e.g. when pg_create_restore_point() is called in the primary</li>
            <li>Launch a new reader instance at a snapshot</li>
            <li>Export a snapshot</li>
            <li>Roll back the cluster to a snapshot</li>
          </ul>
        </>
      );
    }
    if (page === 'demo') {
      return (
        <>
          <h2>Misc actions</h2>
          <ActionButtons startOperation={startOperation} />
        </>
      );
    }
    if (page === 'import') {
      return (
        <>
          <h2>Import & Export tools</h2>
          <p>TODO:</p>
          <ul>
            <li>Initialize the database from an existing backup (pg_basebackup, WAL-G, pgBackRest)</li>
            <li>Initialize from a pg_dump or other SQL script</li>
            <li>Launch a batch job to import data files from S3</li>
            <li>Launch a batch job to export the database with pg_dump to S3</li>
          </ul>
          <p>
            These jobs can be run against reader processing nodes. We can even
            spawn a new reader node dedicated to a job, and destroy it when the
            job is done.
          </p>
        </>
      );
    }
    if (page === 'jobs') {
      return (
        <>
          <h2>Batch jobs</h2>
          <p>TODO:</p>
          <ul>
            <li>List running jobs launched from the Import & Export tools</li>
            <li>List other batch jobs launched by the user</li>
            <li>Launch new batch jobs</li>
          </ul>
        </>
      );
    }
  };

  function switchPage(page) {
    setPage(page);
    clearOperation();
  }

  return (
    <div>
      <Sidenav switchPage={switchPage} />
      <main>
        <OperationStatus lastOperation={lastOperation} inProgress={inProgress}
                         operationResult={operationResult} />
        {content()}
      </main>
    </div>
  );
}

ReactDOM.render(<App />, document.getElementById('reactApp'));
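// The Snapshots page above is only a TODO list so far. If the backend grows a
// snapshot endpoint, creating one could follow the same pattern as the other
// operations. Everything below -- the /api/create_snapshot URL and its
// fields -- is hypothetical, not an existing API:
function create_snapshot(label, lsn) {
  const formdata = new FormData();
  formdata.append('label', label);   // e.g. 'before-drop'
  formdata.append('lsn', lsn);       // e.g. '16/B374D848'
  return fetch('/api/create_snapshot', { method: 'POST', body: formdata });
}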