ci: add the simple framework of nightly functional tests (#2648)
.github/actions/deploy-greptimedb/action.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: Deploy GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
  aws-ci-test-bucket:
    description: 'AWS S3 bucket name for testing'
    required: true
  aws-region:
    description: 'AWS region for testing'
    required: true
  data-root:
    description: 'Data root for testing'
    required: true
  aws-access-key-id:
    description: 'AWS access key id for testing'
    required: true
  aws-secret-access-key:
    description: 'AWS secret access key for testing'
    required: true
runs:
  using: composite
  steps:
    - name: Deploy GreptimeDB by Helm
      shell: bash
      env:
        DATA_ROOT: ${{ inputs.data-root }}
        AWS_CI_TEST_BUCKET: ${{ inputs.aws-ci-test-bucket }}
        AWS_REGION: ${{ inputs.aws-region }}
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
      run: |
        ./.github/scripts/deploy-greptimedb.sh
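
For local debugging, the step above boils down to roughly the following shell session (a sketch: the bucket, region, and credential values are placeholders, and kind, kubectl, and helm are assumed to be installed):

export DATA_ROOT=sqlness-test   # matches the data-root input used by the nightly workflow
export AWS_CI_TEST_BUCKET=<ci-test-bucket>
export AWS_REGION=<region>
export AWS_ACCESS_KEY_ID=<access-key-id>
export AWS_SECRET_ACCESS_KEY=<secret-access-key>
./.github/scripts/deploy-greptimedb.sh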
.github/actions/sqlness-test/action.yml (new file, 59 lines)
@@ -0,0 +1,59 @@
name: Run sqlness test
description: Run sqlness test on GreptimeDB

inputs:
  aws-ci-test-bucket:
    description: 'AWS S3 bucket name for testing'
    required: true
  aws-region:
    description: 'AWS region for testing'
    required: true
  data-root:
    description: 'Data root for testing'
    required: true
  aws-access-key-id:
    description: 'AWS access key id for testing'
    required: true
  aws-secret-access-key:
    description: 'AWS secret access key for testing'
    required: true

runs:
  using: composite
  steps:
    - name: Deploy GreptimeDB cluster by Helm
      uses: ./.github/actions/deploy-greptimedb
      with:
        data-root: ${{ inputs.data-root }}
        aws-ci-test-bucket: ${{ inputs.aws-ci-test-bucket }}
        aws-region: ${{ inputs.aws-region }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

    # TODO(zyy17): The following tests will be replaced by the real sqlness test.
    - name: Run tests on greptimedb cluster
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 14002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"

    - name: Run tests on greptimedb cluster that uses S3
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 24002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 24002 -e "SHOW TABLES;"

    - name: Run tests on standalone greptimedb
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 34002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 34002 -e "SHOW TABLES;"

    - name: Clean S3 data
      shell: bash
      env:
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
      run: |
        aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
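
Once the port-forwards from deploy-greptimedb.sh are up, the same smoke test can be run by hand; a minimal sketch that loops over the three MySQL endpoints (14002: cluster, 24002: S3-backed cluster, 34002: standalone):

for port in 14002 24002 34002; do
  mysql -h 127.0.0.1 -P "$port" \
    -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));"
  mysql -h 127.0.0.1 -P "$port" -e "SHOW TABLES;"
done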
.github/scripts/deploy-greptimedb.sh (new executable file, 172 lines)
@@ -0,0 +1,172 @@
#!/usr/bin/env bash

set -e
set -o pipefail

KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

# Create a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
  cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
- role: worker
- role: worker
- role: worker
EOF
}

# Add greptime Helm chart repo.
function add_greptime_chart() {
  helm repo add greptime "$GREPTIME_CHART"
  helm repo update
}

# Deploy an etcd cluster with 3 members.
function deploy_etcd_cluster() {
  local namespace="$1"

  helm install etcd "$ETCD_CHART" \
    --set replicaCount=3 \
    --set auth.rbac.create=false \
    --set auth.rbac.token.enabled=false \
    -n "$namespace"

  # Wait for etcd cluster to be ready.
  kubectl rollout status statefulset/etcd -n "$namespace"
}

# Deploy greptimedb-operator.
function deploy_greptimedb_operator() {
  # Use the latest chart and image.
  helm install greptimedb-operator greptime/greptimedb-operator \
    --set image.tag=latest \
    -n "$DEFAULT_INSTALL_NAMESPACE"

  # Wait for greptimedb-operator to be ready.
  kubectl rollout status deployment/greptimedb-operator -n "$DEFAULT_INSTALL_NAMESPACE"
}

# Deploy greptimedb cluster by using local storage.
# It will expose the cluster service ports as '14000', '14001', '14002', '14003' for local access.
function deploy_greptimedb_cluster() {
  local cluster_name=$1
  local install_namespace=$2

  kubectl create ns "$install_namespace"

  deploy_etcd_cluster "$install_namespace"

  helm install "$cluster_name" greptime/greptimedb-cluster \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
    -n "$install_namespace"

  # Wait for greptimedb cluster to be ready.
  while true; do
    PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    else
      echo "Cluster is not ready yet: Current phase: $PHASE"
      sleep 5 # wait for 5 seconds before checking again.
    fi
  done

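  # GreptimeDB frontend ports: 4000 HTTP, 4001 gRPC, 4002 MySQL, 4003 PostgreSQL.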
  # Expose the greptimedb cluster for local access.
  kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
    14000:4000 \
    14001:4001 \
    14002:4002 \
    14003:4003 > /tmp/connections.out &
}

# Deploy greptimedb cluster by using S3.
# It will expose the cluster service ports as '24000', '24001', '24002', '24003' for local access.
function deploy_greptimedb_cluster_with_s3_storage() {
  local cluster_name=$1
  local install_namespace=$2

  kubectl create ns "$install_namespace"

  deploy_etcd_cluster "$install_namespace"

  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
    --set storage.s3.region="$AWS_REGION" \
    --set storage.s3.root="$DATA_ROOT" \
    --set storage.s3.secretName=s3-credentials \
    --set storage.credentials.secretName=s3-credentials \
    --set storage.credentials.secretCreation.enabled=true \
    --set storage.credentials.secretCreation.enableEncryption=false \
    --set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
    --set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"

  # Wait for greptimedb cluster to be ready.
  while true; do
    PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    else
      echo "Cluster is not ready yet: Current phase: $PHASE"
      sleep 5 # wait for 5 seconds before checking again.
    fi
  done

  # Expose the greptimedb cluster for local access.
  kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
    24000:4000 \
    24001:4001 \
    24002:4002 \
    24003:4003 > /tmp/connections.out &
}

# Deploy standalone greptimedb.
# It will expose the service ports as '34000', '34001', '34002', '34003' for local access.
function deploy_standalone_greptimedb() {
  helm install greptimedb-standalone greptime/greptimedb-standalone \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    -n "$DEFAULT_INSTALL_NAMESPACE"

  # Wait for greptimedb-standalone to be ready.
  kubectl rollout status statefulset/greptimedb-standalone -n "$DEFAULT_INSTALL_NAMESPACE"

  # Expose greptimedb for local access.
  kubectl -n "$DEFAULT_INSTALL_NAMESPACE" port-forward svc/greptimedb-standalone \
    34000:4000 \
    34001:4001 \
    34002:4002 \
    34003:4003 > /tmp/connections.out &
}

# Entrypoint of the script.
function main() {
  create_kind_cluster
  add_greptime_chart

  # Deploy standalone greptimedb in the same K8s.
  if [ "$ENABLE_STANDALONE_MODE" == "true" ]; then
    deploy_standalone_greptimedb
  fi

  deploy_greptimedb_operator
  deploy_greptimedb_cluster testcluster testcluster
  deploy_greptimedb_cluster_with_s3_storage testcluster-s3 testcluster-s3
}

# Usage:
# - Deploy greptimedb cluster: ./deploy-greptimedb.sh
main
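
Once the script finishes, the deployments can be checked by hand; a rough sketch, using the namespaces and cluster names from main() above:

# Cluster phase, queried the same way as the wait loops in the script.
kubectl -n testcluster get gtc testcluster -o jsonpath='{.status.clusterPhase}'
kubectl -n testcluster-s3 get gtc testcluster-s3 -o jsonpath='{.status.clusterPhase}'
# Port-forward output is redirected to /tmp/connections.out.
tail /tmp/connections.out
# MySQL endpoint of the local-storage cluster.
mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"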
.github/workflows/nightly-funtional-tests.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
name: Nightly functional tests

on:
  schedule:
    # At 00:00 on Tuesday.
    - cron: '0 0 * * 2'
  workflow_dispatch:

jobs:
  sqlness-test:
    name: Run sqlness test
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Run sqlness test
        uses: ./.github/actions/sqlness-test
        with:
          data-root: sqlness-test
          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
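
Besides the weekly schedule, workflow_dispatch allows triggering the run manually; with the GitHub CLI that might look like the following (assuming sufficient repository permissions):

gh workflow run nightly-funtional-tests.yml
gh run list --workflow=nightly-funtional-tests.yml --limit 1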