add health check to wait for all services to be ready before next step (#501)

AWS integration tests are flaky because we didn't wait for the services
to become healthy. (We only waited for the localstack container itself;
this PR adds a wait for the sub-services as well.)
This commit is contained in:
Rob Meng
2023-09-18 15:17:45 -04:00
committed by GitHub
parent 31dad71c94
commit 731f86e44c
3 changed files with 6 additions and 2 deletions

View File

@@ -9,6 +9,7 @@ on:
- node/**
- rust/ffi/node/**
- .github/workflows/node.yml
- docker-compose.yml
env:
# Disable full debug symbol generation to speed up CI build and keep memory down
@@ -133,7 +134,7 @@ jobs:
cache: 'npm'
cache-dependency-path: node/package-lock.json
- name: start local stack
run: docker compose -f ../docker-compose.yml up -d
run: docker compose -f ../docker-compose.yml up -d --wait
- name: create s3
run: aws s3 mb s3://lancedb-integtest --endpoint $AWS_ENDPOINT
- name: create ddb

View File

@@ -13,3 +13,6 @@ services:
- AWS_SECRET_ACCESS_KEY=SECRETKEY
healthcheck:
test: [ "CMD", "curl", "-f", "http://localhost:4566/health" ]
interval: 5s
retries: 3
start_period: 10s

View File

@@ -24,7 +24,7 @@ chai.use(chaiAsPromised)
describe('LanceDB AWS Integration test', function () {
it('s3+ddb schema is processed correctly', async function () {
this.timeout(5000)
this.timeout(15000)
// WARNING: specifying engine is NOT a publicly supported feature in lancedb yet
// THE API WILL CHANGE