#!/usr/bin/env bash

set -e

function cleanup {
    # Restore the previous kube context, if one was captured
    if [ -n "$PREV_CHROMA_KUBE_CONTEXT" ]; then
        kubectl config use-context "$PREV_CHROMA_KUBE_CONTEXT"
    fi
    # Kill the tunnel process, if it was started
    if [ -n "$TUNNEL_PID" ]; then
        kill "$TUNNEL_PID"
    fi
    # Tear down the test cluster
    minikube delete -p chroma-test
}

trap cleanup EXIT

# Save the current kube context into a variable
export PREV_CHROMA_KUBE_CONTEXT=$(kubectl config current-context)

# Create a new minikube cluster for the test
minikube start -p chroma-test

# Add the ingress addon to the cluster
minikube addons enable ingress -p chroma-test
minikube addons enable ingress-dns -p chroma-test

# Set up Docker to build inside the minikube cluster and build the images
eval $(minikube -p chroma-test docker-env)
docker build -t server:latest -f Dockerfile .
docker build -t chroma-coordinator:latest -f go/coordinator/Dockerfile .
docker build -t worker -f rust/worker/Dockerfile . --build-arg CHROMA_KUBERNETES_INTEGRATION=1

# Apply the kubernetes manifests
kubectl apply -f k8s/deployment
kubectl apply -f k8s/crd
kubectl apply -f k8s/cr
kubectl apply -f k8s/test

# Wait for the pods in the chroma namespace to be ready
kubectl wait --namespace chroma --for=condition=Ready pods --all --timeout=400s

# Run minikube tunnel in the background to expose the services
minikube tunnel -c true -p chroma-test &
TUNNEL_PID=$!

# Wait for the tunnel to be ready. There isn't an easy way to check whether the tunnel is up, so we just wait 10 seconds (a commented-out alternative follows).
sleep 10
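
# Optional, commented-out sketch of a more deterministic wait: poll until the
# 'server' LoadBalancer service in the 'chroma' namespace has an ingress IP,
# which minikube tunnel assigns once it is up. This assumes IP assignment is a
# good enough readiness signal, which may not hold in every environment.
# for _ in $(seq 1 30); do
#     if [ -n "$(kubectl get svc server -n chroma -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; then
#         break
#     fi
#     sleep 1
# done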

# Export the settings the tests read from the environment
export CHROMA_CLUSTER_TEST_ONLY=1
export CHROMA_SERVER_HOST=$(kubectl get svc server -n chroma -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
export PULSAR_BROKER_URL=$(kubectl get svc pulsar-lb -n chroma -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
export CHROMA_COORDINATOR_HOST=$(kubectl get svc coordinator-lb -n chroma -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
export CHROMA_SERVER_GRPC_PORT="50051"

echo "Chroma Server is running at port $CHROMA_SERVER_HOST"
echo "Pulsar Broker is running at port $PULSAR_BROKER_URL"
echo "Chroma Coordinator is running at port $CHROMA_COORDINATOR_HOST"

echo testing: python -m pytest "$@"
python -m pytest "$@"

# Run the coordinator's memberlist manager test against the cluster
export CHROMA_KUBERNETES_INTEGRATION=1
cd go/coordinator
go test -timeout 30s -run ^TestNodeWatcher$ github.com/chroma/chroma-coordinator/internal/memberlist_manager