#!/usr/bin/env bash
#
# Build and install venafi-kubernetes-agent for VenafiConnection-based authentication.
# Wait for it to log a message indicating successful data upload.
#
# A VenafiConnection resource is created which directly loads a bearer token
# from a Kubernetes Secret.
# This is the simplest way of testing the VenafiConnection integration,
# but it does not fully test "secretless" (workload identity federation) authentication.
#
# Prerequisites:
# * kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl
# * venctl: https://docs.venafi.cloud/vaas/venctl/t-venctl-install/
# * jq: https://jqlang.github.io/jq/download/
# * step: https://smallstep.com/docs/step-cli/installation/
# * curl: https://www.man7.org/linux/man-pages/man1/curl.1.html
# * envsubst: https://www.man7.org/linux/man-pages/man1/envsubst.1.html
# * gcloud: https://cloud.google.com/sdk/docs/install
# * gke-gcloud-auth-plugin: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl
# > :warning: If you installed gcloud using snap, you have to install the kubectl plugin using apt:
# > https://github.com/actions/runner-images/issues/6778#issuecomment-1360360603
#
# In case metrics and logs are missing from your cluster, see:
# * https://cloud.google.com/kubernetes-engine/docs/troubleshooting/dashboards#write_permissions
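#
# Example invocation (all values below are illustrative placeholders, not
# real credentials):
#
#   VEN_API_KEY=xxx VEN_API_KEY_PULL=xxx \
#   VEN_ZONE='my-app\my-issuing-template' \
#   VEN_API_HOST=api.venafi.cloud \
#   VEN_VCP_REGION=US \
#   OCI_BASE=ttl.sh/63773370-0bcf-4ac0-bd42-5515616089ff \
#   CLOUDSDK_CORE_PROJECT=my-gcp-project \
#   CLOUDSDK_COMPUTE_ZONE=europe-west1-b \
#   CLUSTER_NAME=venafi-agent-e2e \
#   COVERAGE_HOST_PATH=/tmp/coverage \
#   ./test.sh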
set -o nounset
set -o errexit
set -o pipefail
# Commenting out for CI, uncomment for local debugging
#set -o xtrace
script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
root_dir=$(cd "${script_dir}/../.." && pwd)
export TERM=dumb
# Your Venafi Cloud API key.
# (Each ": ${VAR?}" line below makes the script exit immediately with an
# error if the corresponding required variable is unset.)
: ${VEN_API_KEY?}
# A separate API key used to fetch a pull secret, in case your main Venafi
# Cloud tenant doesn't allow you to create registry service accounts.
: ${VEN_API_KEY_PULL?}
# The Venafi Cloud zone (application/issuing_template) which will be used by
# the issuer and policy.
: ${VEN_ZONE?}
# The hostname of the Venafi API server.
# US: api.venafi.cloud
# EU: api.venafi.eu
: ${VEN_API_HOST?}
# The base URL of the OCI registry used for Docker images and Helm charts.
# E.g. ttl.sh/63773370-0bcf-4ac0-bd42-5515616089ff
: ${OCI_BASE?}
# Required gcloud environment variables
# https://cloud.google.com/sdk/docs/configurations#setting_configuration_properties
: ${CLOUDSDK_CORE_PROJECT?}
: ${CLOUDSDK_COMPUTE_ZONE?}
# The name of the cluster to create
: ${CLUSTER_NAME?}
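# The Venafi Cloud region passed to "venctl components kubernetes apply"
# below; checked here so the nounset failure surfaces early.
: ${VEN_VCP_REGION?}
# Host directory into which the agent's Go coverage files are copied at the
# end of this script.
: ${COVERAGE_HOST_PATH?}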
cd "${script_dir}"
pushd "${root_dir}"
> release.env
make release \
OCI_SIGN_ON_PUSH=false \
oci_platforms=linux/amd64 \
oci_preflight_image_name=$OCI_BASE/images/venafi-agent \
helm_chart_image_name=$OCI_BASE/charts/venafi-kubernetes-agent \
GITHUB_OUTPUT=release.env
source release.env
popd
export USE_GKE_GCLOUD_AUTH_PLUGIN=True
if ! gcloud container clusters get-credentials "${CLUSTER_NAME}"; then
gcloud container clusters create "${CLUSTER_NAME}" \
--preemptible \
--machine-type e2-small \
--num-nodes 3
fi
kubectl create ns venafi || true
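# PersistentVolumeClaim used to collect the agent's Go coverage data
# (GOCOVERDIR=/coverage, set further down). The agent deployment is expected
# to mount this claim at /coverage via the values file; a helper pod copies
# the data off at the end of the script.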
kubectl apply -n venafi -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: coverage-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
EOF
# Pull secret for Venafi OCI registry
# IMPORTANT: we arbitrarily pick the first team as the owning team for the
# registry service account; the choice doesn't matter for this test.
if ! kubectl get secret venafi-image-pull-secret -n venafi; then
venctl iam service-accounts registry create \
--api-key $VEN_API_KEY_PULL \
--no-prompts \
--owning-team "$(curl --fail-with-body -sS "https://${VEN_API_HOST}/v1/teams" -H "tppl-api-key: ${VEN_API_KEY_PULL}" | jq '.teams[0].id' -r)" \
--name "venafi-kubernetes-agent-e2e-registry-${RANDOM}" \
--scopes enterprise-cert-manager,enterprise-venafi-issuer,enterprise-approver-policy \
| jq '{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "venafi-image-pull-secret"
},
"type": "kubernetes.io/dockerconfigjson",
"stringData": {
".dockerconfigjson": {
"auths": {
"\(.oci_registry)": {
"username": .username,
"password": .password
}
}
} | tostring
}
}' \
| kubectl create -n venafi -f -
fi
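# Optional sanity check: the pull secret should decode to a valid
# .dockerconfigjson document, e.g.
#   kubectl get secret venafi-image-pull-secret -n venafi \
#     -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d | jq .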
export VENAFI_KUBERNETES_AGENT_CLIENT_ID="not-used-but-required-by-venctl"
venctl components kubernetes apply \
--region $VEN_VCP_REGION \
--cert-manager \
--venafi-enhanced-issuer \
--approver-policy-enterprise \
--venafi-kubernetes-agent \
--venafi-kubernetes-agent-version "${RELEASE_HELM_CHART_VERSION}" \
--venafi-kubernetes-agent-values-files "${script_dir}/values.venafi-kubernetes-agent.yaml" \
--venafi-kubernetes-agent-custom-image-registry "${OCI_BASE}/images" \
--venafi-kubernetes-agent-custom-chart-repository "oci://${OCI_BASE}/charts"
kubectl apply -n venafi -f venafi-components.yaml
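# GOCOVERDIR tells a Go binary built with -cover where to write its coverage
# data. Setting it triggers a new rollout, which we wait for below.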
kubectl set env deployments/venafi-kubernetes-agent -n venafi GOCOVERDIR=/coverage
kubectl rollout status deployment/venafi-kubernetes-agent -n venafi --timeout=2m
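# Discover the cluster's OIDC issuer and JWKS endpoint so that a federated
# service account can be registered with Venafi Cloud: mint a token for the
# venafi-components ServiceAccount, read its "iss" claim, then look up
# jwks_uri in the issuer's OIDC discovery document.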
subject="system:serviceaccount:venafi:venafi-components"
audience="https://${VEN_API_HOST}"
issuerURL="$(kubectl create token -n venafi venafi-components | step crypto jwt inspect --insecure | jq -r '.payload.iss')"
openidDiscoveryURL="${issuerURL}/.well-known/openid-configuration"
jwksURI=$(curl --fail-with-body -sSL ${openidDiscoveryURL} | jq -r '.jwks_uri')
# Create the Venafi agent service account if one does not already exist
# IMPORTANT: we arbitrarily pick the first team as the owning team for the
# workload identity service account; the choice doesn't matter for this test.
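# The loop first looks for an existing service account matching our issuer
# and subject; if none is found it creates one, and the next iteration picks
# up its companyId, used as the vcpOAuth tenantID below.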
while true; do
tenantID=$(curl --fail-with-body -sSL -H "tppl-api-key: $VEN_API_KEY" https://${VEN_API_HOST}/v1/serviceaccounts \
| jq -r '.[] | select(.issuerURL==$issuerURL and .subject == $subject) | .companyId' \
--arg issuerURL "${issuerURL}" \
--arg subject "${subject}")
if [[ "${tenantID}" != "" ]]; then
break
fi
jq -n '{
"name": "venafi-kubernetes-agent-e2e-agent-\($random)",
"authenticationType": "rsaKeyFederated",
"scopes": ["kubernetes-discovery-federated", "certificate-issuance"],
"subject": $subject,
"audience": $audience,
"issuerURL": $issuerURL,
"jwksURI": $jwksURI,
"applications": [$applications.applications[].id],
"owner": $owningTeamID
}' \
--arg random "${RANDOM}" \
--arg subject "${subject}" \
--arg audience "${audience}" \
--arg issuerURL "${issuerURL}" \
--arg jwksURI "${jwksURI}" \
--arg owningTeamID "$(curl --fail-with-body -sS "https://${VEN_API_HOST}/v1/teams" -H "tppl-api-key: $VEN_API_KEY" | jq '.teams[0].id' -r)" \
  --argjson applications "$(curl "https://${VEN_API_HOST}/outagedetection/v1/applications" --fail-with-body -sSL -H "tppl-api-key: ${VEN_API_KEY}")" \
| curl https://${VEN_API_HOST}/v1/serviceaccounts \
-H "tppl-api-key: $VEN_API_KEY" \
--fail-with-body \
-sSL --json @-
done
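# The VenafiConnection chains two steps: a projected ServiceAccount token
# (with the Venafi API host as its audience) is exchanged via vcpOAuth for a
# Venafi Cloud access token in the tenant discovered above.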
kubectl apply -n venafi -f - <<EOF
apiVersion: jetstack.io/v1alpha1
kind: VenafiConnection
metadata:
name: venafi-components
spec:
allowReferencesFrom: {}
vcp:
url: https://${VEN_API_HOST}
accessToken:
- serviceAccountToken:
name: venafi-components
audiences:
- ${audience}
- vcpOAuth:
tenantID: ${tenantID}
EOF
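# Create a sample application namespace and Certificate (substituting env
# vars into the manifest) so the agent has certificate data to report.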
envsubst <application-team-1.yaml | kubectl apply -f -
kubectl -n team-1 wait certificate app-0 --for=condition=Ready
# Wait up to 60s for a log message indicating success.
# Parse logs as JSON using jq to ensure logs are all JSON formatted.
# Disable pipefail: when jq exits early via halt_error, the still-running
# "kubectl logs --follow" is killed by SIGPIPE (exit 141), which must not
# fail the script.
# See https://unix.stackexchange.com/questions/274120/pipe-fail-141-when-piping-output-into-tee-why
set +o pipefail
kubectl logs deployments/venafi-kubernetes-agent \
--follow \
--namespace venafi \
| timeout 60 jq 'if .msg | test("Data sent successfully") then . | halt_error(0) end'
set -o pipefail
# Create a unique TLS Secret and wait for it to appear in the Venafi certificate
# inventory API. The case conversion is due to macOS' version of uuidgen which
# prints UUIDs in upper case, but DNS labels need lower case characters.
commonname="venafi-kubernetes-agent-e2e.$(uuidgen | tr '[:upper:]' '[:lower:]')"
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=$commonname"
kubectl create secret tls "$commonname" --cert=/tmp/tls.crt --key=/tmp/tls.key -o yaml --dry-run=client | kubectl apply -f -
getCertificate() {
jq -n '{
"expression": {
"field": "subjectCN",
"operator": "MATCH",
"value": $commonname
},
"ordering": {
"orders": [
{ "direction": "DESC", "field": "certificatInstanceModificationDate" }
]
},
"paging": { "pageNumber": 0, "pageSize": 10 }
}' --arg commonname "${commonname}" \
| curl "https://${VEN_API_HOST}/outagedetection/v1/certificatesearch?excludeSupersededInstances=true&ownershipTree=true" \
-fsSL \
-H "tppl-api-key: $VEN_API_KEY" \
--json @- \
| jq 'if .count == 0 then . | halt_error(1) end'
}
# Wait up to 5 minutes for the certificate to appear, polling every 30s.
# NB: the for loop is part of a pipeline and therefore runs in a subshell,
# so "exit 0" only stops the polling loop and the script continues below.
# If the certificate never appears, timeout kills cat, the loop then dies
# with SIGPIPE, and pipefail + errexit abort the script.
for ((i=0;;i++)); do if getCertificate; then exit 0; fi; sleep 30; done | timeout -v -- 5m cat
export AGENT_POD_NAME=$(kubectl get pods -n venafi -l app.kubernetes.io/name=venafi-kubernetes-agent -o jsonpath="{.items[0].metadata.name}")
echo "Sending SIGQUIT to agent pod '${AGENT_POD_NAME}' to trigger graceful shutdown and flush coverage..."
# Use kubectl debug to attach a busybox container to the running pod.
# --target places the debug container in the agent container's process
# namespace, allowing it to see and signal the agent process.
# We then run 'kill -s QUIT 1' to signal PID 1 (the agent) to quit gracefully.
kubectl debug -q -n venafi "${AGENT_POD_NAME}" \
--image=busybox:1.36 \
--target=venafi-kubernetes-agent \
--share-processes \
-- sh -c 'kill -s QUIT 1'
echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate gracefully..."
# The pod will now terminate because its main process is exiting.
# We wait for Kubernetes to recognize this and delete the pod object.
kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
echo "Scaling down deployment to prevent pod from restarting..."
# Now that the pod is gone and coverage is flushed, we scale the deployment
# to ensure the ReplicaSet controller doesn't create a new one.
kubectl scale deployment venafi-kubernetes-agent -n venafi --replicas=0
echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate as a result of the scale-down..."
kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
echo "Starting helper pod to retrieve coverage files from the PVC..."
kubectl apply -n venafi -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
name: coverage-helper-pod
spec:
containers:
- name: helper
image: alpine:latest
command: ["sleep", "infinity"]
volumeMounts:
- name: coverage-storage
mountPath: /coverage-data
volumes:
- name: coverage-storage
persistentVolumeClaim:
claimName: coverage-pvc
EOF
echo "Waiting for the helper pod to be ready..."
kubectl wait --for=condition=Ready pod/coverage-helper-pod -n venafi --timeout=2m
echo "Copying coverage files from the helper pod..."
mkdir -p "${COVERAGE_HOST_PATH}"
kubectl cp -n venafi "coverage-helper-pod:/coverage-data/." "${COVERAGE_HOST_PATH}"
echo "Coverage files retrieved. Listing contents:"
ls -la "${COVERAGE_HOST_PATH}"