# This workflow is a reusable one called by other workflows
name: (template) Elemental E2E single cluster CLI tests

on:
  workflow_call:
    # Variables to set when calling this reusable workflow
    inputs:
      backup_restore_version:
        required: true
        type: string
      boot_type:
        required: true
        type: string
      ca_type:
        required: true
        type: string
      cert-manager_version:
        required: true
        type: string
      cluster_name:
        required: true
        type: string
      cluster_namespace:
        required: true
        type: string
      cluster_type:
        required: true
        type: string
      destroy_runner:
        required: true
        type: boolean
      force_downgrade:
        required: true
        type: boolean
      full_backup_restore:
        required: true
        type: boolean
      k8s_downstream_version:
        required: true
        type: string
      k8s_upstream_version:
        required: true
        type: string
      node_number:
        required: true
        type: string
      operator_install_type:
        required: true
        type: string
      operator_repo:
        required: true
        type: string
      operator_upgrade:
        required: true
        type: string
      os_to_test:
        required: true
        type: string
      public_domain:
        required: true
        type: string
      public_fqdn:
        required: true
        type: string
      qase_project_code:
        required: true
        type: string
      qase_run_id:
        required: true
        type: string
      rancher_version:
        required: true
        type: string
      rancher_upgrade:
        required: true
        type: string
      reset:
        required: true
        type: boolean
      runner_label:
        required: true
        type: string
      selinux:
        required: true
        type: boolean
      sequential:
        required: true
        type: boolean
      snap_type:
        required: true
        type: string
      test_type:
        required: true
        type: string
      upgrade_image:
        required: true
        type: string
      upgrade_os_channel:
        required: true
        type: string
      upgrade_type:
        required: true
        type: string
    # Job outputs to export for caller workflow
    outputs:
      steps_status:
        description: Status of the executed test jobs
        value: ${{ jobs.cli.outputs.steps_status }}
    # Secrets to set when calling this reusable workflow
    secrets:
      qase_api_token:
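      # NOTE: no 'required: true' here, so callers may omit this secret
      # (presumably intentional, as Qase reporting is not always needed)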

jobs:
  cli:
    runs-on: ${{ inputs.runner_label }}
    outputs:
      # For this to work, an 'id:' on each step is mandatory!
      steps_status: ${{ join(steps.*.conclusion, ' ') }}
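      # The joined value holds one conclusion per step, in order,
      # e.g. "success success skipped failure"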
    env:
      CERT_MANAGER_VERSION: ${{ inputs.cert-manager_version }}
      CLUSTER_NAME: ${{ inputs.cluster_name }}
      CLUSTER_NS: ${{ inputs.cluster_namespace }}
      CLUSTER_TYPE: ${{ inputs.cluster_type }}
      # Qase variables
      QASE_API_TOKEN: ${{ secrets.qase_api_token }}
      QASE_PROJECT_CODE: ${{ inputs.qase_project_code }}
      QASE_RUN_ID: ${{ inputs.qase_run_id }}
      # K3s / RKE2 flags to use for installation
      INSTALL_K3S_SKIP_ENABLE: true
      INSTALL_K3S_VERSION: ${{ inputs.k8s_upstream_version }}
      INSTALL_RKE2_VERSION: ${{ inputs.k8s_upstream_version }}
      K3S_KUBECONFIG_MODE: 0644
      # Distribution used to host Rancher Manager (K3s or RKE2)
      K8S_UPSTREAM_VERSION: ${{ inputs.k8s_upstream_version }}
      # For the K8s cluster to provision with Rancher Manager
      K8S_DOWNSTREAM_VERSION: ${{ inputs.k8s_downstream_version }}
      OPERATOR_INSTALL_TYPE: ${{ inputs.operator_install_type }}
      OS_TO_TEST: ${{ inputs.os_to_test }}
      # For Rancher Manager
      RANCHER_VERSION: ${{ inputs.rancher_version }}
      SELINUX: ${{ inputs.selinux }}
      TEST_TYPE: ${{ inputs.test_type }}
      TIMEOUT_SCALE: 3
    steps:
      - name: Checkout
        id: checkout
        uses: actions/checkout@v4

      - name: Setup Go
        id: setup_go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: tests/go.sum
          go-version-file: tests/go.mod

      - name: Define needed system variables
        id: define_sys_vars
        run: |
          # Add missing PATH, removed in recent distributions for security reasons...
          echo "/usr/local/bin" >> ${GITHUB_PATH}

      - name: Install Rancher Manager and Elemental
        id: install_rancher_elemental
        env:
          CA_TYPE: ${{ inputs.ca_type }}
          OPERATOR_REPO: ${{ inputs.operator_repo }}
          PUBLIC_FQDN: ${{ inputs.public_fqdn }}
          PUBLIC_DOMAIN: ${{ inputs.public_domain }}
        run: cd tests && make e2e-install-rancher

      - name: Workaround for DynamicSchemas for operator uninstallation (if needed)
        id: workaround_for_dynamicschemas
        run: |
          # Check if the DynamicSchema for MachineInventorySelectorTemplate exists
          if ! kubectl get dynamicschema machineinventoryselectortemplate >/dev/null 2>&1; then
            # If not, add it to avoid weird issues!
            echo "WORKAROUND: DynamicSchemas for MachineInventorySelectorTemplate is missing!"
            kubectl apply -f tests/assets/add_missing_dynamicschemas.yaml
          fi

      - name: Install backup-restore components
        id: install_backup_restore
        env:
          BACKUP_RESTORE_VERSION: ${{ inputs.backup_restore_version }}
        run: cd tests && make e2e-install-backup-restore

      - name: Extract component versions/information
        id: component
        run: |
          # Extract rancher-backup-operator version
          BACKUP_OPERATOR_VERSION=$(kubectl get pod \
            --namespace cattle-resources-system \
            -l app.kubernetes.io/name=rancher-backup \
            -o jsonpath='{.items[*].status.containerStatuses[*].image}' 2> /dev/null || true)

          # Extract CertManager version
          CERT_MANAGER_VERSION=$(kubectl get pod \
            --namespace cert-manager \
            -l app=cert-manager \
            -o jsonpath='{.items[*].status.containerStatuses[*].image}' 2> /dev/null || true)

          # Extract elemental-operator version
          OPERATOR_VERSION=$(kubectl get pod \
            --namespace cattle-elemental-system \
            -l app=elemental-operator \
            -o jsonpath='{.items[*].status.containerStatuses[*].image}' 2> /dev/null || true)

          # Extract Rancher Manager version
          RANCHER_VERSION=$(kubectl get pod \
            --namespace cattle-system \
            -l app=rancher \
            -o jsonpath='{.items[*].status.containerStatuses[*].image}' 2> /dev/null || true)

          # Export values
          echo "backup_operator_version=${BACKUP_OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}
          echo "cert_manager_version=${CERT_MANAGER_VERSION}" >> ${GITHUB_OUTPUT}
          echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}
          echo "rancher_image_version=${RANCHER_VERSION}" >> ${GITHUB_OUTPUT}

      - name: Configure Rancher and Libvirt
        id: configure_rancher
        env:
          SNAP_TYPE: ${{ inputs.snap_type }}
        run: cd tests && make e2e-configure-rancher

      - name: Create ISO image for master pool
        id: create_iso_master
        env:
          EMULATE_TPM: true
          POOL: master
        run: |
          # Only use ISO boot if the upstream cluster is RKE2,
          # due to issues with PXE/DHCP traffic
          if ${{ contains(inputs.k8s_upstream_version, 'rke') }}; then
            export BOOT_TYPE=iso
          fi
          cd tests && make e2e-iso-image
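
      # NOTE: ${{ }} expressions inside 'run' scripts are expanded by GitHub Actions
      # before the shell starts, so the condition above becomes a literal
      # 'if true; then' or 'if false; then'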

      - name: Extract iPXE artifacts from ISO
        id: extract_ipxe_artifacts
        if: ${{ inputs.boot_type == 'pxe' }}
        run: cd tests && make extract_kernel_init_squash && make ipxe

      - name: Bootstrap nodes 1, 2 and 3 in pool "master" (use emulated TPM if possible)
        id: bootstrap_master_nodes
        env:
          EMULATE_TPM: true
          POOL: master
          VM_START: 1
          VM_END: 3
        run: |
          # Only use ISO boot if the upstream cluster is RKE2,
          # due to issues with PXE/DHCP traffic.
          # Also set RAM to 10GB and vCPU to 6 for RKE2, a bit more than the recommended values
          if ${{ contains(inputs.k8s_upstream_version, 'rke') }}; then
            export BOOT_TYPE=iso
            export VM_MEM=10240
            export VM_CPU=6
          fi

          # Execute bootstrapping test
          if ${{ inputs.sequential == true }}; then
            # Force sequential node bootstrapping instead of parallel
            cd tests
            for ((i = VM_START ; i <= VM_END ; i++)); do
              VM_INDEX=${i} make e2e-bootstrap-node
            done
          else
            cd tests && VM_INDEX=${VM_START} VM_NUMBERS=${VM_END} make e2e-bootstrap-node
          fi
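
      # In the parallel case above, VM_INDEX/VM_NUMBERS appear to define the first/last
      # node of the range bootstrapped in a single call (inferred from usage here,
      # not verified against the Makefile)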

      - name: Install a simple application
        id: install_simple_app
        run: cd tests && make e2e-install-app && make e2e-check-app

      - name: Reset a node in the cluster
        id: reset_node
        if: ${{ inputs.reset == true }}
        run: cd tests && make e2e-reset && make e2e-check-app

      - name: Upgrade Elemental Operator
        id: operator_upgrade
        if: ${{ inputs.operator_upgrade != '' }}
        env:
          OPERATOR_UPGRADE: ${{ inputs.operator_upgrade }}
        run: |
          cd tests
          if make e2e-upgrade-operator; then
            # Extract elemental-operator version
            OPERATOR_VERSION=$(kubectl get pod \
              --namespace cattle-elemental-system \
              -l app=elemental-operator \
              -o jsonpath='{.items[*].status.containerStatuses[*].image}' 2> /dev/null || true)

            # Export values
            echo "operator_upgrade=${OPERATOR_UPGRADE}" >> ${GITHUB_OUTPUT}
            echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}

            # Check application
            make e2e-check-app
          else
            # Needed to make sure that GitHub Actions sees the failure
            false
          fi

      - name: Upgrade Rancher Manager
        id: rancher_upgrade
        if: ${{ inputs.rancher_upgrade != '' }}
        env:
          CA_TYPE: ${{ inputs.ca_type }}
          PUBLIC_FQDN: ${{ inputs.public_fqdn }}
          PUBLIC_DOMAIN: ${{ inputs.public_domain }}
          RANCHER_UPGRADE: ${{ inputs.rancher_upgrade }}
        run: |
          cd tests
          if make e2e-upgrade-rancher-manager; then
            # Extract Rancher Manager version
            RANCHER_VERSION=$(kubectl get pod \
              --namespace cattle-system \
              -l app=rancher \
              -o jsonpath='{.items[*].status.containerStatuses[*].image}' 2> /dev/null || true)

            # Export values
            echo "rancher_image_version=${RANCHER_VERSION}" >> ${GITHUB_OUTPUT}

            # Check application
            make e2e-check-app
          else
            # Needed to make sure that GitHub Actions sees the failure
            false
          fi

      - name: Upgrade node 1 to specified OS version with osImage
        id: upgrade_node_1
        if: ${{ inputs.upgrade_image != '' }}
        env:
          FORCE_DOWNGRADE: ${{ inputs.force_downgrade }}
          UPGRADE_IMAGE: ${{ inputs.upgrade_image }}
          UPGRADE_TYPE: osImage
          VM_INDEX: 1
        run: cd tests && make e2e-upgrade-node && make e2e-check-app

      - name: Upgrade other nodes to specified OS version with managedOSVersionName
        id: upgrade_other_nodes
        if: ${{ inputs.upgrade_os_channel != '' }}
        env:
          FORCE_DOWNGRADE: ${{ inputs.force_downgrade }}
          UPGRADE_OS_CHANNEL: ${{ inputs.upgrade_os_channel }}
          UPGRADE_TYPE: managedOSVersionName
          VM_INDEX: 2
          VM_NUMBERS: 3
        run: cd tests && make e2e-upgrade-node && make e2e-check-app

      - name: Test Backup/Restore Rancher Manager/Elemental resources
        id: test_backup_restore
        env:
          CA_TYPE: ${{ inputs.ca_type }}
          OPERATOR_REPO: ${{ inputs.operator_repo }}
          PUBLIC_FQDN: ${{ inputs.public_fqdn }}
          PUBLIC_DOMAIN: ${{ inputs.public_domain }}
        run: |
          cd tests

          # Run simple or full backup/restore test
          if ${{ inputs.full_backup_restore == true }}; then
            make e2e-full-backup-restore
          else
            make e2e-simple-backup-restore
          fi

          # Check the installed application
          make e2e-check-app

      - name: Extract ISO version
        id: iso_version
        if: ${{ always() }}
        run: |
          # Extract OS version from ISO
          ISO=$(file -Ls *.iso 2>/dev/null | awk -F':' '/boot sector/ { print $1 }')
          if [[ -n "${ISO}" ]]; then
            # NOTE: always keep 'initrd' at the end, as there is always a link with this name
            for INITRD_NAME in elemental.initrd* initrd; do
              INITRD_FILE=$(isoinfo -i "${ISO}" -R -find -type f -name "${INITRD_NAME}" -print 2>/dev/null)
              if [[ -n "${INITRD_FILE}" ]]; then
                # Extract the os-release file from the initrd and import IMAGE_TAG from it
                isoinfo -i "${ISO}" -R -x "${INITRD_FILE}" 2>/dev/null \
                  | xz -dc \
                  | cpio -i --to-stdout usr/lib/initrd-release > os-release
                eval $(grep IMAGE_TAG os-release 2>/dev/null)

                # We found an initrd, stop here
                break
              fi
            done
          fi

          # Export value (even if empty!)
          echo "os_version=${IMAGE_TAG}" >> ${GITHUB_OUTPUT}

      - name: Remove old built ISO image
        id: clean_master_iso
        # Only one ISO at a time is allowed; a new one will be created afterwards if needed
        run: rm -f *.iso

      - name: Create ISO image for worker pool
        id: create_iso_worker
        if: ${{ inputs.node_number > 3 }}
        env:
          BOOT_TYPE: iso
          POOL: worker
        run: cd tests && make e2e-iso-image

      - name: Bootstrap additional nodes in pool "worker" (total of ${{ inputs.node_number }})
        id: bootstrap_worker_nodes
        if: ${{ inputs.node_number > 3 }}
        env:
          BOOT_TYPE: iso
          POOL: worker
          VM_START: 4
          VM_END: ${{ inputs.node_number }}
        run: |
          # Set RAM to 10GB and vCPU to 6 for RKE2, a bit more than the recommended values
          if ${{ contains(inputs.k8s_upstream_version, 'rke') }}; then
            export VM_MEM=10240
            export VM_CPU=6
          fi

          cd tests
          if ${{ inputs.sequential == true }}; then
            # Force sequential node bootstrapping instead of parallel
            for ((i = VM_START ; i <= VM_END ; i++)); do
              VM_INDEX=${i} make e2e-bootstrap-node
            done
          else
            VM_INDEX=${VM_START} VM_NUMBERS=${VM_END} make e2e-bootstrap-node
          fi

          # Check the installed application
          make e2e-check-app

      - name: Uninstall Elemental Operator
        id: uninstall_elemental_operator
        env:
          OPERATOR_REPO: ${{ inputs.operator_repo }}
        # Don't test operator uninstall if we want to keep the runner for debugging purposes
        if: ${{ inputs.destroy_runner == true }}
        run: cd tests && make e2e-uninstall-operator

      # This step must be called in each workflow that wants a summary!
      - name: Get logs and add summary
        id: logs_summary
        if: ${{ always() }}
        uses: ./.github/actions/logs-and-summary
        with:
          backup_operator_version: ${{ steps.component.outputs.backup_operator_version }}
          ca_type: ${{ inputs.ca_type }}
          cert_manager_version: ${{ steps.component.outputs.cert_manager_version }}
          cluster_type: ${{ inputs.cluster_type }}
          k8s_downstream_version: ${{ inputs.k8s_downstream_version }}
          k8s_upstream_version: ${{ inputs.k8s_upstream_version }}
          node_number: ${{ inputs.node_number }}
          operator_upgrade: ${{ steps.operator_upgrade.outputs.operator_upgrade }}
          operator_version: ${{ steps.component.outputs.operator_version }}
          operator_version_upgrade: ${{ steps.operator_upgrade.outputs.operator_version }}
          os_to_test: ${{ inputs.os_to_test }}
          os_version: ${{ steps.iso_version.outputs.os_version }}
          public_fqdn: ${{ inputs.public_fqdn }}
          rancher_image_version: ${{ steps.component.outputs.rancher_image_version }}
          rancher_image_version_upgrade: ${{ steps.rancher_upgrade.outputs.rancher_image_version }}
          rancher_upgrade: ${{ inputs.rancher_upgrade }}
          rancher_version: ${{ inputs.rancher_version }}
          sequential: ${{ inputs.sequential }}
          snap_type: ${{ inputs.snap_type }}
          steps_status: ${{ join(steps.*.conclusion, ' ') }}
          test_type: ${{ inputs.test_type }}
          upgrade_image: ${{ inputs.upgrade_image }}
          upgrade_os_channel: ${{ inputs.upgrade_os_channel }}