@@ -1,41 +1 @@
 steps:
-- match: Then I download the OpenShift image for version "(?P<image_name>[^"]+)"
-  description: |-
-    This step downloads the OpenShift image for the version specified in the step.
-
-    If the image already exists locally, it skips the download.
-  outputs:
-  - name: image_path
-  - name: image_major
-  - name: image_minor
-  - name: image_patch
-  run: |
-    set -euo pipefail
-
-    . ./workflows/cloudscale/scripts/semver.sh
-
-    MAJOR=0
-    MINOR=0
-    PATCH=0
-    SPECIAL=""
-    semverParseInto "$MATCH_image_name" MAJOR MINOR PATCH SPECIAL
-
-    image_path="rhcos-$MAJOR.$MINOR.qcow2"
-
-    env -i "image_major=$MAJOR" >> "$OUTPUT"
-    env -i "image_minor=$MINOR" >> "$OUTPUT"
-    env -i "image_patch=$PATCH" >> "$OUTPUT"
-
-    echo "Image is $image_path"
-
-    if [ -f "$image_path" ]; then
-      echo "Image $image_path already exists, skipping download."
-      env -i "image_path=$image_path" >> "$OUTPUT"
-      exit 0
-    fi
-
-    echo Downloading OpenShift image "$MATCH_image_name" to "$image_path"
-
-    curl -L "https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/${MAJOR}.${MINOR}/${MATCH_image_name}/rhcos-${MATCH_image_name}-x86_64-openstack.x86_64.qcow2.gz" | gzip -d > "$image_path"
-    env -i "image_path=$image_path" >> "$OUTPUT"
-
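The step removed above parses the requested version with semverParseInto from workflows/cloudscale/scripts/semver.sh, which is not part of this diff. A minimal sketch of what such a helper looks like, with the argument order assumed from the call site:

    # Hypothetical sketch only; the real helper lives in
    # workflows/cloudscale/scripts/semver.sh and is not shown in this diff.
    semverParseInto() {
        local re='^([0-9]+)\.([0-9]+)\.([0-9]+)(.*)$'
        [[ "$1" =~ $re ]] || return 1
        eval "$2=${BASH_REMATCH[1]}"    # major
        eval "$3=${BASH_REMATCH[2]}"    # minor
        eval "$4=${BASH_REMATCH[3]}"    # patch
        eval "$5='${BASH_REMATCH[4]}'"  # pre-release/build suffix, if any
    }

    semverParseInto "4.14.7" MAJOR MINOR PATCH SPECIAL
    echo "$MAJOR.$MINOR"  # -> 4.14

The `env -i "key=value" >> "$OUTPUT"` lines publish step outputs: `env -i key=value` with no command prints exactly that single assignment, which sidesteps echo's escaping quirks.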
@@ -42,3 +2,3 @@ steps:
 - match: And I set up required S3 buckets
   description: |-
     This step sets up the required S3 buckets for the OpenShift cluster installation.
@@ -47,7 +7,7 @@ steps:
   inputs:
   - name: cloudscale_token
   - name: commodore_cluster_id
-  - name: cloudscale_region
+  - name: csp_region
   outputs:
   - name: bucket_user
   run: |
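Most of this step's run script is collapsed in this view; judging from the `jq` filters applied to `$response` below, it first requests S3 credentials from the cloudscale.ch API. Purely as an illustration (the endpoint and payload are assumptions, not content of this change), creating such an objects user looks roughly like:

    # Hypothetical illustration; the actual collapsed script is not shown here.
    # cloudscale.ch hands out S3 credentials via "objects users".
    response=$(curl -sS -X POST \
        -H "Authorization: Bearer ${INPUT_cloudscale_token}" \
        -H "Content-Type: application/json" \
        -d "{\"display_name\": \"${INPUT_commodore_cluster_id}\"}" \
        https://api.cloudscale.ch/v1/objects-users)
    echo "$response" | jq -r '.keys[0].access_key'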
@@ -68,7 +28,7 @@ steps:
 
     echo -n "Waiting for S3 credentials to become available ..."
     until mc alias set \
-      "${INPUT_commodore_cluster_id}" "https://objects.${INPUT_cloudscale_region}.cloudscale.ch" \
+      "${INPUT_commodore_cluster_id}" "https://objects.${INPUT_csp_region}.cloudscale.ch" \
       "$(echo "$response" | jq -r '.keys[0].access_key')" \
       "$(echo "$response" | jq -r '.keys[0].secret_key')"
     do
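The until-loop above retries forever until the freshly issued credentials become active. If a bounded wait is preferred, the same pattern works with a retry budget; the variable names here are placeholders, not from the workflow:

    # Illustrative variant only: give up after ~60s instead of waiting forever.
    for attempt in $(seq 1 30); do
        if mc alias set "$alias" "$endpoint" "$access_key" "$secret_key"; then
            break
        fi
        if [ "$attempt" -eq 30 ]; then
            echo "S3 credentials never became active" >&2
            exit 1
        fi
        sleep 2
    done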
@@ -91,11 +51,11 @@ steps:
 
     echo "Configuring S3 bucket policies..."
     aws s3api put-public-access-block \
-      --endpoint-url "https://objects.${INPUT_cloudscale_region}.cloudscale.ch" \
+      --endpoint-url "https://objects.${INPUT_csp_region}.cloudscale.ch" \
       --bucket "${INPUT_commodore_cluster_id}-image-registry" \
       --public-access-block-configuration BlockPublicAcls=false
     aws s3api put-bucket-lifecycle-configuration \
-      --endpoint-url "https://objects.${INPUT_cloudscale_region}.cloudscale.ch" \
+      --endpoint-url "https://objects.${INPUT_csp_region}.cloudscale.ch" \
       --bucket "${INPUT_commodore_cluster_id}-image-registry" \
       --lifecycle-configuration '{
         "Rules": [
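The lifecycle configuration JSON is cut off by the hunk boundary above. As a shape reference only (the rule actually applied by this workflow is not visible here), a complete `--lifecycle-configuration` argument could look like:

    # Hypothetical payload shape; the real rule is truncated in the diff above.
    '{
      "Rules": [
        {
          "ID": "abort-stale-multipart-uploads",
          "Status": "Enabled",
          "Filter": {"Prefix": ""},
          "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1}
        }
      ]
    }'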
@@ -123,7 +83,7 @@ steps:
   inputs:
   - name: image_path
   - name: commodore_cluster_id
-  - name: cloudscale_region
+  - name: csp_region
   - name: bucket_user
   - name: image_major
   - name: image_minor
@@ -141,7 +101,7 @@ steps:
     fi
 
     mc alias set \
-      "${INPUT_commodore_cluster_id}" "https://objects.${INPUT_cloudscale_region}.cloudscale.ch" \
+      "${INPUT_commodore_cluster_id}" "https://objects.${INPUT_csp_region}.cloudscale.ch" \
       "$(echo "$INPUT_bucket_user" | jq -r '.keys[0].access_key')" \
       "$(echo "$INPUT_bucket_user" | jq -r '.keys[0].secret_key')"
 
@@ -156,7 +116,7 @@ steps:
     curl -i -H "$auth_header" \
       -F url="$(mc share download --json "${INPUT_commodore_cluster_id}/${INPUT_commodore_cluster_id}-bootstrap-ignition/rhcos-${INPUT_image_major}.${INPUT_image_minor}.qcow2" | jq -r .url)" \
       -F name="RHCOS ${INPUT_image_major}.${INPUT_image_minor}" \
-      -F zones="${INPUT_cloudscale_region}1" \
+      -F zones="${INPUT_csp_region}1" \
       -F slug="rhcos-${INPUT_image_major}.${INPUT_image_minor}" \
       -F source_format=qcow2 \
       -F user_data_handling=pass-through \
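The request above hands a presigned `mc share download` URL to the provider, which fetches and converts the image in the background; the target URL itself falls outside the hunk, but the form fields match cloudscale.ch's asynchronous custom-image import API. A sketch of polling the import until it settles, assuming the POST response was captured in `$import` and that the API exposes a `/v1/custom-images/import/<uuid>` status resource:

    # Sketch only, not part of this change: wait for the asynchronous import.
    uuid=$(echo "$import" | jq -r .uuid)
    while true; do
        status=$(curl -sS -H "$auth_header" \
            "https://api.cloudscale.ch/v1/custom-images/import/${uuid}" | jq -r .status)
        [ "$status" = "success" ] && break
        if [ "$status" = "failed" ]; then
            echo "image import failed" >&2
            exit 1
        fi
        sleep 5
    done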