
Commit e7f646e

mads-hartmann authored and roboquat committed
Initial step towards dedicated VMs for dev
This introduces a new option, with-vm, for the build Werft job, which will start a new VM in our Harvester cluster. This is a tiny step towards providing dedicated k3s clusters for preview environments. For now it only boots a VM, so it is not usable by devs yet, but it gives us something to iterate on. Part of https://github.com/gitpod-io/harvester/issues/7
1 parent db0143f commit e7f646e
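
For context on how the flag is consumed: Werft annotations arrive as keys on the job's build config object, so the mere presence of the key enables the feature. A minimal sketch of the pattern used in .werft/build.ts below (the object literal is illustrative; annotations are typically passed to the Werft CLI with something like -a with-vm=true, though that invocation is an assumption here):

    // Annotations surface as keys on the build config, so even an
    // empty value ("with-vm=") counts as the flag being set.
    const buildConfig: Record<string, string> = { "with-vm": "true" }
    const withVM = "with-vm" in buildConfig  // true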

4 files changed: +269 −0 lines

.werft/build.ts

Lines changed: 24 additions & 0 deletions
@@ -14,6 +14,7 @@ import { createHash } from "crypto";
 import { InstallMonitoringSatelliteParams, installMonitoringSatellite, observabilityStaticChecks } from './observability/monitoring-satellite';
 import { SpanStatusCode } from '@opentelemetry/api';
 import * as Tracing from './observability/tracing'
+import * as VM from './vm/vm'

 // Will be set once tracing has been initialized
 let werft: Werft
@@ -58,6 +59,7 @@ const phases = {
     PREDEPLOY: 'predeploy',
     DEPLOY: 'deploy',
     TRIGGER_INTEGRATION_TESTS: 'trigger integration tests',
+    VM: 'vm'
 }

 // Werft slices for deploy phase via installer
@@ -74,6 +76,10 @@ const installerSlices = {
     DEPLOYMENT_WAITING: "monitor server deployment"
 }

+const vmSlices = {
+    BOOT_VM: 'Booting VM'
+}
+
 export function parseVersion(context) {
     let buildConfig = context.Annotations || {};
     const explicitVersion = buildConfig.version;
@@ -142,6 +148,7 @@ export async function build(context, version) {
     const withPayment= "with-payment" in buildConfig;
     const withObservability = "with-observability" in buildConfig;
     const withHelm = "with-helm" in buildConfig;
+    const withVM = "with-vm" in buildConfig;

     const jobConfig = {
         buildConfig,
@@ -283,6 +290,23 @@ export async function build(context, version) {
         withObservability,
     };

+    if (withVM) {
+        werft.phase(phases.VM, "Start VM");
+
+        if (!VM.vmExists({ name: destname })) {
+            werft.log(vmSlices.BOOT_VM, 'Starting VM')
+            VM.startVM({ name: destname })
+        } else {
+            werft.log(vmSlices.BOOT_VM, 'VM already exists')
+        }
+
+        werft.log(vmSlices.BOOT_VM, 'Waiting for VM to be ready')
+        VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3 })
+
+        werft.done(phases.VM)
+        return
+    }
+
     werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
     // the context namespace is not set at this point
     const hasGitpodHelmInstall = exec(`helm status ${helmInstallName} -n ${deploymentConfig.namespace}`, {slice: "check for Helm install", dontCheckRc: true}).code === 0;

.werft/build.yaml

Lines changed: 5 additions & 0 deletions
@@ -33,6 +33,9 @@ pod:
       hostPath:
         path: /mnt/disks/ssd0/go-build-cache
         type: DirectoryOrCreate
+  - name: harvester-kubeconfig
+    secret:
+      secretName: harvester-kubeconfig
   # - name: deploy-key
   #   secret:
   #     secretName: deploy-key
@@ -77,6 +80,8 @@ pod:
   - name: go-build-cache
     mountPath: /go-build-cache
     readOnly: false
+  - name: harvester-kubeconfig
+    mountPath: /mnt/secrets/harvester-kubeconfig
   # - name: deploy-key
   #   mountPath: /mnt/secrets/deploy-key
   #   readOnly: true
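
Note that the mountPath added above is exactly the directory that .werft/vm/vm.ts reads the Harvester kubeconfig from; Kubernetes materializes each key of the mounted secret as a file under that path. The snippet below is the corresponding constant from vm.ts; that the secret contains a key named harvester-kubeconfig.yml is an assumption implied by it:

    // Each key in the harvester-kubeconfig secret becomes a file under
    // /mnt/secrets/harvester-kubeconfig, so vm.ts expects this exact path.
    const KUBECONFIG_PATH = '/mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml'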

.werft/vm/manifests.ts

Lines changed: 149 additions & 0 deletions (new file)

type NamespaceManifestOptions = {
    namespace: string
}

export function NamespaceManifest({ namespace }: NamespaceManifestOptions) {
    return `
apiVersion: v1
kind: Namespace
metadata:
  name: ${namespace}
`
}

type VirtualMachineManifestArguments = {
    vmName: string
    namespace: string
    claimName: string,
    userDataSecretName: string
}

export function VirtualMachineManifest({ vmName, namespace, claimName, userDataSecretName }: VirtualMachineManifestArguments) {
    return `
apiVersion: kubevirt.io/v1
type: kubevirt.io.virtualmachine
kind: VirtualMachine
metadata:
  namespace: ${namespace}
  annotations:
    harvesterhci.io/volumeClaimTemplates: '[{"metadata":{"name":"${claimName}","annotations":{"harvesterhci.io/imageId":"default/image-cjlm2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"10Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-cjlm2"}}]'
    network.harvesterhci.io/ips: "[]"
  labels:
    harvesterhci.io/creator: harvester
    harvesterhci.io/os: ubuntu
  name: ${vmName}
spec:
  running: true
  template:
    metadata:
      annotations:
        harvesterhci.io/sshNames: "[]"
      labels:
        harvesterhci.io/vmName: ${vmName}
    spec:
      domain:
        hostname: ${vmName}
        machine:
          type: q35
        cpu:
          cores: 1
          sockets: 1
          threads: 1
        devices:
          inputs:
            - bus: usb
              name: tablet
              type: tablet
          interfaces:
            - masquerade: {}
              model: virtio
              name: default
          disks:
            - name: system
              bootOrder: 1
              disk:
                bus: virtio
            - name: cloudinitdisk
              disk:
                bus: virtio
        resources:
          limits:
            memory: 2Gi
            cpu: 1
      evictionStrategy: LiveMigrate
      networks:
        - pod: {}
          name: default
      volumes:
        - name: system
          persistentVolumeClaim:
            claimName: ${claimName}
        - name: cloudinitdisk
          cloudInitNoCloud:
            networkDataSecretRef:
              name: ${userDataSecretName}
            secretRef:
              name: ${userDataSecretName}
`
}

type ServiceManifestOptions = {
    vmName: string
    namespace: string
}

export function ServiceManifest({ vmName, namespace }: ServiceManifestOptions) {
    return `
apiVersion: v1
kind: Service
metadata:
  name: proxy
  namespace: ${namespace}
spec:
  ports:
    - name: ssh
      protocol: TCP
      port: 22
      targetPort: 22
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
    - name: https
      protocol: TCP
      port: 443
      targetPort: 443
    - name: kube-api
      protocol: TCP
      port: 6443
      targetPort: 6443
  selector:
    harvesterhci.io/vmName: ${vmName}
  type: ClusterIP
`
}

type UserDataSecretManifestOptions = {
    namespace: string,
    secretName: string
}

export function UserDataSecretManifest({ namespace, secretName }: UserDataSecretManifestOptions) {
    const userdata = Buffer.from(`#cloud-config
users:
  - name: ubuntu
    lock_passwd: false
    sudo: "ALL=(ALL) NOPASSWD: ALL"
    passwd: "$6$exDY1mhS4KUYCE/2$zmn9ToZwTKLhCw.b4/b.ZRTIZM30JZ4QrOQ2aOXJ8yk96xpcCof0kxKwuX1kqLG/ygbJ1f8wxED22bTL4F46P0"`).toString("base64")
    return `
apiVersion: v1
type: secret
kind: Secret
data:
  networkdata: ""
  userdata: ${userdata}
metadata:
  name: ${secretName}
  namespace: ${namespace}
`
}
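
Since these manifest builders are plain template-literal renderers with no side effects, they can be sanity-checked in isolation before being piped to kubectl. A minimal sketch (the environment name is hypothetical):

    import { NamespaceManifest } from './manifests'

    // Prints the YAML that startVM would pipe to kubectl:
    //   apiVersion: v1
    //   kind: Namespace
    //   metadata:
    //     name: preview-my-branch
    console.log(NamespaceManifest({ namespace: 'preview-my-branch' }))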

.werft/vm/vm.ts

Lines changed: 91 additions & 0 deletions (new file)

import { exec } from '../util/shell';

import * as Manifests from './manifests'

const KUBECONFIG_PATH = '/mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml'

/**
 * Convenience function to kubectl apply a manifest from stdin.
 */
function kubectlApplyManifest(manifest: string, options?: { validate?: boolean }) {
    exec(`
cat <<EOF | kubectl --kubeconfig ${KUBECONFIG_PATH} apply --validate=${!!options?.validate} -f -
${manifest}
EOF
    `)
}

/**
 * Start a VM.
 * Does not wait for the VM to be ready.
 */
export function startVM(options: { name: string }) {
    const namespace = `preview-${options.name}`
    const userDataSecretName = `userdata-${options.name}`

    kubectlApplyManifest(
        Manifests.NamespaceManifest({
            namespace
        })
    )

    kubectlApplyManifest(
        Manifests.UserDataSecretManifest({
            namespace,
            secretName: userDataSecretName,
        })
    )

    kubectlApplyManifest(
        Manifests.VirtualMachineManifest({
            namespace,
            vmName: options.name,
            claimName: `${options.name}-${Date.now()}`,
            userDataSecretName
        }),
        { validate: false }
    )

    kubectlApplyManifest(
        Manifests.ServiceManifest({
            vmName: options.name,
            namespace
        })
    )
}

/**
 * Check if a VM with the given name already exists.
 * @returns true if the VM already exists
 */
export function vmExists(options: { name: string }) {
    const namespace = `preview-${options.name}`
    const status = exec(`kubectl --kubeconfig ${KUBECONFIG_PATH} -n ${namespace} get vmi ${options.name}`, { dontCheckRc: true, silent: true })
    return status.code == 0
}

/**
 * Wait until the VM Instance reaches the Running status.
 * If the VM Instance doesn't reach Running before timeoutMS elapses it will throw an Error.
 */
export function waitForVM(options: { name: string, timeoutMS: number }) {
    const namespace = `preview-${options.name}`
    const startTime = Date.now()
    while (true) {
        const status = exec(`kubectl --kubeconfig ${KUBECONFIG_PATH} -n ${namespace} get vmi ${options.name} -o jsonpath="{.status.phase}"`, { silent: true }).stdout.trim()

        if (status == "Running") {
            return
        }

        const elapsedTimeMs = Date.now() - startTime
        if (elapsedTimeMs > options.timeoutMS) {
            throw new Error("VM didn't reach Running status before the timeout")
        }

        console.log(`VM is not yet running. Current status is ${status}. Sleeping 5 seconds`)
        exec('sleep 5', { silent: true })
    }
}
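
waitForVM is a synchronous poll-with-timeout loop, blocking via exec('sleep 5'). The same shape generalizes to other readiness checks; a sketch of a reusable helper, assuming a blocking wait is acceptable in a Werft job (the helper name and signature are hypothetical):

    import { execSync } from 'child_process'

    // Poll a predicate every intervalMS until it returns true,
    // throwing once timeoutMS has elapsed (same shape as waitForVM).
    function waitUntil(predicate: () => boolean, timeoutMS: number, intervalMS: number = 5000) {
        const startTime = Date.now()
        while (!predicate()) {
            if (Date.now() - startTime > timeoutMS) {
                throw new Error(`Condition not met within ${timeoutMS}ms`)
            }
            execSync(`sleep ${intervalMS / 1000}`)  // blocking sleep, as in waitForVM
        }
    }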
