Ignition logs

journalctl --identifier=ignition --all
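To confirm how the last run ended, you can also read the result file Ignition writes (path taken from the log output further down; a quick check, not an official interface):

cat /etc/.ignition-result.json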

Query metadata

/usr/share/oem/bin/vmtoolsd --cmd "info-get guestinfo.ignition.config.data" | base64 -d
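For a readable view of the rendered config, pipe the decoded payload through jq (assuming jq is present on the node):

/usr/share/oem/bin/vmtoolsd --cmd "info-get guestinfo.ignition.config.data" | base64 -d | jq .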

Test branches:

Re-run ignition: touch /boot/flatcar/first_boot
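In practice the re-run is the flag file plus a reboot (sudo assumed):

sudo touch /boot/flatcar/first_boot
sudo reboot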

Proxmox Flatcar Cluster API template

apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: proxmoxav
  namespace: baremetal-operator-system
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
      - 192.168.0.0/16
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: proxmoxav-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
    kind: ProxmoxCluster
    name: proxmoxav
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxCluster
metadata:
  name: proxmoxav
  namespace: baremetal-operator-system
spec:
  allowedNodes:
  - pve
  controlPlaneEndpoint:
    host: 10.201.25.98
    port: 6443
  dnsServers:
  - 8.8.8.8
  - 1.1.1.1
  ipv4Config:
    addresses:
    - 10.201.25.80-10.201.25.97
    gateway: 10.201.25.254
    prefix: 23
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: proxmoxav-control-plane
  namespace: baremetal-operator-system
spec:
  kubeadmConfigSpec:
    files:
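    # kube-vip static pod manifest; hosts the control-plane VIP (10.201.25.98) declared in controlPlaneEndpoint above.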
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: null
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - args:
            - manager
            env:
            - name: cp_enable
              value: "true"
            - name: vip_interface
              value: ""
            - name: address
              value: 10.201.25.98
            - name: port
              value: "6443"
            - name: vip_arp
              value: "true"
            - name: vip_leaderelection
              value: "true"
            - name: vip_leaseduration
              value: "15"
            - name: vip_renewdeadline
              value: "10"
            - name: vip_retryperiod
              value: "2"
            image: ghcr.io/kube-vip/kube-vip:v0.7.1
            imagePullPolicy: IfNotPresent
            name: kube-vip
            resources: {}
            securityContext:
              capabilities:
                add:
                - NET_ADMIN
                - NET_RAW
            volumeMounts:
            - mountPath: /etc/kubernetes/admin.conf
              name: kubeconfig
          hostAliases:
          - hostnames:
            - localhost
            - kubernetes
            ip: 127.0.0.1
          hostNetwork: true
          volumes:
          - hostPath:
              path: /etc/kubernetes/admin.conf
              type: FileOrCreate
            name: kubeconfig
        status: {}
      owner: root:root
      path: /etc/kubernetes/manifests/kube-vip.yaml
      permissions: "0644"
    - content: |
        #!/bin/bash

        # Copyright 2020 The Kubernetes Authors.
        #
        # Licensed under the Apache License, Version 2.0 (the "License");
        # you may not use this file except in compliance with the License.
        # You may obtain a copy of the License at
        #
        #     http://www.apache.org/licenses/LICENSE-2.0
        #
        # Unless required by applicable law or agreed to in writing, software
        # distributed under the License is distributed on an "AS IS" BASIS,
        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        # See the License for the specific language governing permissions and
        # limitations under the License.

        set -e

        # Configure the workaround required for kubeadm init with kube-vip:
        # xref: https://github.com/kube-vip/kube-vip/issues/684

        # Nothing to do for kubernetes < v1.29
        KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)"
        if [[ "$KUBEADM_MINOR" -lt "29" ]]; then
          exit 0
        fi

        IS_KUBEADM_INIT="false"

        # cloud-init kubeadm init
        if [[ -f /run/kubeadm/kubeadm.yaml ]]; then
          IS_KUBEADM_INIT="true"
        fi

        # ignition kubeadm init
        if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then
          IS_KUBEADM_INIT="true"
        fi

        if [[ "$IS_KUBEADM_INIT" == "true" ]]; then
          sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \
            /etc/kubernetes/manifests/kube-vip.yaml
        fi
      owner: root:root
      path: /etc/kube-vip-prepare.sh
      permissions: "0700"
    format: ignition
    ignition:
      containerLinuxConfig:
        additionalConfig: |-
          storage:
            files:
            - path: /opt/set-hostname
              filesystem: root
              mode: 0744
              contents:
                inline: |
                  #!/bin/sh
                  set -x
                  echo "${COREOS_CUSTOM_HOSTNAME}" > /etc/hostname
                  hostname "${COREOS_CUSTOM_HOSTNAME}"
                  echo "::1         ipv6-localhost ipv6-loopback" >/etc/hosts
                  echo "127.0.0.1   localhost" >>/etc/hosts
                  echo "127.0.0.1   ${COREOS_CUSTOM_HOSTNAME}" >>/etc/hosts
          systemd:
            units:
            - name: coreos-metadata.service
              contents: |
                [Unit]
                Description=Proxmox metadata agent
                After=nss-lookup.target
                After=network-online.target
                Wants=network-online.target
                [Service]
                Type=oneshot
                Restart=on-failure
                RemainAfterExit=yes
                EnvironmentFile=/etc/proxmox-env
                ExecStart=/usr/bin/mkdir --parent /run/metadata
                ExecStart=/bin/bash -c 'env > /run/metadata/flatcar'
                [Install]
                WantedBy=multi-user.target
            - name: set-hostname.service
              enabled: true
              contents: |
                [Unit]
                Description=Set the hostname for this machine
                Requires=coreos-metadata.service
                After=coreos-metadata.service
                [Service]
                Type=oneshot
                EnvironmentFile=/run/metadata/flatcar
                ExecStart=/opt/set-hostname
                [Install]
                WantedBy=multi-user.target
            - name: kubeadm.service
              enabled: true
              dropins:
              - name: 10-flatcar.conf
                contents: |
                  [Unit]
                  # kubeadm must run after coreos-metadata populated /run/metadata directory.
                  Requires=coreos-metadata.service
                  After=coreos-metadata.service
                  # kubeadm must run after containerd - see https://github.com/kubernetes-sigs/image-builder/issues/939.
                  After=containerd.service
                  [Service]
                  # Make metadata environment variables available for pre-kubeadm commands.
                  EnvironmentFile=/run/metadata/flatcar
                  # Log to file
                  StandardOutput=append:/var/log/kubeadm-service.log
                  StandardError=inherit
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          provider-id: proxmox://'${COREOS_CUSTOM_INSTANCE_ID}'
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          provider-id: proxmox://'${COREOS_CUSTOM_INSTANCE_ID}'
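    # envsubst fills the ${COREOS_CUSTOM_*} placeholders in /etc/kubeadm.yml from the metadata environment loaded above.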
    preKubeadmCommands:
    - rm /etc/proxmox-env
    - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp
    - cp /etc/kubeadm.yml.tmp /etc/kubeadm.yml
    - /etc/kube-vip-prepare.sh
    users:
    - name: core
      sshAuthorizedKeys:
      - ssh-rsa ####=
      sudo: ALL=(ALL) NOPASSWD:ALL
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
      kind: ProxmoxMachineTemplate
      name: proxmoxav-control-plane
  replicas: 1
  version: v1.29.13
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxMachineTemplate
metadata:
  name: proxmoxav-control-plane
  namespace: baremetal-operator-system
spec:
  template:
    spec:
      checks:
        skipCloudInitStatus: true
        skipQemuGuestAgent: true
      disks:
        bootVolume:
          disk: scsi0
          sizeGb: 32
      format: qcow2
      full: true
      memoryMiB: 8048
      network:
        default:
          bridge: vmbr0
          model: virtio
      numCores: 4
      numSockets: 1
      sourceNode: pve
      templateID: 101
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: proxmoxav-workers
  namespace: baremetal-operator-system
spec:
  clusterName: proxmoxav
  replicas: 1
  selector:
    matchLabels: null
  template:
    metadata:
      labels:
        node-role.kubernetes.io/node: ""
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: proxmoxav-worker
      clusterName: proxmoxav
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
        kind: ProxmoxMachineTemplate
        name: proxmoxav-worker
      version: v1.29.13
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxMachineTemplate
metadata:
  name: proxmoxav-worker
  namespace: baremetal-operator-system
spec:
  template:
    spec:
      checks:
        skipCloudInitStatus: true
        skipQemuGuestAgent: true
      disks:
        bootVolume:
          disk: scsi0
          sizeGb: 32
      format: qcow2
      full: true
      memoryMiB: 8048
      network:
        default:
          bridge: vmbr0
          model: virtio
      numCores: 4
      numSockets: 1
      sourceNode: pve
      templateID: 101
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: proxmoxav-worker
  namespace: baremetal-operator-system
spec:
  template:
    spec:
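      # Same Ignition/metadata setup as the control plane, minus the kube-vip manifests.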
      format: ignition
      ignition:
        containerLinuxConfig:
          additionalConfig: |-
            storage:
              files:
              - path: /opt/set-hostname
                filesystem: root
                mode: 0744
                contents:
                  inline: |
                    #!/bin/sh
                    set -x
                    echo "${COREOS_CUSTOM_HOSTNAME}" > /etc/hostname
                    hostname "${COREOS_CUSTOM_HOSTNAME}"
                    echo "::1         ipv6-localhost ipv6-loopback" >/etc/hosts
                    echo "127.0.0.1   localhost" >>/etc/hosts
                    echo "127.0.0.1   ${COREOS_CUSTOM_HOSTNAME}" >>/etc/hosts
            systemd:
              units:
              - name: coreos-metadata.service
                contents: |
                  [Unit]
                  Description=Proxmox metadata agent
                  After=nss-lookup.target
                  After=network-online.target
                  Wants=network-online.target
                  [Service]
                  Type=oneshot
                  Restart=on-failure
                  RemainAfterExit=yes
                  EnvironmentFile=/etc/proxmox-env
                  ExecStart=/usr/bin/mkdir --parent /run/metadata
                  ExecStart=/bin/bash -c 'env > /run/metadata/flatcar'
                  [Install]
                  WantedBy=multi-user.target
              - name: set-hostname.service
                enabled: true
                contents: |
                  [Unit]
                  Description=Set the hostname for this machine
                  Requires=coreos-metadata.service
                  After=coreos-metadata.service
                  [Service]
                  Type=oneshot
                  EnvironmentFile=/run/metadata/flatcar
                  ExecStart=/opt/set-hostname
                  [Install]
                  WantedBy=multi-user.target
              - name: kubeadm.service
                enabled: true
                dropins:
                - name: 10-flatcar.conf
                  contents: |
                    [Unit]
                    # kubeadm must run after coreos-metadata populated /run/metadata directory.
                    Requires=coreos-metadata.service
                    After=coreos-metadata.service
                    # kubeadm must run after containerd - see https://github.com/kubernetes-sigs/image-builder/issues/939.
                    After=containerd.service
                    [Service]
                    # Make metadata environment variables available for pre-kubeadm commands.
                    EnvironmentFile=/run/metadata/flatcar
                    # Log to file
                    StandardOutput=append:/var/log/kubeadm-service.log
                    StandardError=inherit
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            provider-id: proxmox://'${COREOS_CUSTOM_INSTANCE_ID}'
      preKubeadmCommands:
      - rm /etc/proxmox-env
      - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp
      - cp /etc/kubeadm.yml.tmp /etc/kubeadm.yml
      users:
      - name: core
        sshAuthorizedKeys:
        - ssh-rsa ####=
        sudo: ALL=(ALL) NOPASSWD:ALL
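
Apply and inspect the cluster

Rough sequence for applying the template from the management cluster and fetching the workload kubeconfig (file name is illustrative; clusterctl assumed to be installed):

kubectl apply -f proxmox-flatcar-cluster.yaml
clusterctl describe cluster proxmoxav -n baremetal-operator-system
clusterctl get kubeconfig proxmoxav -n baremetal-operator-system > proxmoxav.kubeconfig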

Ignition logs (example output)

builder@localhost ~ $ sudo journalctl --identifier=ignition --all
Feb 13 09:41:31 localhost ignition[659]: Ignition 2.20.0
Feb 13 09:41:31 localhost ignition[659]: Stage: fetch-offline
Feb 13 09:41:31 localhost ignition[659]: no configs at "/usr/lib/ignition/base.d"
Feb 13 09:41:31 localhost ignition[659]: no config dir at "/usr/lib/ignition/base.platform.d/qemu"
Feb 13 09:41:31 localhost ignition[659]: parsed url from cmdline: ""
Feb 13 09:41:31 localhost ignition[659]: no config URL provided
Feb 13 09:41:31 localhost ignition[659]: reading system config file "/usr/lib/ignition/user.ign"
Feb 13 09:41:31 localhost ignition[659]: no config at "/usr/lib/ignition/user.ign"
Feb 13 09:41:31 localhost ignition[659]: op(1): [started]  loading QEMU firmware config module
Feb 13 09:41:31 localhost ignition[659]: op(1): executing: "modprobe" "qemu_fw_cfg"
Feb 13 09:41:31 localhost ignition[659]: op(1): [finished] loading QEMU firmware config module
Feb 13 09:41:31 localhost ignition[659]: QEMU firmware config was not found. Ignoring...
Feb 13 09:41:31 localhost ignition[659]: QEMU firmware config was not found. Ignoring...
Feb 13 09:41:31 localhost ignition[659]: not a config (empty): provider config was empty, continuing with empty cache config
Feb 13 09:41:31 localhost ignition[659]: fetch-offline: fetch-offline passed
Feb 13 09:41:31 localhost ignition[659]: Ignition finished successfully
Feb 13 09:41:31 localhost ignition[735]: Ignition 2.20.0
Feb 13 09:41:31 localhost ignition[735]: Stage: kargs
Feb 13 09:41:31 localhost ignition[735]: no configs at "/usr/lib/ignition/base.d"
Feb 13 09:41:31 localhost ignition[735]: no config dir at "/usr/lib/ignition/base.platform.d/qemu"
Feb 13 09:41:31 localhost ignition[735]: kargs: kargs passed
Feb 13 09:41:31 localhost ignition[735]: Ignition finished successfully
Feb 13 09:41:31 localhost ignition[743]: Ignition 2.20.0
Feb 13 09:41:31 localhost ignition[743]: Stage: disks
Feb 13 09:41:31 localhost ignition[743]: no configs at "/usr/lib/ignition/base.d"
Feb 13 09:41:31 localhost ignition[743]: no config dir at "/usr/lib/ignition/base.platform.d/qemu"
Feb 13 09:41:31 localhost ignition[743]: disks: disks passed
Feb 13 09:41:31 localhost ignition[743]: Ignition finished successfully
Feb 13 09:41:33 localhost ignition[1134]: INFO     : Ignition 2.20.0
Feb 13 09:41:33 localhost ignition[1134]: INFO     : Stage: mount
Feb 13 09:41:33 localhost ignition[1134]: INFO     : no configs at "/usr/lib/ignition/base.d"
Feb 13 09:41:33 localhost ignition[1134]: INFO     : no config dir at "/usr/lib/ignition/base.platform.d/qemu"
Feb 13 09:41:33 localhost ignition[1134]: INFO     : mount: mount passed
Feb 13 09:41:33 localhost ignition[1134]: INFO     : Ignition finished successfully
Feb 13 09:41:33 localhost ignition[1164]: INFO     : Ignition 2.20.0
Feb 13 09:41:33 localhost ignition[1164]: INFO     : Stage: files
Feb 13 09:41:33 localhost ignition[1164]: INFO     : no configs at "/usr/lib/ignition/base.d"
Feb 13 09:41:33 localhost ignition[1164]: INFO     : no config dir at "/usr/lib/ignition/base.platform.d/qemu"
Feb 13 09:41:33 localhost ignition[1164]: DEBUG    : files: compiled without relabeling support, skipping
Feb 13 09:41:33 localhost ignition[1164]: WARNING  : files: createResultFile: Ignition has already run on this system. Unexpected behavior may occur. Ignition is not designed to run more than once per sy>
Feb 13 09:41:33 localhost ignition[1164]: INFO     : files: createResultFile: createFiles: op(1): [started]  writing file "/sysroot/etc/.ignition-result.json"
Feb 13 09:41:33 localhost ignition[1164]: INFO     : files: createResultFile: createFiles: op(1): [finished] writing file "/sysroot/etc/.ignition-result.json"
Feb 13 09:41:33 localhost ignition[1164]: INFO     : files: files passed
Feb 13 09:41:33 localhost ignition[1164]: INFO     : Ignition finished successfully
Feb 13 09:41:34 localhost ignition[1209]: INFO     : Ignition 2.20.0
Feb 13 09:41:34 localhost ignition[1209]: INFO     : Stage: umount
Feb 13 09:41:34 localhost ignition[1209]: INFO     : no configs at "/usr/lib/ignition/base.d"
Feb 13 09:41:34 localhost ignition[1209]: INFO     : no config dir at "/usr/lib/ignition/base.platform.d/qemu"
Feb 13 09:41:34 localhost ignition[1209]: INFO     : umount: umount passed
Feb 13 09:41:34 localhost ignition[1209]: INFO     : Ignition finished successfully