
Packer Configuration Example

Estimated time to read: 9 minutes

  • Originally Written: June, 2023

Overview

Packer makes it simple to build machine images, such as a VM template that can be used in VMware vSphere.

Note

I tried a few example Packer scripts but always ran into issues when building images (and unfortunately I forgot to document them). The scripts from https://github.com/andif888/packer-ubuntu-vsphere-iso worked as a base, and I updated them and some of the user-data for my environment.

Running the scripts

  • Install Packer onto your local system

https://developer.hashicorp.com/packer/install

  • Run one of the build-* scripts, or run the packer build command directly from your terminal. The build scripts just save you from having to remember the exact Packer command (see also the init/validate sketch after the command below)
packer build -on-error=ask -force -var-file="ubuntu-2004.pkrvars.hcl" \
  -var='os_iso_checksum=5035be37a7e9abbdc09f0d257f3e33416c1a0fb322ba860d42d74aa75c3468d4' \
  -var='os_iso_url=https://releases.ubuntu.com/20.04/ubuntu-20.04.5-live-server-amd64.iso' \
  -var='vsphere_guest_os_type=ubuntu64Guest' \
  -var='vsphere_vm_name=conmurph-rancher-template' .
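
If this is the first build on a machine, you may also need to download the vsphere plugin declared in ubuntu.pkr.hcl, and packer validate is a quick sanity check before a full build. A minimal sketch, run from the directory containing ubuntu.pkr.hcl:

# one-time download of the required vsphere plugin (Packer 1.7+)
packer init .

# check the template and variable files without building anything
packer validate -var-file="ubuntu-2004.pkrvars.hcl" .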

Directory Structure

Config Files

http/meta-data

This file is intentionally left empty; cloud-init's NoCloud datasource just requires it to be present

  • Don't forget to set your own password: generate a hash with mkpasswd -m SHA-512 --rounds=4096 and paste the output into the identity.password field of http/user-data (a short example is shown after the file below)
http/user-data (used by cloud-init)
#cloud-config
autoinstall:
    version: 1
    refresh-installer:
        update: yes
    early-commands:
        # Stop ssh for packer
        - sudo systemctl stop ssh
    locale: en_US
    keyboard:
        layout: en
        variant: us
    identity:
        hostname: ubuntu
        username: ubuntu
        password: '' #run `mkpasswd -m SHA-512 --rounds=4096` to generate a hash for a new password and then paste the output here
    ssh:
        install-server: yes
        allow-pw: yes
    storage:
        layout:
            name: direct
    apt:
        primary:
            - arches: [i386, amd64]
            uri: "http://de.archive.ubuntu.com/ubuntu/"
    packages:
        - curl
        - software-properties-common
    user-data:
        disable_root: false
    late-commands:
        #- sed -i -e 's/^#\?PasswordAuthentication.*/PasswordAuthentication yes/g' /target/etc/ssh/sshd_config
        - echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > /target/etc/sudoers.d/ubuntu
        - curtin in-target --target=/target -- chmod 440 /etc/sudoers.d/ubuntu
        - |
            sudo cat > /etc/netplan/00-installer-config.yaml <<EOF
            # This is the network config written by 'subiquity'
            network:
                version: 2
                ethernets:
                    ens192:
                        dhcp4: true
                        dhcp4-overrides:
                            use-routes: false
                            use-dns: false
                            use-ntp: false
                        routes:
                            - to: default
                              via: 10.113.105.1
                        nameservers:
                            addresses: [10.113.108.100]
            EOF
        - sudo netplan apply
        - cp /etc/netplan/00-installer-config.yaml /target/etc/netplan/00-installer-config.yaml
        - rm -rf /target/etc/cloud/cloud.cfg.d/50-curtin-networking.cfg
        - mkdir /etc/systemd/system/docker.service.d
        - |
            sudo cat > /etc/systemd/system/docker.service.d/http-proxy.conf <<EOF
            [Service]
            Environment="HTTP_PROXY=http://myproxy.com:80/"
            Environment="HTTPS_PROXY=http://my-proxy.com:80/"
            Environment="NO_PROXY=localhost,127.0.0.0"
            EOF
        -  sudo systemctl daemon-reload

        #- curtin in-target --target=/target -- apt-get upgrade --yes

        - sudo ssh-keygen -A
        - sudo systemctl start ssh
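
To generate the password hash for the identity section above, something like the following should work (mkpasswd is provided by the whois package on Ubuntu/Debian; the hash output will of course differ):

# install mkpasswd if it isn't already present
sudo apt-get install -y whois

# paste the resulting '$6$rounds=4096$...' string into identity.password
mkpasswd -m sha-512 --rounds=4096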
  • Since this image will be used as a template, once it is built the cleanup script removes any logs and sets the hostname back to localhost. The script also resets the machine-id (I've seen cloned VMs fail to obtain their own IP from DHCP because they shared the template's machine-id); a quick post-clone check is sketched after the script
setup/cleanup.sh
#!/bin/bash
echo "> Cleaning all audit logs ..."
if [ -f /var/log/audit/audit.log ]; then
    cat /dev/null > /var/log/audit/audit.log
fi
if [ -f /var/log/wtmp ]; then
    cat /dev/null > /var/log/wtmp
fi
if [ -f /var/log/lastlog ]; then
    cat /dev/null > /var/log/lastlog
fi
# Cleans SSH keys.
echo "> Cleaning SSH keys ..."
#rm -f /etc/ssh/ssh_host_*
cd /etc/ssh
sudo ssh-keygen -A

# Sets hostname to localhost.
echo "> Setting hostname to localhost ..."
cat /dev/null > /etc/hostname
hostnamectl set-hostname localhost
# Cleans apt-get.
echo "> Cleaning apt-get ..."
apt-get clean
# Cleans the machine-id.
echo "> Cleaning the machine-id ..."
truncate -s 0 /etc/machine-id
rm /var/lib/dbus/machine-id
ln -s /etc/machine-id /var/lib/dbus/machine-id
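
As a quick post-clone check (a sketch, assuming the ens192 interface used in the netplan config), each clone should boot with a freshly generated machine-id and its own DHCP lease:

# /etc/machine-id should be regenerated on first boot and differ between clones
cat /etc/machine-id

# the clone should receive its own IP address from DHCP
ip -4 addr show ens192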
  • The network configuration is already applied in the user-data late-commands above, but it could also be run as a separate script
setup/configure-network.sh
#!/bin/bash
echo '> Configuring network settings for Rancher ...'
sudo bash -c 'cat > /etc/netplan/00-installer-config.yaml' << EOF
network:
    version: 2
    ethernets:
        ens192:
            dhcp4: true
            dhcp4-overrides:
                use-routes: false
                use-dns: false
                use-ntp: false
            routes:
                - to: default
                  via: 10.113.105.1
            nameservers:
                addresses: [10.113.108.100]
EOF

sudo netplan apply
  • Unlike the network configuration, this is not part of the user-data file; it is run as a Packer shell provisioner (listed in shell_scripts in variables.auto.pkrvars.hcl) and installs Python and Docker onto the template. A quick verification is sketched after the script
setup/install-packages.sh
#!/bin/bash

# echo '> Setting proxy'
# sudo echo "http_proxy='http://my-proxy.com:80'" | sudo tee -a  /etc/environment
# sudo echo "https_proxy='http://my-proxy.com:80'"  | sudo tee -a /etc/environment
# . /etc/environment

# echo "Acquire::http::proxy  \"http://my-proxy.com:80/\";"  | sudo tee -a /etc/apt/apt.conf.d/proxy.conf
# echo "Acquire::https::proxy  \"http://my-proxy.com:80/\";"  | sudo tee -a /etc/apt/apt.conf.d/proxy.conf

echo '> sudo apt update ...'
sudo apt-get update

echo '> Installing python ...'
sudo apt-get -y install python3
sudo apt-get -y install python3-pip

echo '> Installing Docker ...'
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# use the codename of the running release (focal for 20.04, jammy for 22.04)
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-cache policy docker-ce
sudo groupadd docker
sudo usermod -aG docker $USER
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
sudo systemctl status docker
docker ps
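
To confirm the packages made it into the template (or a clone of it), a couple of quick checks that don't need network access:

# verify the Docker engine and the compose plugin are installed
sudo docker version
docker compose version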
  • The command to build the Ubuntu 20.04 image. Update the ISO URL and checksum if that release is no longer available (see the checksum sketch after the script)
build-2004.sh
#!/bin/bash
# variable files ending with .auto.pkrvars.hcl are automatically loaded
packer build -on-error=ask -force -var-file="ubuntu-2004.pkrvars.hcl" \
-var='os_iso_checksum=5035be37a7e9abbdc09f0d257f3e33416c1a0fb322ba860d42d74aa75c3468d4' \
-var='os_iso_url=https://releases.ubuntu.com/20.04/ubuntu-20.04.5-live-server-amd64.iso' \
-var='vsphere_guest_os_type=ubuntu64Guest' \
-var='vsphere_vm_name=conmurph-rancher-template' .
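
If you switch to a newer ISO, you can recalculate the checksum locally or look it up in the published SHA256SUMS file for that release. A sketch, assuming the 20.04 release URLs used above:

# compute the checksum of a locally downloaded ISO
sha256sum ubuntu-20.04.5-live-server-amd64.iso

# or grab it from the published checksum list
curl -s https://releases.ubuntu.com/20.04/SHA256SUMS | grep live-server-amd64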
  • Same as build-2004.sh above but for Ubuntu 22.04
build-2204.sh
#!/bin/bash
# variable files ending with .auto.pkrvars.hcl are automatically loaded
packer build -var-file="ubuntu-2204.pkrvars.hcl" \
-var='os_iso_checksum=84aeaf7823c8c61baa0ae862d0a06b03409394800000b3235854a6b38eb4856f' \
-var='os_iso_url=http://ftp.halifax.rwth-aachen.de/ubuntu-releases/jammy/ubuntu-22.04-live-server-amd64.iso' \
-var='vsphere_guest_os_type=ubuntu64Guest' \
-var='vsphere_vm_name=conmurph-ubuntu-2204-template' .
  • This contains the boot command for Ubuntu 20.04, i.e. the keys you would press and the commands you would type if you were installing the OS manually
ubuntu-2004.pkrvars.hcl
boot_command = [
"<esc><esc><esc>",
"<enter><wait>",
"/casper/vmlinuz ",
"root=/dev/sr0 ",
"initrd=/casper/initrd ",
"autoinstall ",
"ds=nocloud-net;",
"<enter>"
]
  • This contains the boot command for Ubuntu 22.04, i.e. the keys you would press and the commands you would type if you were installing the OS manually
ubuntu-2204.pkrvars.hcl
boot_command = [
    "c<wait>",
    "linux /casper/vmlinuz --- autoinstall ds=\"nocloud-net\"",
    "<enter><wait>",
    "initrd /casper/initrd",
    "<enter><wait>",
    "boot",
    "<enter>"
    ]
  • This is the main Packer file. It defines the source to build (vsphere-iso) along with any additional build tasks, e.g. running the provisioning scripts
ubuntu.pkr.hcl
packer {
  required_plugins {
    vsphere = {
      version = ">= 0.0.1"
      source  = "github.com/hashicorp/vsphere"
    }
  }
}

variable "cpu_num" {
type    = number
default = 6
}

variable "disk_size" {
type    = number
default = 122880
}

variable "mem_size" {
type    = number
default = 32768
}

variable "os_iso_checksum" {
type    = string
default = ""
}

variable "os_iso_url" {
type    = string
default = ""
}

variable "vsphere_datastore" {
type    = string
default = ""
}

variable "vsphere_datacenter" {
type    = string
default = ""
}

variable "vsphere_guest_os_type" {
type    = string
default = ""
}

variable "vsphere_cluster" {
type    = string
default = ""
}

variable "vsphere_folder" {
type    = string
default = ""
}

variable "vsphere_password" {
type      = string
default   = env("VSPHERE_PASSWORD")
sensitive = true
}

variable "vsphere_network" {
type    = string
default = ""
}

variable "vsphere_server" {
type    = string
default = ""
}

variable "vsphere_vm_name" {
type    = string
default = ""
}

variable "vsphere_username" {
type    = string
default = ""
}

variable "ssh_password" {
type      = string
default   = ""
sensitive = true
}

variable "ssh_username" {
type    = string
default = ""
}

variable "cloudinit_userdata" {
type = string
default = ""
}

variable "cloudinit_metadata" {
type = string
default = ""
}

variable "shell_scripts" {
type = list(string)
description = "A list of scripts."
default = []
}

variable "boot_command" {
type = list(string)
description = "Ubuntu boot command"
default = []
}

source "vsphere-iso" "ubuntu" {

vcenter_server        = var.vsphere_server
cluster               = var.vsphere_cluster
datacenter            = var.vsphere_datacenter
datastore             = var.vsphere_datastore
folder                = var.vsphere_folder
username              = var.vsphere_username
password              = var.vsphere_password
insecure_connection   = "true"


CPUs                  = var.cpu_num
RAM                   = var.mem_size
RAM_reserve_all       = true
disk_controller_type  = ["pvscsi"]
guest_os_type         = var.vsphere_guest_os_type
iso_checksum          = var.os_iso_checksum
iso_url               = var.os_iso_url
cd_content            = {
    "/meta-data" = file("${var.cloudinit_metadata}")
    "/user-data" = file("${var.cloudinit_userdata}")
}
cd_label              = "cidata"

network_adapters {
    network             = var.vsphere_network
    network_card        = "vmxnet3"
}

storage {
    disk_size             = var.disk_size
    disk_thin_provisioned = true
}

vm_name               = var.vsphere_vm_name
convert_to_template   = "true"
communicator          = "ssh"
ssh_username          = var.ssh_username
ssh_password          = var.ssh_password
ssh_timeout           = "30m"
ssh_handshake_attempts = "100"

boot_order            = "disk,cdrom,floppy"
boot_wait             = "3s"
boot_command          = var.boot_command
shutdown_command      = "echo '${var.ssh_password}' | sudo -S -E shutdown -P now"
shutdown_timeout      = "15m"

configuration_parameters = {
    "disk.EnableUUID" = "true"
}
}

build {
sources = ["source.vsphere-iso.ubuntu"]

provisioner "shell" {
    environment_vars = [
    "http_proxy=\"http://my-proxy.com:80/\"",
    "https_proxy=\"http://my-proxy.com:80/\"",
    "no_proxy=localhost,127.0.0.1",
    ]

    execute_command = "echo '${var.ssh_password}' | {{.Vars}} sudo -S -E bash '{{.Path}}'"
    scripts = var.shell_scripts
    expect_disconnect = true
}

}
  • This is similar to a terraform.tfvars file: it keeps environment-specific configuration out of the main .pkr.hcl file. Update it with your environment details, such as the vCenter access information (the vCenter password is supplied via an environment variable; see the note after the file)
variables.auto.pkrvars.hcl
vsphere_server          = ""
vsphere_username        = ""
vsphere_datacenter      = ""
vsphere_cluster         = ""
vsphere_network         = ""
vsphere_datastore       = ""
vsphere_folder          = ""

# cloud_init files for unattended configuration for Ubuntu
cloudinit_userdata      = "./http/user-data"
cloudinit_metadata      = "./http/meta-data"

# final clean up script
shell_scripts           = ["./setup/cleanup.sh", "./setup/install-packages.sh"]

# SSH username (created in user-data. If you change it here then please also adjust it in ./http/user-data)
ssh_username            = "ubuntu"

# SSH password (created in user-data. If you change it here then please also adjust it in ./http/user-data)
ssh_password            = "ubuntu"
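
The vCenter password is deliberately not stored in this file; ubuntu.pkr.hcl defaults the vsphere_password variable to env("VSPHERE_PASSWORD"), so export that variable before running a build script:

# ubuntu.pkr.hcl reads the password from this environment variable
export VSPHERE_PASSWORD='your-vcenter-password'
./build-2004.sh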
