DevOps: Running everything under Cygwin on Windows

Table of Contents

1 Setup Cygwin for DevOps

1.1 Directory Structure after setup

We assume everything is installed/configured under the D: drive.

tree /misc/packer /misc/ansible /misc/vagrant
# /misc/packer
# └── ol
#     ├── 6.10
#     │   ├── http
#     │   │   └── ks.cfg
#     │   ├── iso
#     │   │   ├── iso-info.json
#     │   │   └── V978757-01.iso
#     │   ├── output-ol610-base.vzell.de
#     │   │   ├── box.ovf
#     │   │   ├── info.json
#     │   │   ├── metadata.json
#     │   │   ├── packer-ol610-base.mf
#     │   │   ├── packer-ol610-base.vdi
#     │   │   ├── packer-ol610-base-disk001.vmdk
#     │   │   └── Vagrantfile
#     │   ├── packer.json
#     │   └── packer_cache
#     ├── 7.5
#     │   ├── http
#     │   │   └── ks.cfg
#     │   ├── iso
#     │   │   └── iso-info.json
#     │   ├── output-ol75-base.vzell.de
#     │   │   ├── box.ovf
#     │   │   ├── info.json
#     │   │   ├── metadata.json
#     │   │   ├── packer-ol75-base.mf
#     │   │   ├── packer-ol75-base.vdi
#     │   │   ├── packer-ol75-base-disk001.vmdk
#     │   │   └── Vagrantfile
#     │   ├── packer.json
#     │   └── packer_cache
#     └── 7.5-1d
#         ├── http
#         │   └── ks.cfg
#         ├── iso
#         │   ├── iso-info.json
#         │   └── V975367-01.iso
#         ├── output-ol75-base.vzell.de
#         │   ├── packer-ol75-1d-base.mf
#         │   ├── packer-ol75-1d-base.ovf
#         │   ├── packer-ol75-1d-base.vdi
#         │   ├── packer-ol75-1d-base-disk001.vmdk
#         │   └── packer-ol75-1d-base-disk002.vmdk
#         ├── packer.json
#         └── packer_cache
# /misc/ansible
# └── roles
#     └── vzell.yum-gnome
#         ├── defaults
#         │   └── main.yml
#         ├── files
#         ├── handlers
#         │   └── main.yml
#         ├── meta
#         │   └── main.yml
#         ├── README.md
#         ├── tasks
#         │   └── main.yml
#         ├── templates
#         ├── tests
#         │   ├── inventory
#         │   └── test.yml
#         └── vars
#             └── main.yml
# /misc/vagrant
# ├── boxes
# │   ├── packer-ol610-0.9.0.box
# │   ├── packer-ol610-0.9.0.metadata.json
# │   ├── packer-ol75-0.9.0.box
# │   └── packer-ol75-0.9.0.metadata.json
# ├── stage
# ├── vagrant-multihost
# │   ├── ansible
# │   │   ├── group_vars
# │   │   │   └── all.yml
# │   │   ├── roles
# │   │   │   ├── vzell.filesystem
# │   │   │   │   ├── defaults
# │   │   │   │   │   └── main.yml
# │   │   │   │   ├── handlers
# │   │   │   │   │   └── main.yml
# │   │   │   │   ├── meta
# │   │   │   │   │   └── main.yml
# │   │   │   │   ├── README.md
# │   │   │   │   ├── tasks
# │   │   │   │   │   └── main.yml
# │   │   │   │   ├── tests
# │   │   │   │   │   ├── inventory
# │   │   │   │   │   └── test.yml
# │   │   │   │   └── vars
# │   │   │   │       └── main.yml
# │   │   │   └── vzell.yum
# │   │   │       ├── defaults
# │   │   │       │   └── main.yml
# │   │   │       ├── handlers
# │   │   │       │   └── main.yml
# │   │   │       ├── meta
# │   │   │       │   └── main.yml
# │   │   │       ├── README.md
# │   │   │       ├── tasks
# │   │   │       │   └── main.yml
# │   │   │       ├── tests
# │   │   │       │   ├── inventory
# │   │   │       │   └── test.yml
# │   │   │       └── vars
# │   │   │           └── main.yml
# │   │   └── site.yml
# │   ├── ansible.cfg
# │   ├── custom-vagrant-hosts.yml
# │   ├── LICENSE
# │   ├── README.md
# │   ├── scripts
# │   │   └── inventory.py
# │   ├── test
# │   │   └── runbats.sh
# │   ├── Vagrantfile
# │   └── vagrant-hosts.yml
# └── vagrant-multihost.old
#     ├── ansible
#     │   ├── group_vars
#     │   │   └── all.yml
#     │   ├── roles
#     │   │   ├── vzell.filesystem
#     │   │   │   ├── defaults
#     │   │   │   │   └── main.yml
#     │   │   │   ├── files
#     │   │   │   ├── handlers
#     │   │   │   │   └── main.yml
#     │   │   │   ├── meta
#     │   │   │   │   └── main.yml
#     │   │   │   ├── README.md
#     │   │   │   ├── tasks
#     │   │   │   │   └── main.yml
#     │   │   │   ├── templates
#     │   │   │   ├── tests
#     │   │   │   │   ├── inventory
#     │   │   │   │   └── test.yml
#     │   │   │   └── vars
#     │   │   │       └── main.yml
#     │   │   └── vzell.yum
#     │   │       ├── defaults
#     │   │       │   └── main.yml
#     │   │       ├── files
#     │   │       ├── handlers
#     │   │       │   └── main.yml
#     │   │       ├── meta
#     │   │       │   └── main.yml
#     │   │       ├── README.md
#     │   │       ├── tasks
#     │   │       │   └── main.yml
#     │   │       ├── templates
#     │   │       ├── tests
#     │   │       │   ├── inventory
#     │   │       │   └── test.yml
#     │   │       └── vars
#     │   │           └── main.yml
#     │   ├── site.yml
#     │   └── vagrant-inventory.ini
#     ├── ansible.cfg
#     ├── custom-vagrant-hosts.yml
#     ├── LICENSE
#     ├── README.md
#     ├── scripts
#     │   └── inventory.py
#     ├── test
#     │   └── runbats.sh
#     ├── Vagrantfile
#     └── vagrant-hosts.yml
# 
# 72 directories, 95 files
[-] ..
 |--[+] bin
 |--[+] cygdrive
 |--[+] dev
 |--[+] etc
 |--[+] home
 |--[+] lib
 |--[-] misc
 |   |--[-] ansible
 |   |   |--[-] roles
 |   |   |   |--[+] database_common
 |   |   |   |--[+] database_config
 |   |   |   |--[+] database_grid_install
 |   |   |   |--[+] database_host
 |   |   |   |--[+] database_install
 |   |   |   |--[+] fmw_application_bpm_config
 |   |   |   |--[+] fmw_application_bpm_vars
 |   |   |   |--[+] fmw_application_ohs_vars
 |   |   |   |--[+] fmw_application_osb_vars
 |   |   |   |--[+] fmw_application_soa_config
 |   |   |   |--[+] fmw_application_soa_vars
 |   |   |   |--[+] fmw_common
 |   |   |   |--[+] fmw_config_admin_server
 |   |   |   |--[+] fmw_config_extent_environment
 |   |   |   |--[+] fmw_config_managed_servers
 |   |   |   |--[+] fmw_host
 |   |   |   |--[+] fmw_install
 |   |   |   |--[+] fmw_patch
 |   |   |   |--[+] fmw_repository
 |   |   |   |--[+] inventories
 |   |   |   |--[+] vzell.jdk
 |   |   |   |--[+] vzell.yum-gnome
 |   |--[-] docker
 |   |   `--[-] vagrant
 |   |       |--[-] files
 |   |       |   `----- readme.txt
 |   |       |----- DockerHostVagrantFile
 |   |       |----- Dockerfile
 |   |       `----- Vagrantfile
 |   |--[-] git
 |   |   |--[+] spar-sops
 |   |--[-] packer
 |   |   |--[-] ol
 |   |   |   |--[-] 6.10
 |   |   |   |   |--[-] http
 |   |   |   |   |   `----- ks.cfg
 |   |   |   |   |--[-] iso
 |   |   |   |   |   |----- V978757-01.iso
 |   |   |   |   |   `----- iso-info.json
 |   |   |   |   |--[-] output-ol610-base.vzell.de
 |   |   |   |   |   |----- Vagrantfile
 |   |   |   |   |   |----- box.ovf
 |   |   |   |   |   |----- info.json
 |   |   |   |   |   |----- metadata.json
 |   |   |   |   |   |----- packer-ol610-base-disk001.vmdk
 |   |   |   |   |   |----- packer-ol610-base.mf
 |   |   |   |   |   `----- packer-ol610-base.vdi
 |   |   |   |   |--[-] packer_cache
 |   |   |   |   `----- packer.json
 |   |   |   |--[-] 7.5
 |   |   |   |   |--[-] http
 |   |   |   |   |   `----- ks.cfg
 |   |   |   |   |--[-] iso
 |   |   |   |   |   |----- V975367-01.iso
 |   |   |   |   |   `----- iso-info.json
 |   |   |   |   |--[-] output-ol75-base.vzell.de
 |   |   |   |   |   |----- Vagrantfile
 |   |   |   |   |   |----- box.ovf
 |   |   |   |   |   |----- info.json
 |   |   |   |   |   |----- metadata.json
 |   |   |   |   |   |----- packer-ol75-base-disk001.vmdk
 |   |   |   |   |   |----- packer-ol75-base.mf
 |   |   |   |   |   `----- packer-ol75-base.vdi
 |   |   |   |   |--[-] packer_cache
 |   |   |   |   `----- packer.json
 |   |--[-] vagrant
 |   |   |--[-] boxes
 |   |   |   |----- ol75-0.9.0.box
 |   |   |   |----- ol75-0.9.0.metadata.json
 |   |   |   |----- packer-ol610-0.9.0.box
 |   |   |   |----- packer-ol610-0.9.0.metadata.json
 |   |   |   |----- packer-ol75-0.9.0.box
 |   |   |   `----- packer-ol75-0.9.0.metadata.json
 |   |   |--[+] ol75-controlhost
 |   |   |   |--[-] database
 |   |   |   |   |--[+] install
 |   |   |   |   |--[+] response
 |   |   |   |   |--[+] rpm
 |   |   |   |   |--[+] sshsetup
 |   |   |   |   |--[+] stage
 |   |   |   |   |----- runInstaller
 |   |   |   |   `----- welcome.html
 |   |   |   |--[-] file_stores
 |   |   |   |   |----- BPMJMSFILESTORE_AUTO_1000000.DAT
 |   |   |   |   |----- BPMJMSFILESTORE_AUTO_2000000.DAT
 |   |   |   |   |----- BPMJMSFILESTORE_AUTO_3000000.DAT
 |   |   |   |   |----- BPMJMSFILESTORE_AUTO_4000000.DAT
 |   |   |   |   |----- SOAJMSFILESTORE_AUTO_1000000.DAT
 |   |   |   |   |----- SOAJMSFILESTORE_AUTO_2000000.DAT
 |   |   |   |   |----- SOAJMSFILESTORE_AUTO_3000000.DAT
 |   |   |   |   |----- SOAJMSFILESTORE_AUTO_4000000.DAT
 |   |   |   |   |----- UMSJMSFILESTORE_AUTO_1000000.DAT
 |   |   |   |   |----- UMSJMSFILESTORE_AUTO_2000000.DAT
 |   |   |   |   |----- UMSJMSFILESTORE_AUTO_3000000.DAT
 |   |   |   |   `----- UMSJMSFILESTORE_AUTO_4000000.DAT
 |   |   |   |--[-] files
 |   |   |   |--[-] grid
 |   |   |   |   |--[+] install
 |   |   |   |   |--[+] response
 |   |   |   |   |--[+] rpm
 |   |   |   |   |--[+] sshsetup
 |   |   |   |   |--[+] stage
 |   |   |   |   |----- runInstaller
 |   |   |   |   |----- runcluvfy.sh
 |   |   |   |   `----- welcome.html
 |   |   |   |--[-] patches
 |   |   |   |   |----- p26045997_122130_Generic.zip
 |   |   |   |   `----- p6880880_132000_Generic.zip
 |   |   |   |--[+] soabpm_file_stores
 |   |   |   |----- Descript.ion
 |   |   |   |----- course-SOAEssentials.12c.tar.gz
 |   |   |   |----- fmw_12.2.1.3.0_infrastructure.jar
 |   |   |   |----- fmw_12.2.1.3.0_ohs_linux64.bin
 |   |   |   |----- fmw_12.2.1.3.0_osb.jar
 |   |   |   |----- fmw_12.2.1.3.0_soa.jar
 |   |   |   |----- fmw_12.2.1.3.0_wls.jar
 |   |   |   |----- jdk-8u181-linux-x64.tar.gz
 |   |   |   |----- osb_domain.jar
 |   |   |   |----- sca_AirlineAB_rev1.0.jar
 |   |   |   |----- soabpm_domain.jar
 |   |   |   `----- test_domain.jar
 |   |   |--[-] vagrant-docker
 |   |   |   |--[-] build
 |   |   |   |   |--[-] public-html
 |   |   |   |   |   `----- index.html
 |   |   |   |   |----- Dockerfile
 |   |   |   |   |----- Vagrantfile
 |   |   |   |   `----- ubuntu-xenial-16.04-cloudimg-console.log
 |   |   |   `--[-] image
 |   |   |       |--[-] public-html
 |   |   |       |   `----- index.html
 |   |   |       |----- Vagrantfile
 |   |   |       `----- ubuntu-xenial-16.04-cloudimg-console.log
 |   |   |--[-] vagrant-multihost
 |   |   |   |--[-] ansible
 |   |   |   |   |--[-] group_vars
 |   |   |   |   |   `----- all.yml
 |   |   |   |   |--[-] roles
 |   |   |   |   |   |--[-] vzell.filesystem
 |   |   |   |   |   |   |--[+] defaults
 |   |   |   |   |   |   |--[+] files
 |   |   |   |   |   |   |--[+] handlers
 |   |   |   |   |   |   |--[+] meta
 |   |   |   |   |   |   |--[-] tasks
 |   |   |   |   |   |   |   `----- main.yml
 |   |   |   |   |   |   |--[+] templates
 |   |   |   |   |   |   |--[+] tests
 |   |   |   |   |   |   |--[+] vars
 |   |   |   |   |   |   `----- README.md
 |   |   |   |   |   `--[-] vzell.yum
 |   |   |   |   |       |--[+] defaults
 |   |   |   |   |       |--[+] files
 |   |   |   |   |       |--[+] handlers
 |   |   |   |   |       |--[+] meta
 |   |   |   |   |       |--[-] tasks
 |   |   |   |   |       |   `----- main.yml
 |   |   |   |   |       |--[+] templates
 |   |   |   |   |       |--[+] tests
 |   |   |   |   |       |--[+] vars
 |   |   |   |   |       `----- README.md
 |   |   |   |   `----- site.yml
 |   |   |   |--[+] scripts
 |   |   |   |--[+] test
 |   |   |   |----- LICENSE
 |   |   |   |----- README.md
 |   |   |   |----- Vagrantfile
 |   |   |   |----- ansible.cfg
 |   |   |   |----- custom-vagrant-hosts.yml
 |   |   |   `----- vagrant-hosts.yml
 |   `--[+] virtualbox
 |--[+] opt
 |--[+] proc
 |--[+] sbin
 |--[+] share
 |--[+] srv
 |--[+] tmp
 |--[+] usr
 |--[+] var
 |----- Cygwin-Terminal.ico
 |----- Cygwin.bat
 |----- Cygwin.ico

1.2 Installing Cygwin

http://cygwin.com/setup-x86_64.exe

Open a Windows cmd terminal and execute

setup-x86_64.exe -q --packages=binutils,curl,gcc-g++,git,gmp,libffi-devel,libgmp-devel,make,nano,openssh,openssl-devel,python-crypto,python-paramiko,python2,python2-devel,python2-openssl,python2-pip,python2-setuptools

The above package list is probably not complete. Note that I'm using a full Cygwin installation anyway.

Install Cygwin directly into the root of the D: drive.
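For an unattended installation that honors this layout, the installation root and a download mirror can also be passed on the command line. A minimal sketch (the mirror URL is only an example, pick one from https://cygwin.com/mirrors.html):

setup-x86_64.exe -q --root D:\ --site http://mirrors.kernel.org/sourceware/cygwin/ --packages=binutils,curl,gcc-g++,git,gmp,libffi-devel,libgmp-devel,make,nano,openssh,openssl-devel,python-crypto,python-paramiko,python2,python2-devel,python2-openssl,python2-pip,python2-setuptools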

From now on we can either work in a bash shell or use mintty.

It's recommended to use mintty, either by clicking the Cygwin64 Terminal icon on the desktop or by running:

mintty

1.3 Configuring Cygwin

In $HOME/.bashrc, add the following directories to the PATH (a sketch of the corresponding export line follows the list):

  • /usr/local/bin - used for storing helper scripts
  • /opt/vagrant/bin - symlink to choco installed vagrant on C: drive
  • /cygdrive/c/ProgramData/chocolatey/bin - used for accessing choco installed programs
  • /cygdrive/c/WINDOWS/System32/WindowsPowerShell/v1.0 - needed by vagrant
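A minimal sketch of the corresponding export line (an assumption about your existing entries; /usr/bin and /usr/local/bin are usually already present on the default Cygwin PATH):

export PATH="${PATH}:/opt/vagrant/bin:/cygdrive/c/ProgramData/chocolatey/bin:/cygdrive/c/WINDOWS/System32/WindowsPowerShell/v1.0"

Then reload the configuration and verify: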
source ~/.bashrc
echo $PATH
# /usr/bin:/usr/local/bin:/usr/lib/lapack:/opt/vagrant/bin:/cygdrive/c/ProgramData/chocolatey/bin:/cygdrive/c/WINDOWS/System32/WindowsPowerShell/v1.0

1.4 Installing Chocolatey

https://chocolatey.org/

Open an Administrator cmd window and paste the following command. This is the only time we leave our bash shell.

@"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin"
Microsoft Windows [Version 10.0.17134.228]

VZE@NB-2483 C:\Users\VZE\AppData\Local\Temp
$ @"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin"
Getting latest version of the Chocolatey package for download.
Getting Chocolatey from https://chocolatey.org/api/v2/package/chocolatey/0.10.11.
Downloading 7-Zip commandline tool prior to extraction.
Extracting C:\Users\VZE\AppData\Local\Temp\chocolatey\chocInstall\chocolatey.zip to C:\Users\VZE\AppData\Local\Temp\chocolatey\chocInstall...
Installing chocolatey on this machine
Creating ChocolateyInstall as an environment variable (targeting 'Machine')
  Setting ChocolateyInstall to 'C:\ProgramData\chocolatey'
WARNING: It's very likely you will need to close and reopen your shell
  before you can use choco.
Restricting write permissions to Administrators
We are setting up the Chocolatey package repository.
The packages themselves go to 'C:\ProgramData\chocolatey\lib'
  (i.e. C:\ProgramData\chocolatey\lib\yourPackageName).
A shim file for the command line goes to 'C:\ProgramData\chocolatey\bin'
  and points to an executable in 'C:\ProgramData\chocolatey\lib\yourPackageName'.

Creating Chocolatey folders if they do not already exist.

WARNING: You can safely ignore errors related to missing log files when
  upgrading from a version of Chocolatey less than 0.9.9.
  'Batch file could not be found' is also safe to ignore.
  'The system cannot find the file specified' - also safe.
chocolatey.nupkg file not installed in lib.
 Attempting to locate it from bootstrapper.
PATH environment variable does not have C:\ProgramData\chocolatey\bin in it. Adding...
WARNING: Not setting tab completion: Profile file does not exist at 'C:\Users\VZE\Documents\WindowsPowerShell\Microsoft.PowerShell_profile.ps1'.
Chocolatey (choco.exe) is now ready.
You can call choco from anywhere, command line or powershell by typing choco.
Run choco /? for a list of functions.
You may need to shut down and restart powershell and/or consoles
 first prior to using choco.
Ensuring chocolatey commands are on the path
Ensuring chocolatey.nupkg is in the lib folder

VZE@NB-2483 C:\Users\VZE\AppData\Local\Temp
$

1.5 Emulate sudo access under Cygwin for Windows Administrator rights

In preparation for installing programs with Chocolatey that do not exist as a Cygwin package, we set up a Unix-style sudo emulation in Cygwin. Remember that Chocolatey needs Windows Administrator rights for installation.

Create sudo wrapper scripts for command interpreters

mkdir -p /usr/local/bin
cat > /usr/local/bin/sudo <<-"_EOF"
#!/usr/bin/bash
if cygstart --wait --action=runas bash -c \"$@ \> /tmp/_sudo.out\"
then
  cat /tmp/_sudo.out && rm /tmp/_sudo.out
else
  cat /tmp/_sudo.out && rm /tmp/_sudo.out
  echo sudo $@ failed...
fi
_EOF
chmod 755 /usr/local/bin/sudo
cat > /usr/local/bin/subash <<-"_EOF"
#!/usr/bin/bash
cygstart --action=runas /bin/bash
_EOF
chmod 755 /usr/local/bin/subash
cat > /usr/local/bin/sucmd <<-"_EOF"
#!/usr/bin/bash
cygstart --action=runas /cygdrive/c/windows/system32/cmd
_EOF
chmod 755 /usr/local/bin/sucmd
cat > /usr/local/bin/supowershell <<-"_EOF"
#!/usr/bin/bash
cygstart --action=runas /cygdrive/c/WINDOWS/System32/WindowsPowerShell/v1.0/powershell
_EOF
chmod 755 /usr/local/bin/supowershell
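A quick smoke test of the sudo wrapper (my own suggestion, not part of the original setup): run any command through it; a UAC prompt should appear and the command's output is echoed back from /tmp/_sudo.out.

sudo cygpath -w /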

Create convenience symbolic links to make our system look more Unix-like

ln -s /cygdrive/c/ProgramData/chocolatey/bin/choco /usr/local/bin/choco
ln -s /cygdrive/c/Windows/System32/cmd /usr/local/bin/cmd
ln -s /cygdrive/c/WINDOWS/System32/WindowsPowerShell/v1.0/powershell /usr/local/bin/powershell

1.6 Installing Non-Cygwin tools with Chocolatey

Configuring chocolatey

type choco
# choco is /usr/local/bin/choco
type sudo
# sudo is /usr/local/bin/sudo
sudo choco feature enable -n allowGlobalConfirmation
# Chocolatey v0.10.11
# Enabled allowGlobalConfirmation

Installing VirtualBox

choco info virtualbox
# Chocolatey v0.10.11
# virtualbox 5.2.20 [Approved]
#  Title: VirtualBox | Published: 16.10.2018
#  Package approved as a trusted package on Okt 16 2018 18:45:59.
#  Package testing status: Exempted on Okt 16 2018 18:45:59.
#  Number of Downloads: 453828 | Downloads for this version: 3924
#  Package url
#  Chocolatey Package Source: https://github.com/chocolatey/chocolatey-coreteampackages/tree/master/automatic/virtualbox
#  Package Checksum: 'dCYfXhJC4Bf3HOGAzqHiMNCLfsViCQvWvOXjDgHr+JYj76EKWqlybE29X2mhbHjJ3OIKLon/KcLyBAG5oxVLrg==' (SHA512)
#  Tags: virtualbox virtualization virtual oracle admin foss cross-platform
#  Software Site: https://www.virtualbox.org/
#  Software License: https://www.virtualbox.org/wiki/VirtualBox_PUEL
#  Software Source: https://www.virtualbox.org/browser/trunk
#  Documentation: https://www.virtualbox.org/manual
#  Summary: VirtualBox is a general-purpose full virtualizer for x86 hardware, targeted at server, desktop and embedded use.
#  Description: VirtualBox is a cross-platform virtualization application. It installs on existing Intel or AMD-based computers, whether they are running Windows, Mac, Linux or Solaris operating systems. It extends the capabilities of your existing computer so that it can run multiple operating systems (inside multiple virtual machines) at the same time.
#   
#   ## Package parameters
#   
#   ### Installation/Upgrading
#   - `/CurrentUser`       - Install for current user only
#   - `/NoDesktopShortcut` - Do not create desktop shortcut
#   - `/NoQuickLaunch`     - Do not create quick launch icon
#   - `/NoRegister`        - Do not register virtualbox file extensions
#   - `/NoPath`            - Do not add virtualbox install directory to the PATH
#   - `/NoExtensionPack`   - Do not install extension pack
#   
#   ### Uninstalling
#   - `/KeepExtensions`    - Do not uninstall installed virtualbox extensions
#   
#   Example: `choco install virtualbox --params "/NoDesktopShortcut /NoExtensionPack"`
# 
# 1 packages found.
sudo choco install virtualbox
vbm --version
# 5.1.28r117968
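If vbm is not already defined on your system, a convenience symlink to VBoxManage (analogous to the links created above; the default VirtualBox installation path is assumed) provides it:

ln -s /cygdrive/c/Program\ Files/Oracle/VirtualBox/VBoxManage /usr/local/bin/vbm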

Installing Packer

choco info packer
# Chocolatey v0.10.11
# packer 1.3.1 [Approved]
#  Title: Packer | Published: 18.09.2018
#  Package approved as a trusted package on Sep 18 2018 20:52:44.
#  Package testing status: Passing on Sep 18 2018 20:42:17.
#  Number of Downloads: 50441 | Downloads for this version: 1920
#  Package url
#  Chocolatey Package Source: https://github.com/StefanScherer/choco-packer
#  Package Checksum: 'xARYST2Ta9Is7g6wKibN9aCbZRaLKtuT+atmQfxI89JWnFq7HbajuCgKwDNDya2bxiJbtJSkSsD7Z2DqaKDOlQ==' (SHA512)
#  Tags: packer vagrant virtual machine VM VirtualBox VMware puppet chef hashicorp
#  Software Site: http://packer.io/
#  Software License: https://github.com/hashicorp/packer/blob/master/LICENSE
#  Documentation: https://packer.io/docs
#  Mailing List: https://groups.google.com/forum/#!forum/packer-tool
#  Issues: https://github.com/hashicorp/packer/issues
#  Summary: Packer is an open source tool for creating identical machine images for multiple platforms from a single source configuration.
#  Description: Packer is an open source tool for creating identical machine images for multiple platforms from a single source configuration. Packer is lightweight, runs on every major operating system, and is highly performant, creating machine images for multiple platforms in parallel. Packer does not replace configuration management like Chef or Puppet. In fact, when building images, Packer is able to use tools like Chef or Puppet to install software onto the image.
# 
# 1 packages found.
sudo choco install packer
# Chocolatey v0.10.11
# Installing the following packages:
# packer
# By installing you accept licenses for the packages.
# Progress: Downloading packer 1.3.1... 100%
# 
# packer v1.3.1 [Approved]
# packer package files install completed. Performing other installation steps.
# Removing old packer plugins
# Downloading packer 64 bit
#   from 'https://releases.hashicorp.com/packer/1.3.1/packer_1.3.1_windows_amd64.zip'
# Progress: 100% - Completed download of C:\Users\VZE\AppData\Local\Temp\chocolatey\packer\1.3.1\packer_1.3.1_windows_amd64.zip (25.62 MB).
# Download of packer_1.3.1_windows_amd64.zip (25.62 MB) completed.
# Hashes match.
# Extracting C:\Users\VZE\AppData\Local\Temp\chocolatey\packer\1.3.1\packer_1.3.1_windows_amd64.zip to C:\ProgramData\chocolatey\lib\packer\tools...
# C:\ProgramData\chocolatey\lib\packer\tools
#  ShimGen has successfully created a shim for packer.exe
#  The install of packer was successful.
#   Software installed to 'C:\ProgramData\chocolatey\lib\packer\tools'
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
packer --version
# 1.3.1
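With Packer on the PATH, the templates from the directory layout above can already be syntax-checked; a sketch (output depends on the template and any variables it requires):

cd /misc/packer/ol/7.5
packer validate packer.json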

Installing Vagrant

choco info vagrant
# Chocolatey v0.10.11
# vagrant 2.2.0 [Approved]
#  Title: Vagrant (Install) | Published: 17.10.2018
#  Package approved as a trusted package on Okt 17 2018 15:56:35.
#  Package testing status: Passing on Okt 17 2018 15:31:00.
#  Number of Downloads: 180850 | Downloads for this version: 647
#  Package url
#  Chocolatey Package Source: https://github.com/chocolatey/chocolatey-coreteampackages/tree/master/automatic/vagrant
#  Package Checksum: 'jWBgNNGsd2nEgdRY+cN0F6npCY2iDjME0nKCMu+VFQF+H8Xnv1fyPYTpl1rcCnc+2PP7ABJUPIiEbNCqk5fwqA==' (SHA512)
#  Tags: vagrant admin sandbox virtual machine testing VM VirtualBox VMware cross-platform foss cli
#  Software Site: https://www.vagrantup.com/
#  Software License: https://github.com/mitchellh/vagrant/blob/master/LICENSE
#  Software Source: https://github.com/mitchellh/vagrant
#  Documentation: https://docs.vagrantup.com/docs
#  Mailing List: https://groups.google.com/forum/#!forum/vagrant-up
#  Issues: https://github.com/mitchellh/vagrant/issues
#  Summary: Vagrant - Development environments made easy.
#  Description: Vagrant provides easy to configure, reproducible, and portable work environments built on top of industry-standard technology and controlled by a single consistent workflow to help maximize the productivity and flexibility of you and your team.
#   
#   To achieve its magic, Vagrant stands on the shoulders of giants. Machines are provisioned on top of VirtualBox, VMware, AWS, or any other provider. Then, industry-standard provisioning tools such as shell scripts, Chef, or Puppet, can be used to automatically install and configure software on the machine.
#  Release Notes: [CHANGELOG](https://github.com/mitchellh/vagrant/blob/master/CHANGELOG.md)
# 
# 1 packages found.
sudo choco install vagrant
# Chocolatey v0.10.11
# Installing the following packages:
# vagrant
# By installing you accept licenses for the packages.
# Progress: Downloading vagrant 2.2.0... 100%
# 
# vagrant v2.2.0 [Approved]
# vagrant package files install completed. Performing other installation steps.
# Downloading vagrant 64 bit
#   from 'https://releases.hashicorp.com/vagrant/2.2.0/vagrant_2.2.0_x86_64.msi'
# Progress: 100% - Completed download of C:\temp\chocolatey\vagrant\2.2.0\vagrant_2.2.0_x86_64.msi (227.88 MB).
# Download of vagrant_2.2.0_x86_64.msi (227.88 MB) completed.
# Hashes match.
# Installing vagrant...
# vagrant has been installed.
# Repairing currently installed global plugins. This may take a few minutes...
# Installed plugins successfully repaired!
#   vagrant may be able to be automatically uninstalled.
# Environment Vars (like PATH) have changed. Close/reopen your shell to
#  see the changes (or in powershell/cmd.exe just type `refreshenv`).
#  The install of vagrant was successful.
#   Software installed as 'msi', install location is likely default.
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
# 
# Packages requiring reboot:
#  - vagrant (exit code 3010)
# 
# The recent package changes indicate a reboot is necessary.
#  Please reboot at your earliest convenience.

Reboot at your earliest convenience, as the installer requests.

After the reboot, start mintty again.

mintty
ln -s /cygdrive/c/HashiCorp/Vagrant /opt/vagrant
type vagrant
vagrant --version
# Vagrant 2.2.0
vagrant --debug
#  INFO global: Vagrant version: 2.2.0
#  INFO global: Ruby version: 2.4.4
#  INFO global: RubyGems version: 2.6.14.1
#  INFO global: VAGRANT_EXECUTABLE="C:\\HashiCorp\\Vagrant\\embedded\\gems\\2.2.0\\gems\\vagrant-2.2.0\\bin\\vagrant"
#  INFO global: VAGRANT_HOME="D:\\misc\\vagrant\\.vagrant.d"
#  INFO global: VAGRANT_INSTALLER_EMBEDDED_DIR="C:\\HashiCorp\\Vagrant\\embedded"
#  INFO global: VAGRANT_INSTALLER_ENV="1"
#  INFO global: VAGRANT_INSTALLER_VERSION="2"
#  INFO global: VAGRANT_LOG="debug"
#  WARN global: resolv replacement has not been enabled!
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/box/plugin.rb
#  INFO manager: Registered plugin: box command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/cap/plugin.rb
#  INFO manager: Registered plugin: cap command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/cloud/plugin.rb
#  INFO manager: Registered plugin: vagrant-cloud
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/destroy/plugin.rb
#  INFO manager: Registered plugin: destroy command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/global-status/plugin.rb
#  INFO manager: Registered plugin: global-status command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/halt/plugin.rb
#  INFO manager: Registered plugin: halt command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/help/plugin.rb
#  INFO manager: Registered plugin: help command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/init/plugin.rb
#  INFO manager: Registered plugin: init command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/list-commands/plugin.rb
#  INFO manager: Registered plugin: list-commands command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/login/plugin.rb
#  INFO manager: Registered plugin: vagrant-login
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/package/plugin.rb
#  INFO manager: Registered plugin: package command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/plugin/plugin.rb
#  INFO manager: Registered plugin: plugin command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/port/plugin.rb
#  INFO manager: Registered plugin: port command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/powershell/plugin.rb
#  INFO manager: Registered plugin: powershell command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/provider/plugin.rb
#  INFO manager: Registered plugin: provider command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/provision/plugin.rb
#  INFO manager: Registered plugin: provision command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/push/plugin.rb
#  INFO manager: Registered plugin: push command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/rdp/plugin.rb
#  INFO manager: Registered plugin: rdp command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/reload/plugin.rb
#  INFO manager: Registered plugin: reload command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/resume/plugin.rb
#  INFO manager: Registered plugin: resume command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/snapshot/plugin.rb
#  INFO manager: Registered plugin: snapshot command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/ssh/plugin.rb
#  INFO manager: Registered plugin: ssh command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/ssh_config/plugin.rb
#  INFO manager: Registered plugin: ssh-config command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/status/plugin.rb
#  INFO manager: Registered plugin: status command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/suspend/plugin.rb
#  INFO manager: Registered plugin: suspend command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/up/plugin.rb
#  INFO manager: Registered plugin: up command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/upload/plugin.rb
#  INFO manager: Registered plugin: upload command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/validate/plugin.rb
#  INFO manager: Registered plugin: validate command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/version/plugin.rb
#  INFO manager: Registered plugin: version command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/winrm/plugin.rb
#  INFO manager: Registered plugin: winrm command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/commands/winrm_config/plugin.rb
#  INFO manager: Registered plugin: winrm-config command
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/communicators/ssh/plugin.rb
#  INFO manager: Registered plugin: ssh communicator
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/communicators/winrm/plugin.rb
#  INFO manager: Registered plugin: winrm communicator
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/communicators/winssh/plugin.rb
#  INFO manager: Registered plugin: windows ssh communicator
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/alt/plugin.rb
#  INFO manager: Registered plugin: ALT Platform guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/amazon/plugin.rb
#  INFO manager: Registered plugin: Amazon Linux guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/arch/plugin.rb
#  INFO manager: Registered plugin: Arch guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/atomic/plugin.rb
#  INFO manager: Registered plugin: Atomic Host guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/bsd/plugin.rb
#  INFO manager: Registered plugin: BSD-based guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/coreos/plugin.rb
#  INFO manager: Registered plugin: CoreOS guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/darwin/plugin.rb
#  INFO manager: Registered plugin: Darwin guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/debian/plugin.rb
#  INFO manager: Registered plugin: Debian guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/dragonflybsd/plugin.rb
#  INFO manager: Registered plugin: DragonFly BSD guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/elementary/plugin.rb
#  INFO manager: Registered plugin: Elementary guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/esxi/plugin.rb
#  INFO manager: Registered plugin: ESXi guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/fedora/plugin.rb
#  INFO manager: Registered plugin: Fedora guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/freebsd/plugin.rb
#  INFO manager: Registered plugin: FreeBSD guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/funtoo/plugin.rb
#  INFO manager: Registered plugin: Funtoo guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/gentoo/plugin.rb
#  INFO manager: Registered plugin: Gentoo guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/haiku/plugin.rb
#  INFO manager: Registered plugin: Haiku guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/kali/plugin.rb
#  INFO manager: Registered plugin: Kali guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/linux/plugin.rb
#  INFO manager: Registered plugin: Linux guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/mint/plugin.rb
#  INFO manager: Registered plugin: Mint guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/netbsd/plugin.rb
#  INFO manager: Registered plugin: NetBSD guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/nixos/plugin.rb
#  INFO manager: Registered plugin: NixOS guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/omnios/plugin.rb
#  INFO manager: Registered plugin: OmniOS guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/openbsd/plugin.rb
#  INFO manager: Registered plugin: OpenBSD guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/photon/plugin.rb
#  INFO manager: Registered plugin: VMware Photon guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/pld/plugin.rb
#  INFO manager: Registered plugin: PLD Linux guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/redhat/plugin.rb
#  INFO manager: Registered plugin: Red Hat Enterprise Linux guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/slackware/plugin.rb
#  INFO manager: Registered plugin: Slackware guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/smartos/plugin.rb
#  INFO manager: Registered plugin: SmartOS guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/solaris/plugin.rb
#  INFO manager: Registered plugin: Solaris guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/solaris11/plugin.rb
#  INFO manager: Registered plugin: Solaris 11 guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/suse/plugin.rb
#  INFO manager: Registered plugin: SUSE guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/tinycore/plugin.rb
#  INFO manager: Registered plugin: TinyCore Linux guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/trisquel/plugin.rb
#  INFO manager: Registered plugin: Trisquel guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/ubuntu/plugin.rb
#  INFO manager: Registered plugin: Ubuntu guest
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/guests/windows/plugin.rb
#  INFO manager: Registered plugin: Windows guest.
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/alt/plugin.rb
#  INFO manager: Registered plugin: ALT Platform host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/arch/plugin.rb
#  INFO manager: Registered plugin: Arch host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/bsd/plugin.rb
#  INFO manager: Registered plugin: BSD host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/darwin/plugin.rb
#  INFO manager: Registered plugin: Mac OS X host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/freebsd/plugin.rb
#  INFO manager: Registered plugin: FreeBSD host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/gentoo/plugin.rb
#  INFO manager: Registered plugin: Gentoo host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/linux/plugin.rb
#  INFO manager: Registered plugin: Linux host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/null/plugin.rb
#  INFO manager: Registered plugin: null host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/redhat/plugin.rb
#  INFO manager: Registered plugin: Red Hat Enterprise Linux host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/slackware/plugin.rb
#  INFO manager: Registered plugin: Slackware host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/suse/plugin.rb
#  INFO manager: Registered plugin: SUSE host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/hosts/windows/plugin.rb
#  INFO manager: Registered plugin: Windows host
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/kernel_v1/plugin.rb
#  INFO manager: Registered plugin: kernel
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/kernel_v2/plugin.rb
#  INFO manager: Registered plugin: kernel
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/providers/docker/plugin.rb
#  INFO manager: Registered plugin: docker-provider
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/providers/hyperv/plugin.rb
#  INFO manager: Registered plugin: Hyper-V provider
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/providers/virtualbox/plugin.rb
#  INFO manager: Registered plugin: VirtualBox provider
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/ansible/plugin.rb
#  INFO manager: Registered plugin: ansible
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/cfengine/plugin.rb
#  INFO manager: Registered plugin: CFEngine Provisioner
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/chef/plugin.rb
#  INFO manager: Registered plugin: chef
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/docker/plugin.rb
#  INFO manager: Registered plugin: docker
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/file/plugin.rb
#  INFO manager: Registered plugin: file
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/puppet/plugin.rb
#  INFO manager: Registered plugin: puppet
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/salt/plugin.rb
#  INFO manager: Registered plugin: salt
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/provisioners/shell/plugin.rb
#  INFO manager: Registered plugin: shell
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/pushes/atlas/plugin.rb
#  INFO manager: Registered plugin: atlas
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/pushes/ftp/plugin.rb
#  INFO manager: Registered plugin: ftp
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/pushes/heroku/plugin.rb
#  INFO manager: Registered plugin: heroku
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/pushes/local-exec/plugin.rb
#  INFO manager: Registered plugin: local-exec
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/pushes/noop/plugin.rb
#  INFO manager: Registered plugin: noop
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/synced_folders/nfs/plugin.rb
#  INFO manager: Registered plugin: NFS synced folders
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/synced_folders/rsync/plugin.rb
#  INFO manager: Registered plugin: RSync synced folders
# DEBUG global: Loading core plugin: C:/HashiCorp/Vagrant/embedded/gems/2.2.0/gems/vagrant-2.2.0/plugins/synced_folders/smb/plugin.rb
#  INFO manager: Registered plugin: SMB synced folders
#  INFO vagrant: `vagrant` invoked: ["--debug"]
# DEBUG vagrant: Creating Vagrant environment
#  INFO environment: Environment initialized (#<Vagrant::Environment:0x00000000043ff3c8>)
#  INFO environment:   - cwd: D:/misc/vagrant/vagrant-multihost
#  INFO environment: Home path: D:/misc/vagrant/.vagrant.d
# DEBUG environment: Effective local data path: D:/misc/vagrant/vagrant-multihost/.vagrant
#  INFO environment: Local data path: D:/misc/vagrant/vagrant-multihost/.vagrant
# DEBUG environment: Creating: D:/misc/vagrant/vagrant-multihost/.vagrant
# DEBUG manager: Enabling localized plugins
#  INFO manager: Plugins:
# DEBUG bundler: Current generated plugin dependency list: []
# DEBUG bundler: Generating new builtin set instance.
# DEBUG bundler: Generating new plugin set instance. Skip gems - []
# DEBUG bundler: Activating solution set: []
#  INFO manager: Loading plugins...
# DEBUG manager: Enabling globalized plugins
#  INFO manager: Plugins:
# DEBUG bundler: Current generated plugin dependency list: []
# DEBUG bundler: Generating new builtin set instance.
# DEBUG bundler: Generating new plugin set instance. Skip gems - []
# DEBUG bundler: Activating solution set: []
#  INFO manager: Loading plugins...
#  INFO loader: Set :root = ["#<Pathname:D:/misc/vagrant/vagrant-multihost/Vagrantfile>"]
# DEBUG loader: Populating proc cache for #<Pathname:D:/misc/vagrant/vagrant-multihost/Vagrantfile>
# DEBUG loader: Load procs for pathname: D:/misc/vagrant/vagrant-multihost/Vagrantfile
#  INFO loader: Loading configuration in order: [:home, :root]
# DEBUG loader: Loading from: root (evaluating)
# DEBUG loader: Configuration loaded successfully, finalizing and returning
# DEBUG push: finalizing
#  INFO environment: Running hook: environment_plugins_loaded
#  INFO runner: Preparing hooks for middleware sequence...
#  INFO runner: 1 hooks defined.
#  INFO runner: Running action: environment_plugins_loaded #<Vagrant::Action::Builder:0x0000000004372658>
#  INFO environment: Running hook: environment_load
#  INFO runner: Preparing hooks for middleware sequence...
#  INFO runner: 1 hooks defined.
#  INFO runner: Running action: environment_load #<Vagrant::Action::Builder:0x0000000003964dc8>
# DEBUG checkpoint_client: starting plugin check
#  INFO cli: CLI: [] nil []
# DEBUG checkpoint_client: plugin check complete
# WARNING: This command has been deprecated in favor of `vagrant cloud auth login`
#  INFO interface: Machine: cli-command ["box"]
#  INFO interface: Machine: cli-command ["cloud"]
#  INFO interface: Machine: cli-command ["destroy"]
#  INFO interface: Machine: cli-command ["global-status"]
#  INFO interface: Machine: cli-command ["halt"]
#  INFO interface: Machine: cli-command ["help"]
#  INFO interface: Machine: cli-command ["init"]
#  INFO interface: Machine: cli-command ["login"]
#  INFO interface: Machine: cli-command ["package"]
#  INFO interface: Machine: cli-command ["plugin"]
#  INFO interface: Machine: cli-command ["port"]
#  INFO interface: Machine: cli-command ["powershell"]
#  INFO interface: Machine: cli-command ["provision"]
#  INFO interface: Machine: cli-command ["push"]
#  INFO interface: Machine: cli-command ["rdp"]
#  INFO interface: Machine: cli-command ["reload"]
#  INFO interface: Machine: cli-command ["resume"]
#  INFO interface: Machine: cli-command ["snapshot"]
#  INFO interface: Machine: cli-command ["ssh"]
#  INFO interface: Machine: cli-command ["ssh-config"]
#  INFO interface: Machine: cli-command ["status"]
#  INFO interface: Machine: cli-command ["suspend"]
#  INFO interface: Machine: cli-command ["up"]
#  INFO interface: Machine: cli-command ["upload"]
#  INFO interface: Machine: cli-command ["validate"]
#  INFO interface: Machine: cli-command ["version"]
#  INFO interface: Machine: cli-command ["winrm"]
#  INFO interface: Machine: cli-command ["winrm-config"]
#  INFO interface: info: Usage: vagrant [options] <command> [<args>]
# 
#     -v, --version                    Print the version and exit.
#     -h, --help                       Print this help.
# 
# Common commands:
#      box             manages boxes: installation, removal, etc.
#      cloud           manages everything related to Vagrant Cloud
#      destroy         stops and deletes all traces of the vagrant machine
#      global-status   outputs status Vagrant environments for this user
#      halt            stops the vagrant machine
#      help            shows the help for a subcommand
#      init            initializes a new Vagrant environment by creating a Vagrantfile
#      login           
#      package         packages a running vagrant environment into a box
#      plugin          manages plugins: install, uninstall, update, etc.
#      port            displays information about guest port mappings
#      powershell      connects to machine via powershell remoting
#      provision       provisions the vagrant machine
#      push            deploys code in this environment to a configured destination
#      rdp             connects to machine via RDP
#      reload          restarts vagrant machine, loads new Vagrantfile configuration
#      resume          resume a suspended vagrant machine
#      snapshot        manages snapshots: saving, restoring, etc.
#      ssh             connects to machine via SSH
#      ssh-config      outputs OpenSSH valid configuration to connect to the machine
#      status          outputs status of the vagrant machine
#      suspend         suspends the machine
#      up              starts and provisions the vagrant environment
#      upload          upload to machine via communicator
#      validate        validates the Vagrantfile
#      version         prints current and latest Vagrant version
#      winrm           executes commands on a machine via WinRM
#      winrm-config    outputs WinRM configuration to connect to the machine
# 
# For help on any individual command run `vagrant COMMAND -h`
# 
# Additional subcommands are available, but are either more advanced
# or not commonly used. To see all subcommands, run the command
# `vagrant list-commands`.
# 
# Usage: vagrant [options] <command> [<args>]
# 
#     -v, --version                    Print the version and exit.
#     -h, --help                       Print this help.
# 
# Common commands:
#      box             manages boxes: installation, removal, etc.
#      cloud           manages everything related to Vagrant Cloud
#      destroy         stops and deletes all traces of the vagrant machine
#      global-status   outputs status Vagrant environments for this user
#      halt            stops the vagrant machine
#      help            shows the help for a subcommand
#      init            initializes a new Vagrant environment by creating a Vagrantfile
#      login           
#      package         packages a running vagrant environment into a box
#      plugin          manages plugins: install, uninstall, update, etc.
#      port            displays information about guest port mappings
#      powershell      connects to machine via powershell remoting
#      provision       provisions the vagrant machine
#      push            deploys code in this environment to a configured destination
#      rdp             connects to machine via RDP
#      reload          restarts vagrant machine, loads new Vagrantfile configuration
#      resume          resume a suspended vagrant machine
#      snapshot        manages snapshots: saving, restoring, etc.
#      ssh             connects to machine via SSH
#      ssh-config      outputs OpenSSH valid configuration to connect to the machine
#      status          outputs status of the vagrant machine
#      suspend         suspends the machine
#      up              starts and provisions the vagrant environment
#      upload          upload to machine via communicator
#      validate        validates the Vagrantfile
#      version         prints current and latest Vagrant version
#      winrm           executes commands on a machine via WinRM
#      winrm-config    outputs WinRM configuration to connect to the machine
# 
# For help on any individual command run `vagrant COMMAND -h`
# 
# Additional subcommands are available, but are either more advanced
# or not commonly used. To see all subcommands, run the command
# `vagrant list-commands`.
#  INFO environment: Running hook: environment_unload
#  INFO host: Autodetecting host type for [#<Vagrant::Environment: D:/misc/vagrant/vagrant-multihost>]
# DEBUG host: Trying: alt
# DEBUG host: Trying: arch
# DEBUG host: Trying: darwin
# DEBUG host: Trying: freebsd
# DEBUG host: Trying: gentoo
# DEBUG host: Trying: redhat
# DEBUG host: Trying: slackware
# DEBUG host: Trying: suse
# DEBUG host: Trying: bsd
# DEBUG host: Trying: linux
# DEBUG host: Trying: null
# DEBUG host: Trying: windows
#  INFO host: Detected: windows!
#  INFO runner: Preparing hooks for middleware sequence...
#  INFO runner: 1 hooks defined.
#  INFO runner: Running action: environment_unload #<Vagrant::Action::Builder:0x000000000483d7a0>

Configuring Vagrant

Remember that Vagrant itself is a native Windows program, so we need a few workarounds to make it work under Cygwin.

Set the Vagrant home directory

Include export VAGRANT_HOME="D:\misc\vagrant\.vagrant.d" (yes, it has to be in Windows path syntax) in your ~/.bashrc; this variable controls where Vagrant stores its global state. The Vagrant home directory is where things such as boxes are stored, so it can become quite large on disk.
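One possible way to append it (a sketch; adjust if you maintain your ~/.bashrc differently):

echo 'export VAGRANT_HOME="D:\misc\vagrant\.vagrant.d"' >> ~/.bashrc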

source ~/.bashrc
env | grep VAGRANT
# VAGRANT_HOME=D:\misc\vagrant\.vagrant.d
Create wrapper scripts around ansible commands

Because we are using the Cygwin version of Ansible, commands like ansible-playbook, which the Windows version of Vagrant calls internally during provisioning, cannot be invoked directly by the vagrant executable because they are Python scripts. So we have to create corresponding Windows .BAT files which call the underlying Python script via the Cygwin bash shell (a quick resolution check follows the two variants below).

  • If you installed Ansible in a virtualenv
    cat > /misc/.venv/bin/cygwin-shim.bat <<-"_EOF"
    @echo off
    set COMMAND=%1
    
    REM Root of the Cygwin installation (installed to the root of the D: drive in this setup)
    set CYGWIN=D:
    
    REM The Cygwin bash shell used to run the wrapped command
    set SH=%CYGWIN%\bin\bash.exe
    
    if not exist "%SH%" (
        echo cygwin's bash.exe not found. Did you delete %CYGWIN% ?
        exit /b 255
    )
    
    "%SH%" -c "[[ -x "%COMMAND%" ]]"
    if errorlevel 1 (
        echo %COMMAND% not found. Did you uninstall it ?
        exit /b 255
    )
    
    "%SH%" -c "%*"
    _EOF
    
    cat > /misc/.venv/bin/ansible-playbook.bat <<-"_EOF"
    @echo off
    cygwin-shim.bat /misc/.venv/bin/ansible-playbook %*
    _EOF
    
    cat > /misc/.venv/bin/ansible-galaxy.bat <<-"_EOF"
    @echo off
    cygwin-shim.bat /misc/.venv/bin/ansible-galaxy %*
    _EOF
    
  • If you installed Ansible in your main Cygwin installation
    cat > /usr/local/bin/cygwin-shim.bat <<-"_EOF"
    @echo off
    set COMMAND=%1
    
    REM Root of the Cygwin installation (installed to the root of the D: drive in this setup)
    set CYGWIN=D:
    
    REM The Cygwin bash shell used to run the wrapped command
    set SH=%CYGWIN%\bin\bash.exe
    
    if not exist "%SH%" (
        echo cygwin's bash.exe not found. Did you delete %CYGWIN% ?
        exit /b 255
    )
    
    "%SH%" -c "[[ -x "%COMMAND%" ]]"
    if errorlevel 1 (
        echo %COMMAND% not found. Did you uninstall it ?
        exit /b 255
    )
    
    "%SH%" -c "%*"
    _EOF
    
    cat > /usr/local/bin/ansible-playbook.bat <<-"_EOF"
    @echo off
    cygwin-shim.bat /bin/ansible-playbook %*
    _EOF
    
    cat > /usr/local/bin/ansible-galaxy.bat <<-"_EOF"
    @echo off
    cygwin-shim.bat /bin/ansible-galaxy %*
    _EOF
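To confirm that the Windows side can resolve the wrappers, call one of them through cmd (this assumes the directory containing the .BAT files is on the PATH that vagrant.exe inherits):

cmd /c ansible-playbook.bat --version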
    

Installing Terraform

choco info terraform
# Chocolatey v0.10.11
# terraform 0.11.8 [Approved]
#  Title: Terraform | Published: 30.08.2018
#  Package approved as a trusted package on Aug 30 2018 09:28:14.
#  Package testing status: Passing on Aug 30 2018 08:53:20.
#  Number of Downloads: 44950 | Downloads for this version: 4165
#  Package url
#  Chocolatey Package Source: https://github.com/jamestoyer/chocolatey-packages
#  Package Checksum: 'nERh3zOgB1RZw9ciD7bePXlkG56zM64zHoz1V3eQfDBJ81qD957D4QGOTPMfpejQNSTzrDlyqI64G7fI+2TFdw==' (SHA512)
#  Tags: terraform hashicorp
#  Software Site: http://www.terraform.io/
#  Software License: https://github.com/hashicorp/terraform/blob/master/LICENSE
#  Software Source: https://github.com/hashicorp/terraform
#  Documentation: https://www.terraform.io/docs/index.html
#  Issues: https://github.com/hashicorp/terraform/issues
#  Summary: Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
#  Description: Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
#   
#   The key features of Terraform are:
#   
#   * **Infrastructure as Code**: Infrastructure is described using a high-level configuration syntax. This allows a blueprint of your datacenter to be versioned and treated as you would any other code. Additionally, infrastructure can be shared and re-used.
#   * **Execution Plans**: Terraform has a "planning" step where it generates an *execution plan*. The execution plan shows what Terraform will do when you call apply. This lets you avoid any surprises when Terraform manipulates infrastructure.
#   * **Resource Graph**: Terraform builds a graph of all your resources, and parallelizes the creation and modification of any non-dependent resources. Because of this, Terraform builds infrastructure as efficiently as possible, and operators get insight into dependencies in their infrastructure.
#   * **Change Automation**: Complex changesets can be applied to your infrastructure with minimal human interaction. With the previously mentioned execution plan and resource graph, you know exactly what Terraform will change and in what order, avoiding many possible human errors.
#   
#   For more information, see the [introduction section](http://www.terraform.io/intro) of the Terraform website.
#  Release Notes: ## 0.11.8 (August 15, 2018)
#   
#   NEW FEATURES:
#   
#   * **New `remote` backend**: Inital release of the `remote` backend for use with Terraform Enterprise and Private Terraform Enterprise [[#18596](https://github.com/hashicorp/terraform/issues/18596)] 
#   
#   IMPROVEMENTS:
#   
#   * cli: display workspace name in apply and destroy commands if not default ([#18253](https://github.com/hashicorp/terraform/issues/18253))
#   * cli: Remove error on empty outputs when `-json` is set ([#11721](https://github.com/hashicorp/terraform/issues/11721))
#   * helper/schema: Resources have a new `DeprecationMessage` property that can be set to a string, allowing full resources to be deprecated ([#18286](https://github.com/hashicorp/terraform/issues/18286))
#   * backend/s3: Allow fallback to session-derived credentials (e.g. session via `AWS_PROFILE` environment variable and shared configuration) ([#17901](https://github.com/hashicorp/terraform/issues/17901))
#   * backend/s3: Allow usage of `AWS_EC2_METADATA_DISABLED` environment variable ([#17901](https://github.com/hashicorp/terraform/issues/17901))
#   
#   BUG FIXES:
#   
#   * config: The `rsadecrypt` interpolation function will no longer include the private key in an error message if it cannot be processed. ([#18333](https://github.com/hashicorp/terraform/issues/18333))
#   * provisioner/habitat: add missing space for service url ([#18400](https://github.com/hashicorp/terraform/issues/18400))
#   * backend/s3: Skip extraneous EC2 metadata redirect ([#18570](https://github.com/hashicorp/terraform/issues/18570))
#     
#   ## Previous Releases
#   For more information on previous releases, check out the changelog on [GitHub](https://github.com/hashicorp/terraform/blob/v0.11.8/CHANGELOG.md).
# 
# 1 packages found.
sudo choco install terraform
# Chocolatey v0.10.11
# Installing the following packages:
# terraform
# By installing you accept licenses for the packages.
# Progress: Downloading terraform 0.11.8... 100%
# 
# terraform v0.11.8 [Approved]
# terraform package files install completed. Performing other installation steps.
# The package terraform wants to run 'chocolateyInstall.ps1'.
# Note: If you don't run this script, the installation will fail.
# Note: To confirm automatically next time, use '-y' or consider:
# choco feature enable -n allowGlobalConfirmation
# Do you want to run the script?([Y]es/[N]o/[P]rint): 
# Removing old terraform plugins
# Downloading terraform 64 bit
#   from 'https://releases.hashicorp.com/terraform/0.11.8/terraform_0.11.8_windows_amd64.zip'
# Progress: 100% - Completed download of C:\Users\VZE\AppData\Local\Temp\chocolatey\terraform\0.11.8\terraform_0.11.8_windows_amd64.zip (17.13 MB).
# Download of terraform_0.11.8_windows_amd64.zip (17.13 MB) completed.
# Hashes match.
# Extracting C:\Users\VZE\AppData\Local\Temp\chocolatey\terraform\0.11.8\terraform_0.11.8_windows_amd64.zip to C:\ProgramData\chocolatey\lib\terraform\tools...
# C:\ProgramData\chocolatey\lib\terraform\tools
#  ShimGen has successfully created a shim for terraform.exe
#  The install of terraform was successful.
#   Software installed to 'C:\ProgramData\chocolatey\lib\terraform\tools'
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
terraform --version
# Terraform v0.11.8
# 
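
As an optional smoke test (not part of the original walkthrough), Terraform 0.11 will happily evaluate a configuration that contains only an output, so you can verify the binary works from Cygwin without any provider credentials; the directory name below is just an example:

mkdir -p /misc/terraform/smoke-test && cd /misc/terraform/smoke-test
cat > main.tf <<-"_EOF"
# minimal configuration - a single output, no providers or resources
output "hello" {
  value = "Terraform works under Cygwin"
}
_EOF
terraform init
terraform apply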

Installing docker client

choco info docker
# Chocolatey v0.10.11
# docker 18.06.1 [Approved] Downloads cached for licensed users
#  Title: Docker CLI | Published: 22.08.2018
#  Package approved as a trusted package on Aug 22 2018 18:43:38.
#  Package testing status: Passing on Aug 22 2018 10:52:43.
#  Number of Downloads: 117777 | Downloads for this version: 8930
#  Package url
#  Chocolatey Package Source: https://github.com/ahmetb/docker-chocolatey
#  Package Checksum: '+c8k8zlfmVQBQVl82LuUEo0RLDIUlACUGbP4YRRo5KRiWE7ne/ojPoR+P3PdropRRPgXUDVXov86s4ZxT/XLqA==' (SHA512)
#  Tags: docker devops containers
#  Software Site: https://www.docker.com/
#  Software License: http://www.apache.org/licenses/LICENSE-2.0
#  Software Source: https://github.com/docker/docker
#  Documentation: https://docs.docker.com/engine/getstarted/
#  Mailing List: https://github.com/docker/docker#talking-to-other-docker-users-and-contributors
#  Issues: https://github.com/docker/docker/issues
#  Summary: Docker is an open platform for developers and sysadmins to build, ship, and run distributed applications.
#  Description: Docker is an open platform for developers and sysadmins to build, ship, and run distributed applications. This package contains the docker client for Windows and not the Docker engine to run containers on Windows hosts.
#   
#   NOTE: Docker client for Windows is not a Docker container engine for Windows. You can use this to manage your Linux and Windows machines running as Docker hosts. You might want to have a look at the "docker-for-windows" package.
#  Release Notes: Updated to the latest Docker version.
# 
# 1 packages found.
sudo choco install docker
# Chocolatey v0.10.11
# Installing the following packages:
# docker
# By installing you accept licenses for the packages.
# Progress: Downloading docker 18.06.1... 100%
# 
# docker v18.06.1 [Approved]
# docker package files install completed. Performing other installation steps.
# Downloading docker 64 bit
#   from 'https://github.com/StefanScherer/docker-cli-builder/releases/download/18.06.1-ce/docker.exe'
# Progress: 100% - Completed download of C:\ProgramData\chocolatey\lib\docker\tools\docker.exe (52.45 MB).
# Download of docker.exe (52.45 MB) completed.
# C:\ProgramData\chocolatey\lib\docker\tools\docker.exe
#  ShimGen has successfully created a shim for docker.exe
#  The install of docker was successful.
#   Software install location not explicitly set, could be in package or
#   default install location if installer.
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
docker --version
# Docker version 18.06.1-ce, build 67f9a391
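
Keep in mind that this package is only the Docker client; it needs a Docker engine to talk to. A minimal sketch, assuming you already have a Linux Docker host reachable on the network (the hostname and port below are placeholders), is to point DOCKER_HOST at it; the docker-machine tool installed next can manage this for you automatically:

export DOCKER_HOST=tcp://docker-host.vzell.de:2375
docker info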

Installing docker-machine

choco info docker-machine
# Chocolatey v0.10.11
# docker-machine 0.15.0 [Approved]
#  Title: docker-machine | Published: 16.06.2018
#  Package approved as a trusted package on Jul 26 2018 20:11:38.
#  Package testing status: Passing on Jun 16 2018 10:38:39.
#  Number of Downloads: 36125 | Downloads for this version: 4615
#  Package url
#  Chocolatey Package Source: https://github.com/silarsis/choco-docker-machine
#  Package Checksum: 'zZFBlqf8iG2TuRrCx2oTI9/xp1K1Y2LsL1Alran5wSlrDRGU0U/GSeBHoINdZyDUf+M/BiF0y26EDfPA8vwgcg==' (SHA512)
#  Tags: docker-machine docker
#  Software Site: https://github.com/docker/machine
#  Software License: https://github.com/docker/machine/blob/master/LICENSE
#  Software Source: https://github.com/docker/machine
#  Documentation: https://docs.docker.com/machine/
#  Issues: https://github.com/docker/machine/issues
#  Summary: Machine management for a container-centric world
#  Description: Machine lets you create Docker hosts on your computer, on cloud providers, and inside your own data center. It creates servers, installs Docker on them, then configures the Docker client to talk to them.
#  Release Notes: https://github.com/docker/machine/releases
# 
# 1 packages found.
sudo choco install docker-machine
# Chocolatey v0.10.11
# Installing the following packages:
# docker-machine
# By installing you accept licenses for the packages.
# Progress: Downloading docker-machine 0.15.0... 100%
# 
# docker-machine v0.15.0 [Approved]
# docker-machine package files install completed. Performing other installation steps.
# Removing old docker-machine plugins
# C:\ProgramData\chocolatey\lib\docker-machine\bin
# Downloading docker-machine 64 bit
#   from 'https://github.com/docker/machine/releases/download/v0.15.0/docker-machine-Windows-x86_64.exe'
# Progress: 100% - Completed download of C:\ProgramData\chocolatey\lib\docker-machine\bin\docker-machine.exe (27.25 MB).
# Download of docker-machine.exe (27.25 MB) completed.
# Hashes match.
# C:\ProgramData\chocolatey\lib\docker-machine\bin\docker-machine.exe
#  ShimGen has successfully created a shim for docker-machine.exe
#  The install of docker-machine was successful.
#   Software install location not explicitly set, could be in package or
#   default install location if installer.
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
docker-machine --version
# docker-machine.exe version 0.15.0, build b48dc28d
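
A sketch of typical docker-machine usage (assuming VirtualBox is installed, as elsewhere in this guide): create a local boot2docker VM and point the Docker client at it via the environment variables that docker-machine env emits:

docker-machine create --driver virtualbox default
eval "$(docker-machine env --shell bash default)"
docker version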

Installing docker-compose

choco info docker-compose
# Chocolatey v0.10.11
# docker-compose 1.22.0 [Approved] Downloads cached for licensed users
#  Title: docker-compose | Published: 18.07.2018
#  Package approved as a trusted package on Jul 18 2018 06:57:05.
#  Package testing status: Passing on Jul 18 2018 06:53:30.
#  Number of Downloads: 47827 | Downloads for this version: 4925
#  Package url
#  Chocolatey Package Source: https://github.com/StefanScherer/choco-docker-compose
#  Package Checksum: 'MhEG7Vkrf5R00YvoDRjR79cf4NBc7Jds+D6i0ZXhJCgQ6X5/U+w7mpbkc9PnJSVw9kuZ2OhkrSfFgmBlgdIhjw==' (SHA512)
#  Tags: Docker Compose docker-compose
#  Software Site: https://github.com/docker/compose
#  Software License: https://github.com/docker/compose/blob/master/LICENSE
#  Documentation: https://docs.docker.com/compose/
#  Issues: https://github.com/docker/compose/issues
#  Summary: Define and run multi-container applications with Docker.
#  Description: Compose is a tool for defining and running multi-container applications with Docker. With Compose, you define a multi-container application in a single file, then spin your application up in a single command which does everything that needs to be done to get it running.
# 
# 1 packages found.
sudo choco install docker-compose
# Chocolatey v0.10.11
# Installing the following packages:
# docker-compose
# By installing you accept licenses for the packages.
# Progress: Downloading docker-compose 1.22.0... 100%
# 
# docker-compose v1.22.0 [Approved]
# docker-compose package files install completed. Performing other installation steps.
# Downloading docker-compose 64 bit
#   from 'https://github.com/docker/compose/releases/download/1.22.0/docker-compose-Windows-x86_64.exe'
# Progress: 100% - Completed download of C:\ProgramData\chocolatey\lib\docker-compose\tools\docker-compose.exe (7.21 MB).
# Download of docker-compose.exe (7.21 MB) completed.
# C:\ProgramData\chocolatey\lib\docker-compose\tools\docker-compose.exe
#  ShimGen has successfully created a shim for docker-compose.exe
#  The install of docker-compose was successful.
#   Software install location not explicitly set, could be in package or
#   default install location if installer.
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
docker-compose --version
# docker-compose version 1.22.0, build f46880fe
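
A minimal sketch of using the Compose file format (assumes a reachable Docker engine, for example the docker-machine VM created above; the image and port mapping are arbitrary examples):

mkdir -p /misc/docker/compose-test && cd /misc/docker/compose-test
cat > docker-compose.yml <<-"_EOF"
version: '3'
services:
  web:
    image: nginx:alpine
    ports:
      - "8080:80"
_EOF
docker-compose up -d
docker-compose ps
docker-compose down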

Installing kubectl

choco info kubernetes-cli
# Chocolatey v0.10.11
# kubernetes-cli 1.12.1 [Approved]
#  Title: Kubernetes Command Line Interface (CLI) | Published: 05.10.2018
#  Package approved as a trusted package on Okt 06 2018 01:32:52.
#  Package testing status: Passing on Okt 06 2018 00:39:52.
#  Number of Downloads: 77641 | Downloads for this version: 4360
#  Package url
#  Chocolatey Package Source: https://github.com/chocolatey/chocolatey-coreteampackages/tree/master/automatic/kubernetes-cli
#  Package Checksum: 'nie/kC4gN1mvPuf6px7blOBavLlPPF8VR2we2uTCZaT5yzfGx07PcvtepZvc2OBsi++UyXNh8N5Q9Awn9/Uvhw==' (SHA512)
#  Tags: kubernetes kubectl docker rkt containers devops cli foss
#  Software Site: https://kubernetes.io/
#  Software License: https://github.com/kubernetes/kubernetes/blob/master/LICENSE
#  Software Source: https://github.com/kubernetes/kubernetes
#  Documentation: https://kubernetes.io/docs
#  Issues: https://github.com/kubernetes/kubernetes/issues
#  Summary: Kubernetes is an open source system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications.
#  Description: ## Production-Grade Container Orchestration
#   ### Automated container deployment, scaling, and management
#   
#   Kubernetes is an open source system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications. It groups containers that make up an application into logical units for easy management and discovery.
#   
#   Kubernetes builds upon 15 years of experience of running production workloads at Google, combined with best-of-breed ideas and practices from the community.
#   
#   #### Planet Scale
#   Designed on the same principles that allows Google to run billions of containers a week, Kubernetes can scale without increasing your ops team.
#   
#   #### Never Outgrow
#   Whether testing locally or running a global enterprise, Kubernetes flexibility grows with you to deliver your applications consistently and easily no matter how complex your need is.
#   
#   #### Run Anywhere
#   Kubernetes is open source giving you the freedom to take advantage of on-premises, hybrid, or public cloud infrastructure, letting you effortlessly move workloads to where it matters to you.
#   
#   
#   ### Features
#   #### Automatic binpacking
#   Automatically places containers based on their resource requirements and other constraints, while not sacrificing availability. Mix critical and best-effort workloads in order to drive up utilization and save even more resources.
#   
#   #### Self-healing
#   Restarts containers that fail, replaces and reschedules containers when nodes die, kills containers that don't respond to your user-defined health check, and doesn't advertise them to clients until they are ready to serve.
#   
#   #### Horizontal scaling
#   Scale your application up and down with a simple command, with a UI, or automatically based on CPU usage.
#   
#   #### Service discovery and load balancing
#   No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives containers their own IP addresses and a single DNS name for a set of containers, and can load-balance across them.
#   
#   #### Automated rollouts and rollbacks
#   Kubernetes progressively rolls out changes to your application or its configuration, while monitoring application health to ensure it doesn't kill all your instances at the same time. If something goes wrong, Kubernetes will rollback the change for you. Take advantage of a growing ecosystem of deployment solutions.
#   
#   #### Secret and configuration management
#   Deploy and update secrets and application configuration without rebuilding your image and without exposing secrets in your stack configuration.
#   
#   #### Storage orchestration
#   Automatically mount the storage system of your choice, whether from local storage, a public cloud provider such as GCP or AWS, or a network storage system such as NFS, iSCSI, Gluster, Ceph, Cinder, or Flocker.
#   
#   #### Batch execution
#   In addition to services, Kubernetes can manage your batch and CI workloads, replacing containers that fail, if desired.
#  Release Notes: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.1.md#v1121
# 
# 1 packages found.
sudo choco install kubernetes-cli
# Chocolatey v0.10.11
# Installing the following packages:
# kubernetes-cli
# By installing you accept licenses for the packages.
# Progress: Downloading kubernetes-cli 1.12.1... 100%
# 
# kubernetes-cli v1.12.1 [Approved]
# kubernetes-cli package files install completed. Performing other installation steps.
# Extracting 64-bit C:\ProgramData\chocolatey\lib\kubernetes-cli\tools\kubernetes-client-windows-amd64.tar.gz to C:\ProgramData\chocolatey\lib\kubernetes-cli\tools...
# C:\ProgramData\chocolatey\lib\kubernetes-cli\tools
# Extracting 64-bit C:\ProgramData\chocolatey\lib\kubernetes-cli\tools\kubernetes-client-windows-amd64.tar to C:\ProgramData\chocolatey\lib\kubernetes-cli\tools...
# C:\ProgramData\chocolatey\lib\kubernetes-cli\tools
#  ShimGen has successfully created a shim for kubectl.exe
#  The install of kubernetes-cli was successful.
#   Software installed to 'C:\ProgramData\chocolatey\lib\kubernetes-cli\tools'
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
kubectl version
# Client Version: version.Info{Major:"1", Minor:"12", GitVersion:"v1.12.1", GitCommit:"4ed3216f3ec431b140b1d899130a69fc671678f4", GitTreeState:"clean", BuildDate:"2018-10-05T16:46:06Z", GoVersion:"go1.10.4", Compiler:"gc", Platform:"windows/amd64"}
# Unable to connect to the server: dial tcp [::1]:8080: connectex: No connection could be made because the target machine actively refused it.
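
The "Unable to connect" message is expected at this point: without a kubeconfig, kubectl falls back to localhost:8080, and no cluster is running there yet. Once a cluster exists (for example the minikube VM set up in the next step), its context is written to ~/.kube/config and kubectl picks it up automatically. You can inspect the (currently empty) client configuration with:

kubectl config view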

Installing minikube

choco info minikube
# Chocolatey v0.10.11
# Minikube 0.30.0 [Approved]
#  Title: Minikube: Run Kubernetes locally | Published: 05.10.2018
#  Package approved as a trusted package on Okt 06 2018 01:35:10.
#  Package testing status: Passing on Okt 06 2018 00:39:54.
#  Number of Downloads: 39735 | Downloads for this version: 2603
#  Package url
#  Chocolatey Package Source: https://github.com/chocolatey/chocolatey-coreteampackages/tree/master/automatic/minikube
#  Package Checksum: 'AbLsm972Ol06bA2r3SmqhXet37x9eU1QRn619cQRpflyMHeQzxvshhwdwrcsVcItMcvjLLXTfJWEgTS3M1pNNQ==' (SHA512)
#  Tags: kubernetes minikube docker rkt containers devops cli foss
#  Software Site: https://kubernetes.io/
#  Software License: https://github.com/kubernetes/minikube/blob/master/LICENSE
#  Software Source: https://github.com/kubernetes/minikube
#  Documentation: https://kubernetes.io/docs
#  Issues: https://github.com/kubernetes/minikube/issues
#  Summary: Minikube is a tool that makes it easy to run Kubernetes locally.
#  Description: Minikube is a tool that makes it easy to run Kubernetes locally.
#   
#   Minikube runs a single-node Kubernetes cluster inside a VM on your laptop for users looking to try out Kubernetes or develop with it day-to-day.
#   
#   ### Minikube Features
#   Minikube supports Kubernetes features such as:
#   * DNS
#   * NodePorts
#   * ConfigMaps and Secrets
#   * Dashboards
#   * Container Runtime: Docker, rkt and CRI-O
#   * Enabling CNI (Container Network Interface)
#   * Ingress
#   
#   #### Note: Windows support is limited to 64bit systems.
#  Release Notes: https://github.com/kubernetes/minikube/blob/v0.30.0/CHANGELOG.md
# 
# 1 packages found.
sudo choco install minikube
# Chocolatey v0.10.11
# Installing the following packages:
# minikube
# By installing you accept licenses for the packages.
# Progress: Downloading Minikube 0.30.0... 100%
# 
# Minikube v0.30.0 [Approved]
# minikube package files install completed. Performing other installation steps.
#  ShimGen has successfully created a shim for minikube.exe
#  The install of minikube was successful.
#   Software install location not explicitly set, could be in package or
#   default install location if installer.
# 
# Chocolatey installed 1/1 packages. 
#  See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).
minikube version
# minikube version: v0.30.0
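
A sketch of starting a local single-node cluster with the VirtualBox driver (not part of the original walkthrough; the first run downloads the minikube ISO and creates a VM, so it takes a while):

minikube start --vm-driver virtualbox
kubectl get nodes
minikube stop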

Upgrading choco installed packages

sudo choco upgrade all
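
To run the upgrade unattended, append -y so choco does not ask for confirmation for every package:

sudo choco upgrade all -y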

1.7 Installing "virtualenv" with pip

This step is optional but useful if you want to keep Ansible in its own Python virtual environment. If you decide not to use it, install Ansible directly into your main Cygwin installation in the next step instead.

type pip2
# pip2 is /usr/bin/pip2
pip2 install virtualenv
# Collecting virtualenv
#   Downloading https://files.pythonhosted.org/packages/b6/30/96a02b2287098b23b875bc8c2f58071c35d2efe84f747b64d523721dc2b5/virtualenv-16.0.0-py2.py3-none-any.whl (1.9MB)
#     100% |████████████████████████████████| 1.9MB 457kB/s 
# Installing collected packages: virtualenv
# Successfully installed virtualenv-16.0.0
# You are using pip version 9.0.1, however version 18.0 is available.
# You should consider upgrading via the 'pip install --upgrade pip' command.
virtualenv --version
# 16.0.0

1.8 Installing Ansible in "virtualenv"

mkdir -p /misc/.venv && cd /misc

Create a python virtual environment.

virtualenv .venv
# New python executable in /misc/.venv/bin/python2
# Also creating executable in /misc/.venv/bin/python
# Installing setuptools, pip, wheel...done.

Activate the virtual environment.

source .venv/bin/activate

The prompt changes from VZE:/misc> to (.venv) VZE:/misc>

To exit your virtualenv just type deactivate.

type pip
# pip is /misc/.venv/bin/pip
pip install ansible
# Collecting ansible
#   Downloading https://files.pythonhosted.org/packages/97/1b/13fe0d24588db0426c41be442d6d74bc861a72ed1338dfbce94e8082148d/ansible-2.6.3.tar.gz (10.7MB)
#     100% |████████████████████████████████| 10.8MB 925kB/s 
# Collecting jinja2 (from ansible)
#   Downloading https://files.pythonhosted.org/packages/7f/ff/ae64bacdfc95f27a016a7bed8e8686763ba4d277a78ca76f32659220a731/Jinja2-2.10-py2.py3-none-any.whl (126kB)
#     100% |████████████████████████████████| 133kB 2.2MB/s 
# Collecting PyYAML (from ansible)
#   Downloading https://files.pythonhosted.org/packages/9e/a3/1d13970c3f36777c583f136c136f804d70f500168edc1edea6daa7200769/PyYAML-3.13.tar.gz (270kB)
#     100% |████████████████████████████████| 276kB 2.3MB/s 
# Collecting paramiko (from ansible)
#   Downloading https://files.pythonhosted.org/packages/3e/db/cb7b6656e0e7387637ce850689084dc0b94b44df31cc52e5fc5c2c4fd2c1/paramiko-2.4.1-py2.py3-none-any.whl (194kB)
#     100% |████████████████████████████████| 194kB 7.4MB/s 
# Collecting cryptography (from ansible)
#   Downloading https://files.pythonhosted.org/packages/22/21/233e38f74188db94e8451ef6385754a98f3cad9b59bedf3a8e8b14988be4/cryptography-2.3.1.tar.gz (449kB)
#     100% |████████████████████████████████| 450kB 2.3MB/s 
# Requirement already satisfied: setuptools in ./.venv/lib/python2.7/site-packages (from ansible) (40.2.0)
# Collecting MarkupSafe>=0.23 (from jinja2->ansible)
#   Downloading https://files.pythonhosted.org/packages/4d/de/32d741db316d8fdb7680822dd37001ef7a448255de9699ab4bfcbdf4172b/MarkupSafe-1.0.tar.gz
# Collecting bcrypt>=3.1.3 (from paramiko->ansible)
#   Downloading https://files.pythonhosted.org/packages/f3/ec/bb6b384b5134fd881b91b6aa3a88ccddaad0103857760711a5ab8c799358/bcrypt-3.1.4.tar.gz (42kB)
#     100% |████████████████████████████████| 51kB 2.3MB/s 
# Collecting pyasn1>=0.1.7 (from paramiko->ansible)
#   Downloading https://files.pythonhosted.org/packages/d1/a1/7790cc85db38daa874f6a2e6308131b9953feb1367f2ae2d1123bb93a9f5/pyasn1-0.4.4-py2.py3-none-any.whl (72kB)
#     100% |████████████████████████████████| 81kB 2.6MB/s 
# Collecting pynacl>=1.0.1 (from paramiko->ansible)
#   Downloading https://files.pythonhosted.org/packages/08/19/cf56e60efd122fa6d2228118a9b345455b13ffe16a14be81d025b03b261f/PyNaCl-1.2.1.tar.gz (3.3MB)
#     100% |████████████████████████████████| 3.3MB 863kB/s 
# Collecting idna>=2.1 (from cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/4b/2a/0276479a4b3caeb8a8c1af2f8e4355746a97fab05a372e4a2c6a6b876165/idna-2.7-py2.py3-none-any.whl (58kB)
#     100% |████████████████████████████████| 61kB 2.2MB/s 
# Collecting asn1crypto>=0.21.0 (from cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/ea/cd/35485615f45f30a510576f1a56d1e0a7ad7bd8ab5ed7cdc600ef7cd06222/asn1crypto-0.24.0-py2.py3-none-any.whl (101kB)
#     100% |████████████████████████████████| 102kB 1.9MB/s 
# Collecting six>=1.4.1 (from cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl
# Collecting cffi!=1.11.3,>=1.7 (from cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/e7/a7/4cd50e57cc6f436f1cc3a7e8fa700ff9b8b4d471620629074913e3735fb2/cffi-1.11.5.tar.gz (438kB)
#     100% |████████████████████████████████| 440kB 1.1MB/s 
# Collecting enum34 (from cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/c5/db/e56e6b4bbac7c4a06de1c50de6fe1ef3810018ae11732a50f15f62c7d050/enum34-1.1.6-py2-none-any.whl
# Collecting ipaddress (from cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/fc/d0/7fc3a811e011d4b388be48a0e381db8d990042df54aa4ef4599a31d39853/ipaddress-1.0.22-py2.py3-none-any.whl
# Collecting pycparser (from cffi!=1.11.3,>=1.7->cryptography->ansible)
#   Downloading https://files.pythonhosted.org/packages/8c/2d/aad7f16146f4197a11f8e91fb81df177adcc2073d36a17b1491fd09df6ed/pycparser-2.18.tar.gz (245kB)
#     100% |████████████████████████████████| 256kB 2.2MB/s 
# Building wheels for collected packages: ansible, PyYAML, cryptography, MarkupSafe, bcrypt, pynacl, cffi, pycparser
#   Running setup.py bdist_wheel for ansible ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/aa/55/03/f33988e2533ea04056a2bf2636a5bf645bd4a3d2665151d3f2
#   Running setup.py bdist_wheel for PyYAML ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/ad/da/0c/74eb680767247273e2cf2723482cb9c924fe70af57c334513f
#   Running setup.py bdist_wheel for cryptography ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/fb/6a/1c/07080dc38a589b201a0bb17a4a148cb528ee0e323f6f68c254
#   Running setup.py bdist_wheel for MarkupSafe ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/33/56/20/ebe49a5c612fffe1c5a632146b16596f9e64676768661e4e46
#   Running setup.py bdist_wheel for bcrypt ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/4a/80/07/163d01f8b5d52ed815960b532dd7aece342bccbeb2b47361fd
#   Running setup.py bdist_wheel for pynacl ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/22/ad/26/c5592436ff0cad6f4f629e8267cab5ff5795ef0acb8234972e
#   Running setup.py bdist_wheel for cffi ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/e6/9e/cd/9f0b900a715af094d5a08cc50230728c08ef745ffb195c67e8
#   Running setup.py bdist_wheel for pycparser ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/c0/a1/27/5ba234bd77ea5a290cbf6d675259ec52293193467a12ef1f46
# Successfully built ansible PyYAML cryptography MarkupSafe bcrypt pynacl cffi pycparser
# Installing collected packages: MarkupSafe, jinja2, PyYAML, pycparser, cffi, six, bcrypt, pyasn1, pynacl, idna, asn1crypto, enum34, ipaddress, cryptography, paramiko, ansible
# Successfully installed MarkupSafe-1.0 PyYAML-3.13 ansible-2.6.3 asn1crypto-0.24.0 bcrypt-3.1.4 cffi-1.11.5 cryptography-2.3.1 enum34-1.1.6 idna-2.7 ipaddress-1.0.22 jinja2-2.10 paramiko-2.4.1 pyasn1-0.4.4 pycparser-2.18 pynacl-1.2.1 six-1.11.0
ansible --version
# ansible 2.6.3
#   config file = None
#   configured module search path = [u'/home/VZE/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
#   ansible python module location = /misc/.venv/lib/python2.7/site-packages/ansible
#   executable location = /misc/.venv/bin/ansible
#   python version = 2.7.14 (default, Oct 31 2017, 21:12:13) [GCC 6.4.0]

1.9 Configuring Ansible

Changes can be made in a configuration file, which Ansible searches for in the following order:

  • ANSIBLE_CONFIG (an environment variable)
  • ansible.cfg (in the current directory)
  • .ansible.cfg (in the home directory)
  • /etc/ansible/ansible.cfg

Ansible will process the above list and use the first file found. Settings in files are not merged.

Ansible on Cygwin needs the following entry to work:

[ssh_connection]
ssh_args = -o ControlMaster=no

The command below writes a complete ~/.ansible.cfg (based on the stock example configuration shipped with Ansible) that already contains this setting and also points roles_path to /misc/ansible/roles:
cat > ~/.ansible.cfg <<-"_EOF"
# config file for ansible -- https://ansible.com/
# ===============================================

# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first

[defaults]

# some basic default values...

#inventory      = /etc/ansible/hosts
#library        = /usr/share/my_modules/
#module_utils   = /usr/share/my_module_utils/
#remote_tmp     = ~/.ansible/tmp
#local_tmp      = ~/.ansible/tmp
#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
#forks          = 5
#poll_interval  = 15
#sudo_user      = root
#ask_sudo_pass = True
#ask_pass      = True
#transport      = smart
#remote_port    = 22
#module_lang    = C
#module_set_locale = False

# plays will gather facts by default, which contain information about
# the remote system.
#
# smart - gather by default, but don't regather if already gathered
# implicit - gather by default, turn off with gather_facts: False
# explicit - do not gather by default, must say gather_facts: True
#gathering = implicit

# This only affects the gathering done by a play's gather_facts directive,
# by default gathering retrieves all facts subsets
# all - gather all subsets
# network - gather min and network facts
# hardware - gather hardware facts (longest facts to retrieve)
# virtual - gather min and virtual facts
# facter - import facts from facter
# ohai - import facts from ohai
# You can combine them using comma (ex: network,virtual)
# You can negate them using ! (ex: !hardware,!facter,!ohai)
# A minimal set of facts is always gathered.
#gather_subset = all

# some hardware related facts are collected
# with a maximum timeout of 10 seconds. This
# option lets you increase or decrease that
# timeout to something more suitable for the
# environment.
# gather_timeout = 10

# additional paths to search for roles in, colon separated
#roles_path    = /etc/ansible/roles
roles_path = /misc/ansible/roles

# uncomment this to disable SSH key host checking
#host_key_checking = False

# change the default callback, you can only have one 'stdout' type  enabled at a time.
#stdout_callback = skippy


## Ansible ships with some plugins that require whitelisting,
## this is done to avoid running all of a type by default.
## These setting lists those that you want enabled for your system.
## Custom plugins should not need this unless plugin author specifies it.

# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
#callback_whitelist = timer, mail

# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
# values to True will make includes behave more like they did in the
# 1.x versions.
#task_includes_static = False
#handler_includes_static = False

# Controls if a missing handler for a notification event is an error or a warning
#error_on_missing_handler = True

# change this for alternative sudo implementations
#sudo_exe = sudo

# What flags to pass to sudo
# WARNING: leaving out the defaults might create unexpected behaviours
#sudo_flags = -H -S -n

# SSH timeout
#timeout = 10

# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
#remote_user = root

# logging is off by default unless this path is defined
# if so defined, consider logrotate
#log_path = /var/log/ansible.log

# default module name for /usr/bin/ansible
#module_name = command

# use this shell for commands executed under sudo
# you may need to change this to bin/bash in rare instances
# if sudo is constrained
#executable = /bin/sh

# if inventory variables overlap, does the higher precedence one win
# or are hash values merged together?  The default is 'replace' but
# this can also be set to 'merge'.
#hash_behaviour = replace

# by default, variables from roles will be visible in the global variable
# scope. To prevent this, the following option can be enabled, and only
# tasks and handlers within the role will see the variables there
#private_role_vars = yes

# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n

# if set, always use this private key file for authentication, same as
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file

# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
#vault_password_file = /path/to/vault_password_file

# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
# in some situations so the default is a static string:
#ansible_managed = Ansible managed

# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host.  Set this to "False" if you don't want to see these "Skipping"
# messages. NOTE: the task header will still be shown regardless of whether or not the
# task is skipped.
#display_skipped_hosts = True

# by default, if a task in a playbook does not include a name: field then
# ansible-playbook will construct a header that includes the task's action but
# not the task's args.  This is a security feature because ansible cannot know
# if the *module* considers an argument to be no_log at the time that the
# header is printed.  If your environment doesn't have a problem securing
# stdout from ansible-playbook (or you have manually specified no_log in your
# playbook on all of the tasks where you have secret information) then you can
# safely set this to True to get more informative messages.
#display_args_to_stdout = False

# by default (as of 1.3), Ansible will raise errors when attempting to dereference
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
# to revert the behavior to pre-1.3.
#error_on_undefined_vars = False

# by default (as of 1.6), Ansible may display warnings based on the configuration of the
# system running ansible itself. This may include warnings about 3rd party packages or
# other conditions that should be resolved if possible.
# to disable these warnings, set the following value to False:
#system_warnings = True

# by default (as of 1.4), Ansible may display deprecation warnings for language
# features that should no longer be used and will be removed in future versions.
# to disable these warnings, set the following value to False:
#deprecation_warnings = True

# (as of 1.8), Ansible can optionally warn when usage of the shell and
# command module appear to be simplified by using a default Ansible module
# instead.  These warnings can be silenced by adjusting the following
# setting or adding warn=yes or warn=no to the end of the command line
# parameter string.  This will for example suggest using the git module
# instead of shelling out to the git command.
# command_warnings = False


# set plugin path directories here, separate with colons
#action_plugins     = /usr/share/ansible/plugins/action
#cache_plugins      = /usr/share/ansible/plugins/cache
#callback_plugins   = /usr/share/ansible/plugins/callback
#connection_plugins = /usr/share/ansible/plugins/connection
#lookup_plugins     = /usr/share/ansible/plugins/lookup
#inventory_plugins  = /usr/share/ansible/plugins/inventory
#vars_plugins       = /usr/share/ansible/plugins/vars
#filter_plugins     = /usr/share/ansible/plugins/filter
#test_plugins       = /usr/share/ansible/plugins/test
#terminal_plugins   = /usr/share/ansible/plugins/terminal
#strategy_plugins   = /usr/share/ansible/plugins/strategy


# by default, ansible will use the 'linear' strategy but you may want to try
# another one
#strategy = free

# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
# /bin/ansible runs
#bin_ansible_callbacks = False


# don't like cows?  that's unfortunate.
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
#nocows = 1

# set which cowsay stencil you'd like to use by default. When set to 'random',
# a random stencil will be selected for each task. The selection will be filtered
# against the `cow_whitelist` option below.
#cow_selection = default
#cow_selection = random

# when using the 'random' option for cowsay, stencils will be restricted to this list.
# it should be formatted as a comma-separated list with no spaces between names.
# NOTE: line continuations here are for formatting purposes only, as the INI parser
#       in python does not support them.
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
#              hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
#              stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www

# don't like colors either?
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
#nocolor = 1

# if set to a persistent type (not 'memory', for example 'redis') fact values
# from previous runs in Ansible will be stored.  This may be useful when
# wanting to use, for example, IP information from one group of servers
# without having to talk to them in the same playbook run to get their
# current IP information.
#fact_caching = memory

#This option tells Ansible where to cache facts. The value is plugin dependent.
#For the jsonfile plugin, it should be a path to a local directory.
#For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0

#fact_caching_connection=/tmp



# retry files
# When a playbook fails by default a .retry file will be created in ~/
# You can disable this feature by setting retry_files_enabled to False
# and you can change the location of the files by setting retry_files_save_path

#retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry

# squash actions
# Ansible can optimise actions that call modules with list parameters
# when looping. Instead of calling the module once per with_ item, the
# module is called once with all items at once. Currently this only works
# under limited circumstances, and only with parameters named 'name'.
#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper

# prevents logging of task data, off by default
#no_log = False

# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
#no_target_syslog = False

# controls whether Ansible will raise an error or warning if a task has no
# choice but to create world readable temporary files to execute a module on
# the remote machine.  This option is False by default for security.  Users may
# turn this on to have behaviour more like Ansible prior to 2.1.x.  See
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
# for more secure ways to fix this than enabling this option.
#allow_world_readable_tmpfiles = False

# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
# is used. This value must be an integer from 0 to 9.
#var_compression_level = 9

# controls what compression method is used for new-style ansible modules when
# they are sent to the remote system.  The compression types depend on having
# support compiled into both the controller's python and the client's python.
# The names should match with the python Zipfile compression types:
# * ZIP_STORED (no compression. available everywhere)
# * ZIP_DEFLATED (uses zlib, the default)
# These values may be set per host via the ansible_module_compression inventory
# variable
#module_compression = 'ZIP_DEFLATED'

# This controls the cutoff point (in bytes) on --diff for files
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576

# This controls how ansible handles multiple --tags and --skip-tags arguments
# on the CLI.  If this is True then multiple arguments are merged together.  If
# it is False, then the last specified argument is used and the others are ignored.
# This option will be removed in 2.8.
#merge_multiple_cli_flags = True

# Controls showing custom stats at the end, off by default
#show_custom_stats = True

# Controls which files to ignore when using a directory as inventory with
# possibly multiple sources (both static and dynamic)
#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo

# This family of modules use an alternative execution path optimized for network appliances
# only update this setting if you know how this works, otherwise it can break module execution
#network_group_modules=eos, nxos, ios, iosxr, junos, vyos

# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
# jinja2 templating language which will be run through the templating engine.
# ENABLING THIS COULD BE A SECURITY RISK
#allow_unsafe_lookups = False

# set default errors for all plays
#any_errors_fatal = False

[inventory]
# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini'
#enable_plugins = host_list, virtualbox, yaml, constructed

# ignore these extensions when parsing a directory as inventory source
#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry

# ignore files matching these patterns when parsing a directory as inventory source
#ignore_patterns=

# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
#unparsed_is_failed=False

[privilege_escalation]
#become=True
#become_method=sudo
#become_user=root
#become_ask_pass=False

[paramiko_connection]

# uncomment this line to cause the paramiko connection plugin to not record new host
# keys encountered.  Increases performance on new host additions.  Setting works independently of the
# host key checking setting above.
#record_host_keys=False

# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
# line to disable this behaviour.
#pty=False

# paramiko will default to looking for SSH keys initially when trying to
# authenticate to remote devices.  This is a problem for some network devices
# that close the connection after a key failure.  Uncomment this line to
# disable the Paramiko look for keys function
#look_for_keys = False

# When using persistent connections with Paramiko, the connection runs in a
# background process.  If the host doesn't already have a valid SSH key, by
# default Ansible will prompt to add the host key.  This will cause connections
# running in background processes to fail.  Uncomment this line to have
# Paramiko automatically add host keys.
#host_key_auto_add = True

[ssh_connection]

# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it, -C controls compression use
#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
ssh_args = -o ControlMaster=no

# The base directory for the ControlPath sockets.
# This is the "%(directory)s" in the control_path option
#
# Example:
# control_path_dir = /tmp/.ansible/cp
#control_path_dir = ~/.ansible/cp

# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
# port and username (empty string in the config). The hash mitigates a common problem users
# found with long hostames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
# In those cases, a "too long for Unix domain socket" ssh error would occur.
#
# Example:
# control_path = %(directory)s/%%h-%%r
#control_path =

# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
#pipelining = False

# Control the mechanism for transferring files (old)
#   * smart = try sftp and then try scp [default]
#   * True = use scp only
#   * False = use sftp only
#scp_if_ssh = smart

# Control the mechanism for transferring files (new)
# If set, this will override the scp_if_ssh option
#   * sftp  = use sftp to transfer files
#   * scp   = use scp to transfer files
#   * piped = use 'dd' over SSH to transfer files
#   * smart = try sftp, scp, and piped, in that order [default]
#transfer_method = smart

# if False, sftp will not use batch mode to transfer files. This may cause some
# types of file transfer failures impossible to catch however, and should
# only be disabled if your sftp version has problems with batch mode
#sftp_batch_mode = False

# The -tt argument is passed to ssh when pipelining is not enabled because sudo 
# requires a tty by default. 
#use_tty = True

# Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
# For each retry attempt, there is an exponential backoff,
# so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
#retries = 3

[persistent_connection]

# Configures the persistent connection timeout value in seconds.  This value is
# how long the persistent connection will remain idle before it is destroyed.
# If the connection doesn't receive a request before the timeout value
# expires, the connection is shutdown. The default value is 30 seconds.
#connect_timeout = 30

# Configures the persistent connection retry timeout.  This value configures the
# the retry timeout that ansible-connection will wait to connect
# to the local domain socket. This value must be larger than the
# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
# The default value is 15 seconds.
#connect_retry_timeout = 15

# The command timeout value defines the amount of time to wait for a command
# or RPC call before timing out. The value for the command timeout must
# be less than the value of the persistent connection idle timeout (connect_timeout)
# The default value is 10 second.
#command_timeout = 10

[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0

# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30

# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes

[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p

# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes

[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan


[diff]
# Always print diff when running ( same as always running with -D/--diff )
# always = no

# Set how many context lines to show in diff
# context = 3
_EOF
ansible --version
# ansible 2.6.3
#   config file = /home/VZE/.ansible.cfg
#   configured module search path = [u'/home/VZE/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
#   ansible python module location = /misc/.venv/lib/python2.7/site-packages/ansible
#   executable location = /misc/.venv/bin/ansible
#   python version = 2.7.14 (default, Oct 31 2017, 21:12:13) [GCC 6.4.0]
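
As an optional sanity check you can run an ad-hoc module against the implicit localhost; with the local connection no inventory is needed (Ansible will warn about the empty host list, which is harmless), and the ping module should answer with "ping": "pong":

ansible localhost -c local -m ping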

1.10 Upgrading Ansible

pip list --outdated
# Package   Version Latest Type 
# --------- ------- ------ -----
# ansible   2.6.3   2.7.0  sdist
# paramiko  2.4.1   2.4.2  wheel
# pycparser 2.18    2.19   sdist
# PyNaCl    1.2.1   1.3.0  sdist
pip install ansible --upgrade
# Collecting ansible
#   Downloading https://files.pythonhosted.org/packages/7c/4f/a1843687642b8e33637e312b9fb17ca7f68be0641131a92d883368dc6d1c/ansible-2.7.0.tar.gz (11.8MB)
#     100% |████████████████████████████████| 11.8MB 1.6MB/s 
# Requirement already satisfied, skipping upgrade: jinja2 in ./.venv/lib/python2.7/site-packages (from ansible) (2.10)
# Requirement already satisfied, skipping upgrade: PyYAML in ./.venv/lib/python2.7/site-packages (from ansible) (3.13)
# Requirement already satisfied, skipping upgrade: paramiko in ./.venv/lib/python2.7/site-packages (from ansible) (2.4.1)
# Requirement already satisfied, skipping upgrade: cryptography in ./.venv/lib/python2.7/site-packages (from ansible) (2.3.1)
# Requirement already satisfied, skipping upgrade: setuptools in ./.venv/lib/python2.7/site-packages (from ansible) (40.4.3)
# Requirement already satisfied, skipping upgrade: MarkupSafe>=0.23 in ./.venv/lib/python2.7/site-packages (from jinja2->ansible) (1.0)
# Requirement already satisfied, skipping upgrade: bcrypt>=3.1.3 in ./.venv/lib/python2.7/site-packages (from paramiko->ansible) (3.1.4)
# Requirement already satisfied, skipping upgrade: pyasn1>=0.1.7 in ./.venv/lib/python2.7/site-packages (from paramiko->ansible) (0.4.4)
# Requirement already satisfied, skipping upgrade: pynacl>=1.0.1 in ./.venv/lib/python2.7/site-packages (from paramiko->ansible) (1.2.1)
# Requirement already satisfied, skipping upgrade: cffi!=1.11.3,>=1.7 in ./.venv/lib/python2.7/site-packages (from cryptography->ansible) (1.11.5)
# Requirement already satisfied, skipping upgrade: idna>=2.1 in ./.venv/lib/python2.7/site-packages (from cryptography->ansible) (2.7)
# Requirement already satisfied, skipping upgrade: enum34; python_version < "3" in ./.venv/lib/python2.7/site-packages (from cryptography->ansible) (1.1.6)
# Requirement already satisfied, skipping upgrade: six>=1.4.1 in ./.venv/lib/python2.7/site-packages (from cryptography->ansible) (1.11.0)
# Requirement already satisfied, skipping upgrade: asn1crypto>=0.21.0 in ./.venv/lib/python2.7/site-packages (from cryptography->ansible) (0.24.0)
# Requirement already satisfied, skipping upgrade: ipaddress; python_version < "3" in ./.venv/lib/python2.7/site-packages (from cryptography->ansible) (1.0.22)
# Requirement already satisfied, skipping upgrade: pycparser in ./.venv/lib/python2.7/site-packages (from cffi!=1.11.3,>=1.7->cryptography->ansible) (2.18)
# Building wheels for collected packages: ansible
#   Running setup.py bdist_wheel for ansible ... done
#   Stored in directory: /home/VZE/.cache/pip/wheels/4d/9b/69/922392aa2f3189d99e672afbd178887927ce5420e2cc874dcd
# Successfully built ansible
# Installing collected packages: ansible
#   Found existing installation: ansible 2.6.3
#     Uninstalling ansible-2.6.3:
#       Successfully uninstalled ansible-2.6.3
# Successfully installed ansible-2.7.0
ansible --version
# ansible 2.7.0
#   config file = /home/VZE/.ansible.cfg
#   configured module search path = [u'/home/VZE/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
#   ansible python module location = /misc/.venv/lib/python2.7/site-packages/ansible
#   executable location = /misc/.venv/bin/ansible
#   python version = 2.7.14 (default, Oct 31 2017, 21:12:13) [GCC 6.4.0]

2 Using Packer

Packer is an open source tool for creating identical machine images for multiple platforms from a single source configuration. Packer is lightweight, runs on every major operating system, and is highly performant, creating machine images for multiple platforms in parallel. Packer does not replace configuration management like Chef or Puppet. In fact, when building images, Packer is able to use tools like Chef or Puppet to install software onto the image.

A machine image is a single static unit that contains a pre-configured operating system and installed software which is used to quickly create new running machines.

2.1 Creating base VMs

Oracle Linux 7.5

export ol_ver=7.5
mkdir -p /misc/packer/ol/${ol_ver}/{iso,http} && cd /misc/packer/ol/${ol_ver}
Download Oracle Linux DVD ISO image from Oracle

Download the Oracle Linux DVD from https://edelivery.oracle.com and place V975367-01.iso into the /misc/packer/ol/7.5/iso folder.

Kickstart file
hostname=ol75-base
domain=vzell.de
swap_size=16376   # in MB (16 GB)
root_size=392192  # in MB (400 GB)
cat > http/ks.cfg <<-_EOF
#version=DEVEL

# Install OS instead of upgrade
install

# Reboot after installation
reboot

# System authorization information
auth --enableshadow --passalgo=sha512

# Additional yum repositories that may be used as sources for package installation
repo --name="Server-HighAvailability" --baseurl=file:///run/install/repo/addons/HighAvailability
repo --name="Server-ResilientStorage" --baseurl=file:///run/install/repo/addons/ResilientStorage

# Use CDROM installation media
cdrom

# Use text mode install
text

# Use graphical install
# graphical

# Do NOT Run the Setup Agent on first boot
firstboot --disable

# Keyboard layouts
keyboard --vckeymap=de --xlayouts='de'

# System language
lang en_US.UTF-8

# Network information
network --bootproto=dhcp --device=enp0s3 --ipv6=auto --activate
network --hostname=${hostname}.${domain}

# Root password
rootpw vagrant

# System services
services --disabled="chronyd"

# System timezone
timezone Europe/Berlin --isUtc --nontp

# Create additional user
user --name=vagrant --plaintext --password=vagrant --gecos="Vagrant"

# Specifies a list of disks for the installation program to use
ignoredisk --only-use=sda

# System bootloader configuration
bootloader --location=mbr --boot-drive=sda

# Partition clearing information
clearpart --none --initlabel

# Disk partitioning information
part pv.157 --fstype="lvmpv" --ondisk=sda --size=408575
part /boot --fstype="xfs" --ondisk=sda --size=1024
volgroup ol --pesize=4096 pv.157
logvol swap --fstype="swap" --size=${swap_size} --name=swap --vgname=ol
logvol / --fstype="xfs" --size=${root_size} --name=root --vgname=ol

# Firewall configuration
# firewall --enabled --service=ssh

# SELinux configuration
# selinux --enforcing

# Installation logging level
# logging --level=info

# Do not configure the X Window System
skipx

# Packages section (minimal + packages needed for building VirtualBox Guest Additions)
%packages --ignoremissing
@^minimal
@core
bzip2
gcc
make
kernel-uek
kernel-uek-devel
perl
%end

%addon com_redhat_kdump --disable --reserve-mb='auto'

%end

%anaconda
pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty
pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok
pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty
%end
_EOF
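
Since the here-document delimiter above is unquoted, ${hostname}, ${domain}, ${swap_size} and ${root_size} are expanded while ks.cfg is written. A quick look at the generated file (and, if the pykickstart package happens to be available, a syntax validation) catches mistakes before a lengthy build; the ksvalidator step is optional:

grep -E 'hostname=|--size=' http/ks.cfg
# network --hostname=ol75-base.vzell.de
# part pv.157 --fstype="lvmpv" --ondisk=sda --size=408575
# part /boot --fstype="xfs" --ondisk=sda --size=1024
# logvol swap --fstype="swap" --size=16376 --name=swap --vgname=ol
# logvol / --fstype="xfs" --size=392192 --name=root --vgname=ol

# optional: validate the Kickstart syntax (requires the pykickstart package)
ksvalidator http/ks.cfg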
Packer JSON script
md5sum /misc/packer/ol/${ol_ver}/iso/V975367-01.iso | awk '{ print $1; }'
# 3be1a456984ada84f19c6ea89ccb027a
cat > iso/iso-info.json <<-_EOF
{
  "iso_url": "V975367-01.iso",
  "iso_checksum": "$(md5sum /misc/packer/ol/${ol_ver}/iso/V975367-01.iso | awk '{ print $1; }')",
  "iso_checksum_type": "md5"
}
_EOF
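
The resulting var-file simply records the ISO name and its checksum; a quick look verifies that the command substitution worked and that the checksum matches the md5sum output above:

cat iso/iso-info.json
# {
#   "iso_url": "V975367-01.iso",
#   "iso_checksum": "3be1a456984ada84f19c6ea89ccb027a",
#   "iso_checksum_type": "md5"
# }
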
cat > packer.json <<-"_EOF"
{
  "variables": {
    "vm_name": "packer-ol75-base",
    "vm_description": "{{env `vm_description`}}",
    "vm_version": "1.0.0",
    "group_name": "/Oracle Linux/Oracle Linux 7 Update 5",
    "ssh_username": "root",
    "ssh_password": "vagrant",
    "hostname": "ol75-base.vzell.de",
    "compression": "6",
    "vagrantfile": ""
  },
  "builders": [
    {
      "type": "virtualbox-iso",
      "communicator": "ssh",
      "ssh_username": "{{user `ssh_username`}}",
      "ssh_password": "{{user `ssh_password`}}",
      "ssh_timeout": "15m",
      "guest_os_type": "Oracle_64",
      "guest_additions_url": "",
      "guest_additions_sha256": "",
      "guest_additions_path": "",
      "guest_additions_mode": "upload",
      "output_directory": "output-{{user `hostname`}}",
      "iso_url": "iso/{{user `iso_url`}}",
      "iso_checksum": "{{user `iso_checksum`}}",
      "iso_checksum_type": "{{user `iso_checksum_type`}}",
      "http_directory": "http",
      "http_port_min": 8080,
      "http_port_max": 8082,
      "vm_name": "{{user `vm_name`}}",
      "keep_registered": true,
      "export_opts": [
        "--manifest",
        "--vsys",
        "0",
        "--description",
        "{{user `vm_description`}}",
        "--version",
        "{{user `vm_version`}}"
      ],
      "vboxmanage": [
        [
          "modifyvm",
          "{{.Name}}",
          "--groups",
          "{{user `group_name`}}"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--boot1",
          "disk",
          "--boot2",
          "dvd",
          "--boot3",
          "none",
          "--boot4",
          "none"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--vram",
          "32"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--memory",
          "2048"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--cpus",
          "2"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--audio",
          "none"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--vrde",
          "off"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--rtcuseutc",
          "on"
        ]
      ],
      "hard_drive_interface": "sata",
      "sata_port_count": 4,
      "disk_size": 409600,
      "headless": false,
      "shutdown_command": "shutdown -h now",
      "shutdown_timeout": "30m",
      "boot_wait": "5s",
      "boot_command": [
        "<tab>",
        " text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg ",
        "<enter>"
      ]
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "execute_command": "sh '{{ .Path }}'",
      "pause_before": "1s",
      "inline": [
        "yum -y update",
        "yum clean all",
        "[ -d /var/cache/yum ] && rm -fr /var/cache/yum",
        "useradd vagrant",
        "cp /etc/sudoers /etc/sudoers.orig",
        "sed -i -e 's/Defaults\\s*requiretty$/#Defaults\trequiretty/' /etc/sudoers",
        "sed -i -e '/# %wheel\tALL=(ALL)\tNOPASSWD: ALL/a %vagrant\tALL=(ALL)\tNOPASSWD: ALL' /etc/sudoers",
        "mkdir ~vagrant/.ssh",
        "chmod 700 ~vagrant/.ssh",
        "echo 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key' > ~vagrant/.ssh/authorized_keys",
        "chmod 600 ~vagrant/.ssh/authorized_keys",
        "chown -R vagrant: ~vagrant/.ssh"
      ]
    },
    {
      "type": "shell",
      "only": [
        "virtualbox-iso"
      ],
      "execute_command": "sh '{{ .Path }}'",
      "pause_before": "1s",
      "inline": [
        "mkdir -p /media/dvd",
        "mount -o loop,ro VBoxGuestAdditions*.iso /media/dvd",
        "sh /media/dvd/VBoxLinuxAdditions.run --nox11",
        "umount /media/dvd",
        "rm VBoxGuestAdditions*.iso"
      ]
    }
  ]
}
_EOF
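
Note that this here-document delimiter is quoted ("_EOF"), so the shell writes the template verbatim and placeholders like {{user `vm_name`}} and {{ .HTTPIP }} are left for Packer to resolve. Before handing the file to Packer it can be checked for plain JSON syntax errors; a minimal sketch using the Python interpreter already installed above:

python -m json.tool packer.json > /dev/null && echo "packer.json is syntactically valid JSON"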
Packer execution
packer
# Usage: packer [--version] [--help] <command> [<args>]
# 
# Available commands are:
#     build       build image(s) from template
#     fix         fixes templates from old versions of packer
#     inspect     see components of a template
#     push        push a template and supporting files to a Packer build service
#     validate    check that a template is valid
#     version     Prints the Packer version
packer validate -var-file=iso/iso-info.json packer.json
# Template validated successfully.
packer inspect packer.json
# Optional variables and their defaults:
# 
#   compression    = 6
#   group_name     = /Oracle Linux/Oracle Linux 7 Update 5
#   hostname       = ol75-base.vzell.de
#   ssh_password   = vagrant
#   ssh_username   = root
#   vagrantfile    = 
#   vm_description = {{env `vm_description`}}
#   vm_name        = packer-ol75-base
#   vm_version     = 1.0.0
# 
# Builders:
# 
#   virtualbox-iso
# 
# Provisioners:
# 
#   shell
#   shell
# 
# Note: If your build names contain user variables or template
# functions such as 'timestamp', these are processed at build time,
# and therefore only show in their raw form here.

When you execute the next command, be prepared to respond to the Windows Firewall popup window and grant VirtualBox access.

vm_description='Oracle Linux 7 Update 5

prepared by Dr. Volker Zell'
vm_version='0.9.0'
time packer build \
    -var "vm_description=${vm_description}" \
    -var "vm_version=${vm_version}"         \
    -var-file=iso/iso-info.json             \
    packer.json
# virtualbox-iso output will be in this color.
#
# ==> virtualbox-iso: Retrieving Guest additions
#     virtualbox-iso: Using file in-place: file:///C:/Program%20Files/Oracle/VirtualBox/VBoxGuestAdditions.iso
# ==> virtualbox-iso: Retrieving ISO
#     virtualbox-iso: Using file in-place: file:///D:/misc/packer/ol/7.5/iso/V975367-01.iso
# ==> virtualbox-iso: Starting HTTP server on port 8082
# ==> virtualbox-iso: Creating virtual machine...
# ==> virtualbox-iso: Creating hard drive...
# ==> virtualbox-iso: Creating forwarded port mapping for communicator (SSH, WinRM, etc) (host port 2751)
# ==> virtualbox-iso: Executing custom VBoxManage commands...
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --groups /Oracle Linux/Oracle Linux 7 Update 5
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --boot1 disk --boot2 dvd --boot3 none --boot4 none
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --vram 32
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --memory 2048
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --cpus 2
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --audio none
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --vrde off
#     virtualbox-iso: Executing: modifyvm packer-ol75-base --rtcuseutc on
# ==> virtualbox-iso: Starting the virtual machine...
# ==> virtualbox-iso: Waiting 5s for boot...
# ==> virtualbox-iso: Typing the boot command...
# ==> virtualbox-iso: Using ssh communicator to connect: 127.0.0.1
# ==> virtualbox-iso: Waiting for SSH to become available...
# ==> virtualbox-iso: Connected to SSH!
# ==> virtualbox-iso: Uploading VirtualBox version info (5.1.28)
# ==> virtualbox-iso: Uploading VirtualBox guest additions ISO...
# ==> virtualbox-iso: Pausing 1s before the next provisioner...
# ==> virtualbox-iso: Provisioning with shell script: E:\tmp\packer-shell946603807
#     virtualbox-iso: Loaded plugins: ulninfo
#     virtualbox-iso: Resolving Dependencies
#     virtualbox-iso: --> Running transaction check
#     virtualbox-iso: ---> Package NetworkManager.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-config-server.noarch 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-config-server.noarch 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-libnm.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-libnm.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-team.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-team.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-tui.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-tui.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package audit.x86_64 0:2.8.1-3.el7 will be updated
#     virtualbox-iso: ---> Package audit.x86_64 0:2.8.1-3.el7_5.1 will be an update
#     virtualbox-iso: ---> Package audit-libs.x86_64 0:2.8.1-3.el7 will be updated
#     virtualbox-iso: ---> Package audit-libs.x86_64 0:2.8.1-3.el7_5.1 will be an update
#     virtualbox-iso: ---> Package bind-libs-lite.x86_64 32:9.9.4-61.el7 will be updated
#     virtualbox-iso: ---> Package bind-libs-lite.x86_64 32:9.9.4-61.el7_5.1 will be an update
#     virtualbox-iso: ---> Package bind-license.noarch 32:9.9.4-61.el7 will be updated
#     virtualbox-iso: ---> Package bind-license.noarch 32:9.9.4-61.el7_5.1 will be an update
#     virtualbox-iso: ---> Package binutils.x86_64 0:2.27-27.base.el7 will be updated
#     virtualbox-iso: ---> Package binutils.x86_64 0:2.27-28.base.el7_5.1 will be an update
#     virtualbox-iso: ---> Package ca-certificates.noarch 0:2017.2.20-71.el7 will be updated
#     virtualbox-iso: ---> Package ca-certificates.noarch 0:2018.2.22-70.0.el7_5 will be an update
#     virtualbox-iso: ---> Package cpp.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package cpp.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package device-mapper.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package device-mapper-event.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper-event.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package device-mapper-event-libs.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper-event-libs.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package device-mapper-libs.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper-libs.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package dhclient.x86_64 12:4.2.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dhclient.x86_64 12:4.2.5-68.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dhcp-common.x86_64 12:4.2.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dhcp-common.x86_64 12:4.2.5-68.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dhcp-libs.x86_64 12:4.2.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dhcp-libs.x86_64 12:4.2.5-68.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dracut.x86_64 0:033-535.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dracut.x86_64 0:033-535.0.5.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dracut-config-rescue.x86_64 0:033-535.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dracut-config-rescue.x86_64 0:033-535.0.5.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dracut-network.x86_64 0:033-535.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dracut-network.x86_64 0:033-535.0.5.el7_5.1 will be an update
#     virtualbox-iso: ---> Package e2fsprogs.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package e2fsprogs.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package e2fsprogs-libs.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package e2fsprogs-libs.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package firewalld.noarch 0:0.4.4.4-14.el7 will be updated
#     virtualbox-iso: ---> Package firewalld.noarch 0:0.4.4.4-15.el7_5 will be an update
#     virtualbox-iso: ---> Package firewalld-filesystem.noarch 0:0.4.4.4-14.el7 will be updated
#     virtualbox-iso: ---> Package firewalld-filesystem.noarch 0:0.4.4.4-15.el7_5 will be an update
#     virtualbox-iso: ---> Package gcc.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package gcc.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package glibc.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package glibc-common.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc-common.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package glibc-devel.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc-devel.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package glibc-headers.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc-headers.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package gnupg2.x86_64 0:2.0.22-4.el7 will be updated
#     virtualbox-iso: ---> Package gnupg2.x86_64 0:2.0.22-5.el7_5 will be an update
#     virtualbox-iso: ---> Package initscripts.x86_64 0:9.49.41-1.0.1.el7 will be updated
#     virtualbox-iso: ---> Package initscripts.x86_64 0:9.49.41-1.0.4.el7_5.2 will be an update
#     virtualbox-iso: ---> Package iptables.x86_64 0:1.4.21-24.el7 will be updated
#     virtualbox-iso: ---> Package iptables.x86_64 0:1.4.21-24.1.el7_5 will be an update
#     virtualbox-iso: ---> Package iwl100-firmware.noarch 0:39.31.5.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl100-firmware.noarch 0:39.31.5.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl1000-firmware.noarch 1:39.31.5.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl1000-firmware.noarch 1:39.31.5.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl105-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl105-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl135-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl135-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl2000-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl2000-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl2030-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl2030-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl3160-firmware.noarch 0:22.0.7.0-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl3160-firmware.noarch 0:22.0.7.0-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl3945-firmware.noarch 0:15.32.2.9-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl3945-firmware.noarch 0:15.32.2.9-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl4965-firmware.noarch 0:228.61.2.24-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl4965-firmware.noarch 0:228.61.2.24-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl5000-firmware.noarch 0:8.83.5.1_1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl5000-firmware.noarch 0:8.83.5.1_1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl5150-firmware.noarch 0:8.24.2.2-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl5150-firmware.noarch 0:8.24.2.2-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6000-firmware.noarch 0:9.221.4.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6000-firmware.noarch 0:9.221.4.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6000g2a-firmware.noarch 0:17.168.5.3-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6000g2a-firmware.noarch 0:17.168.5.3-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6000g2b-firmware.noarch 0:17.168.5.2-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6000g2b-firmware.noarch 0:17.168.5.2-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6050-firmware.noarch 0:41.28.5.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6050-firmware.noarch 0:41.28.5.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl7260-firmware.noarch 0:22.0.7.0-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl7260-firmware.noarch 0:22.0.7.0-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl7265-firmware.noarch 0:22.0.7.0-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl7265-firmware.noarch 0:22.0.7.0-999.el7 will be an update
#     virtualbox-iso: ---> Package kernel.x86_64 0:3.10.0-862.14.4.el7 will be installed
#     virtualbox-iso: ---> Package kernel-devel.x86_64 0:3.10.0-862.14.4.el7 will be installed
#     virtualbox-iso: ---> Package kernel-headers.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package kernel-headers.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package kernel-tools.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package kernel-tools.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package kernel-tools-libs.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package kernel-tools-libs.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package kernel-uek.x86_64 0:4.1.12-124.20.3.el7uek will be installed
#     virtualbox-iso: ---> Package kernel-uek-devel.x86_64 0:4.1.12-124.20.3.el7uek will be installed
#     virtualbox-iso: ---> Package kernel-uek-firmware.noarch 0:4.1.12-124.20.3.el7uek will be installed
#     virtualbox-iso: ---> Package kexec-tools.x86_64 0:2.0.15-13.0.1.el7 will be updated
#     virtualbox-iso: ---> Package kexec-tools.x86_64 0:2.0.15-13.0.1.el7_5.2 will be an update
#     virtualbox-iso: ---> Package kpartx.x86_64 0:0.4.9-119.el7 will be updated
#     virtualbox-iso: ---> Package kpartx.x86_64 0:0.4.9-119.el7_5.1 will be an update
#     virtualbox-iso: ---> Package krb5-libs.x86_64 0:1.15.1-18.el7 will be updated
#     virtualbox-iso: ---> Package krb5-libs.x86_64 0:1.15.1-19.el7 will be an update
#     virtualbox-iso: ---> Package libblkid.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package libblkid.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libcom_err.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libcom_err.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package libdtrace-ctf.x86_64 0:0.7.0-1.el7 will be updated
#     virtualbox-iso: ---> Package libdtrace-ctf.x86_64 0:0.8.0-1.el7 will be an update
#     virtualbox-iso: ---> Package libgcc.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libgcc.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libgomp.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libgomp.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libgudev1.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libgudev1.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package libmount.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package libmount.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libselinux.x86_64 0:2.5-12.el7 will be updated
#     virtualbox-iso: ---> Package libselinux.x86_64 0:2.5-12.0.1.el7 will be an update
#     virtualbox-iso: ---> Package libselinux-python.x86_64 0:2.5-12.el7 will be updated
#     virtualbox-iso: ---> Package libselinux-python.x86_64 0:2.5-12.0.1.el7 will be an update
#     virtualbox-iso: ---> Package libselinux-utils.x86_64 0:2.5-12.el7 will be updated
#     virtualbox-iso: ---> Package libselinux-utils.x86_64 0:2.5-12.0.1.el7 will be an update
#     virtualbox-iso: ---> Package libss.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libss.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package libstdc++.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libstdc++.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libuuid.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package libuuid.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package linux-firmware.noarch 0:20180220-62.git6d51311.0.1.el7 will be updated
#     virtualbox-iso: ---> Package linux-firmware.noarch 0:20180906-999.git85c5d90f.el7 will be an update
#     virtualbox-iso: ---> Package lvm2.x86_64 7:2.02.177-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package lvm2.x86_64 7:2.02.177-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package lvm2-libs.x86_64 7:2.02.177-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package lvm2-libs.x86_64 7:2.02.177-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package mariadb-libs.x86_64 1:5.5.56-2.el7 will be updated
#     virtualbox-iso: ---> Package mariadb-libs.x86_64 1:5.5.60-1.el7_5 will be an update
#     virtualbox-iso: ---> Package microcode_ctl.x86_64 2:2.1-29.0.2.el7 will be updated
#     virtualbox-iso: ---> Package microcode_ctl.x86_64 2:2.1-29.16.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package nspr.x86_64 0:4.17.0-1.el7 will be updated
#     virtualbox-iso: ---> Package nspr.x86_64 0:4.19.0-1.el7_5 will be an update
#     virtualbox-iso: ---> Package nss.x86_64 0:3.34.0-4.el7 will be updated
#     virtualbox-iso: ---> Package nss.x86_64 0:3.36.0-7.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-softokn.x86_64 0:3.34.0-2.0.1.el7 will be updated
#     virtualbox-iso: ---> Package nss-softokn.x86_64 0:3.36.0-5.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-softokn-freebl.x86_64 0:3.34.0-2.0.1.el7 will be updated
#     virtualbox-iso: ---> Package nss-softokn-freebl.x86_64 0:3.36.0-5.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-sysinit.x86_64 0:3.34.0-4.el7 will be updated
#     virtualbox-iso: ---> Package nss-sysinit.x86_64 0:3.36.0-7.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-tools.x86_64 0:3.34.0-4.el7 will be updated
#     virtualbox-iso: ---> Package nss-tools.x86_64 0:3.36.0-7.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-util.x86_64 0:3.34.0-2.el7 will be updated
#     virtualbox-iso: ---> Package nss-util.x86_64 0:3.36.0-1.el7_5 will be an update
#     virtualbox-iso: ---> Package openldap.x86_64 0:2.4.44-13.el7 will be updated
#     virtualbox-iso: ---> Package openldap.x86_64 0:2.4.44-15.el7_5 will be an update
#     virtualbox-iso: ---> Package openssl.x86_64 1:1.0.2k-12.0.1.el7 will be updated
#     virtualbox-iso: ---> Package openssl.x86_64 1:1.0.2k-12.0.3.el7 will be an update
#     virtualbox-iso: ---> Package openssl-libs.x86_64 1:1.0.2k-12.0.1.el7 will be updated
#     virtualbox-iso: ---> Package openssl-libs.x86_64 1:1.0.2k-12.0.3.el7 will be an update
#     virtualbox-iso: ---> Package oraclelinux-release.x86_64 7:7.5-1.0.3.el7 will be updated
#     virtualbox-iso: ---> Package oraclelinux-release.x86_64 7:7.5-1.0.5.el7 will be an update
#     virtualbox-iso: ---> Package polkit.x86_64 0:0.112-14.el7 will be updated
#     virtualbox-iso: ---> Package polkit.x86_64 0:0.112-14.0.1.el7 will be an update
#     virtualbox-iso: ---> Package procps-ng.x86_64 0:3.3.10-17.el7 will be updated
#     virtualbox-iso: ---> Package procps-ng.x86_64 0:3.3.10-17.el7_5.2 will be an update
#     virtualbox-iso: ---> Package python.x86_64 0:2.7.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package python.x86_64 0:2.7.5-69.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package python-firewall.noarch 0:0.4.4.4-14.el7 will be updated
#     virtualbox-iso: ---> Package python-firewall.noarch 0:0.4.4.4-15.el7_5 will be an update
#     virtualbox-iso: ---> Package python-libs.x86_64 0:2.7.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package python-libs.x86_64 0:2.7.5-69.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package python-perf.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package python-perf.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package qemu-guest-agent.x86_64 10:2.8.0-2.el7 will be updated
#     virtualbox-iso: ---> Package qemu-guest-agent.x86_64 10:2.8.0-2.el7_5.1 will be an update
#     virtualbox-iso: ---> Package redhat-release-server.x86_64 1:7.5-8.0.1.el7 will be updated
#     virtualbox-iso: ---> Package redhat-release-server.x86_64 1:7.5-8.0.5.el7 will be an update
#     virtualbox-iso: ---> Package rhn-check.noarch 0:2.0.2-21.0.3.el7 will be updated
#     virtualbox-iso: ---> Package rhn-check.noarch 0:2.0.2-21.0.9.el7 will be an update
#     virtualbox-iso: ---> Package rhn-client-tools.noarch 0:2.0.2-21.0.3.el7 will be updated
#     virtualbox-iso: ---> Package rhn-client-tools.noarch 0:2.0.2-21.0.9.el7 will be an update
#     virtualbox-iso: ---> Package rhn-setup.noarch 0:2.0.2-21.0.3.el7 will be updated
#     virtualbox-iso: ---> Package rhn-setup.noarch 0:2.0.2-21.0.9.el7 will be an update
#     virtualbox-iso: ---> Package rsyslog.x86_64 0:8.24.0-16.el7 will be updated
#     virtualbox-iso: ---> Package rsyslog.x86_64 0:8.24.0-16.el7_5.4 will be an update
#     virtualbox-iso: ---> Package selinux-policy.noarch 0:3.13.1-192.0.1.el7 will be updated
#     virtualbox-iso: ---> Package selinux-policy.noarch 0:3.13.1-192.0.6.el7_5.6 will be an update
#     virtualbox-iso: ---> Package selinux-policy-targeted.noarch 0:3.13.1-192.0.1.el7 will be updated
#     virtualbox-iso: ---> Package selinux-policy-targeted.noarch 0:3.13.1-192.0.6.el7_5.6 will be an update
#     virtualbox-iso: ---> Package sudo.x86_64 0:1.8.19p2-13.el7 will be updated
#     virtualbox-iso: ---> Package sudo.x86_64 0:1.8.19p2-14.el7_5 will be an update
#     virtualbox-iso: ---> Package systemd.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package systemd.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package systemd-libs.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package systemd-libs.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package systemd-sysv.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package systemd-sysv.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package tuned.noarch 0:2.9.0-1.el7 will be updated
#     virtualbox-iso: ---> Package tuned.noarch 0:2.9.0-1.el7_5.2 will be an update
#     virtualbox-iso: ---> Package tzdata.noarch 0:2018c-1.el7 will be updated
#     virtualbox-iso: ---> Package tzdata.noarch 0:2018e-3.el7 will be an update
#     virtualbox-iso: ---> Package util-linux.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package util-linux.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package yum.noarch 0:3.4.3-158.0.1.el7 will be updated
#     virtualbox-iso: ---> Package yum.noarch 0:3.4.3-158.0.2.el7 will be an update
#     virtualbox-iso: --> Finished Dependency Resolution
#     virtualbox-iso:
#     virtualbox-iso: Dependencies Resolved
#     virtualbox-iso:
#     virtualbox-iso: ================================================================================
#     virtualbox-iso:  Package                   Arch   Version                      Repository  Size
#     virtualbox-iso: ================================================================================
#     virtualbox-iso: Installing:
#     virtualbox-iso:  kernel                    x86_64 3.10.0-862.14.4.el7          ol7_latest  46 M
#     virtualbox-iso:  kernel-devel              x86_64 3.10.0-862.14.4.el7          ol7_latest  16 M
#     virtualbox-iso:  kernel-uek                x86_64 4.1.12-124.20.3.el7uek       ol7_UEKR4   44 M
#     virtualbox-iso:  kernel-uek-devel          x86_64 4.1.12-124.20.3.el7uek       ol7_UEKR4   11 M
#     virtualbox-iso:  kernel-uek-firmware       noarch 4.1.12-124.20.3.el7uek       ol7_UEKR4  2.5 M
#     virtualbox-iso: Updating:
#     virtualbox-iso:  NetworkManager            x86_64 1:1.10.2-16.el7_5            ol7_latest 1.7 M
#     virtualbox-iso:  NetworkManager-config-server
#     virtualbox-iso:                            noarch 1:1.10.2-16.el7_5            ol7_latest 143 k
#     virtualbox-iso:  NetworkManager-libnm      x86_64 1:1.10.2-16.el7_5            ol7_latest 1.3 M
#     virtualbox-iso:  NetworkManager-team       x86_64 1:1.10.2-16.el7_5            ol7_latest 161 k
#     virtualbox-iso:  NetworkManager-tui        x86_64 1:1.10.2-16.el7_5            ol7_latest 235 k
#     virtualbox-iso:  audit                     x86_64 2.8.1-3.el7_5.1              ol7_latest 246 k
#     virtualbox-iso:  audit-libs                x86_64 2.8.1-3.el7_5.1              ol7_latest  99 k
#     virtualbox-iso:  bind-libs-lite            x86_64 32:9.9.4-61.el7_5.1          ol7_latest 733 k
#     virtualbox-iso:  bind-license              noarch 32:9.9.4-61.el7_5.1          ol7_latest  85 k
#     virtualbox-iso:  binutils                  x86_64 2.27-28.base.el7_5.1         ol7_latest 5.9 M
#     virtualbox-iso:  ca-certificates           noarch 2018.2.22-70.0.el7_5         ol7_latest 391 k
#     virtualbox-iso:  cpp                       x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 5.9 M
#     virtualbox-iso:  device-mapper             x86_64 7:1.02.146-4.0.2.el7         ol7_latest 289 k
#     virtualbox-iso:  device-mapper-event       x86_64 7:1.02.146-4.0.2.el7         ol7_latest 185 k
#     virtualbox-iso:  device-mapper-event-libs  x86_64 7:1.02.146-4.0.2.el7         ol7_latest 184 k
#     virtualbox-iso:  device-mapper-libs        x86_64 7:1.02.146-4.0.2.el7         ol7_latest 316 k
#     virtualbox-iso:  dhclient                  x86_64 12:4.2.5-68.0.1.el7_5.1      ol7_latest 283 k
#     virtualbox-iso:  dhcp-common               x86_64 12:4.2.5-68.0.1.el7_5.1      ol7_latest 174 k
#     virtualbox-iso:  dhcp-libs                 x86_64 12:4.2.5-68.0.1.el7_5.1      ol7_latest 131 k
#     virtualbox-iso:  dracut                    x86_64 033-535.0.5.el7_5.1          ol7_latest 326 k
#     virtualbox-iso:  dracut-config-rescue      x86_64 033-535.0.5.el7_5.1          ol7_latest  58 k
#     virtualbox-iso:  dracut-network            x86_64 033-535.0.5.el7_5.1          ol7_latest 101 k
#     virtualbox-iso:  e2fsprogs                 x86_64 1.42.9-12.el7_5              ol7_latest 698 k
#     virtualbox-iso:  e2fsprogs-libs            x86_64 1.42.9-12.el7_5              ol7_latest 166 k
#     virtualbox-iso:  firewalld                 noarch 0.4.4.4-15.el7_5             ol7_latest 418 k
#     virtualbox-iso:  firewalld-filesystem      noarch 0.4.4.4-15.el7_5             ol7_latest  48 k
#     virtualbox-iso:  gcc                       x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest  16 M
#     virtualbox-iso:  glibc                     x86_64 2.17-222.0.7.el7             ol7_latest 3.6 M
#     virtualbox-iso:  glibc-common              x86_64 2.17-222.0.7.el7             ol7_latest  11 M
#     virtualbox-iso:  glibc-devel               x86_64 2.17-222.0.7.el7             ol7_latest 1.1 M
#     virtualbox-iso:  glibc-headers             x86_64 2.17-222.0.7.el7             ol7_latest 679 k
#     virtualbox-iso:  gnupg2                    x86_64 2.0.22-5.el7_5               ol7_latest 1.5 M
#     virtualbox-iso:  initscripts               x86_64 9.49.41-1.0.4.el7_5.2        ol7_latest 437 k
#     virtualbox-iso:  iptables                  x86_64 1.4.21-24.1.el7_5            ol7_latest 431 k
#     virtualbox-iso:  iwl100-firmware           noarch 39.31.5.1-999.el7            ol7_latest 145 k
#     virtualbox-iso:  iwl1000-firmware          noarch 1:39.31.5.1-999.el7          ol7_latest 208 k
#     virtualbox-iso:  iwl105-firmware           noarch 18.168.6.1-999.el7           ol7_latest 229 k
#     virtualbox-iso:  iwl135-firmware           noarch 18.168.6.1-999.el7           ol7_latest 238 k
#     virtualbox-iso:  iwl2000-firmware          noarch 18.168.6.1-999.el7           ol7_latest 232 k
#     virtualbox-iso:  iwl2030-firmware          noarch 18.168.6.1-999.el7           ol7_latest 241 k
#     virtualbox-iso:  iwl3160-firmware          noarch 22.0.7.0-999.el7             ol7_latest 1.6 M
#     virtualbox-iso:  iwl3945-firmware          noarch 15.32.2.9-999.el7            ol7_latest  83 k
#     virtualbox-iso:  iwl4965-firmware          noarch 228.61.2.24-999.el7          ol7_latest  96 k
#     virtualbox-iso:  iwl5000-firmware          noarch 8.83.5.1_1-999.el7           ol7_latest 289 k
#     virtualbox-iso:  iwl5150-firmware          noarch 8.24.2.2-999.el7             ol7_latest 142 k
#     virtualbox-iso:  iwl6000-firmware          noarch 9.221.4.1-999.el7            ol7_latest 162 k
#     virtualbox-iso:  iwl6000g2a-firmware       noarch 17.168.5.3-999.el7           ol7_latest 305 k
#     virtualbox-iso:  iwl6000g2b-firmware       noarch 17.168.5.2-999.el7           ol7_latest 305 k
#     virtualbox-iso:  iwl6050-firmware          noarch 41.28.5.1-999.el7            ol7_latest 238 k
#     virtualbox-iso:  iwl7260-firmware          noarch 22.0.7.0-999.el7             ol7_latest 1.1 M
#     virtualbox-iso:  iwl7265-firmware          noarch 22.0.7.0-999.el7             ol7_latest 6.4 M
#     virtualbox-iso:  kernel-headers            x86_64 3.10.0-862.14.4.el7          ol7_latest 7.1 M
#     virtualbox-iso:  kernel-tools              x86_64 3.10.0-862.14.4.el7          ol7_latest 6.3 M
#     virtualbox-iso:  kernel-tools-libs         x86_64 3.10.0-862.14.4.el7          ol7_latest 6.2 M
#     virtualbox-iso:  kexec-tools               x86_64 2.0.15-13.0.1.el7_5.2        ol7_latest 341 k
#     virtualbox-iso:  kpartx                    x86_64 0.4.9-119.el7_5.1            ol7_latest  75 k
#     virtualbox-iso:  krb5-libs                 x86_64 1.15.1-19.el7                ol7_latest 747 k
#     virtualbox-iso:  libblkid                  x86_64 2.23.2-52.el7_5.1            ol7_latest 178 k
#     virtualbox-iso:  libcom_err                x86_64 1.42.9-12.el7_5              ol7_latest  40 k
#     virtualbox-iso:  libdtrace-ctf             x86_64 0.8.0-1.el7                  ol7_UEKR4   34 k
#     virtualbox-iso:  libgcc                    x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 100 k
#     virtualbox-iso:  libgomp                   x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 156 k
#     virtualbox-iso:  libgudev1                 x86_64 219-57.0.1.el7_5.3           ol7_latest  92 k
#     virtualbox-iso:  libmount                  x86_64 2.23.2-52.el7_5.1            ol7_latest 179 k
#     virtualbox-iso:  libselinux                x86_64 2.5-12.0.1.el7               ol7_latest 161 k
#     virtualbox-iso:  libselinux-python         x86_64 2.5-12.0.1.el7               ol7_latest 235 k
#     virtualbox-iso:  libselinux-utils          x86_64 2.5-12.0.1.el7               ol7_latest 151 k
#     virtualbox-iso:  libss                     x86_64 1.42.9-12.el7_5              ol7_latest  45 k
#     virtualbox-iso:  libstdc++                 x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 303 k
#     virtualbox-iso:  libuuid                   x86_64 2.23.2-52.el7_5.1            ol7_latest  80 k
#     virtualbox-iso:  linux-firmware            noarch 20180906-999.git85c5d90f.el7 ol7_latest  67 M
#     virtualbox-iso:  lvm2                      x86_64 7:2.02.177-4.0.2.el7         ol7_latest 1.3 M
#     virtualbox-iso:  lvm2-libs                 x86_64 7:2.02.177-4.0.2.el7         ol7_latest 1.0 M
#     virtualbox-iso:  mariadb-libs              x86_64 1:5.5.60-1.el7_5             ol7_latest 758 k
#     virtualbox-iso:  microcode_ctl             x86_64 2:2.1-29.16.0.1.el7_5        ol7_latest 1.4 M
#     virtualbox-iso:  nspr                      x86_64 4.19.0-1.el7_5               ol7_latest 126 k
#     virtualbox-iso:  nss                       x86_64 3.36.0-7.el7_5               ol7_latest 834 k
#     virtualbox-iso:  nss-softokn               x86_64 3.36.0-5.0.1.el7_5           ol7_latest 315 k
#     virtualbox-iso:  nss-softokn-freebl        x86_64 3.36.0-5.0.1.el7_5           ol7_latest 222 k
#     virtualbox-iso:  nss-sysinit               x86_64 3.36.0-7.el7_5               ol7_latest  62 k
#     virtualbox-iso:  nss-tools                 x86_64 3.36.0-7.el7_5               ol7_latest 514 k
#     virtualbox-iso:  nss-util                  x86_64 3.36.0-1.el7_5               ol7_latest  77 k
#     virtualbox-iso:  openldap                  x86_64 2.4.44-15.el7_5              ol7_latest 355 k
#     virtualbox-iso:  openssl                   x86_64 1:1.0.2k-12.0.3.el7          ol7_latest 492 k
#     virtualbox-iso:  openssl-libs              x86_64 1:1.0.2k-12.0.3.el7          ol7_latest 1.2 M
#     virtualbox-iso:  oraclelinux-release       x86_64 7:7.5-1.0.5.el7              ol7_latest  58 k
#     virtualbox-iso:  polkit                    x86_64 0.112-14.0.1.el7             ol7_latest 167 k
#     virtualbox-iso:  procps-ng                 x86_64 3.3.10-17.el7_5.2            ol7_latest 289 k
#     virtualbox-iso:  python                    x86_64 2.7.5-69.0.1.el7_5           ol7_latest  93 k
#     virtualbox-iso:  python-firewall           noarch 0.4.4.4-15.el7_5             ol7_latest 328 k
#     virtualbox-iso:  python-libs               x86_64 2.7.5-69.0.1.el7_5           ol7_latest 5.6 M
#     virtualbox-iso:  python-perf               x86_64 3.10.0-862.14.4.el7          ol7_latest 6.3 M
#     virtualbox-iso:  qemu-guest-agent          x86_64 10:2.8.0-2.el7_5.1           ol7_latest 149 k
#     virtualbox-iso:  redhat-release-server     x86_64 1:7.5-8.0.5.el7              ol7_latest 9.4 k
#     virtualbox-iso:  rhn-check                 noarch 2.0.2-21.0.9.el7             ol7_latest  57 k
#     virtualbox-iso:  rhn-client-tools          noarch 2.0.2-21.0.9.el7             ol7_latest 416 k
#     virtualbox-iso:  rhn-setup                 noarch 2.0.2-21.0.9.el7             ol7_latest  94 k
#     virtualbox-iso:  rsyslog                   x86_64 8.24.0-16.el7_5.4            ol7_latest 606 k
#     virtualbox-iso:  selinux-policy            noarch 3.13.1-192.0.6.el7_5.6       ol7_latest 454 k
#     virtualbox-iso:  selinux-policy-targeted   noarch 3.13.1-192.0.6.el7_5.6       ol7_latest 6.8 M
#     virtualbox-iso:  sudo                      x86_64 1.8.19p2-14.el7_5            ol7_latest 1.1 M
#     virtualbox-iso:  systemd                   x86_64 219-57.0.1.el7_5.3           ol7_latest 5.0 M
#     virtualbox-iso:  systemd-libs              x86_64 219-57.0.1.el7_5.3           ol7_latest 402 k
#     virtualbox-iso:  systemd-sysv              x86_64 219-57.0.1.el7_5.3           ol7_latest  79 k
#     virtualbox-iso:  tuned                     noarch 2.9.0-1.el7_5.2              ol7_latest 244 k
#     virtualbox-iso:  tzdata                    noarch 2018e-3.el7                  ol7_latest 481 k
#     virtualbox-iso:  util-linux                x86_64 2.23.2-52.el7_5.1            ol7_latest 2.0 M
#     virtualbox-iso:  yum                       noarch 3.4.3-158.0.2.el7            ol7_latest 1.2 M
#     virtualbox-iso:
#     virtualbox-iso: Transaction Summary
#     virtualbox-iso: ================================================================================
#     virtualbox-iso: Install    5 Packages
#     virtualbox-iso: Upgrade  108 Packages
#     virtualbox-iso:
#     virtualbox-iso: Total download size: 317 M
#     virtualbox-iso: Downloading packages:
#     virtualbox-iso: Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
#     virtualbox-iso: warning: /var/cache/yum/x86_64/7Server/ol7_latest/packages/NetworkManager-config-server-1.10.2-16.el7_5.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID ec551f03: NOKEY
#     virtualbox-iso: Public key for NetworkManager-config-server-1.10.2-16.el7_5.noarch.rpm is not installed
#     virtualbox-iso: Public key for kernel-uek-devel-4.1.12-124.20.3.el7uek.x86_64.rpm is not installed
#     virtualbox-iso: --------------------------------------------------------------------------------
#     virtualbox-iso: Total                                              5.8 MB/s | 317 MB  00:54
#     virtualbox-iso: Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
#     virtualbox-iso: Importing GPG key 0xEC551F03:
#     virtualbox-iso:  Userid     : "Oracle OSS group (Open Source Software group) <build@oss.oracle.com>"
#     virtualbox-iso:  Fingerprint: 4214 4123 fecf c55b 9086 313d 72f9 7b74 ec55 1f03
#     virtualbox-iso:  Package    : 7:oraclelinux-release-7.5-1.0.3.el7.x86_64 (@anaconda/7.5)
#     virtualbox-iso:  From       : /etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
#     virtualbox-iso: Running transaction check
#     virtualbox-iso: Running transaction test
#     virtualbox-iso: Transaction test succeeded
#     virtualbox-iso: Running transaction
#     virtualbox-iso:   Updating   : libgcc-4.8.5-28.0.1.el7_5.1.x86_64                         1/221
#     virtualbox-iso:   Updating   : 1:redhat-release-server-7.5-8.0.5.el7.x86_64               2/221
#     virtualbox-iso:   Updating   : 7:oraclelinux-release-7.5-1.0.5.el7.x86_64                 3/221
#     virtualbox-iso:   Updating   : linux-firmware-20180906-999.git85c5d90f.el7.noarch         4/221
#     virtualbox-iso:   Updating   : tzdata-2018e-3.el7.noarch                                  5/221
#     virtualbox-iso:   Updating   : glibc-common-2.17-222.0.7.el7.x86_64                       6/221
#     virtualbox-iso:   Updating   : nss-softokn-freebl-3.36.0-5.0.1.el7_5.x86_64               7/221
#     virtualbox-iso:   Updating   : glibc-2.17-222.0.7.el7.x86_64                              8/221
#     virtualbox-iso:   Updating   : libselinux-2.5-12.0.1.el7.x86_64                           9/221
#     virtualbox-iso:   Updating   : nspr-4.19.0-1.el7_5.x86_64                                10/221
#     virtualbox-iso:   Updating   : nss-util-3.36.0-1.el7_5.x86_64                            11/221
#     virtualbox-iso:   Updating   : systemd-libs-219-57.0.1.el7_5.3.x86_64                    12/221
#     virtualbox-iso:   Updating   : libuuid-2.23.2-52.el7_5.1.x86_64                          13/221
#     virtualbox-iso:   Updating   : libcom_err-1.42.9-12.el7_5.x86_64                         14/221
#     virtualbox-iso:   Updating   : libblkid-2.23.2-52.el7_5.1.x86_64                         15/221
#     virtualbox-iso:   Updating   : audit-libs-2.8.1-3.el7_5.1.x86_64                         16/221
#     virtualbox-iso:   Updating   : libmount-2.23.2-52.el7_5.1.x86_64                         17/221
#     virtualbox-iso:   Updating   : systemd-219-57.0.1.el7_5.3.x86_64                         18/221
#     virtualbox-iso:   Updating   : util-linux-2.23.2-52.el7_5.1.x86_64                       19/221
#     virtualbox-iso:   Updating   : 7:device-mapper-libs-1.02.146-4.0.2.el7.x86_64            20/221
#     virtualbox-iso:   Updating   : 7:device-mapper-1.02.146-4.0.2.el7.x86_64                 21/221
#     virtualbox-iso:   Updating   : 7:device-mapper-event-libs-1.02.146-4.0.2.el7.x86_64      22/221
#     virtualbox-iso:   Updating   : polkit-0.112-14.0.1.el7.x86_64                            23/221
#     virtualbox-iso:   Updating   : procps-ng-3.3.10-17.el7_5.2.x86_64                        24/221
#     virtualbox-iso:   Updating   : initscripts-9.49.41-1.0.4.el7_5.2.x86_64                  25/221
#     virtualbox-iso:   Updating   : nss-softokn-3.36.0-5.0.1.el7_5.x86_64                     26/221
#     virtualbox-iso:   Updating   : nss-sysinit-3.36.0-7.el7_5.x86_64                         27/221
#     virtualbox-iso:   Updating   : nss-3.36.0-7.el7_5.x86_64                                 28/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-libnm-1.10.2-16.el7_5.x86_64             29/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-1.10.2-16.el7_5.x86_64                   30/221
#     virtualbox-iso:   Updating   : libstdc++-4.8.5-28.0.1.el7_5.1.x86_64                     31/221
#     virtualbox-iso:   Updating   : nss-tools-3.36.0-7.el7_5.x86_64                           32/221
#     virtualbox-iso:   Updating   : 7:device-mapper-event-1.02.146-4.0.2.el7.x86_64           33/221
#     virtualbox-iso:   Updating   : 7:lvm2-libs-2.02.177-4.0.2.el7.x86_64                     34/221
#     virtualbox-iso:   Updating   : kpartx-0.4.9-119.el7_5.1.x86_64                           35/221
#     virtualbox-iso:   Updating   : dracut-033-535.0.5.el7_5.1.x86_64                         36/221
#     virtualbox-iso:   Updating   : libss-1.42.9-12.el7_5.x86_64                              37/221
#     virtualbox-iso:   Updating   : e2fsprogs-libs-1.42.9-12.el7_5.x86_64                     38/221
#     virtualbox-iso:   Updating   : iptables-1.4.21-24.1.el7_5.x86_64                         39/221
#     virtualbox-iso:   Updating   : cpp-4.8.5-28.0.1.el7_5.1.x86_64                           40/221
#     virtualbox-iso:   Updating   : binutils-2.27-28.base.el7_5.1.x86_64                      41/221
#     virtualbox-iso:   Updating   : libgomp-4.8.5-28.0.1.el7_5.1.x86_64                       42/221
#     virtualbox-iso:   Updating   : kernel-tools-libs-3.10.0-862.14.4.el7.x86_64              43/221
#     virtualbox-iso:   Installing : kernel-uek-firmware-4.1.12-124.20.3.el7uek.noarch         44/221
#     virtualbox-iso:   Updating   : ca-certificates-2018.2.22-70.0.el7_5.noarch               45/221
#     virtualbox-iso:   Updating   : 1:openssl-libs-1.0.2k-12.0.3.el7.x86_64                   46/221
#     virtualbox-iso:   Updating   : krb5-libs-1.15.1-19.el7.x86_64                            47/221
#     virtualbox-iso:   Updating   : openldap-2.4.44-15.el7_5.x86_64                           48/221
#     virtualbox-iso:   Updating   : 12:dhcp-libs-4.2.5-68.0.1.el7_5.1.x86_64                  49/221
#     virtualbox-iso:   Updating   : python-libs-2.7.5-69.0.1.el7_5.x86_64                     50/221
#     virtualbox-iso:   Updating   : python-2.7.5-69.0.1.el7_5.x86_64                          51/221
#     virtualbox-iso:   Updating   : python-firewall-0.4.4.4-15.el7_5.noarch                   52/221
#     virtualbox-iso:   Updating   : yum-3.4.3-158.0.2.el7.noarch                              53/221
#     virtualbox-iso:   Updating   : systemd-sysv-219-57.0.1.el7_5.3.x86_64                    54/221
#     virtualbox-iso:   Updating   : python-perf-3.10.0-862.14.4.el7.x86_64                    55/221
#     virtualbox-iso:   Updating   : 12:dhcp-common-4.2.5-68.0.1.el7_5.1.x86_64                56/221
#     virtualbox-iso:   Updating   : gnupg2-2.0.22-5.el7_5.x86_64                              57/221
#     virtualbox-iso:   Updating   : rhn-client-tools-2.0.2-21.0.9.el7.noarch                  58/221
#     virtualbox-iso:   Updating   : selinux-policy-3.13.1-192.0.6.el7_5.6.noarch              59/221
#     virtualbox-iso:   Updating   : kernel-headers-3.10.0-862.14.4.el7.x86_64                 60/221
#     virtualbox-iso:   Updating   : glibc-headers-2.17-222.0.7.el7.x86_64                     61/221
#     virtualbox-iso:   Updating   : glibc-devel-2.17-222.0.7.el7.x86_64                       62/221
#     virtualbox-iso:   Updating   : gcc-4.8.5-28.0.1.el7_5.1.x86_64                           63/221
#     virtualbox-iso:   Updating   : libdtrace-ctf-0.8.0-1.el7.x86_64                          64/221
#     virtualbox-iso:   Updating   : 32:bind-license-9.9.4-61.el7_5.1.noarch                   65/221
#     virtualbox-iso:   Updating   : 32:bind-libs-lite-9.9.4-61.el7_5.1.x86_64                 66/221
#     virtualbox-iso:   Updating   : 12:dhclient-4.2.5-68.0.1.el7_5.1.x86_64                   67/221
#     virtualbox-iso:   Updating   : dracut-network-033-535.0.5.el7_5.1.x86_64                 68/221
#     virtualbox-iso:   Updating   : firewalld-filesystem-0.4.4.4-15.el7_5.noarch              69/221
#     virtualbox-iso:   Updating   : firewalld-0.4.4.4-15.el7_5.noarch                         70/221
#     virtualbox-iso:   Updating   : kexec-tools-2.0.15-13.0.1.el7_5.2.x86_64                  71/221
#     virtualbox-iso:   Installing : kernel-uek-devel-4.1.12-124.20.3.el7uek.x86_64            72/221
#     virtualbox-iso:   Updating   : selinux-policy-targeted-3.13.1-192.0.6.el7_5.6.noarch     73/221
#     virtualbox-iso:   Updating   : rhn-check-2.0.2-21.0.9.el7.noarch                         74/221
#     virtualbox-iso:   Updating   : rhn-setup-2.0.2-21.0.9.el7.noarch                         75/221
#     virtualbox-iso:   Updating   : tuned-2.9.0-1.el7_5.2.noarch                              76/221
#     virtualbox-iso:   Updating   : audit-2.8.1-3.el7_5.1.x86_64                              77/221
#     virtualbox-iso:   Updating   : libselinux-python-2.5-12.0.1.el7.x86_64                   78/221
#     virtualbox-iso:   Updating   : sudo-1.8.19p2-14.el7_5.x86_64                             79/221
#     virtualbox-iso:   Updating   : 1:openssl-1.0.2k-12.0.3.el7.x86_64                        80/221
#     virtualbox-iso:   Updating   : 1:mariadb-libs-5.5.60-1.el7_5.x86_64                      81/221
#     virtualbox-iso:   Installing : kernel-uek-4.1.12-124.20.3.el7uek.x86_64                  82/221
#     virtualbox-iso:   Updating   : kernel-tools-3.10.0-862.14.4.el7.x86_64                   83/221
#     virtualbox-iso:   Updating   : e2fsprogs-1.42.9-12.el7_5.x86_64                          84/221
#     virtualbox-iso:   Updating   : dracut-config-rescue-033-535.0.5.el7_5.1.x86_64           85/221
#     virtualbox-iso:   Installing : kernel-3.10.0-862.14.4.el7.x86_64                         86/221
#     virtualbox-iso:   Updating   : 7:lvm2-2.02.177-4.0.2.el7.x86_64                          87/221
#     virtualbox-iso:   Updating   : 10:qemu-guest-agent-2.8.0-2.el7_5.1.x86_64                88/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-tui-1.10.2-16.el7_5.x86_64               89/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-team-1.10.2-16.el7_5.x86_64              90/221
#     virtualbox-iso:   Updating   : 2:microcode_ctl-2.1-29.16.0.1.el7_5.x86_64                91/221
#     virtualbox-iso:   Updating   : rsyslog-8.24.0-16.el7_5.4.x86_64                          92/221
#     virtualbox-iso:   Updating   : libgudev1-219-57.0.1.el7_5.3.x86_64                       93/221
#     virtualbox-iso:   Updating   : libselinux-utils-2.5-12.0.1.el7.x86_64                    94/221
#     virtualbox-iso:   Updating   : iwl3945-firmware-15.32.2.9-999.el7.noarch                 95/221
#     virtualbox-iso:   Updating   : iwl100-firmware-39.31.5.1-999.el7.noarch                  96/221
#     virtualbox-iso:   Updating   : iwl6050-firmware-41.28.5.1-999.el7.noarch                 97/221
#     virtualbox-iso:   Updating   : iwl135-firmware-18.168.6.1-999.el7.noarch                 98/221
#     virtualbox-iso:   Updating   : iwl6000g2a-firmware-17.168.5.3-999.el7.noarch             99/221
#     virtualbox-iso:   Installing : kernel-devel-3.10.0-862.14.4.el7.x86_64                  100/221
#     virtualbox-iso:   Updating   : 1:iwl1000-firmware-39.31.5.1-999.el7.noarch              101/221
#     virtualbox-iso:   Updating   : iwl4965-firmware-228.61.2.24-999.el7.noarch              102/221
#     virtualbox-iso:   Updating   : iwl6000-firmware-9.221.4.1-999.el7.noarch                103/221
#     virtualbox-iso:   Updating   : iwl2030-firmware-18.168.6.1-999.el7.noarch               104/221
#     virtualbox-iso:   Updating   : iwl105-firmware-18.168.6.1-999.el7.noarch                105/221
#     virtualbox-iso:   Updating   : iwl7265-firmware-22.0.7.0-999.el7.noarch                 106/221
#     virtualbox-iso:   Updating   : iwl5000-firmware-8.83.5.1_1-999.el7.noarch               107/221
#     virtualbox-iso:   Updating   : iwl6000g2b-firmware-17.168.5.2-999.el7.noarch            108/221
#     virtualbox-iso:   Updating   : iwl2000-firmware-18.168.6.1-999.el7.noarch               109/221
#     virtualbox-iso:   Updating   : iwl7260-firmware-22.0.7.0-999.el7.noarch                 110/221
#     virtualbox-iso:   Updating   : iwl5150-firmware-8.24.2.2-999.el7.noarch                 111/221
#     virtualbox-iso:   Updating   : iwl3160-firmware-22.0.7.0-999.el7.noarch                 112/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-config-server-1.10.2-16.el7_5.noarch    113/221
#     virtualbox-iso:   Cleanup    : tuned-2.9.0-1.el7.noarch                                 114/221
#     virtualbox-iso:   Cleanup    : firewalld-0.4.4.4-14.el7.noarch                          115/221
#     virtualbox-iso:   Cleanup    : rhn-setup-2.0.2-21.0.3.el7.noarch                        116/221
#     virtualbox-iso:   Cleanup    : rhn-check-2.0.2-21.0.3.el7.noarch                        117/221
#     virtualbox-iso:   Cleanup    : rhn-client-tools-2.0.2-21.0.3.el7.noarch                 118/221
#     virtualbox-iso:   Cleanup    : 7:oraclelinux-release-7.5-1.0.3.el7.x86_64               119/221
#     virtualbox-iso:   Cleanup    : yum-3.4.3-158.0.1.el7.noarch                             120/221
#     virtualbox-iso:   Cleanup    : python-firewall-0.4.4.4-14.el7.noarch                    121/221
#     virtualbox-iso:   Cleanup    : selinux-policy-targeted-3.13.1-192.0.1.el7.noarch        122/221
#     virtualbox-iso:   Cleanup    : dracut-config-rescue-033-535.0.1.el7.x86_64              123/221
#     virtualbox-iso:   Cleanup    : selinux-policy-3.13.1-192.0.1.el7.noarch                 124/221
#     virtualbox-iso:   Cleanup    : firewalld-filesystem-0.4.4.4-14.el7.noarch               125/221
#     virtualbox-iso:   Cleanup    : iwl3945-firmware-15.32.2.9-62.el7.noarch                 126/221
#     virtualbox-iso:   Cleanup    : iwl100-firmware-39.31.5.1-62.el7.noarch                  127/221
#     virtualbox-iso:   Cleanup    : iwl6050-firmware-41.28.5.1-62.el7.noarch                 128/221
#     virtualbox-iso:   Cleanup    : iwl135-firmware-18.168.6.1-62.el7.noarch                 129/221
#     virtualbox-iso:   Cleanup    : linux-firmware-20180220-62.git6d51311.0.1.el7.noarch     130/221
#     virtualbox-iso:   Cleanup    : iwl6000g2a-firmware-17.168.5.3-62.el7.noarch             131/221
#     virtualbox-iso:   Cleanup    : 1:iwl1000-firmware-39.31.5.1-62.el7.noarch               132/221
#     virtualbox-iso:   Cleanup    : iwl4965-firmware-228.61.2.24-62.el7.noarch               133/221
#     virtualbox-iso:   Cleanup    : iwl6000-firmware-9.221.4.1-62.el7.noarch                 134/221
#     virtualbox-iso:   Cleanup    : iwl2030-firmware-18.168.6.1-62.el7.noarch                135/221
#     virtualbox-iso:   Cleanup    : iwl105-firmware-18.168.6.1-62.el7.noarch                 136/221
#     virtualbox-iso:   Cleanup    : iwl7265-firmware-22.0.7.0-62.el7.noarch                  137/221
#     virtualbox-iso:   Cleanup    : iwl5000-firmware-8.83.5.1_1-62.el7.noarch                138/221
#     virtualbox-iso:   Cleanup    : iwl6000g2b-firmware-17.168.5.2-62.el7.noarch             139/221
#     virtualbox-iso:   Cleanup    : iwl2000-firmware-18.168.6.1-62.el7.noarch                140/221
#     virtualbox-iso:   Cleanup    : iwl7260-firmware-22.0.7.0-62.el7.noarch                  141/221
#     virtualbox-iso:   Cleanup    : iwl5150-firmware-8.24.2.2-62.el7.noarch                  142/221
#     virtualbox-iso:   Cleanup    : iwl3160-firmware-22.0.7.0-62.el7.noarch                  143/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-config-server-1.10.2-13.el7.noarch      144/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-tui-1.10.2-13.el7.x86_64                145/221
#     virtualbox-iso:   Cleanup    : 7:lvm2-2.02.177-4.0.1.el7.x86_64                         146/221
#     virtualbox-iso:   Cleanup    : 7:lvm2-libs-2.02.177-4.0.1.el7.x86_64                    147/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-event-1.02.146-4.0.1.el7.x86_64          148/221
#     virtualbox-iso:   Cleanup    : e2fsprogs-1.42.9-11.0.1.el7.x86_64                       149/221
#     virtualbox-iso:   Cleanup    : 1:openssl-1.0.2k-12.0.1.el7.x86_64                       150/221
#     virtualbox-iso:   Cleanup    : rsyslog-8.24.0-16.el7.x86_64                             151/221
#     virtualbox-iso:   Cleanup    : 1:mariadb-libs-5.5.56-2.el7.x86_64                       152/221
#     virtualbox-iso:   Cleanup    : audit-2.8.1-3.el7.x86_64                                 153/221
#     virtualbox-iso:   Cleanup    : 10:qemu-guest-agent-2.8.0-2.el7.x86_64                   154/221
#     virtualbox-iso:   Cleanup    : sudo-1.8.19p2-13.el7.x86_64                              155/221
#     virtualbox-iso:   Cleanup    : python-perf-3.10.0-862.el7.x86_64                        156/221
#     virtualbox-iso:   Cleanup    : libselinux-python-2.5-12.el7.x86_64                      157/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-team-1.10.2-13.el7.x86_64               158/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-1.10.2-13.el7.x86_64                    159/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-libnm-1.10.2-13.el7.x86_64              160/221
#     virtualbox-iso:   Cleanup    : polkit-0.112-14.el7.x86_64                               161/221
#     virtualbox-iso:   Cleanup    : libgudev1-219-57.0.1.el7.x86_64                          162/221
#     virtualbox-iso:   Cleanup    : kexec-tools-2.0.15-13.0.1.el7.x86_64                     163/221
#     virtualbox-iso:   Cleanup    : kernel-tools-3.10.0-862.el7.x86_64                       164/221
#     virtualbox-iso:   Cleanup    : libstdc++-4.8.5-28.0.1.el7.x86_64                        165/221
#     virtualbox-iso:   Cleanup    : e2fsprogs-libs-1.42.9-11.0.1.el7.x86_64                  166/221
#     virtualbox-iso:   Cleanup    : libss-1.42.9-11.0.1.el7.x86_64                           167/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-event-libs-1.02.146-4.0.1.el7.x86_64     168/221
#     virtualbox-iso:   Cleanup    : gnupg2-2.0.22-4.el7.x86_64                               169/221
#     virtualbox-iso:   Cleanup    : libselinux-utils-2.5-12.el7.x86_64                       170/221
#     virtualbox-iso:   Cleanup    : dracut-network-033-535.0.1.el7.x86_64                    171/221
#     virtualbox-iso:   Cleanup    : 12:dhclient-4.2.5-68.0.1.el7.x86_64                      172/221
#     virtualbox-iso:   Cleanup    : systemd-sysv-219-57.0.1.el7.x86_64                       173/221
#     virtualbox-iso:   Cleanup    : 32:bind-libs-lite-9.9.4-61.el7.x86_64                    174/221
#     virtualbox-iso:   Cleanup    : dracut-033-535.0.1.el7.x86_64                            175/221
#     virtualbox-iso:   Cleanup    : initscripts-9.49.41-1.0.1.el7.x86_64                     176/221
#     virtualbox-iso:   Cleanup    : python-2.7.5-68.0.1.el7.x86_64                           177/221
#     virtualbox-iso:   Cleanup    : python-libs-2.7.5-68.0.1.el7.x86_64                      178/221
#     virtualbox-iso:   Cleanup    : procps-ng-3.3.10-17.el7.x86_64                           179/221
#     virtualbox-iso:   Cleanup    : kpartx-0.4.9-119.el7.x86_64                              180/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-1.02.146-4.0.1.el7.x86_64                181/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-libs-1.02.146-4.0.1.el7.x86_64           182/221
#     virtualbox-iso:   Cleanup    : util-linux-2.23.2-52.el7.x86_64                          183/221
#     virtualbox-iso:   Cleanup    : libdtrace-ctf-0.7.0-1.el7.x86_64                         184/221
#     virtualbox-iso:   Cleanup    : gcc-4.8.5-28.0.1.el7.x86_64                              185/221
#     virtualbox-iso:   Cleanup    : glibc-devel-2.17-222.el7.x86_64                          186/221
#     virtualbox-iso:   Cleanup    : glibc-headers-2.17-222.el7.x86_64                        187/221
#     virtualbox-iso:   Cleanup    : 2:microcode_ctl-2.1-29.0.2.el7.x86_64                    188/221
#     virtualbox-iso:   Cleanup    : 12:dhcp-common-4.2.5-68.0.1.el7.x86_64                   189/221
#     virtualbox-iso:   Cleanup    : systemd-219-57.0.1.el7.x86_64                            190/221
#     virtualbox-iso:   Cleanup    : 12:dhcp-libs-4.2.5-68.0.1.el7.x86_64                     191/221
#     virtualbox-iso:   Cleanup    : openldap-2.4.44-13.el7.x86_64                            192/221
#     virtualbox-iso:   Cleanup    : nss-tools-3.34.0-4.el7.x86_64                            193/221
#     virtualbox-iso:   Cleanup    : nss-sysinit-3.34.0-4.el7.x86_64                          194/221
#     virtualbox-iso:   Cleanup    : nss-3.34.0-4.el7.x86_64                                  195/221
#     virtualbox-iso:   Cleanup    : nss-softokn-3.34.0-2.0.1.el7.x86_64                      196/221
#     virtualbox-iso:   Cleanup    : krb5-libs-1.15.1-18.el7.x86_64                           197/221
#     virtualbox-iso:   Cleanup    : 1:openssl-libs-1.0.2k-12.0.1.el7.x86_64                  198/221
#     virtualbox-iso:   Cleanup    : libmount-2.23.2-52.el7.x86_64                            199/221
#     virtualbox-iso:   Cleanup    : systemd-libs-219-57.0.1.el7.x86_64                       200/221
#     virtualbox-iso:   Cleanup    : libblkid-2.23.2-52.el7.x86_64                            201/221
#     virtualbox-iso:   Cleanup    : libuuid-2.23.2-52.el7.x86_64                             202/221
#     virtualbox-iso:   Cleanup    : libcom_err-1.42.9-11.0.1.el7.x86_64                      203/221
#     virtualbox-iso:   Cleanup    : audit-libs-2.8.1-3.el7.x86_64                            204/221
#     virtualbox-iso:   Cleanup    : binutils-2.27-27.base.el7.x86_64                         205/221
#     virtualbox-iso:   Cleanup    : cpp-4.8.5-28.0.1.el7.x86_64                              206/221
#     virtualbox-iso:   Cleanup    : libgomp-4.8.5-28.0.1.el7.x86_64                          207/221
#     virtualbox-iso:   Cleanup    : kernel-tools-libs-3.10.0-862.el7.x86_64                  208/221
#     virtualbox-iso:   Cleanup    : iptables-1.4.21-24.el7.x86_64                            209/221
#     virtualbox-iso:   Cleanup    : ca-certificates-2017.2.20-71.el7.noarch                  210/221
#     virtualbox-iso:   Cleanup    : 1:redhat-release-server-7.5-8.0.1.el7.x86_64             211/221
#     virtualbox-iso:   Cleanup    : kernel-headers-3.10.0-862.el7.x86_64                     212/221
#     virtualbox-iso:   Cleanup    : 32:bind-license-9.9.4-61.el7.noarch                      213/221
#     virtualbox-iso:   Cleanup    : libselinux-2.5-12.el7.x86_64                             214/221
#     virtualbox-iso:   Cleanup    : glibc-common-2.17-222.el7.x86_64                         215/221
#     virtualbox-iso:   Cleanup    : nspr-4.17.0-1.el7.x86_64                                 216/221
#     virtualbox-iso:   Cleanup    : nss-util-3.34.0-2.el7.x86_64                             217/221
#     virtualbox-iso:   Cleanup    : nss-softokn-freebl-3.34.0-2.0.1.el7.x86_64               218/221
#     virtualbox-iso:   Cleanup    : glibc-2.17-222.el7.x86_64                                219/221
#     virtualbox-iso:   Cleanup    : tzdata-2018c-1.el7.noarch                                220/221
#     virtualbox-iso:   Cleanup    : libgcc-4.8.5-28.0.1.el7.x86_64                           221/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-config-server-1.10.2-16.el7_5.noarch      1/221
#     virtualbox-iso:   Verifying  : nss-softokn-3.36.0-5.0.1.el7_5.x86_64                      2/221
#     virtualbox-iso:   Verifying  : firewalld-filesystem-0.4.4.4-15.el7_5.noarch               3/221
#     virtualbox-iso:   Verifying  : glibc-devel-2.17-222.0.7.el7.x86_64                        4/221
#     virtualbox-iso:   Verifying  : polkit-0.112-14.0.1.el7.x86_64                             5/221
#     virtualbox-iso:   Verifying  : rhn-check-2.0.2-21.0.9.el7.noarch                          6/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-1.02.146-4.0.2.el7.x86_64            7/221
#     virtualbox-iso:   Verifying  : iwl3160-firmware-22.0.7.0-999.el7.noarch                   8/221
#     virtualbox-iso:   Verifying  : iwl5150-firmware-8.24.2.2-999.el7.noarch                   9/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-tui-1.10.2-16.el7_5.x86_64               10/221
#     virtualbox-iso:   Verifying  : kernel-uek-4.1.12-124.20.3.el7uek.x86_64                  11/221
#     virtualbox-iso:   Verifying  : iwl7260-firmware-22.0.7.0-999.el7.noarch                  12/221
#     virtualbox-iso:   Verifying  : openldap-2.4.44-15.el7_5.x86_64                           13/221
#     virtualbox-iso:   Verifying  : 32:bind-license-9.9.4-61.el7_5.1.noarch                   14/221
#     virtualbox-iso:   Verifying  : iptables-1.4.21-24.1.el7_5.x86_64                         15/221
#     virtualbox-iso:   Verifying  : kernel-headers-3.10.0-862.14.4.el7.x86_64                 16/221
#     virtualbox-iso:   Verifying  : kernel-uek-devel-4.1.12-124.20.3.el7uek.x86_64            17/221
#     virtualbox-iso:   Verifying  : libcom_err-1.42.9-12.el7_5.x86_64                         18/221
#     virtualbox-iso:   Verifying  : nss-sysinit-3.36.0-7.el7_5.x86_64                         19/221
#     virtualbox-iso:   Verifying  : iwl2000-firmware-18.168.6.1-999.el7.noarch                20/221
#     virtualbox-iso:   Verifying  : 12:dhclient-4.2.5-68.0.1.el7_5.1.x86_64                   21/221
#     virtualbox-iso:   Verifying  : python-firewall-0.4.4.4-15.el7_5.noarch                   22/221
#     virtualbox-iso:   Verifying  : util-linux-2.23.2-52.el7_5.1.x86_64                       23/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-1.10.2-16.el7_5.x86_64                   24/221
#     virtualbox-iso:   Verifying  : iwl6000g2b-firmware-17.168.5.2-999.el7.noarch             25/221
#     virtualbox-iso:   Verifying  : glibc-headers-2.17-222.0.7.el7.x86_64                     26/221
#     virtualbox-iso:   Verifying  : iwl5000-firmware-8.83.5.1_1-999.el7.noarch                27/221
#     virtualbox-iso:   Verifying  : iwl7265-firmware-22.0.7.0-999.el7.noarch                  28/221
#     virtualbox-iso:   Verifying  : systemd-219-57.0.1.el7_5.3.x86_64                         29/221
#     virtualbox-iso:   Verifying  : 7:lvm2-2.02.177-4.0.2.el7.x86_64                          30/221
#     virtualbox-iso:   Verifying  : libuuid-2.23.2-52.el7_5.1.x86_64                          31/221
#     virtualbox-iso:   Verifying  : kpartx-0.4.9-119.el7_5.1.x86_64                           32/221
#     virtualbox-iso:   Verifying  : nss-util-3.36.0-1.el7_5.x86_64                            33/221
#     virtualbox-iso:   Verifying  : 32:bind-libs-lite-9.9.4-61.el7_5.1.x86_64                 34/221
#     virtualbox-iso:   Verifying  : 2:microcode_ctl-2.1-29.16.0.1.el7_5.x86_64                35/221
#     virtualbox-iso:   Verifying  : cpp-4.8.5-28.0.1.el7_5.1.x86_64                           36/221
#     virtualbox-iso:   Verifying  : 10:qemu-guest-agent-2.8.0-2.el7_5.1.x86_64                37/221
#     virtualbox-iso:   Verifying  : tuned-2.9.0-1.el7_5.2.noarch                              38/221
#     virtualbox-iso:   Verifying  : kernel-tools-3.10.0-862.14.4.el7.x86_64                   39/221
#     virtualbox-iso:   Verifying  : iwl105-firmware-18.168.6.1-999.el7.noarch                 40/221
#     virtualbox-iso:   Verifying  : e2fsprogs-1.42.9-12.el7_5.x86_64                          41/221
#     virtualbox-iso:   Verifying  : libselinux-utils-2.5-12.0.1.el7.x86_64                    42/221
#     virtualbox-iso:   Verifying  : selinux-policy-3.13.1-192.0.6.el7_5.6.noarch              43/221
#     virtualbox-iso:   Verifying  : rsyslog-8.24.0-16.el7_5.4.x86_64                          44/221
#     virtualbox-iso:   Verifying  : libstdc++-4.8.5-28.0.1.el7_5.1.x86_64                     45/221
#     virtualbox-iso:   Verifying  : sudo-1.8.19p2-14.el7_5.x86_64                             46/221
#     virtualbox-iso:   Verifying  : kexec-tools-2.0.15-13.0.1.el7_5.2.x86_64                  47/221
#     virtualbox-iso:   Verifying  : iwl2030-firmware-18.168.6.1-999.el7.noarch                48/221
#     virtualbox-iso:   Verifying  : ca-certificates-2018.2.22-70.0.el7_5.noarch               49/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-libs-1.02.146-4.0.2.el7.x86_64      50/221
#     virtualbox-iso:   Verifying  : dracut-config-rescue-033-535.0.5.el7_5.1.x86_64           51/221
#     virtualbox-iso:   Verifying  : iwl6000-firmware-9.221.4.1-999.el7.noarch                 52/221
#     virtualbox-iso:   Verifying  : libgudev1-219-57.0.1.el7_5.3.x86_64                       53/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-team-1.10.2-16.el7_5.x86_64              54/221
#     virtualbox-iso:   Verifying  : yum-3.4.3-158.0.2.el7.noarch                              55/221
#     virtualbox-iso:   Verifying  : kernel-3.10.0-862.14.4.el7.x86_64                         56/221
#     virtualbox-iso:   Verifying  : python-2.7.5-69.0.1.el7_5.x86_64                          57/221
#     virtualbox-iso:   Verifying  : systemd-libs-219-57.0.1.el7_5.3.x86_64                    58/221
#     virtualbox-iso:   Verifying  : iwl4965-firmware-228.61.2.24-999.el7.noarch               59/221
#     virtualbox-iso:   Verifying  : rhn-setup-2.0.2-21.0.9.el7.noarch                         60/221
#     virtualbox-iso:   Verifying  : gnupg2-2.0.22-5.el7_5.x86_64                              61/221
#     virtualbox-iso:   Verifying  : libss-1.42.9-12.el7_5.x86_64                              62/221
#     virtualbox-iso:   Verifying  : 1:iwl1000-firmware-39.31.5.1-999.el7.noarch               63/221
#     virtualbox-iso:   Verifying  : initscripts-9.49.41-1.0.4.el7_5.2.x86_64                  64/221
#     virtualbox-iso:   Verifying  : glibc-common-2.17-222.0.7.el7.x86_64                      65/221
#     virtualbox-iso:   Verifying  : 7:lvm2-libs-2.02.177-4.0.2.el7.x86_64                     66/221
#     virtualbox-iso:   Verifying  : 1:openssl-1.0.2k-12.0.3.el7.x86_64                        67/221
#     virtualbox-iso:   Verifying  : e2fsprogs-libs-1.42.9-12.el7_5.x86_64                     68/221
#     virtualbox-iso:   Verifying  : kernel-devel-3.10.0-862.14.4.el7.x86_64                   69/221
#     virtualbox-iso:   Verifying  : iwl6000g2a-firmware-17.168.5.3-999.el7.noarch             70/221
#     virtualbox-iso:   Verifying  : libselinux-python-2.5-12.0.1.el7.x86_64                   71/221
#     virtualbox-iso:   Verifying  : binutils-2.27-28.base.el7_5.1.x86_64                      72/221
#     virtualbox-iso:   Verifying  : procps-ng-3.3.10-17.el7_5.2.x86_64                        73/221
#     virtualbox-iso:   Verifying  : linux-firmware-20180906-999.git85c5d90f.el7.noarch        74/221
#     virtualbox-iso:   Verifying  : glibc-2.17-222.0.7.el7.x86_64                             75/221
#     virtualbox-iso:   Verifying  : 1:openssl-libs-1.0.2k-12.0.3.el7.x86_64                   76/221
#     virtualbox-iso:   Verifying  : audit-libs-2.8.1-3.el7_5.1.x86_64                         77/221
#     virtualbox-iso:   Verifying  : nspr-4.19.0-1.el7_5.x86_64                                78/221
#     virtualbox-iso:   Verifying  : firewalld-0.4.4.4-15.el7_5.noarch                         79/221
#     virtualbox-iso:   Verifying  : libdtrace-ctf-0.8.0-1.el7.x86_64                          80/221
#     virtualbox-iso:   Verifying  : python-libs-2.7.5-69.0.1.el7_5.x86_64                     81/221
#     virtualbox-iso:   Verifying  : libgomp-4.8.5-28.0.1.el7_5.1.x86_64                       82/221
#     virtualbox-iso:   Verifying  : kernel-tools-libs-3.10.0-862.14.4.el7.x86_64              83/221
#     virtualbox-iso:   Verifying  : 7:oraclelinux-release-7.5-1.0.5.el7.x86_64                84/221
#     virtualbox-iso:   Verifying  : systemd-sysv-219-57.0.1.el7_5.3.x86_64                    85/221
#     virtualbox-iso:   Verifying  : iwl135-firmware-18.168.6.1-999.el7.noarch                 86/221
#     virtualbox-iso:   Verifying  : iwl6050-firmware-41.28.5.1-999.el7.noarch                 87/221
#     virtualbox-iso:   Verifying  : audit-2.8.1-3.el7_5.1.x86_64                              88/221
#     virtualbox-iso:   Verifying  : iwl100-firmware-39.31.5.1-999.el7.noarch                  89/221
#     virtualbox-iso:   Verifying  : nss-3.36.0-7.el7_5.x86_64                                 90/221
#     virtualbox-iso:   Verifying  : nss-softokn-freebl-3.36.0-5.0.1.el7_5.x86_64              91/221
#     virtualbox-iso:   Verifying  : kernel-uek-firmware-4.1.12-124.20.3.el7uek.noarch         92/221
#     virtualbox-iso:   Verifying  : 1:redhat-release-server-7.5-8.0.5.el7.x86_64              93/221
#     virtualbox-iso:   Verifying  : 12:dhcp-libs-4.2.5-68.0.1.el7_5.1.x86_64                  94/221
#     virtualbox-iso:   Verifying  : selinux-policy-targeted-3.13.1-192.0.6.el7_5.6.noarch     95/221
#     virtualbox-iso:   Verifying  : 1:mariadb-libs-5.5.60-1.el7_5.x86_64                      96/221
#     virtualbox-iso:   Verifying  : 12:dhcp-common-4.2.5-68.0.1.el7_5.1.x86_64                97/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-libnm-1.10.2-16.el7_5.x86_64             98/221
#     virtualbox-iso:   Verifying  : libmount-2.23.2-52.el7_5.1.x86_64                         99/221
#     virtualbox-iso:   Verifying  : rhn-client-tools-2.0.2-21.0.9.el7.noarch                 100/221
#     virtualbox-iso:   Verifying  : python-perf-3.10.0-862.14.4.el7.x86_64                   101/221
#     virtualbox-iso:   Verifying  : libblkid-2.23.2-52.el7_5.1.x86_64                        102/221
#     virtualbox-iso:   Verifying  : gcc-4.8.5-28.0.1.el7_5.1.x86_64                          103/221
#     virtualbox-iso:   Verifying  : tzdata-2018e-3.el7.noarch                                104/221
#     virtualbox-iso:   Verifying  : krb5-libs-1.15.1-19.el7.x86_64                           105/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-1.02.146-4.0.2.el7.x86_64                106/221
#     virtualbox-iso:   Verifying  : dracut-033-535.0.5.el7_5.1.x86_64                        107/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-libs-1.02.146-4.0.2.el7.x86_64           108/221
#     virtualbox-iso:   Verifying  : iwl3945-firmware-15.32.2.9-999.el7.noarch                109/221
#     virtualbox-iso:   Verifying  : nss-tools-3.36.0-7.el7_5.x86_64                          110/221
#     virtualbox-iso:   Verifying  : dracut-network-033-535.0.5.el7_5.1.x86_64                111/221
#     virtualbox-iso:   Verifying  : libgcc-4.8.5-28.0.1.el7_5.1.x86_64                       112/221
#     virtualbox-iso:   Verifying  : libselinux-2.5-12.0.1.el7.x86_64                         113/221
#     virtualbox-iso:   Verifying  : libselinux-python-2.5-12.el7.x86_64                      114/221
#     virtualbox-iso:   Verifying  : 12:dhcp-common-4.2.5-68.0.1.el7.x86_64                   115/221
#     virtualbox-iso:   Verifying  : nss-sysinit-3.34.0-4.el7.x86_64                          116/221
#     virtualbox-iso:   Verifying  : iwl5150-firmware-8.24.2.2-62.el7.noarch                  117/221
#     virtualbox-iso:   Verifying  : glibc-devel-2.17-222.el7.x86_64                          118/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-libs-1.02.146-4.0.1.el7.x86_64     119/221
#     virtualbox-iso:   Verifying  : iwl4965-firmware-228.61.2.24-62.el7.noarch               120/221
#     virtualbox-iso:   Verifying  : iwl6000-firmware-9.221.4.1-62.el7.noarch                 121/221
#     virtualbox-iso:   Verifying  : 1:iwl1000-firmware-39.31.5.1-62.el7.noarch               122/221
#     virtualbox-iso:   Verifying  : polkit-0.112-14.el7.x86_64                               123/221
#     virtualbox-iso:   Verifying  : libss-1.42.9-11.0.1.el7.x86_64                           124/221
#     virtualbox-iso:   Verifying  : 1:redhat-release-server-7.5-8.0.1.el7.x86_64             125/221
#     virtualbox-iso:   Verifying  : iwl7265-firmware-22.0.7.0-62.el7.noarch                  126/221
#     virtualbox-iso:   Verifying  : systemd-219-57.0.1.el7.x86_64                            127/221
#     virtualbox-iso:   Verifying  : python-libs-2.7.5-68.0.1.el7.x86_64                      128/221
#     virtualbox-iso:   Verifying  : firewalld-0.4.4.4-14.el7.noarch                          129/221
#     virtualbox-iso:   Verifying  : libgomp-4.8.5-28.0.1.el7.x86_64                          130/221
#     virtualbox-iso:   Verifying  : initscripts-9.49.41-1.0.1.el7.x86_64                     131/221
#     virtualbox-iso:   Verifying  : gcc-4.8.5-28.0.1.el7.x86_64                              132/221
#     virtualbox-iso:   Verifying  : libgudev1-219-57.0.1.el7.x86_64                          133/221
#     virtualbox-iso:   Verifying  : cpp-4.8.5-28.0.1.el7.x86_64                              134/221
#     virtualbox-iso:   Verifying  : util-linux-2.23.2-52.el7.x86_64                          135/221
#     virtualbox-iso:   Verifying  : nss-3.34.0-4.el7.x86_64                                  136/221
#     virtualbox-iso:   Verifying  : tzdata-2018c-1.el7.noarch                                137/221
#     virtualbox-iso:   Verifying  : yum-3.4.3-158.0.1.el7.noarch                             138/221
#     virtualbox-iso:   Verifying  : nss-util-3.34.0-2.el7.x86_64                             139/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-config-server-1.10.2-13.el7.noarch      140/221
#     virtualbox-iso:   Verifying  : libselinux-2.5-12.el7.x86_64                             141/221
#     virtualbox-iso:   Verifying  : iptables-1.4.21-24.el7.x86_64                            142/221
#     virtualbox-iso:   Verifying  : 32:bind-libs-lite-9.9.4-61.el7.x86_64                    143/221
#     virtualbox-iso:   Verifying  : gnupg2-2.0.22-4.el7.x86_64                               144/221
#     virtualbox-iso:   Verifying  : selinux-policy-targeted-3.13.1-192.0.1.el7.noarch        145/221
#     virtualbox-iso:   Verifying  : nss-softokn-freebl-3.34.0-2.0.1.el7.x86_64               146/221
#     virtualbox-iso:   Verifying  : iwl3160-firmware-22.0.7.0-62.el7.noarch                  147/221
#     virtualbox-iso:   Verifying  : dracut-config-rescue-033-535.0.1.el7.x86_64              148/221
#     virtualbox-iso:   Verifying  : 12:dhcp-libs-4.2.5-68.0.1.el7.x86_64                     149/221
#     virtualbox-iso:   Verifying  : iwl100-firmware-39.31.5.1-62.el7.noarch                  150/221
#     virtualbox-iso:   Verifying  : 2:microcode_ctl-2.1-29.0.2.el7.x86_64                    151/221
#     virtualbox-iso:   Verifying  : iwl6050-firmware-41.28.5.1-62.el7.noarch                 152/221
#     virtualbox-iso:   Verifying  : iwl6000g2a-firmware-17.168.5.3-62.el7.noarch             153/221
#     virtualbox-iso:   Verifying  : iwl7260-firmware-22.0.7.0-62.el7.noarch                  154/221
#     virtualbox-iso:   Verifying  : libdtrace-ctf-0.7.0-1.el7.x86_64                         155/221
#     virtualbox-iso:   Verifying  : 1:openssl-1.0.2k-12.0.1.el7.x86_64                       156/221
#     virtualbox-iso:   Verifying  : binutils-2.27-27.base.el7.x86_64                         157/221
#     virtualbox-iso:   Verifying  : tuned-2.9.0-1.el7.noarch                                 158/221
#     virtualbox-iso:   Verifying  : libgcc-4.8.5-28.0.1.el7.x86_64                           159/221
#     virtualbox-iso:   Verifying  : glibc-2.17-222.el7.x86_64                                160/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-1.10.2-13.el7.x86_64                    161/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-libs-1.02.146-4.0.1.el7.x86_64           162/221
#     virtualbox-iso:   Verifying  : 1:openssl-libs-1.0.2k-12.0.1.el7.x86_64                  163/221
#     virtualbox-iso:   Verifying  : audit-2.8.1-3.el7.x86_64                                 164/221
#     virtualbox-iso:   Verifying  : e2fsprogs-libs-1.42.9-11.0.1.el7.x86_64                  165/221
#     virtualbox-iso:   Verifying  : dracut-033-535.0.1.el7.x86_64                            166/221
#     virtualbox-iso:   Verifying  : kernel-tools-libs-3.10.0-862.el7.x86_64                  167/221
#     virtualbox-iso:   Verifying  : iwl105-firmware-18.168.6.1-62.el7.noarch                 168/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-1.02.146-4.0.1.el7.x86_64          169/221
#     virtualbox-iso:   Verifying  : 32:bind-license-9.9.4-61.el7.noarch                      170/221
#     virtualbox-iso:   Verifying  : kernel-headers-3.10.0-862.el7.x86_64                     171/221
#     virtualbox-iso:   Verifying  : iwl3945-firmware-15.32.2.9-62.el7.noarch                 172/221
#     virtualbox-iso:   Verifying  : 10:qemu-guest-agent-2.8.0-2.el7.x86_64                   173/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-tui-1.10.2-13.el7.x86_64                174/221
#     virtualbox-iso:   Verifying  : ca-certificates-2017.2.20-71.el7.noarch                  175/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-libnm-1.10.2-13.el7.x86_64              176/221
#     virtualbox-iso:   Verifying  : libuuid-2.23.2-52.el7.x86_64                             177/221
#     virtualbox-iso:   Verifying  : linux-firmware-20180220-62.git6d51311.0.1.el7.noarch     178/221
#     virtualbox-iso:   Verifying  : libcom_err-1.42.9-11.0.1.el7.x86_64                      179/221
#     virtualbox-iso:   Verifying  : 7:oraclelinux-release-7.5-1.0.3.el7.x86_64               180/221
#     virtualbox-iso:   Verifying  : rsyslog-8.24.0-16.el7.x86_64                             181/221
#     virtualbox-iso:   Verifying  : libmount-2.23.2-52.el7.x86_64                            182/221
#     virtualbox-iso:   Verifying  : audit-libs-2.8.1-3.el7.x86_64                            183/221
#     virtualbox-iso:   Verifying  : rhn-setup-2.0.2-21.0.3.el7.noarch                        184/221
#     virtualbox-iso:   Verifying  : iwl6000g2b-firmware-17.168.5.2-62.el7.noarch             185/221
#     virtualbox-iso:   Verifying  : openldap-2.4.44-13.el7.x86_64                            186/221
#     virtualbox-iso:   Verifying  : selinux-policy-3.13.1-192.0.1.el7.noarch                 187/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-team-1.10.2-13.el7.x86_64               188/221
#     virtualbox-iso:   Verifying  : firewalld-filesystem-0.4.4.4-14.el7.noarch               189/221
#     virtualbox-iso:   Verifying  : rhn-client-tools-2.0.2-21.0.3.el7.noarch                 190/221
#     virtualbox-iso:   Verifying  : 7:lvm2-libs-2.02.177-4.0.1.el7.x86_64                    191/221
#     virtualbox-iso:   Verifying  : kexec-tools-2.0.15-13.0.1.el7.x86_64                     192/221
#     virtualbox-iso:   Verifying  : nspr-4.17.0-1.el7.x86_64                                 193/221
#     virtualbox-iso:   Verifying  : iwl2000-firmware-18.168.6.1-62.el7.noarch                194/221
#     virtualbox-iso:   Verifying  : systemd-libs-219-57.0.1.el7.x86_64                       195/221
#     virtualbox-iso:   Verifying  : iwl2030-firmware-18.168.6.1-62.el7.noarch                196/221
#     virtualbox-iso:   Verifying  : glibc-common-2.17-222.el7.x86_64                         197/221
#     virtualbox-iso:   Verifying  : sudo-1.8.19p2-13.el7.x86_64                              198/221
#     virtualbox-iso:   Verifying  : python-perf-3.10.0-862.el7.x86_64                        199/221
#     virtualbox-iso:   Verifying  : iwl135-firmware-18.168.6.1-62.el7.noarch                 200/221
#     virtualbox-iso:   Verifying  : nss-softokn-3.34.0-2.0.1.el7.x86_64                      201/221
#     virtualbox-iso:   Verifying  : procps-ng-3.3.10-17.el7.x86_64                           202/221
#     virtualbox-iso:   Verifying  : python-2.7.5-68.0.1.el7.x86_64                           203/221
#     virtualbox-iso:   Verifying  : libselinux-utils-2.5-12.el7.x86_64                       204/221
#     virtualbox-iso:   Verifying  : glibc-headers-2.17-222.el7.x86_64                        205/221
#     virtualbox-iso:   Verifying  : libblkid-2.23.2-52.el7.x86_64                            206/221
#     virtualbox-iso:   Verifying  : python-firewall-0.4.4.4-14.el7.noarch                    207/221
#     virtualbox-iso:   Verifying  : nss-tools-3.34.0-4.el7.x86_64                            208/221
#     virtualbox-iso:   Verifying  : krb5-libs-1.15.1-18.el7.x86_64                           209/221
#     virtualbox-iso:   Verifying  : e2fsprogs-1.42.9-11.0.1.el7.x86_64                       210/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-1.02.146-4.0.1.el7.x86_64                211/221
#     virtualbox-iso:   Verifying  : kernel-tools-3.10.0-862.el7.x86_64                       212/221
#     virtualbox-iso:   Verifying  : dracut-network-033-535.0.1.el7.x86_64                    213/221
#     virtualbox-iso:   Verifying  : 12:dhclient-4.2.5-68.0.1.el7.x86_64                      214/221
#     virtualbox-iso:   Verifying  : rhn-check-2.0.2-21.0.3.el7.noarch                        215/221
#     virtualbox-iso:   Verifying  : kpartx-0.4.9-119.el7.x86_64                              216/221
#     virtualbox-iso:   Verifying  : iwl5000-firmware-8.83.5.1_1-62.el7.noarch                217/221
#     virtualbox-iso:   Verifying  : libstdc++-4.8.5-28.0.1.el7.x86_64                        218/221
#     virtualbox-iso:   Verifying  : systemd-sysv-219-57.0.1.el7.x86_64                       219/221
#     virtualbox-iso:   Verifying  : 1:mariadb-libs-5.5.56-2.el7.x86_64                       220/221
#     virtualbox-iso:   Verifying  : 7:lvm2-2.02.177-4.0.1.el7.x86_64                         221/221
#     virtualbox-iso:
#     virtualbox-iso: Installed:
#     virtualbox-iso:   kernel.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-devel.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-uek.x86_64 0:4.1.12-124.20.3.el7uek
#     virtualbox-iso:   kernel-uek-devel.x86_64 0:4.1.12-124.20.3.el7uek
#     virtualbox-iso:   kernel-uek-firmware.noarch 0:4.1.12-124.20.3.el7uek
#     virtualbox-iso:
#     virtualbox-iso: Updated:
#     virtualbox-iso:   NetworkManager.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-config-server.noarch 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-libnm.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-team.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-tui.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   audit.x86_64 0:2.8.1-3.el7_5.1
#     virtualbox-iso:   audit-libs.x86_64 0:2.8.1-3.el7_5.1
#     virtualbox-iso:   bind-libs-lite.x86_64 32:9.9.4-61.el7_5.1
#     virtualbox-iso:   bind-license.noarch 32:9.9.4-61.el7_5.1
#     virtualbox-iso:   binutils.x86_64 0:2.27-28.base.el7_5.1
#     virtualbox-iso:   ca-certificates.noarch 0:2018.2.22-70.0.el7_5
#     virtualbox-iso:   cpp.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   device-mapper.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   device-mapper-event.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   device-mapper-event-libs.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   device-mapper-libs.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   dhclient.x86_64 12:4.2.5-68.0.1.el7_5.1
#     virtualbox-iso:   dhcp-common.x86_64 12:4.2.5-68.0.1.el7_5.1
#     virtualbox-iso:   dhcp-libs.x86_64 12:4.2.5-68.0.1.el7_5.1
#     virtualbox-iso:   dracut.x86_64 0:033-535.0.5.el7_5.1
#     virtualbox-iso:   dracut-config-rescue.x86_64 0:033-535.0.5.el7_5.1
#     virtualbox-iso:   dracut-network.x86_64 0:033-535.0.5.el7_5.1
#     virtualbox-iso:   e2fsprogs.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   e2fsprogs-libs.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   firewalld.noarch 0:0.4.4.4-15.el7_5
#     virtualbox-iso:   firewalld-filesystem.noarch 0:0.4.4.4-15.el7_5
#     virtualbox-iso:   gcc.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   glibc.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   glibc-common.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   glibc-devel.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   glibc-headers.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   gnupg2.x86_64 0:2.0.22-5.el7_5
#     virtualbox-iso:   initscripts.x86_64 0:9.49.41-1.0.4.el7_5.2
#     virtualbox-iso:   iptables.x86_64 0:1.4.21-24.1.el7_5
#     virtualbox-iso:   iwl100-firmware.noarch 0:39.31.5.1-999.el7
#     virtualbox-iso:   iwl1000-firmware.noarch 1:39.31.5.1-999.el7
#     virtualbox-iso:   iwl105-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl135-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl2000-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl2030-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl3160-firmware.noarch 0:22.0.7.0-999.el7
#     virtualbox-iso:   iwl3945-firmware.noarch 0:15.32.2.9-999.el7
#     virtualbox-iso:   iwl4965-firmware.noarch 0:228.61.2.24-999.el7
#     virtualbox-iso:   iwl5000-firmware.noarch 0:8.83.5.1_1-999.el7
#     virtualbox-iso:   iwl5150-firmware.noarch 0:8.24.2.2-999.el7
#     virtualbox-iso:   iwl6000-firmware.noarch 0:9.221.4.1-999.el7
#     virtualbox-iso:   iwl6000g2a-firmware.noarch 0:17.168.5.3-999.el7
#     virtualbox-iso:   iwl6000g2b-firmware.noarch 0:17.168.5.2-999.el7
#     virtualbox-iso:   iwl6050-firmware.noarch 0:41.28.5.1-999.el7
#     virtualbox-iso:   iwl7260-firmware.noarch 0:22.0.7.0-999.el7
#     virtualbox-iso:   iwl7265-firmware.noarch 0:22.0.7.0-999.el7
#     virtualbox-iso:   kernel-headers.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-tools.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-tools-libs.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kexec-tools.x86_64 0:2.0.15-13.0.1.el7_5.2
#     virtualbox-iso:   kpartx.x86_64 0:0.4.9-119.el7_5.1
#     virtualbox-iso:   krb5-libs.x86_64 0:1.15.1-19.el7
#     virtualbox-iso:   libblkid.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   libcom_err.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   libdtrace-ctf.x86_64 0:0.8.0-1.el7
#     virtualbox-iso:   libgcc.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   libgomp.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   libgudev1.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   libmount.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   libselinux.x86_64 0:2.5-12.0.1.el7
#     virtualbox-iso:   libselinux-python.x86_64 0:2.5-12.0.1.el7
#     virtualbox-iso:   libselinux-utils.x86_64 0:2.5-12.0.1.el7
#     virtualbox-iso:   libss.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   libstdc++.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   libuuid.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   linux-firmware.noarch 0:20180906-999.git85c5d90f.el7
#     virtualbox-iso:   lvm2.x86_64 7:2.02.177-4.0.2.el7
#     virtualbox-iso:   lvm2-libs.x86_64 7:2.02.177-4.0.2.el7
#     virtualbox-iso:   mariadb-libs.x86_64 1:5.5.60-1.el7_5
#     virtualbox-iso:   microcode_ctl.x86_64 2:2.1-29.16.0.1.el7_5
#     virtualbox-iso:   nspr.x86_64 0:4.19.0-1.el7_5
#     virtualbox-iso:   nss.x86_64 0:3.36.0-7.el7_5
#     virtualbox-iso:   nss-softokn.x86_64 0:3.36.0-5.0.1.el7_5
#     virtualbox-iso:   nss-softokn-freebl.x86_64 0:3.36.0-5.0.1.el7_5
#     virtualbox-iso:   nss-sysinit.x86_64 0:3.36.0-7.el7_5
#     virtualbox-iso:   nss-tools.x86_64 0:3.36.0-7.el7_5
#     virtualbox-iso:   nss-util.x86_64 0:3.36.0-1.el7_5
#     virtualbox-iso:   openldap.x86_64 0:2.4.44-15.el7_5
#     virtualbox-iso:   openssl.x86_64 1:1.0.2k-12.0.3.el7
#     virtualbox-iso:   openssl-libs.x86_64 1:1.0.2k-12.0.3.el7
#     virtualbox-iso:   oraclelinux-release.x86_64 7:7.5-1.0.5.el7
#     virtualbox-iso:   polkit.x86_64 0:0.112-14.0.1.el7
#     virtualbox-iso:   procps-ng.x86_64 0:3.3.10-17.el7_5.2
#     virtualbox-iso:   python.x86_64 0:2.7.5-69.0.1.el7_5
#     virtualbox-iso:   python-firewall.noarch 0:0.4.4.4-15.el7_5
#     virtualbox-iso:   python-libs.x86_64 0:2.7.5-69.0.1.el7_5
#     virtualbox-iso:   python-perf.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   qemu-guest-agent.x86_64 10:2.8.0-2.el7_5.1
#     virtualbox-iso:   redhat-release-server.x86_64 1:7.5-8.0.5.el7
#     virtualbox-iso:   rhn-check.noarch 0:2.0.2-21.0.9.el7
#     virtualbox-iso:   rhn-client-tools.noarch 0:2.0.2-21.0.9.el7
#     virtualbox-iso:   rhn-setup.noarch 0:2.0.2-21.0.9.el7
#     virtualbox-iso:   rsyslog.x86_64 0:8.24.0-16.el7_5.4
#     virtualbox-iso:   selinux-policy.noarch 0:3.13.1-192.0.6.el7_5.6
#     virtualbox-iso:   selinux-policy-targeted.noarch 0:3.13.1-192.0.6.el7_5.6
#     virtualbox-iso:   sudo.x86_64 0:1.8.19p2-14.el7_5
#     virtualbox-iso:   systemd.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   systemd-libs.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   systemd-sysv.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   tuned.noarch 0:2.9.0-1.el7_5.2
#     virtualbox-iso:   tzdata.noarch 0:2018e-3.el7
#     virtualbox-iso:   util-linux.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   yum.noarch 0:3.4.3-158.0.2.el7
#     virtualbox-iso:
#     virtualbox-iso: Complete!
#     virtualbox-iso: Loaded plugins: ulninfo
#     virtualbox-iso: Cleaning repos: ol7_UEKR4 ol7_latest
#     virtualbox-iso: Cleaning up everything
#     virtualbox-iso: Maybe you want: rm -rf /var/cache/yum, to also free up space taken by orphaned data from disabled or removed repos
#     virtualbox-iso: useradd: user 'vagrant' already exists
# ==> virtualbox-iso: Pausing 1s before the next provisioner...
# ==> virtualbox-iso: Provisioning with shell script: E:\tmp\packer-shell383972175
#     virtualbox-iso: Verifying archive integrity... All good.
#     virtualbox-iso: Uncompressing VirtualBox 5.1.28 Guest Additions for Linux...........
#     virtualbox-iso: VirtualBox Guest Additions installer
#     virtualbox-iso: Copying additional installer modules ...
#     virtualbox-iso: Installing additional modules ...
#     virtualbox-iso: vboxadd.sh: Starting the VirtualBox Guest Additions.
#     virtualbox-iso:
#     virtualbox-iso: Could not find the X.Org or XFree86 Window System, skipping.
# ==> virtualbox-iso: Gracefully halting virtual machine...
# ==> virtualbox-iso: Preparing to export machine...
#     virtualbox-iso: Deleting forwarded port mapping for the communicator (SSH, WinRM, etc) (host port 2751)
# ==> virtualbox-iso: Exporting virtual machine...
#     virtualbox-iso: Executing: export packer-ol75-base --output output-ol75-base.vzell.de\packer-ol75-base.ovf --manifest --vsys 0 --description Oracle Linux 7 Update 5
#     virtualbox-iso: 
#     virtualbox-iso: prepared by Dr. Volker Zell --version 0.9.0
# ==> virtualbox-iso: Keeping virtual machine registered with VirtualBox host (keep_registered = true)
# Build 'virtualbox-iso' finished.
# 
# ==> Builds finished. The artifacts of successful builds are:
# --> virtualbox-iso: VM files in directory: output-ol75-base.vzell.de
Resulting filesystem structure
tree -a /misc/packer
# /misc/packer
# └── ol
#     └── 7.5
#         ├── http
#         │   └── ks.cfg
#         ├── iso
#         │   ├── iso-info.json
#         │   └── V975367-01.iso
#         ├── output-ol75-base.vzell.de
#         │   ├── packer-ol75-base.ovf
#         │   ├── packer-ol75-base.mf
#         │   ├── packer-ol75-base.vdi
#         │   ├── packer-ol75-base-disk001.vmdk
#         ├── packer.json
#         └── packer_cache
# 
# 8 directories, 12 files
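Because keep_registered is set to true in packer.json, the freshly built VM also stays registered with VirtualBox. As an optional check (assuming vbm is the VBoxManage wrapper/alias used earlier in this document), list the registered VMs and look for packer-ol75-base:
vbm list vms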

Oracle Linux 7.5 with one additional data disk

export ol_ver=7.5-1d
mkdir -p /misc/packer/ol/${ol_ver}/{iso,http} && cd /misc/packer/ol/${ol_ver}
Move Oracle Linux DVD ISO image into place
mv -v /misc/packer/ol/7.5/iso/V975367-01.iso /misc/packer/ol/${ol_ver}/iso/V975367-01.iso
# '/misc/packer/ol/7.5/iso/V975367-01.iso' -> '/misc/packer/ol/7.5-1d/iso/V975367-01.iso'
Kickstart file
hostname=ol75-base
domain=vzell.de
swap_size=16376    # in MB (16 GB)
root_size=392192   # in MB (rest of the 400 GB disk after /boot and swap)
cat > http/ks.cfg <<-_EOF
#version=DEVEL

# Install OS instead of upgrade
install

# Reboot after installation
reboot

# System authorization information
auth --enableshadow --passalgo=sha512

# Additional yum repositories that may be used as sources for package installation
repo --name="Server-HighAvailability" --baseurl=file:///run/install/repo/addons/HighAvailability
repo --name="Server-ResilientStorage" --baseurl=file:///run/install/repo/addons/ResilientStorage

# Use CDROM installation media
cdrom

# Use text mode install
text

# Use graphical install
# graphical

# Do NOT Run the Setup Agent on first boot
firstboot --disable

# Keyboard layouts
keyboard --vckeymap=de --xlayouts='de'

# System language
lang en_US.UTF-8

# Network information
network --bootproto=dhcp --device=enp0s3 --ipv6=auto --activate
network --hostname=${hostname}.${domain}

# Root password
rootpw vagrant

# System services
services --disabled="chronyd"

# System timezone
timezone Europe/Berlin --isUtc --nontp

# Create additional user
user --name=vagrant --plaintext --password=vagrant --gecos="Vagrant"

# Specifies a list of disks for the installation program to use
ignoredisk --only-use=sda

# System bootloader configuration
bootloader --location=mbr --boot-drive=sda

# Partition clearing information
clearpart --none --initlabel

# Disk partitioning information
part pv.157 --fstype="lvmpv" --ondisk=sda --size=408575
part /boot --fstype="xfs" --ondisk=sda --size=1024
volgroup ol --pesize=4096 pv.157
logvol swap --fstype="swap" --size=${swap_size} --name=swap --vgname=ol
logvol / --fstype="xfs" --size=${root_size} --name=root --vgname=ol

# Firewall configuration
# firewall --enabled --service=ssh

# SELinux configuration
# selinux --enforcing

# Installation logging level
# logging --level=info

# Do not configure the X Window System
skipx

# Packages section (minimal + packages needed for building VirtualBox Guest Additions)
%packages --ignoremissing
@^minimal
@core
bzip2
gcc
make
kernel-uek
kernel-uek-devel
perl
%end

%addon com_redhat_kdump --disable --reserve-mb='auto'

%end

%anaconda
pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty
pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok
pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty
%end
_EOF
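Optionally, the generated kickstart file can be syntax-checked with ksvalidator from the pykickstart package. This is only a suggestion; pykickstart is not part of a standard Cygwin installation, so the check would have to run on a machine (or existing VM) that has it installed:
ksvalidator http/ks.cfg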
Packer JSON script
md5sum /misc/packer/ol/${ol_ver}/iso/V975367-01.iso | awk '{ print $1; }'
# 3be1a456984ada84f19c6ea89ccb027a
cat > iso/iso-info.json <<-_EOF
{
  "iso_url": "V975367-01.iso",
  "iso_checksum": "$(md5sum /misc/packer/ol/${ol_ver}/iso/V975367-01.iso | awk '{ print $1; }')",
  "iso_checksum_type": "md5"
}
_EOF
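A quick look at the generated file confirms that the MD5 sum computed above was substituted into iso_checksum; it should look like this:
cat iso/iso-info.json
# {
#   "iso_url": "V975367-01.iso",
#   "iso_checksum": "3be1a456984ada84f19c6ea89ccb027a",
#   "iso_checksum_type": "md5"
# }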
cat > packer.json <<-"_EOF"
{
  "variables": {
    "vm_name": "packer-ol75-1d-base",
    "vm_description": "{{env `vm_description`}}",
    "vm_version": "1.0.0",
    "group_name": "/Oracle Linux/Oracle Linux 7 Update 5",
    "ssh_username": "root",
    "ssh_password": "vagrant",
    "hostname": "ol75-base.vzell.de",
    "compression": "6",
    "vagrantfile": ""
  },
  "builders": [
    {
      "type": "virtualbox-iso",
      "communicator": "ssh",
      "ssh_username": "{{user `ssh_username`}}",
      "ssh_password": "{{user `ssh_password`}}",
      "ssh_timeout": "15m",
      "guest_os_type": "Oracle_64",
      "guest_additions_url": "",
      "guest_additions_sha256": "",
      "guest_additions_path": "",
      "guest_additions_mode": "upload",
      "output_directory": "output-{{user `hostname`}}",
      "iso_url": "iso/{{user `iso_url`}}",
      "iso_checksum": "{{user `iso_checksum`}}",
      "iso_checksum_type": "{{user `iso_checksum_type`}}",
      "http_directory": "http",
      "http_port_min": 8080,
      "http_port_max": 8082,
      "vm_name": "{{user `vm_name`}}",
      "keep_registered": true,
      "export_opts": [
        "--manifest",
        "--vsys",
        "0",
        "--description",
        "{{user `vm_description`}}",
        "--version",
        "{{user `vm_version`}}"
      ],
      "vboxmanage": [
        [
          "modifyvm",
          "{{.Name}}",
          "--groups",
          "{{user `group_name`}}"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--boot1",
          "disk",
          "--boot2",
          "dvd",
          "--boot3",
          "none",
          "--boot4",
          "none"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--vram",
          "32"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--memory",
          "2048"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--cpus",
          "2"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--audio",
          "none"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--vrde",
          "off"
        ],
        [
          "modifyvm",
          "{{.Name}}",
          "--rtcuseutc",
          "on"
        ],
        [
          "storageattach",
          "{{.Name}}",
          "--storagectl",
          "SATA Controller",
          "--port",
          "1",
          "--device",
          "0",
          "--type",
          "hdd",
          "--medium",
          "./.virtualbox/data1.vdi"
        ]
      ],
      "hard_drive_interface": "sata",
      "sata_port_count": 4,
      "disk_size": 409600,
      "headless": false,
      "shutdown_command": "shutdown -h now",
      "shutdown_timeout": "30m",
      "boot_wait": "5s",
      "boot_command": [
        "<tab>",
        " text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg ",
        "<enter>"
      ]
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "execute_command": "sh '{{ .Path }}'",
      "pause_before": "1s",
      "inline": [
        "yum -y update",
        "yum clean all",
        "[ -d /var/cache/yum ] && rm -fr /var/cache/yum",
        "useradd vagrant",
        "cp /etc/sudoers /etc/sudoers.orig",
        "sed -i -e 's/Defaults\\s*requiretty$/#Defaults\trequiretty/' /etc/sudoers",
        "sed -i -e '/# %wheel\tALL=(ALL)\tNOPASSWD: ALL/a %vagrant\tALL=(ALL)\tNOPASSWD: ALL' /etc/sudoers",
        "mkdir ~vagrant/.ssh",
        "chmod 700 ~vagrant/.ssh",
        "echo 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key' > ~vagrant/.ssh/authorized_keys",
        "chmod 600 ~vagrant/.ssh/authorized_keys",
        "chown -R vagrant: ~vagrant/.ssh"
      ]
    },
    {
      "type": "shell",
      "only": [
        "virtualbox-iso"
      ],
      "execute_command": "sh '{{ .Path }}'",
      "pause_before": "1s",
      "inline": [
        "mkdir -p /media/dvd",
        "mount -o loop,ro VBoxGuestAdditions*.iso /media/dvd",
        "sh /media/dvd/VBoxLinuxAdditions.run --nox11",
        "umount /media/dvd",
        "rm VBoxGuestAdditions*.iso"
      ]
    }
  ]
}
_EOF
Create additional disk(s) with VBoxManage
vbm createmedium --filename ./.virtualbox/data1.vdi --size 2048000 --format vdi --variant Standard
# 0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
# Medium created. UUID: 6e4fc890-4c34-475b-8713-6fe8fb21659f
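The new medium can be inspected before starting the build; again an optional check, assuming vbm is the VBoxManage wrapper/alias used above. It should report a capacity of 2048000 MB, i.e. roughly 2 TB:
vbm showmediuminfo disk ./.virtualbox/data1.vdi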
tree -a /misc/packer/ol/${ol_ver}
# /misc/packer/ol/7.5-1d
# ├── .virtualbox
# │   └── data1.vdi
# ├── http
# │   └── ks.cfg
# ├── iso
# │   ├── iso-info.json
# │   └── V975367-01.iso
# └── packer.json
# 
# 3 directories, 5 files
Packer execution
packer validate -var-file=iso/iso-info.json packer.json
# Template validated successfully.
packer inspect packer.json
# Optional variables and their defaults:
# 
#   compression    = 6
#   group_name     = /Oracle Linux/Oracle Linux 7 Update 5
#   hostname       = ol75-base.vzell.de
#   ssh_password   = vagrant
#   ssh_username   = root
#   vagrantfile    = 
#   vm_description = {{env `vm_description`}}
#   vm_name        = packer-ol75-1d-base
#   vm_version     = 1.0.0
# 
# Builders:
# 
#   virtualbox-iso
# 
# Provisioners:
# 
#   shell
#   shell
# 
# Note: If your build names contain user variables or template
# functions such as 'timestamp', these are processed at build time,
# and therefore only show in their raw form here.
vm_description='Oracle Linux 7 Update 5, with one additional 2TB data disk

prepared by Dr. Volker Zell'
vm_version='0.9.1'
time packer build \
    -var "vm_description=${vm_description}" \
    -var "vm_version=${vm_version}"         \
    -var-file=iso/iso-info.json             \
    packer.json
# virtualbox-iso output will be in this color.
# 
# ==> virtualbox-iso: Retrieving Guest additions
#     virtualbox-iso: Using file in-place: file:///C:/Program%20Files/Oracle/VirtualBox/VBoxGuestAdditions.iso
# ==> virtualbox-iso: Retrieving ISO
#     virtualbox-iso: Using file in-place: file:///D:/misc/packer/ol/7.5-1d/iso/V975367-01.iso
# ==> virtualbox-iso: Starting HTTP server on port 8082
# ==> virtualbox-iso: Creating virtual machine...
# ==> virtualbox-iso: Creating hard drive...
# ==> virtualbox-iso: Creating forwarded port mapping for communicator (SSH, WinRM, etc) (host port 3482)
# ==> virtualbox-iso: Executing custom VBoxManage commands...
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --groups /Oracle Linux/Oracle Linux 7 Update 5
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --boot1 disk --boot2 dvd --boot3 none --boot4 none
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --vram 32
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --memory 2048
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --cpus 2
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --audio none
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --vrde off
#     virtualbox-iso: Executing: modifyvm packer-ol75-1d-base --rtcuseutc on
#     virtualbox-iso: Executing: storageattach packer-ol75-1d-base --storagectl SATA Controller --port 1 --device 0 --type hdd --medium ./.virtualbox/data1.vdi
# ==> virtualbox-iso: Starting the virtual machine...
# ==> virtualbox-iso: Waiting 5s for boot...
# ==> virtualbox-iso: Typing the boot command...
# ==> virtualbox-iso: Using ssh communicator to connect: 127.0.0.1
# ==> virtualbox-iso: Waiting for SSH to become available...
# ==> virtualbox-iso: Connected to SSH!
# ==> virtualbox-iso: Uploading VirtualBox version info (5.1.28)
# ==> virtualbox-iso: Uploading VirtualBox guest additions ISO...
# ==> virtualbox-iso: Pausing 1s before the next provisioner...
# ==> virtualbox-iso: Provisioning with shell script: E:\tmp\packer-shell438758439
#     virtualbox-iso: Loaded plugins: ulninfo
#     virtualbox-iso: Resolving Dependencies
#     virtualbox-iso: --> Running transaction check
#     virtualbox-iso: ---> Package NetworkManager.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-config-server.noarch 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-config-server.noarch 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-libnm.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-libnm.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-team.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-team.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package NetworkManager-tui.x86_64 1:1.10.2-13.el7 will be updated
#     virtualbox-iso: ---> Package NetworkManager-tui.x86_64 1:1.10.2-16.el7_5 will be an update
#     virtualbox-iso: ---> Package audit.x86_64 0:2.8.1-3.el7 will be updated
#     virtualbox-iso: ---> Package audit.x86_64 0:2.8.1-3.el7_5.1 will be an update
#     virtualbox-iso: ---> Package audit-libs.x86_64 0:2.8.1-3.el7 will be updated
#     virtualbox-iso: ---> Package audit-libs.x86_64 0:2.8.1-3.el7_5.1 will be an update
#     virtualbox-iso: ---> Package bind-libs-lite.x86_64 32:9.9.4-61.el7 will be updated
#     virtualbox-iso: ---> Package bind-libs-lite.x86_64 32:9.9.4-61.el7_5.1 will be an update
#     virtualbox-iso: ---> Package bind-license.noarch 32:9.9.4-61.el7 will be updated
#     virtualbox-iso: ---> Package bind-license.noarch 32:9.9.4-61.el7_5.1 will be an update
#     virtualbox-iso: ---> Package binutils.x86_64 0:2.27-27.base.el7 will be updated
#     virtualbox-iso: ---> Package binutils.x86_64 0:2.27-28.base.el7_5.1 will be an update
#     virtualbox-iso: ---> Package ca-certificates.noarch 0:2017.2.20-71.el7 will be updated
#     virtualbox-iso: ---> Package ca-certificates.noarch 0:2018.2.22-70.0.el7_5 will be an update
#     virtualbox-iso: ---> Package cpp.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package cpp.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package device-mapper.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package device-mapper-event.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper-event.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package device-mapper-event-libs.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper-event-libs.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package device-mapper-libs.x86_64 7:1.02.146-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package device-mapper-libs.x86_64 7:1.02.146-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package dhclient.x86_64 12:4.2.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dhclient.x86_64 12:4.2.5-68.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dhcp-common.x86_64 12:4.2.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dhcp-common.x86_64 12:4.2.5-68.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dhcp-libs.x86_64 12:4.2.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dhcp-libs.x86_64 12:4.2.5-68.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dracut.x86_64 0:033-535.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dracut.x86_64 0:033-535.0.5.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dracut-config-rescue.x86_64 0:033-535.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dracut-config-rescue.x86_64 0:033-535.0.5.el7_5.1 will be an update
#     virtualbox-iso: ---> Package dracut-network.x86_64 0:033-535.0.1.el7 will be updated
#     virtualbox-iso: ---> Package dracut-network.x86_64 0:033-535.0.5.el7_5.1 will be an update
#     virtualbox-iso: ---> Package e2fsprogs.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package e2fsprogs.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package e2fsprogs-libs.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package e2fsprogs-libs.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package firewalld.noarch 0:0.4.4.4-14.el7 will be updated
#     virtualbox-iso: ---> Package firewalld.noarch 0:0.4.4.4-15.el7_5 will be an update
#     virtualbox-iso: ---> Package firewalld-filesystem.noarch 0:0.4.4.4-14.el7 will be updated
#     virtualbox-iso: ---> Package firewalld-filesystem.noarch 0:0.4.4.4-15.el7_5 will be an update
#     virtualbox-iso: ---> Package gcc.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package gcc.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package glibc.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package glibc-common.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc-common.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package glibc-devel.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc-devel.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package glibc-headers.x86_64 0:2.17-222.el7 will be updated
#     virtualbox-iso: ---> Package glibc-headers.x86_64 0:2.17-222.0.7.el7 will be an update
#     virtualbox-iso: ---> Package gnupg2.x86_64 0:2.0.22-4.el7 will be updated
#     virtualbox-iso: ---> Package gnupg2.x86_64 0:2.0.22-5.el7_5 will be an update
#     virtualbox-iso: ---> Package initscripts.x86_64 0:9.49.41-1.0.1.el7 will be updated
#     virtualbox-iso: ---> Package initscripts.x86_64 0:9.49.41-1.0.4.el7_5.2 will be an update
#     virtualbox-iso: ---> Package iptables.x86_64 0:1.4.21-24.el7 will be updated
#     virtualbox-iso: ---> Package iptables.x86_64 0:1.4.21-24.1.el7_5 will be an update
#     virtualbox-iso: ---> Package iwl100-firmware.noarch 0:39.31.5.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl100-firmware.noarch 0:39.31.5.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl1000-firmware.noarch 1:39.31.5.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl1000-firmware.noarch 1:39.31.5.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl105-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl105-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl135-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl135-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl2000-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl2000-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl2030-firmware.noarch 0:18.168.6.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl2030-firmware.noarch 0:18.168.6.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl3160-firmware.noarch 0:22.0.7.0-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl3160-firmware.noarch 0:22.0.7.0-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl3945-firmware.noarch 0:15.32.2.9-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl3945-firmware.noarch 0:15.32.2.9-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl4965-firmware.noarch 0:228.61.2.24-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl4965-firmware.noarch 0:228.61.2.24-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl5000-firmware.noarch 0:8.83.5.1_1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl5000-firmware.noarch 0:8.83.5.1_1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl5150-firmware.noarch 0:8.24.2.2-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl5150-firmware.noarch 0:8.24.2.2-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6000-firmware.noarch 0:9.221.4.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6000-firmware.noarch 0:9.221.4.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6000g2a-firmware.noarch 0:17.168.5.3-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6000g2a-firmware.noarch 0:17.168.5.3-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6000g2b-firmware.noarch 0:17.168.5.2-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6000g2b-firmware.noarch 0:17.168.5.2-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl6050-firmware.noarch 0:41.28.5.1-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl6050-firmware.noarch 0:41.28.5.1-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl7260-firmware.noarch 0:22.0.7.0-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl7260-firmware.noarch 0:22.0.7.0-999.el7 will be an update
#     virtualbox-iso: ---> Package iwl7265-firmware.noarch 0:22.0.7.0-62.el7 will be updated
#     virtualbox-iso: ---> Package iwl7265-firmware.noarch 0:22.0.7.0-999.el7 will be an update
#     virtualbox-iso: ---> Package kernel.x86_64 0:3.10.0-862.14.4.el7 will be installed
#     virtualbox-iso: ---> Package kernel-devel.x86_64 0:3.10.0-862.14.4.el7 will be installed
#     virtualbox-iso: ---> Package kernel-headers.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package kernel-headers.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package kernel-tools.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package kernel-tools.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package kernel-tools-libs.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package kernel-tools-libs.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package kernel-uek.x86_64 0:4.1.12-124.20.3.el7uek will be installed
#     virtualbox-iso: ---> Package kernel-uek-devel.x86_64 0:4.1.12-124.20.3.el7uek will be installed
#     virtualbox-iso: ---> Package kernel-uek-firmware.noarch 0:4.1.12-124.20.3.el7uek will be installed
#     virtualbox-iso: ---> Package kexec-tools.x86_64 0:2.0.15-13.0.1.el7 will be updated
#     virtualbox-iso: ---> Package kexec-tools.x86_64 0:2.0.15-13.0.1.el7_5.2 will be an update
#     virtualbox-iso: ---> Package kpartx.x86_64 0:0.4.9-119.el7 will be updated
#     virtualbox-iso: ---> Package kpartx.x86_64 0:0.4.9-119.el7_5.1 will be an update
#     virtualbox-iso: ---> Package krb5-libs.x86_64 0:1.15.1-18.el7 will be updated
#     virtualbox-iso: ---> Package krb5-libs.x86_64 0:1.15.1-19.el7 will be an update
#     virtualbox-iso: ---> Package libblkid.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package libblkid.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libcom_err.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libcom_err.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package libdtrace-ctf.x86_64 0:0.7.0-1.el7 will be updated
#     virtualbox-iso: ---> Package libdtrace-ctf.x86_64 0:0.8.0-1.el7 will be an update
#     virtualbox-iso: ---> Package libgcc.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libgcc.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libgomp.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libgomp.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libgudev1.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libgudev1.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package libmount.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package libmount.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libselinux.x86_64 0:2.5-12.el7 will be updated
#     virtualbox-iso: ---> Package libselinux.x86_64 0:2.5-12.0.1.el7 will be an update
#     virtualbox-iso: ---> Package libselinux-python.x86_64 0:2.5-12.el7 will be updated
#     virtualbox-iso: ---> Package libselinux-python.x86_64 0:2.5-12.0.1.el7 will be an update
#     virtualbox-iso: ---> Package libselinux-utils.x86_64 0:2.5-12.el7 will be updated
#     virtualbox-iso: ---> Package libselinux-utils.x86_64 0:2.5-12.0.1.el7 will be an update
#     virtualbox-iso: ---> Package libss.x86_64 0:1.42.9-11.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libss.x86_64 0:1.42.9-12.el7_5 will be an update
#     virtualbox-iso: ---> Package libstdc++.x86_64 0:4.8.5-28.0.1.el7 will be updated
#     virtualbox-iso: ---> Package libstdc++.x86_64 0:4.8.5-28.0.1.el7_5.1 will be an update
#     virtualbox-iso: ---> Package libuuid.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package libuuid.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package linux-firmware.noarch 0:20180220-62.git6d51311.0.1.el7 will be updated
#     virtualbox-iso: ---> Package linux-firmware.noarch 0:20180906-999.git85c5d90f.el7 will be an update
#     virtualbox-iso: ---> Package lvm2.x86_64 7:2.02.177-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package lvm2.x86_64 7:2.02.177-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package lvm2-libs.x86_64 7:2.02.177-4.0.1.el7 will be updated
#     virtualbox-iso: ---> Package lvm2-libs.x86_64 7:2.02.177-4.0.2.el7 will be an update
#     virtualbox-iso: ---> Package mariadb-libs.x86_64 1:5.5.56-2.el7 will be updated
#     virtualbox-iso: ---> Package mariadb-libs.x86_64 1:5.5.60-1.el7_5 will be an update
#     virtualbox-iso: ---> Package microcode_ctl.x86_64 2:2.1-29.0.2.el7 will be updated
#     virtualbox-iso: ---> Package microcode_ctl.x86_64 2:2.1-29.16.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package nspr.x86_64 0:4.17.0-1.el7 will be updated
#     virtualbox-iso: ---> Package nspr.x86_64 0:4.19.0-1.el7_5 will be an update
#     virtualbox-iso: ---> Package nss.x86_64 0:3.34.0-4.el7 will be updated
#     virtualbox-iso: ---> Package nss.x86_64 0:3.36.0-7.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-softokn.x86_64 0:3.34.0-2.0.1.el7 will be updated
#     virtualbox-iso: ---> Package nss-softokn.x86_64 0:3.36.0-5.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-softokn-freebl.x86_64 0:3.34.0-2.0.1.el7 will be updated
#     virtualbox-iso: ---> Package nss-softokn-freebl.x86_64 0:3.36.0-5.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-sysinit.x86_64 0:3.34.0-4.el7 will be updated
#     virtualbox-iso: ---> Package nss-sysinit.x86_64 0:3.36.0-7.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-tools.x86_64 0:3.34.0-4.el7 will be updated
#     virtualbox-iso: ---> Package nss-tools.x86_64 0:3.36.0-7.el7_5 will be an update
#     virtualbox-iso: ---> Package nss-util.x86_64 0:3.34.0-2.el7 will be updated
#     virtualbox-iso: ---> Package nss-util.x86_64 0:3.36.0-1.el7_5 will be an update
#     virtualbox-iso: ---> Package openldap.x86_64 0:2.4.44-13.el7 will be updated
#     virtualbox-iso: ---> Package openldap.x86_64 0:2.4.44-15.el7_5 will be an update
#     virtualbox-iso: ---> Package openssl.x86_64 1:1.0.2k-12.0.1.el7 will be updated
#     virtualbox-iso: ---> Package openssl.x86_64 1:1.0.2k-12.0.3.el7 will be an update
#     virtualbox-iso: ---> Package openssl-libs.x86_64 1:1.0.2k-12.0.1.el7 will be updated
#     virtualbox-iso: ---> Package openssl-libs.x86_64 1:1.0.2k-12.0.3.el7 will be an update
#     virtualbox-iso: ---> Package oraclelinux-release.x86_64 7:7.5-1.0.3.el7 will be updated
#     virtualbox-iso: ---> Package oraclelinux-release.x86_64 7:7.5-1.0.5.el7 will be an update
#     virtualbox-iso: ---> Package polkit.x86_64 0:0.112-14.el7 will be updated
#     virtualbox-iso: ---> Package polkit.x86_64 0:0.112-14.0.1.el7 will be an update
#     virtualbox-iso: ---> Package procps-ng.x86_64 0:3.3.10-17.el7 will be updated
#     virtualbox-iso: ---> Package procps-ng.x86_64 0:3.3.10-17.el7_5.2 will be an update
#     virtualbox-iso: ---> Package python.x86_64 0:2.7.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package python.x86_64 0:2.7.5-69.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package python-firewall.noarch 0:0.4.4.4-14.el7 will be updated
#     virtualbox-iso: ---> Package python-firewall.noarch 0:0.4.4.4-15.el7_5 will be an update
#     virtualbox-iso: ---> Package python-libs.x86_64 0:2.7.5-68.0.1.el7 will be updated
#     virtualbox-iso: ---> Package python-libs.x86_64 0:2.7.5-69.0.1.el7_5 will be an update
#     virtualbox-iso: ---> Package python-perf.x86_64 0:3.10.0-862.el7 will be updated
#     virtualbox-iso: ---> Package python-perf.x86_64 0:3.10.0-862.14.4.el7 will be an update
#     virtualbox-iso: ---> Package qemu-guest-agent.x86_64 10:2.8.0-2.el7 will be updated
#     virtualbox-iso: ---> Package qemu-guest-agent.x86_64 10:2.8.0-2.el7_5.1 will be an update
#     virtualbox-iso: ---> Package redhat-release-server.x86_64 1:7.5-8.0.1.el7 will be updated
#     virtualbox-iso: ---> Package redhat-release-server.x86_64 1:7.5-8.0.5.el7 will be an update
#     virtualbox-iso: ---> Package rhn-check.noarch 0:2.0.2-21.0.3.el7 will be updated
#     virtualbox-iso: ---> Package rhn-check.noarch 0:2.0.2-21.0.9.el7 will be an update
#     virtualbox-iso: ---> Package rhn-client-tools.noarch 0:2.0.2-21.0.3.el7 will be updated
#     virtualbox-iso: ---> Package rhn-client-tools.noarch 0:2.0.2-21.0.9.el7 will be an update
#     virtualbox-iso: ---> Package rhn-setup.noarch 0:2.0.2-21.0.3.el7 will be updated
#     virtualbox-iso: ---> Package rhn-setup.noarch 0:2.0.2-21.0.9.el7 will be an update
#     virtualbox-iso: ---> Package rsyslog.x86_64 0:8.24.0-16.el7 will be updated
#     virtualbox-iso: ---> Package rsyslog.x86_64 0:8.24.0-16.el7_5.4 will be an update
#     virtualbox-iso: ---> Package selinux-policy.noarch 0:3.13.1-192.0.1.el7 will be updated
#     virtualbox-iso: ---> Package selinux-policy.noarch 0:3.13.1-192.0.6.el7_5.6 will be an update
#     virtualbox-iso: ---> Package selinux-policy-targeted.noarch 0:3.13.1-192.0.1.el7 will be updated
#     virtualbox-iso: ---> Package selinux-policy-targeted.noarch 0:3.13.1-192.0.6.el7_5.6 will be an update
#     virtualbox-iso: ---> Package sudo.x86_64 0:1.8.19p2-13.el7 will be updated
#     virtualbox-iso: ---> Package sudo.x86_64 0:1.8.19p2-14.el7_5 will be an update
#     virtualbox-iso: ---> Package systemd.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package systemd.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package systemd-libs.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package systemd-libs.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package systemd-sysv.x86_64 0:219-57.0.1.el7 will be updated
#     virtualbox-iso: ---> Package systemd-sysv.x86_64 0:219-57.0.1.el7_5.3 will be an update
#     virtualbox-iso: ---> Package tuned.noarch 0:2.9.0-1.el7 will be updated
#     virtualbox-iso: ---> Package tuned.noarch 0:2.9.0-1.el7_5.2 will be an update
#     virtualbox-iso: ---> Package tzdata.noarch 0:2018c-1.el7 will be updated
#     virtualbox-iso: ---> Package tzdata.noarch 0:2018e-3.el7 will be an update
#     virtualbox-iso: ---> Package util-linux.x86_64 0:2.23.2-52.el7 will be updated
#     virtualbox-iso: ---> Package util-linux.x86_64 0:2.23.2-52.el7_5.1 will be an update
#     virtualbox-iso: ---> Package yum.noarch 0:3.4.3-158.0.1.el7 will be updated
#     virtualbox-iso: ---> Package yum.noarch 0:3.4.3-158.0.2.el7 will be an update
#     virtualbox-iso: --> Finished Dependency Resolution
#     virtualbox-iso:
#     virtualbox-iso: Dependencies Resolved
#     virtualbox-iso:
#     virtualbox-iso: ================================================================================
#     virtualbox-iso:  Package                   Arch   Version                      Repository  Size
#     virtualbox-iso: ================================================================================
#     virtualbox-iso: Installing:
#     virtualbox-iso:  kernel                    x86_64 3.10.0-862.14.4.el7          ol7_latest  46 M
#     virtualbox-iso:  kernel-devel              x86_64 3.10.0-862.14.4.el7          ol7_latest  16 M
#     virtualbox-iso:  kernel-uek                x86_64 4.1.12-124.20.3.el7uek       ol7_UEKR4   44 M
#     virtualbox-iso:  kernel-uek-devel          x86_64 4.1.12-124.20.3.el7uek       ol7_UEKR4   11 M
#     virtualbox-iso:  kernel-uek-firmware       noarch 4.1.12-124.20.3.el7uek       ol7_UEKR4  2.5 M
#     virtualbox-iso: Updating:
#     virtualbox-iso:  NetworkManager            x86_64 1:1.10.2-16.el7_5            ol7_latest 1.7 M
#     virtualbox-iso:  NetworkManager-config-server
#     virtualbox-iso:                            noarch 1:1.10.2-16.el7_5            ol7_latest 143 k
#     virtualbox-iso:  NetworkManager-libnm      x86_64 1:1.10.2-16.el7_5            ol7_latest 1.3 M
#     virtualbox-iso:  NetworkManager-team       x86_64 1:1.10.2-16.el7_5            ol7_latest 161 k
#     virtualbox-iso:  NetworkManager-tui        x86_64 1:1.10.2-16.el7_5            ol7_latest 235 k
#     virtualbox-iso:  audit                     x86_64 2.8.1-3.el7_5.1              ol7_latest 246 k
#     virtualbox-iso:  audit-libs                x86_64 2.8.1-3.el7_5.1              ol7_latest  99 k
#     virtualbox-iso:  bind-libs-lite            x86_64 32:9.9.4-61.el7_5.1          ol7_latest 733 k
#     virtualbox-iso:  bind-license              noarch 32:9.9.4-61.el7_5.1          ol7_latest  85 k
#     virtualbox-iso:  binutils                  x86_64 2.27-28.base.el7_5.1         ol7_latest 5.9 M
#     virtualbox-iso:  ca-certificates           noarch 2018.2.22-70.0.el7_5         ol7_latest 391 k
#     virtualbox-iso:  cpp                       x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 5.9 M
#     virtualbox-iso:  device-mapper             x86_64 7:1.02.146-4.0.2.el7         ol7_latest 289 k
#     virtualbox-iso:  device-mapper-event       x86_64 7:1.02.146-4.0.2.el7         ol7_latest 185 k
#     virtualbox-iso:  device-mapper-event-libs  x86_64 7:1.02.146-4.0.2.el7         ol7_latest 184 k
#     virtualbox-iso:  device-mapper-libs        x86_64 7:1.02.146-4.0.2.el7         ol7_latest 316 k
#     virtualbox-iso:  dhclient                  x86_64 12:4.2.5-68.0.1.el7_5.1      ol7_latest 283 k
#     virtualbox-iso:  dhcp-common               x86_64 12:4.2.5-68.0.1.el7_5.1      ol7_latest 174 k
#     virtualbox-iso:  dhcp-libs                 x86_64 12:4.2.5-68.0.1.el7_5.1      ol7_latest 131 k
#     virtualbox-iso:  dracut                    x86_64 033-535.0.5.el7_5.1          ol7_latest 326 k
#     virtualbox-iso:  dracut-config-rescue      x86_64 033-535.0.5.el7_5.1          ol7_latest  58 k
#     virtualbox-iso:  dracut-network            x86_64 033-535.0.5.el7_5.1          ol7_latest 101 k
#     virtualbox-iso:  e2fsprogs                 x86_64 1.42.9-12.el7_5              ol7_latest 698 k
#     virtualbox-iso:  e2fsprogs-libs            x86_64 1.42.9-12.el7_5              ol7_latest 166 k
#     virtualbox-iso:  firewalld                 noarch 0.4.4.4-15.el7_5             ol7_latest 418 k
#     virtualbox-iso:  firewalld-filesystem      noarch 0.4.4.4-15.el7_5             ol7_latest  48 k
#     virtualbox-iso:  gcc                       x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest  16 M
#     virtualbox-iso:  glibc                     x86_64 2.17-222.0.7.el7             ol7_latest 3.6 M
#     virtualbox-iso:  glibc-common              x86_64 2.17-222.0.7.el7             ol7_latest  11 M
#     virtualbox-iso:  glibc-devel               x86_64 2.17-222.0.7.el7             ol7_latest 1.1 M
#     virtualbox-iso:  glibc-headers             x86_64 2.17-222.0.7.el7             ol7_latest 679 k
#     virtualbox-iso:  gnupg2                    x86_64 2.0.22-5.el7_5               ol7_latest 1.5 M
#     virtualbox-iso:  initscripts               x86_64 9.49.41-1.0.4.el7_5.2        ol7_latest 437 k
#     virtualbox-iso:  iptables                  x86_64 1.4.21-24.1.el7_5            ol7_latest 431 k
#     virtualbox-iso:  iwl100-firmware           noarch 39.31.5.1-999.el7            ol7_latest 145 k
#     virtualbox-iso:  iwl1000-firmware          noarch 1:39.31.5.1-999.el7          ol7_latest 208 k
#     virtualbox-iso:  iwl105-firmware           noarch 18.168.6.1-999.el7           ol7_latest 229 k
#     virtualbox-iso:  iwl135-firmware           noarch 18.168.6.1-999.el7           ol7_latest 238 k
#     virtualbox-iso:  iwl2000-firmware          noarch 18.168.6.1-999.el7           ol7_latest 232 k
#     virtualbox-iso:  iwl2030-firmware          noarch 18.168.6.1-999.el7           ol7_latest 241 k
#     virtualbox-iso:  iwl3160-firmware          noarch 22.0.7.0-999.el7             ol7_latest 1.6 M
#     virtualbox-iso:  iwl3945-firmware          noarch 15.32.2.9-999.el7            ol7_latest  83 k
#     virtualbox-iso:  iwl4965-firmware          noarch 228.61.2.24-999.el7          ol7_latest  96 k
#     virtualbox-iso:  iwl5000-firmware          noarch 8.83.5.1_1-999.el7           ol7_latest 289 k
#     virtualbox-iso:  iwl5150-firmware          noarch 8.24.2.2-999.el7             ol7_latest 142 k
#     virtualbox-iso:  iwl6000-firmware          noarch 9.221.4.1-999.el7            ol7_latest 162 k
#     virtualbox-iso:  iwl6000g2a-firmware       noarch 17.168.5.3-999.el7           ol7_latest 305 k
#     virtualbox-iso:  iwl6000g2b-firmware       noarch 17.168.5.2-999.el7           ol7_latest 305 k
#     virtualbox-iso:  iwl6050-firmware          noarch 41.28.5.1-999.el7            ol7_latest 238 k
#     virtualbox-iso:  iwl7260-firmware          noarch 22.0.7.0-999.el7             ol7_latest 1.1 M
#     virtualbox-iso:  iwl7265-firmware          noarch 22.0.7.0-999.el7             ol7_latest 6.4 M
#     virtualbox-iso:  kernel-headers            x86_64 3.10.0-862.14.4.el7          ol7_latest 7.1 M
#     virtualbox-iso:  kernel-tools              x86_64 3.10.0-862.14.4.el7          ol7_latest 6.3 M
#     virtualbox-iso:  kernel-tools-libs         x86_64 3.10.0-862.14.4.el7          ol7_latest 6.2 M
#     virtualbox-iso:  kexec-tools               x86_64 2.0.15-13.0.1.el7_5.2        ol7_latest 341 k
#     virtualbox-iso:  kpartx                    x86_64 0.4.9-119.el7_5.1            ol7_latest  75 k
#     virtualbox-iso:  krb5-libs                 x86_64 1.15.1-19.el7                ol7_latest 747 k
#     virtualbox-iso:  libblkid                  x86_64 2.23.2-52.el7_5.1            ol7_latest 178 k
#     virtualbox-iso:  libcom_err                x86_64 1.42.9-12.el7_5              ol7_latest  40 k
#     virtualbox-iso:  libdtrace-ctf             x86_64 0.8.0-1.el7                  ol7_UEKR4   34 k
#     virtualbox-iso:  libgcc                    x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 100 k
#     virtualbox-iso:  libgomp                   x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 156 k
#     virtualbox-iso:  libgudev1                 x86_64 219-57.0.1.el7_5.3           ol7_latest  92 k
#     virtualbox-iso:  libmount                  x86_64 2.23.2-52.el7_5.1            ol7_latest 179 k
#     virtualbox-iso:  libselinux                x86_64 2.5-12.0.1.el7               ol7_latest 161 k
#     virtualbox-iso:  libselinux-python         x86_64 2.5-12.0.1.el7               ol7_latest 235 k
#     virtualbox-iso:  libselinux-utils          x86_64 2.5-12.0.1.el7               ol7_latest 151 k
#     virtualbox-iso:  libss                     x86_64 1.42.9-12.el7_5              ol7_latest  45 k
#     virtualbox-iso:  libstdc++                 x86_64 4.8.5-28.0.1.el7_5.1         ol7_latest 303 k
#     virtualbox-iso:  libuuid                   x86_64 2.23.2-52.el7_5.1            ol7_latest  80 k
#     virtualbox-iso:  linux-firmware            noarch 20180906-999.git85c5d90f.el7 ol7_latest  67 M
#     virtualbox-iso:  lvm2                      x86_64 7:2.02.177-4.0.2.el7         ol7_latest 1.3 M
#     virtualbox-iso:  lvm2-libs                 x86_64 7:2.02.177-4.0.2.el7         ol7_latest 1.0 M
#     virtualbox-iso:  mariadb-libs              x86_64 1:5.5.60-1.el7_5             ol7_latest 758 k
#     virtualbox-iso:  microcode_ctl             x86_64 2:2.1-29.16.0.1.el7_5        ol7_latest 1.4 M
#     virtualbox-iso:  nspr                      x86_64 4.19.0-1.el7_5               ol7_latest 126 k
#     virtualbox-iso:  nss                       x86_64 3.36.0-7.el7_5               ol7_latest 834 k
#     virtualbox-iso:  nss-softokn               x86_64 3.36.0-5.0.1.el7_5           ol7_latest 315 k
#     virtualbox-iso:  nss-softokn-freebl        x86_64 3.36.0-5.0.1.el7_5           ol7_latest 222 k
#     virtualbox-iso:  nss-sysinit               x86_64 3.36.0-7.el7_5               ol7_latest  62 k
#     virtualbox-iso:  nss-tools                 x86_64 3.36.0-7.el7_5               ol7_latest 514 k
#     virtualbox-iso:  nss-util                  x86_64 3.36.0-1.el7_5               ol7_latest  77 k
#     virtualbox-iso:  openldap                  x86_64 2.4.44-15.el7_5              ol7_latest 355 k
#     virtualbox-iso:  openssl                   x86_64 1:1.0.2k-12.0.3.el7          ol7_latest 492 k
#     virtualbox-iso:  openssl-libs              x86_64 1:1.0.2k-12.0.3.el7          ol7_latest 1.2 M
#     virtualbox-iso:  oraclelinux-release       x86_64 7:7.5-1.0.5.el7              ol7_latest  58 k
#     virtualbox-iso:  polkit                    x86_64 0.112-14.0.1.el7             ol7_latest 167 k
#     virtualbox-iso:  procps-ng                 x86_64 3.3.10-17.el7_5.2            ol7_latest 289 k
#     virtualbox-iso:  python                    x86_64 2.7.5-69.0.1.el7_5           ol7_latest  93 k
#     virtualbox-iso:  python-firewall           noarch 0.4.4.4-15.el7_5             ol7_latest 328 k
#     virtualbox-iso:  python-libs               x86_64 2.7.5-69.0.1.el7_5           ol7_latest 5.6 M
#     virtualbox-iso:  python-perf               x86_64 3.10.0-862.14.4.el7          ol7_latest 6.3 M
#     virtualbox-iso:  qemu-guest-agent          x86_64 10:2.8.0-2.el7_5.1           ol7_latest 149 k
#     virtualbox-iso:  redhat-release-server     x86_64 1:7.5-8.0.5.el7              ol7_latest 9.4 k
#     virtualbox-iso:  rhn-check                 noarch 2.0.2-21.0.9.el7             ol7_latest  57 k
#     virtualbox-iso:  rhn-client-tools          noarch 2.0.2-21.0.9.el7             ol7_latest 416 k
#     virtualbox-iso:  rhn-setup                 noarch 2.0.2-21.0.9.el7             ol7_latest  94 k
#     virtualbox-iso:  rsyslog                   x86_64 8.24.0-16.el7_5.4            ol7_latest 606 k
#     virtualbox-iso:  selinux-policy            noarch 3.13.1-192.0.6.el7_5.6       ol7_latest 454 k
#     virtualbox-iso:  selinux-policy-targeted   noarch 3.13.1-192.0.6.el7_5.6       ol7_latest 6.8 M
#     virtualbox-iso:  sudo                      x86_64 1.8.19p2-14.el7_5            ol7_latest 1.1 M
#     virtualbox-iso:  systemd                   x86_64 219-57.0.1.el7_5.3           ol7_latest 5.0 M
#     virtualbox-iso:  systemd-libs              x86_64 219-57.0.1.el7_5.3           ol7_latest 402 k
#     virtualbox-iso:  systemd-sysv              x86_64 219-57.0.1.el7_5.3           ol7_latest  79 k
#     virtualbox-iso:  tuned                     noarch 2.9.0-1.el7_5.2              ol7_latest 244 k
#     virtualbox-iso:  tzdata                    noarch 2018e-3.el7                  ol7_latest 481 k
#     virtualbox-iso:  util-linux                x86_64 2.23.2-52.el7_5.1            ol7_latest 2.0 M
#     virtualbox-iso:  yum                       noarch 3.4.3-158.0.2.el7            ol7_latest 1.2 M
#     virtualbox-iso:
#     virtualbox-iso: Transaction Summary
#     virtualbox-iso: ================================================================================
#     virtualbox-iso: Install    5 Packages
#     virtualbox-iso: Upgrade  108 Packages
#     virtualbox-iso:
#     virtualbox-iso: Total download size: 317 M
#     virtualbox-iso: Downloading packages:
#     virtualbox-iso: Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
#     virtualbox-iso: warning: /var/cache/yum/x86_64/7Server/ol7_latest/packages/NetworkManager-config-server-1.10.2-16.el7_5.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID ec551f03: NOKEY
#     virtualbox-iso: Public key for NetworkManager-config-server-1.10.2-16.el7_5.noarch.rpm is not installed
#     virtualbox-iso: Public key for kernel-uek-devel-4.1.12-124.20.3.el7uek.x86_64.rpm is not installed
#     virtualbox-iso: --------------------------------------------------------------------------------
#     virtualbox-iso: Total                                              5.8 MB/s | 317 MB  00:54
#     virtualbox-iso: Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
#     virtualbox-iso: Importing GPG key 0xEC551F03:
#     virtualbox-iso:  Userid     : "Oracle OSS group (Open Source Software group) <build@oss.oracle.com>"
#     virtualbox-iso:  Fingerprint: 4214 4123 fecf c55b 9086 313d 72f9 7b74 ec55 1f03
#     virtualbox-iso:  Package    : 7:oraclelinux-release-7.5-1.0.3.el7.x86_64 (@anaconda/7.5)
#     virtualbox-iso:  From       : /etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
#     virtualbox-iso: Running transaction check
#     virtualbox-iso: Running transaction test
#     virtualbox-iso: Transaction test succeeded
#     virtualbox-iso: Running transaction
#     virtualbox-iso:   Updating   : libgcc-4.8.5-28.0.1.el7_5.1.x86_64                         1/221
#     virtualbox-iso:   Updating   : 1:redhat-release-server-7.5-8.0.5.el7.x86_64               2/221
#     virtualbox-iso:   Updating   : 7:oraclelinux-release-7.5-1.0.5.el7.x86_64                 3/221
#     virtualbox-iso:   Updating   : linux-firmware-20180906-999.git85c5d90f.el7.noarch         4/221
#     virtualbox-iso:   Updating   : tzdata-2018e-3.el7.noarch                                  5/221
#     virtualbox-iso:   Updating   : glibc-common-2.17-222.0.7.el7.x86_64                       6/221
#     virtualbox-iso:   Updating   : nss-softokn-freebl-3.36.0-5.0.1.el7_5.x86_64               7/221
#     virtualbox-iso:   Updating   : glibc-2.17-222.0.7.el7.x86_64                              8/221
#     virtualbox-iso:   Updating   : libselinux-2.5-12.0.1.el7.x86_64                           9/221
#     virtualbox-iso:   Updating   : nspr-4.19.0-1.el7_5.x86_64                                10/221
#     virtualbox-iso:   Updating   : nss-util-3.36.0-1.el7_5.x86_64                            11/221
#     virtualbox-iso:   Updating   : systemd-libs-219-57.0.1.el7_5.3.x86_64                    12/221
#     virtualbox-iso:   Updating   : libuuid-2.23.2-52.el7_5.1.x86_64                          13/221
#     virtualbox-iso:   Updating   : libcom_err-1.42.9-12.el7_5.x86_64                         14/221
#     virtualbox-iso:   Updating   : libblkid-2.23.2-52.el7_5.1.x86_64                         15/221
#     virtualbox-iso:   Updating   : audit-libs-2.8.1-3.el7_5.1.x86_64                         16/221
#     virtualbox-iso:   Updating   : libmount-2.23.2-52.el7_5.1.x86_64                         17/221
#     virtualbox-iso:   Updating   : systemd-219-57.0.1.el7_5.3.x86_64                         18/221
#     virtualbox-iso:   Updating   : util-linux-2.23.2-52.el7_5.1.x86_64                       19/221
#     virtualbox-iso:   Updating   : 7:device-mapper-libs-1.02.146-4.0.2.el7.x86_64            20/221
#     virtualbox-iso:   Updating   : 7:device-mapper-1.02.146-4.0.2.el7.x86_64                 21/221
#     virtualbox-iso:   Updating   : 7:device-mapper-event-libs-1.02.146-4.0.2.el7.x86_64      22/221
#     virtualbox-iso:   Updating   : polkit-0.112-14.0.1.el7.x86_64                            23/221
#     virtualbox-iso:   Updating   : procps-ng-3.3.10-17.el7_5.2.x86_64                        24/221
#     virtualbox-iso:   Updating   : initscripts-9.49.41-1.0.4.el7_5.2.x86_64                  25/221
#     virtualbox-iso:   Updating   : nss-softokn-3.36.0-5.0.1.el7_5.x86_64                     26/221
#     virtualbox-iso:   Updating   : nss-sysinit-3.36.0-7.el7_5.x86_64                         27/221
#     virtualbox-iso:   Updating   : nss-3.36.0-7.el7_5.x86_64                                 28/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-libnm-1.10.2-16.el7_5.x86_64             29/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-1.10.2-16.el7_5.x86_64                   30/221
#     virtualbox-iso:   Updating   : libstdc++-4.8.5-28.0.1.el7_5.1.x86_64                     31/221
#     virtualbox-iso:   Updating   : nss-tools-3.36.0-7.el7_5.x86_64                           32/221
#     virtualbox-iso:   Updating   : 7:device-mapper-event-1.02.146-4.0.2.el7.x86_64           33/221
#     virtualbox-iso:   Updating   : 7:lvm2-libs-2.02.177-4.0.2.el7.x86_64                     34/221
#     virtualbox-iso:   Updating   : kpartx-0.4.9-119.el7_5.1.x86_64                           35/221
#     virtualbox-iso:   Updating   : dracut-033-535.0.5.el7_5.1.x86_64                         36/221
#     virtualbox-iso:   Updating   : libss-1.42.9-12.el7_5.x86_64                              37/221
#     virtualbox-iso:   Updating   : e2fsprogs-libs-1.42.9-12.el7_5.x86_64                     38/221
#     virtualbox-iso:   Updating   : iptables-1.4.21-24.1.el7_5.x86_64                         39/221
#     virtualbox-iso:   Updating   : cpp-4.8.5-28.0.1.el7_5.1.x86_64                           40/221
#     virtualbox-iso:   Updating   : binutils-2.27-28.base.el7_5.1.x86_64                      41/221
#     virtualbox-iso:   Updating   : libgomp-4.8.5-28.0.1.el7_5.1.x86_64                       42/221
#     virtualbox-iso:   Updating   : kernel-tools-libs-3.10.0-862.14.4.el7.x86_64              43/221
#     virtualbox-iso:   Installing : kernel-uek-firmware-4.1.12-124.20.3.el7uek.noarch         44/221
#     virtualbox-iso:   Updating   : ca-certificates-2018.2.22-70.0.el7_5.noarch               45/221
#     virtualbox-iso:   Updating   : 1:openssl-libs-1.0.2k-12.0.3.el7.x86_64                   46/221
#     virtualbox-iso:   Updating   : krb5-libs-1.15.1-19.el7.x86_64                            47/221
#     virtualbox-iso:   Updating   : openldap-2.4.44-15.el7_5.x86_64                           48/221
#     virtualbox-iso:   Updating   : 12:dhcp-libs-4.2.5-68.0.1.el7_5.1.x86_64                  49/221
#     virtualbox-iso:   Updating   : python-libs-2.7.5-69.0.1.el7_5.x86_64                     50/221
#     virtualbox-iso:   Updating   : python-2.7.5-69.0.1.el7_5.x86_64                          51/221
#     virtualbox-iso:   Updating   : python-firewall-0.4.4.4-15.el7_5.noarch                   52/221
#     virtualbox-iso:   Updating   : yum-3.4.3-158.0.2.el7.noarch                              53/221
#     virtualbox-iso:   Updating   : systemd-sysv-219-57.0.1.el7_5.3.x86_64                    54/221
#     virtualbox-iso:   Updating   : python-perf-3.10.0-862.14.4.el7.x86_64                    55/221
#     virtualbox-iso:   Updating   : 12:dhcp-common-4.2.5-68.0.1.el7_5.1.x86_64                56/221
#     virtualbox-iso:   Updating   : gnupg2-2.0.22-5.el7_5.x86_64                              57/221
#     virtualbox-iso:   Updating   : rhn-client-tools-2.0.2-21.0.9.el7.noarch                  58/221
#     virtualbox-iso:   Updating   : selinux-policy-3.13.1-192.0.6.el7_5.6.noarch              59/221
#     virtualbox-iso:   Updating   : kernel-headers-3.10.0-862.14.4.el7.x86_64                 60/221
#     virtualbox-iso:   Updating   : glibc-headers-2.17-222.0.7.el7.x86_64                     61/221
#     virtualbox-iso:   Updating   : glibc-devel-2.17-222.0.7.el7.x86_64                       62/221
#     virtualbox-iso:   Updating   : gcc-4.8.5-28.0.1.el7_5.1.x86_64                           63/221
#     virtualbox-iso:   Updating   : libdtrace-ctf-0.8.0-1.el7.x86_64                          64/221
#     virtualbox-iso:   Updating   : 32:bind-license-9.9.4-61.el7_5.1.noarch                   65/221
#     virtualbox-iso:   Updating   : 32:bind-libs-lite-9.9.4-61.el7_5.1.x86_64                 66/221
#     virtualbox-iso:   Updating   : 12:dhclient-4.2.5-68.0.1.el7_5.1.x86_64                   67/221
#     virtualbox-iso:   Updating   : dracut-network-033-535.0.5.el7_5.1.x86_64                 68/221
#     virtualbox-iso:   Updating   : firewalld-filesystem-0.4.4.4-15.el7_5.noarch              69/221
#     virtualbox-iso:   Updating   : firewalld-0.4.4.4-15.el7_5.noarch                         70/221
#     virtualbox-iso:   Updating   : kexec-tools-2.0.15-13.0.1.el7_5.2.x86_64                  71/221
#     virtualbox-iso:   Installing : kernel-uek-devel-4.1.12-124.20.3.el7uek.x86_64            72/221
#     virtualbox-iso:   Updating   : selinux-policy-targeted-3.13.1-192.0.6.el7_5.6.noarch     73/221
#     virtualbox-iso:   Updating   : rhn-check-2.0.2-21.0.9.el7.noarch                         74/221
#     virtualbox-iso:   Updating   : rhn-setup-2.0.2-21.0.9.el7.noarch                         75/221
#     virtualbox-iso:   Updating   : tuned-2.9.0-1.el7_5.2.noarch                              76/221
#     virtualbox-iso:   Updating   : audit-2.8.1-3.el7_5.1.x86_64                              77/221
#     virtualbox-iso:   Updating   : libselinux-python-2.5-12.0.1.el7.x86_64                   78/221
#     virtualbox-iso:   Updating   : sudo-1.8.19p2-14.el7_5.x86_64                             79/221
#     virtualbox-iso:   Updating   : 1:openssl-1.0.2k-12.0.3.el7.x86_64                        80/221
#     virtualbox-iso:   Updating   : 1:mariadb-libs-5.5.60-1.el7_5.x86_64                      81/221
#     virtualbox-iso:   Installing : kernel-uek-4.1.12-124.20.3.el7uek.x86_64                  82/221
#     virtualbox-iso:   Updating   : kernel-tools-3.10.0-862.14.4.el7.x86_64                   83/221
#     virtualbox-iso:   Updating   : e2fsprogs-1.42.9-12.el7_5.x86_64                          84/221
#     virtualbox-iso:   Updating   : dracut-config-rescue-033-535.0.5.el7_5.1.x86_64           85/221
#     virtualbox-iso:   Installing : kernel-3.10.0-862.14.4.el7.x86_64                         86/221
#     virtualbox-iso:   Updating   : 7:lvm2-2.02.177-4.0.2.el7.x86_64                          87/221
#     virtualbox-iso:   Updating   : 10:qemu-guest-agent-2.8.0-2.el7_5.1.x86_64                88/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-tui-1.10.2-16.el7_5.x86_64               89/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-team-1.10.2-16.el7_5.x86_64              90/221
#     virtualbox-iso:   Updating   : 2:microcode_ctl-2.1-29.16.0.1.el7_5.x86_64                91/221
#     virtualbox-iso:   Updating   : rsyslog-8.24.0-16.el7_5.4.x86_64                          92/221
#     virtualbox-iso:   Updating   : libgudev1-219-57.0.1.el7_5.3.x86_64                       93/221
#     virtualbox-iso:   Updating   : libselinux-utils-2.5-12.0.1.el7.x86_64                    94/221
#     virtualbox-iso:   Updating   : iwl3945-firmware-15.32.2.9-999.el7.noarch                 95/221
#     virtualbox-iso:   Updating   : iwl100-firmware-39.31.5.1-999.el7.noarch                  96/221
#     virtualbox-iso:   Updating   : iwl6050-firmware-41.28.5.1-999.el7.noarch                 97/221
#     virtualbox-iso:   Updating   : iwl135-firmware-18.168.6.1-999.el7.noarch                 98/221
#     virtualbox-iso:   Updating   : iwl6000g2a-firmware-17.168.5.3-999.el7.noarch             99/221
#     virtualbox-iso:   Installing : kernel-devel-3.10.0-862.14.4.el7.x86_64                  100/221
#     virtualbox-iso:   Updating   : 1:iwl1000-firmware-39.31.5.1-999.el7.noarch              101/221
#     virtualbox-iso:   Updating   : iwl4965-firmware-228.61.2.24-999.el7.noarch              102/221
#     virtualbox-iso:   Updating   : iwl6000-firmware-9.221.4.1-999.el7.noarch                103/221
#     virtualbox-iso:   Updating   : iwl2030-firmware-18.168.6.1-999.el7.noarch               104/221
#     virtualbox-iso:   Updating   : iwl105-firmware-18.168.6.1-999.el7.noarch                105/221
#     virtualbox-iso:   Updating   : iwl7265-firmware-22.0.7.0-999.el7.noarch                 106/221
#     virtualbox-iso:   Updating   : iwl5000-firmware-8.83.5.1_1-999.el7.noarch               107/221
#     virtualbox-iso:   Updating   : iwl6000g2b-firmware-17.168.5.2-999.el7.noarch            108/221
#     virtualbox-iso:   Updating   : iwl2000-firmware-18.168.6.1-999.el7.noarch               109/221
#     virtualbox-iso:   Updating   : iwl7260-firmware-22.0.7.0-999.el7.noarch                 110/221
#     virtualbox-iso:   Updating   : iwl5150-firmware-8.24.2.2-999.el7.noarch                 111/221
#     virtualbox-iso:   Updating   : iwl3160-firmware-22.0.7.0-999.el7.noarch                 112/221
#     virtualbox-iso:   Updating   : 1:NetworkManager-config-server-1.10.2-16.el7_5.noarch    113/221
#     virtualbox-iso:   Cleanup    : tuned-2.9.0-1.el7.noarch                                 114/221
#     virtualbox-iso:   Cleanup    : firewalld-0.4.4.4-14.el7.noarch                          115/221
#     virtualbox-iso:   Cleanup    : rhn-setup-2.0.2-21.0.3.el7.noarch                        116/221
#     virtualbox-iso:   Cleanup    : rhn-check-2.0.2-21.0.3.el7.noarch                        117/221
#     virtualbox-iso:   Cleanup    : rhn-client-tools-2.0.2-21.0.3.el7.noarch                 118/221
#     virtualbox-iso:   Cleanup    : 7:oraclelinux-release-7.5-1.0.3.el7.x86_64               119/221
#     virtualbox-iso:   Cleanup    : yum-3.4.3-158.0.1.el7.noarch                             120/221
#     virtualbox-iso:   Cleanup    : python-firewall-0.4.4.4-14.el7.noarch                    121/221
#     virtualbox-iso:   Cleanup    : selinux-policy-targeted-3.13.1-192.0.1.el7.noarch        122/221
#     virtualbox-iso:   Cleanup    : dracut-config-rescue-033-535.0.1.el7.x86_64              123/221
#     virtualbox-iso:   Cleanup    : selinux-policy-3.13.1-192.0.1.el7.noarch                 124/221
#     virtualbox-iso:   Cleanup    : firewalld-filesystem-0.4.4.4-14.el7.noarch               125/221
#     virtualbox-iso:   Cleanup    : iwl3945-firmware-15.32.2.9-62.el7.noarch                 126/221
#     virtualbox-iso:   Cleanup    : iwl100-firmware-39.31.5.1-62.el7.noarch                  127/221
#     virtualbox-iso:   Cleanup    : iwl6050-firmware-41.28.5.1-62.el7.noarch                 128/221
#     virtualbox-iso:   Cleanup    : iwl135-firmware-18.168.6.1-62.el7.noarch                 129/221
#     virtualbox-iso:   Cleanup    : linux-firmware-20180220-62.git6d51311.0.1.el7.noarch     130/221
#     virtualbox-iso:   Cleanup    : iwl6000g2a-firmware-17.168.5.3-62.el7.noarch             131/221
#     virtualbox-iso:   Cleanup    : 1:iwl1000-firmware-39.31.5.1-62.el7.noarch               132/221
#     virtualbox-iso:   Cleanup    : iwl4965-firmware-228.61.2.24-62.el7.noarch               133/221
#     virtualbox-iso:   Cleanup    : iwl6000-firmware-9.221.4.1-62.el7.noarch                 134/221
#     virtualbox-iso:   Cleanup    : iwl2030-firmware-18.168.6.1-62.el7.noarch                135/221
#     virtualbox-iso:   Cleanup    : iwl105-firmware-18.168.6.1-62.el7.noarch                 136/221
#     virtualbox-iso:   Cleanup    : iwl7265-firmware-22.0.7.0-62.el7.noarch                  137/221
#     virtualbox-iso:   Cleanup    : iwl5000-firmware-8.83.5.1_1-62.el7.noarch                138/221
#     virtualbox-iso:   Cleanup    : iwl6000g2b-firmware-17.168.5.2-62.el7.noarch             139/221
#     virtualbox-iso:   Cleanup    : iwl2000-firmware-18.168.6.1-62.el7.noarch                140/221
#     virtualbox-iso:   Cleanup    : iwl7260-firmware-22.0.7.0-62.el7.noarch                  141/221
#     virtualbox-iso:   Cleanup    : iwl5150-firmware-8.24.2.2-62.el7.noarch                  142/221
#     virtualbox-iso:   Cleanup    : iwl3160-firmware-22.0.7.0-62.el7.noarch                  143/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-config-server-1.10.2-13.el7.noarch      144/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-tui-1.10.2-13.el7.x86_64                145/221
#     virtualbox-iso:   Cleanup    : 7:lvm2-2.02.177-4.0.1.el7.x86_64                         146/221
#     virtualbox-iso:   Cleanup    : 7:lvm2-libs-2.02.177-4.0.1.el7.x86_64                    147/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-event-1.02.146-4.0.1.el7.x86_64          148/221
#     virtualbox-iso:   Cleanup    : e2fsprogs-1.42.9-11.0.1.el7.x86_64                       149/221
#     virtualbox-iso:   Cleanup    : 1:openssl-1.0.2k-12.0.1.el7.x86_64                       150/221
#     virtualbox-iso:   Cleanup    : rsyslog-8.24.0-16.el7.x86_64                             151/221
#     virtualbox-iso:   Cleanup    : 1:mariadb-libs-5.5.56-2.el7.x86_64                       152/221
#     virtualbox-iso:   Cleanup    : audit-2.8.1-3.el7.x86_64                                 153/221
#     virtualbox-iso:   Cleanup    : 10:qemu-guest-agent-2.8.0-2.el7.x86_64                   154/221
#     virtualbox-iso:   Cleanup    : sudo-1.8.19p2-13.el7.x86_64                              155/221
#     virtualbox-iso:   Cleanup    : python-perf-3.10.0-862.el7.x86_64                        156/221
#     virtualbox-iso:   Cleanup    : libselinux-python-2.5-12.el7.x86_64                      157/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-team-1.10.2-13.el7.x86_64               158/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-1.10.2-13.el7.x86_64                    159/221
#     virtualbox-iso:   Cleanup    : 1:NetworkManager-libnm-1.10.2-13.el7.x86_64              160/221
#     virtualbox-iso:   Cleanup    : polkit-0.112-14.el7.x86_64                               161/221
#     virtualbox-iso:   Cleanup    : libgudev1-219-57.0.1.el7.x86_64                          162/221
#     virtualbox-iso:   Cleanup    : kexec-tools-2.0.15-13.0.1.el7.x86_64                     163/221
#     virtualbox-iso:   Cleanup    : kernel-tools-3.10.0-862.el7.x86_64                       164/221
#     virtualbox-iso:   Cleanup    : libstdc++-4.8.5-28.0.1.el7.x86_64                        165/221
#     virtualbox-iso:   Cleanup    : e2fsprogs-libs-1.42.9-11.0.1.el7.x86_64                  166/221
#     virtualbox-iso:   Cleanup    : libss-1.42.9-11.0.1.el7.x86_64                           167/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-event-libs-1.02.146-4.0.1.el7.x86_64     168/221
#     virtualbox-iso:   Cleanup    : gnupg2-2.0.22-4.el7.x86_64                               169/221
#     virtualbox-iso:   Cleanup    : libselinux-utils-2.5-12.el7.x86_64                       170/221
#     virtualbox-iso:   Cleanup    : dracut-network-033-535.0.1.el7.x86_64                    171/221
#     virtualbox-iso:   Cleanup    : 12:dhclient-4.2.5-68.0.1.el7.x86_64                      172/221
#     virtualbox-iso:   Cleanup    : systemd-sysv-219-57.0.1.el7.x86_64                       173/221
#     virtualbox-iso:   Cleanup    : 32:bind-libs-lite-9.9.4-61.el7.x86_64                    174/221
#     virtualbox-iso:   Cleanup    : dracut-033-535.0.1.el7.x86_64                            175/221
#     virtualbox-iso:   Cleanup    : initscripts-9.49.41-1.0.1.el7.x86_64                     176/221
#     virtualbox-iso:   Cleanup    : python-2.7.5-68.0.1.el7.x86_64                           177/221
#     virtualbox-iso:   Cleanup    : python-libs-2.7.5-68.0.1.el7.x86_64                      178/221
#     virtualbox-iso:   Cleanup    : procps-ng-3.3.10-17.el7.x86_64                           179/221
#     virtualbox-iso:   Cleanup    : kpartx-0.4.9-119.el7.x86_64                              180/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-1.02.146-4.0.1.el7.x86_64                181/221
#     virtualbox-iso:   Cleanup    : 7:device-mapper-libs-1.02.146-4.0.1.el7.x86_64           182/221
#     virtualbox-iso:   Cleanup    : util-linux-2.23.2-52.el7.x86_64                          183/221
#     virtualbox-iso:   Cleanup    : libdtrace-ctf-0.7.0-1.el7.x86_64                         184/221
#     virtualbox-iso:   Cleanup    : gcc-4.8.5-28.0.1.el7.x86_64                              185/221
#     virtualbox-iso:   Cleanup    : glibc-devel-2.17-222.el7.x86_64                          186/221
#     virtualbox-iso:   Cleanup    : glibc-headers-2.17-222.el7.x86_64                        187/221
#     virtualbox-iso:   Cleanup    : 2:microcode_ctl-2.1-29.0.2.el7.x86_64                    188/221
#     virtualbox-iso:   Cleanup    : 12:dhcp-common-4.2.5-68.0.1.el7.x86_64                   189/221
#     virtualbox-iso:   Cleanup    : systemd-219-57.0.1.el7.x86_64                            190/221
#     virtualbox-iso:   Cleanup    : 12:dhcp-libs-4.2.5-68.0.1.el7.x86_64                     191/221
#     virtualbox-iso:   Cleanup    : openldap-2.4.44-13.el7.x86_64                            192/221
#     virtualbox-iso:   Cleanup    : nss-tools-3.34.0-4.el7.x86_64                            193/221
#     virtualbox-iso:   Cleanup    : nss-sysinit-3.34.0-4.el7.x86_64                          194/221
#     virtualbox-iso:   Cleanup    : nss-3.34.0-4.el7.x86_64                                  195/221
#     virtualbox-iso:   Cleanup    : nss-softokn-3.34.0-2.0.1.el7.x86_64                      196/221
#     virtualbox-iso:   Cleanup    : krb5-libs-1.15.1-18.el7.x86_64                           197/221
#     virtualbox-iso:   Cleanup    : 1:openssl-libs-1.0.2k-12.0.1.el7.x86_64                  198/221
#     virtualbox-iso:   Cleanup    : libmount-2.23.2-52.el7.x86_64                            199/221
#     virtualbox-iso:   Cleanup    : systemd-libs-219-57.0.1.el7.x86_64                       200/221
#     virtualbox-iso:   Cleanup    : libblkid-2.23.2-52.el7.x86_64                            201/221
#     virtualbox-iso:   Cleanup    : libuuid-2.23.2-52.el7.x86_64                             202/221
#     virtualbox-iso:   Cleanup    : libcom_err-1.42.9-11.0.1.el7.x86_64                      203/221
#     virtualbox-iso:   Cleanup    : audit-libs-2.8.1-3.el7.x86_64                            204/221
#     virtualbox-iso:   Cleanup    : binutils-2.27-27.base.el7.x86_64                         205/221
#     virtualbox-iso:   Cleanup    : cpp-4.8.5-28.0.1.el7.x86_64                              206/221
#     virtualbox-iso:   Cleanup    : libgomp-4.8.5-28.0.1.el7.x86_64                          207/221
#     virtualbox-iso:   Cleanup    : kernel-tools-libs-3.10.0-862.el7.x86_64                  208/221
#     virtualbox-iso:   Cleanup    : iptables-1.4.21-24.el7.x86_64                            209/221
#     virtualbox-iso:   Cleanup    : ca-certificates-2017.2.20-71.el7.noarch                  210/221
#     virtualbox-iso:   Cleanup    : 1:redhat-release-server-7.5-8.0.1.el7.x86_64             211/221
#     virtualbox-iso:   Cleanup    : kernel-headers-3.10.0-862.el7.x86_64                     212/221
#     virtualbox-iso:   Cleanup    : 32:bind-license-9.9.4-61.el7.noarch                      213/221
#     virtualbox-iso:   Cleanup    : libselinux-2.5-12.el7.x86_64                             214/221
#     virtualbox-iso:   Cleanup    : glibc-common-2.17-222.el7.x86_64                         215/221
#     virtualbox-iso:   Cleanup    : nspr-4.17.0-1.el7.x86_64                                 216/221
#     virtualbox-iso:   Cleanup    : nss-util-3.34.0-2.el7.x86_64                             217/221
#     virtualbox-iso:   Cleanup    : nss-softokn-freebl-3.34.0-2.0.1.el7.x86_64               218/221
#     virtualbox-iso:   Cleanup    : glibc-2.17-222.el7.x86_64                                219/221
#     virtualbox-iso:   Cleanup    : tzdata-2018c-1.el7.noarch                                220/221
#     virtualbox-iso:   Cleanup    : libgcc-4.8.5-28.0.1.el7.x86_64                           221/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-config-server-1.10.2-16.el7_5.noarch      1/221
#     virtualbox-iso:   Verifying  : nss-softokn-3.36.0-5.0.1.el7_5.x86_64                      2/221
#     virtualbox-iso:   Verifying  : firewalld-filesystem-0.4.4.4-15.el7_5.noarch               3/221
#     virtualbox-iso:   Verifying  : glibc-devel-2.17-222.0.7.el7.x86_64                        4/221
#     virtualbox-iso:   Verifying  : polkit-0.112-14.0.1.el7.x86_64                             5/221
#     virtualbox-iso:   Verifying  : rhn-check-2.0.2-21.0.9.el7.noarch                          6/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-1.02.146-4.0.2.el7.x86_64            7/221
#     virtualbox-iso:   Verifying  : iwl3160-firmware-22.0.7.0-999.el7.noarch                   8/221
#     virtualbox-iso:   Verifying  : iwl5150-firmware-8.24.2.2-999.el7.noarch                   9/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-tui-1.10.2-16.el7_5.x86_64               10/221
#     virtualbox-iso:   Verifying  : kernel-uek-4.1.12-124.20.3.el7uek.x86_64                  11/221
#     virtualbox-iso:   Verifying  : iwl7260-firmware-22.0.7.0-999.el7.noarch                  12/221
#     virtualbox-iso:   Verifying  : openldap-2.4.44-15.el7_5.x86_64                           13/221
#     virtualbox-iso:   Verifying  : 32:bind-license-9.9.4-61.el7_5.1.noarch                   14/221
#     virtualbox-iso:   Verifying  : iptables-1.4.21-24.1.el7_5.x86_64                         15/221
#     virtualbox-iso:   Verifying  : kernel-headers-3.10.0-862.14.4.el7.x86_64                 16/221
#     virtualbox-iso:   Verifying  : kernel-uek-devel-4.1.12-124.20.3.el7uek.x86_64            17/221
#     virtualbox-iso:   Verifying  : libcom_err-1.42.9-12.el7_5.x86_64                         18/221
#     virtualbox-iso:   Verifying  : nss-sysinit-3.36.0-7.el7_5.x86_64                         19/221
#     virtualbox-iso:   Verifying  : iwl2000-firmware-18.168.6.1-999.el7.noarch                20/221
#     virtualbox-iso:   Verifying  : 12:dhclient-4.2.5-68.0.1.el7_5.1.x86_64                   21/221
#     virtualbox-iso:   Verifying  : python-firewall-0.4.4.4-15.el7_5.noarch                   22/221
#     virtualbox-iso:   Verifying  : util-linux-2.23.2-52.el7_5.1.x86_64                       23/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-1.10.2-16.el7_5.x86_64                   24/221
#     virtualbox-iso:   Verifying  : iwl6000g2b-firmware-17.168.5.2-999.el7.noarch             25/221
#     virtualbox-iso:   Verifying  : glibc-headers-2.17-222.0.7.el7.x86_64                     26/221
#     virtualbox-iso:   Verifying  : iwl5000-firmware-8.83.5.1_1-999.el7.noarch                27/221
#     virtualbox-iso:   Verifying  : iwl7265-firmware-22.0.7.0-999.el7.noarch                  28/221
#     virtualbox-iso:   Verifying  : systemd-219-57.0.1.el7_5.3.x86_64                         29/221
#     virtualbox-iso:   Verifying  : 7:lvm2-2.02.177-4.0.2.el7.x86_64                          30/221
#     virtualbox-iso:   Verifying  : libuuid-2.23.2-52.el7_5.1.x86_64                          31/221
#     virtualbox-iso:   Verifying  : kpartx-0.4.9-119.el7_5.1.x86_64                           32/221
#     virtualbox-iso:   Verifying  : nss-util-3.36.0-1.el7_5.x86_64                            33/221
#     virtualbox-iso:   Verifying  : 32:bind-libs-lite-9.9.4-61.el7_5.1.x86_64                 34/221
#     virtualbox-iso:   Verifying  : 2:microcode_ctl-2.1-29.16.0.1.el7_5.x86_64                35/221
#     virtualbox-iso:   Verifying  : cpp-4.8.5-28.0.1.el7_5.1.x86_64                           36/221
#     virtualbox-iso:   Verifying  : 10:qemu-guest-agent-2.8.0-2.el7_5.1.x86_64                37/221
#     virtualbox-iso:   Verifying  : tuned-2.9.0-1.el7_5.2.noarch                              38/221
#     virtualbox-iso:   Verifying  : kernel-tools-3.10.0-862.14.4.el7.x86_64                   39/221
#     virtualbox-iso:   Verifying  : iwl105-firmware-18.168.6.1-999.el7.noarch                 40/221
#     virtualbox-iso:   Verifying  : e2fsprogs-1.42.9-12.el7_5.x86_64                          41/221
#     virtualbox-iso:   Verifying  : libselinux-utils-2.5-12.0.1.el7.x86_64                    42/221
#     virtualbox-iso:   Verifying  : selinux-policy-3.13.1-192.0.6.el7_5.6.noarch              43/221
#     virtualbox-iso:   Verifying  : rsyslog-8.24.0-16.el7_5.4.x86_64                          44/221
#     virtualbox-iso:   Verifying  : libstdc++-4.8.5-28.0.1.el7_5.1.x86_64                     45/221
#     virtualbox-iso:   Verifying  : sudo-1.8.19p2-14.el7_5.x86_64                             46/221
#     virtualbox-iso:   Verifying  : kexec-tools-2.0.15-13.0.1.el7_5.2.x86_64                  47/221
#     virtualbox-iso:   Verifying  : iwl2030-firmware-18.168.6.1-999.el7.noarch                48/221
#     virtualbox-iso:   Verifying  : ca-certificates-2018.2.22-70.0.el7_5.noarch               49/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-libs-1.02.146-4.0.2.el7.x86_64      50/221
#     virtualbox-iso:   Verifying  : dracut-config-rescue-033-535.0.5.el7_5.1.x86_64           51/221
#     virtualbox-iso:   Verifying  : iwl6000-firmware-9.221.4.1-999.el7.noarch                 52/221
#     virtualbox-iso:   Verifying  : libgudev1-219-57.0.1.el7_5.3.x86_64                       53/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-team-1.10.2-16.el7_5.x86_64              54/221
#     virtualbox-iso:   Verifying  : yum-3.4.3-158.0.2.el7.noarch                              55/221
#     virtualbox-iso:   Verifying  : kernel-3.10.0-862.14.4.el7.x86_64                         56/221
#     virtualbox-iso:   Verifying  : python-2.7.5-69.0.1.el7_5.x86_64                          57/221
#     virtualbox-iso:   Verifying  : systemd-libs-219-57.0.1.el7_5.3.x86_64                    58/221
#     virtualbox-iso:   Verifying  : iwl4965-firmware-228.61.2.24-999.el7.noarch               59/221
#     virtualbox-iso:   Verifying  : rhn-setup-2.0.2-21.0.9.el7.noarch                         60/221
#     virtualbox-iso:   Verifying  : gnupg2-2.0.22-5.el7_5.x86_64                              61/221
#     virtualbox-iso:   Verifying  : libss-1.42.9-12.el7_5.x86_64                              62/221
#     virtualbox-iso:   Verifying  : 1:iwl1000-firmware-39.31.5.1-999.el7.noarch               63/221
#     virtualbox-iso:   Verifying  : initscripts-9.49.41-1.0.4.el7_5.2.x86_64                  64/221
#     virtualbox-iso:   Verifying  : glibc-common-2.17-222.0.7.el7.x86_64                      65/221
#     virtualbox-iso:   Verifying  : 7:lvm2-libs-2.02.177-4.0.2.el7.x86_64                     66/221
#     virtualbox-iso:   Verifying  : 1:openssl-1.0.2k-12.0.3.el7.x86_64                        67/221
#     virtualbox-iso:   Verifying  : e2fsprogs-libs-1.42.9-12.el7_5.x86_64                     68/221
#     virtualbox-iso:   Verifying  : kernel-devel-3.10.0-862.14.4.el7.x86_64                   69/221
#     virtualbox-iso:   Verifying  : iwl6000g2a-firmware-17.168.5.3-999.el7.noarch             70/221
#     virtualbox-iso:   Verifying  : libselinux-python-2.5-12.0.1.el7.x86_64                   71/221
#     virtualbox-iso:   Verifying  : binutils-2.27-28.base.el7_5.1.x86_64                      72/221
#     virtualbox-iso:   Verifying  : procps-ng-3.3.10-17.el7_5.2.x86_64                        73/221
#     virtualbox-iso:   Verifying  : linux-firmware-20180906-999.git85c5d90f.el7.noarch        74/221
#     virtualbox-iso:   Verifying  : glibc-2.17-222.0.7.el7.x86_64                             75/221
#     virtualbox-iso:   Verifying  : 1:openssl-libs-1.0.2k-12.0.3.el7.x86_64                   76/221
#     virtualbox-iso:   Verifying  : audit-libs-2.8.1-3.el7_5.1.x86_64                         77/221
#     virtualbox-iso:   Verifying  : nspr-4.19.0-1.el7_5.x86_64                                78/221
#     virtualbox-iso:   Verifying  : firewalld-0.4.4.4-15.el7_5.noarch                         79/221
#     virtualbox-iso:   Verifying  : libdtrace-ctf-0.8.0-1.el7.x86_64                          80/221
#     virtualbox-iso:   Verifying  : python-libs-2.7.5-69.0.1.el7_5.x86_64                     81/221
#     virtualbox-iso:   Verifying  : libgomp-4.8.5-28.0.1.el7_5.1.x86_64                       82/221
#     virtualbox-iso:   Verifying  : kernel-tools-libs-3.10.0-862.14.4.el7.x86_64              83/221
#     virtualbox-iso:   Verifying  : 7:oraclelinux-release-7.5-1.0.5.el7.x86_64                84/221
#     virtualbox-iso:   Verifying  : systemd-sysv-219-57.0.1.el7_5.3.x86_64                    85/221
#     virtualbox-iso:   Verifying  : iwl135-firmware-18.168.6.1-999.el7.noarch                 86/221
#     virtualbox-iso:   Verifying  : iwl6050-firmware-41.28.5.1-999.el7.noarch                 87/221
#     virtualbox-iso:   Verifying  : audit-2.8.1-3.el7_5.1.x86_64                              88/221
#     virtualbox-iso:   Verifying  : iwl100-firmware-39.31.5.1-999.el7.noarch                  89/221
#     virtualbox-iso:   Verifying  : nss-3.36.0-7.el7_5.x86_64                                 90/221
#     virtualbox-iso:   Verifying  : nss-softokn-freebl-3.36.0-5.0.1.el7_5.x86_64              91/221
#     virtualbox-iso:   Verifying  : kernel-uek-firmware-4.1.12-124.20.3.el7uek.noarch         92/221
#     virtualbox-iso:   Verifying  : 1:redhat-release-server-7.5-8.0.5.el7.x86_64              93/221
#     virtualbox-iso:   Verifying  : 12:dhcp-libs-4.2.5-68.0.1.el7_5.1.x86_64                  94/221
#     virtualbox-iso:   Verifying  : selinux-policy-targeted-3.13.1-192.0.6.el7_5.6.noarch     95/221
#     virtualbox-iso:   Verifying  : 1:mariadb-libs-5.5.60-1.el7_5.x86_64                      96/221
#     virtualbox-iso:   Verifying  : 12:dhcp-common-4.2.5-68.0.1.el7_5.1.x86_64                97/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-libnm-1.10.2-16.el7_5.x86_64             98/221
#     virtualbox-iso:   Verifying  : libmount-2.23.2-52.el7_5.1.x86_64                         99/221
#     virtualbox-iso:   Verifying  : rhn-client-tools-2.0.2-21.0.9.el7.noarch                 100/221
#     virtualbox-iso:   Verifying  : python-perf-3.10.0-862.14.4.el7.x86_64                   101/221
#     virtualbox-iso:   Verifying  : libblkid-2.23.2-52.el7_5.1.x86_64                        102/221
#     virtualbox-iso:   Verifying  : gcc-4.8.5-28.0.1.el7_5.1.x86_64                          103/221
#     virtualbox-iso:   Verifying  : tzdata-2018e-3.el7.noarch                                104/221
#     virtualbox-iso:   Verifying  : krb5-libs-1.15.1-19.el7.x86_64                           105/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-1.02.146-4.0.2.el7.x86_64                106/221
#     virtualbox-iso:   Verifying  : dracut-033-535.0.5.el7_5.1.x86_64                        107/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-libs-1.02.146-4.0.2.el7.x86_64           108/221
#     virtualbox-iso:   Verifying  : iwl3945-firmware-15.32.2.9-999.el7.noarch                109/221
#     virtualbox-iso:   Verifying  : nss-tools-3.36.0-7.el7_5.x86_64                          110/221
#     virtualbox-iso:   Verifying  : dracut-network-033-535.0.5.el7_5.1.x86_64                111/221
#     virtualbox-iso:   Verifying  : libgcc-4.8.5-28.0.1.el7_5.1.x86_64                       112/221
#     virtualbox-iso:   Verifying  : libselinux-2.5-12.0.1.el7.x86_64                         113/221
#     virtualbox-iso:   Verifying  : libselinux-python-2.5-12.el7.x86_64                      114/221
#     virtualbox-iso:   Verifying  : 12:dhcp-common-4.2.5-68.0.1.el7.x86_64                   115/221
#     virtualbox-iso:   Verifying  : nss-sysinit-3.34.0-4.el7.x86_64                          116/221
#     virtualbox-iso:   Verifying  : iwl5150-firmware-8.24.2.2-62.el7.noarch                  117/221
#     virtualbox-iso:   Verifying  : glibc-devel-2.17-222.el7.x86_64                          118/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-libs-1.02.146-4.0.1.el7.x86_64     119/221
#     virtualbox-iso:   Verifying  : iwl4965-firmware-228.61.2.24-62.el7.noarch               120/221
#     virtualbox-iso:   Verifying  : iwl6000-firmware-9.221.4.1-62.el7.noarch                 121/221
#     virtualbox-iso:   Verifying  : 1:iwl1000-firmware-39.31.5.1-62.el7.noarch               122/221
#     virtualbox-iso:   Verifying  : polkit-0.112-14.el7.x86_64                               123/221
#     virtualbox-iso:   Verifying  : libss-1.42.9-11.0.1.el7.x86_64                           124/221
#     virtualbox-iso:   Verifying  : 1:redhat-release-server-7.5-8.0.1.el7.x86_64             125/221
#     virtualbox-iso:   Verifying  : iwl7265-firmware-22.0.7.0-62.el7.noarch                  126/221
#     virtualbox-iso:   Verifying  : systemd-219-57.0.1.el7.x86_64                            127/221
#     virtualbox-iso:   Verifying  : python-libs-2.7.5-68.0.1.el7.x86_64                      128/221
#     virtualbox-iso:   Verifying  : firewalld-0.4.4.4-14.el7.noarch                          129/221
#     virtualbox-iso:   Verifying  : libgomp-4.8.5-28.0.1.el7.x86_64                          130/221
#     virtualbox-iso:   Verifying  : initscripts-9.49.41-1.0.1.el7.x86_64                     131/221
#     virtualbox-iso:   Verifying  : gcc-4.8.5-28.0.1.el7.x86_64                              132/221
#     virtualbox-iso:   Verifying  : libgudev1-219-57.0.1.el7.x86_64                          133/221
#     virtualbox-iso:   Verifying  : cpp-4.8.5-28.0.1.el7.x86_64                              134/221
#     virtualbox-iso:   Verifying  : util-linux-2.23.2-52.el7.x86_64                          135/221
#     virtualbox-iso:   Verifying  : nss-3.34.0-4.el7.x86_64                                  136/221
#     virtualbox-iso:   Verifying  : tzdata-2018c-1.el7.noarch                                137/221
#     virtualbox-iso:   Verifying  : yum-3.4.3-158.0.1.el7.noarch                             138/221
#     virtualbox-iso:   Verifying  : nss-util-3.34.0-2.el7.x86_64                             139/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-config-server-1.10.2-13.el7.noarch      140/221
#     virtualbox-iso:   Verifying  : libselinux-2.5-12.el7.x86_64                             141/221
#     virtualbox-iso:   Verifying  : iptables-1.4.21-24.el7.x86_64                            142/221
#     virtualbox-iso:   Verifying  : 32:bind-libs-lite-9.9.4-61.el7.x86_64                    143/221
#     virtualbox-iso:   Verifying  : gnupg2-2.0.22-4.el7.x86_64                               144/221
#     virtualbox-iso:   Verifying  : selinux-policy-targeted-3.13.1-192.0.1.el7.noarch        145/221
#     virtualbox-iso:   Verifying  : nss-softokn-freebl-3.34.0-2.0.1.el7.x86_64               146/221
#     virtualbox-iso:   Verifying  : iwl3160-firmware-22.0.7.0-62.el7.noarch                  147/221
#     virtualbox-iso:   Verifying  : dracut-config-rescue-033-535.0.1.el7.x86_64              148/221
#     virtualbox-iso:   Verifying  : 12:dhcp-libs-4.2.5-68.0.1.el7.x86_64                     149/221
#     virtualbox-iso:   Verifying  : iwl100-firmware-39.31.5.1-62.el7.noarch                  150/221
#     virtualbox-iso:   Verifying  : 2:microcode_ctl-2.1-29.0.2.el7.x86_64                    151/221
#     virtualbox-iso:   Verifying  : iwl6050-firmware-41.28.5.1-62.el7.noarch                 152/221
#     virtualbox-iso:   Verifying  : iwl6000g2a-firmware-17.168.5.3-62.el7.noarch             153/221
#     virtualbox-iso:   Verifying  : iwl7260-firmware-22.0.7.0-62.el7.noarch                  154/221
#     virtualbox-iso:   Verifying  : libdtrace-ctf-0.7.0-1.el7.x86_64                         155/221
#     virtualbox-iso:   Verifying  : 1:openssl-1.0.2k-12.0.1.el7.x86_64                       156/221
#     virtualbox-iso:   Verifying  : binutils-2.27-27.base.el7.x86_64                         157/221
#     virtualbox-iso:   Verifying  : tuned-2.9.0-1.el7.noarch                                 158/221
#     virtualbox-iso:   Verifying  : libgcc-4.8.5-28.0.1.el7.x86_64                           159/221
#     virtualbox-iso:   Verifying  : glibc-2.17-222.el7.x86_64                                160/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-1.10.2-13.el7.x86_64                    161/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-libs-1.02.146-4.0.1.el7.x86_64           162/221
#     virtualbox-iso:   Verifying  : 1:openssl-libs-1.0.2k-12.0.1.el7.x86_64                  163/221
#     virtualbox-iso:   Verifying  : audit-2.8.1-3.el7.x86_64                                 164/221
#     virtualbox-iso:   Verifying  : e2fsprogs-libs-1.42.9-11.0.1.el7.x86_64                  165/221
#     virtualbox-iso:   Verifying  : dracut-033-535.0.1.el7.x86_64                            166/221
#     virtualbox-iso:   Verifying  : kernel-tools-libs-3.10.0-862.el7.x86_64                  167/221
#     virtualbox-iso:   Verifying  : iwl105-firmware-18.168.6.1-62.el7.noarch                 168/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-event-1.02.146-4.0.1.el7.x86_64          169/221
#     virtualbox-iso:   Verifying  : 32:bind-license-9.9.4-61.el7.noarch                      170/221
#     virtualbox-iso:   Verifying  : kernel-headers-3.10.0-862.el7.x86_64                     171/221
#     virtualbox-iso:   Verifying  : iwl3945-firmware-15.32.2.9-62.el7.noarch                 172/221
#     virtualbox-iso:   Verifying  : 10:qemu-guest-agent-2.8.0-2.el7.x86_64                   173/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-tui-1.10.2-13.el7.x86_64                174/221
#     virtualbox-iso:   Verifying  : ca-certificates-2017.2.20-71.el7.noarch                  175/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-libnm-1.10.2-13.el7.x86_64              176/221
#     virtualbox-iso:   Verifying  : libuuid-2.23.2-52.el7.x86_64                             177/221
#     virtualbox-iso:   Verifying  : linux-firmware-20180220-62.git6d51311.0.1.el7.noarch     178/221
#     virtualbox-iso:   Verifying  : libcom_err-1.42.9-11.0.1.el7.x86_64                      179/221
#     virtualbox-iso:   Verifying  : 7:oraclelinux-release-7.5-1.0.3.el7.x86_64               180/221
#     virtualbox-iso:   Verifying  : rsyslog-8.24.0-16.el7.x86_64                             181/221
#     virtualbox-iso:   Verifying  : libmount-2.23.2-52.el7.x86_64                            182/221
#     virtualbox-iso:   Verifying  : audit-libs-2.8.1-3.el7.x86_64                            183/221
#     virtualbox-iso:   Verifying  : rhn-setup-2.0.2-21.0.3.el7.noarch                        184/221
#     virtualbox-iso:   Verifying  : iwl6000g2b-firmware-17.168.5.2-62.el7.noarch             185/221
#     virtualbox-iso:   Verifying  : openldap-2.4.44-13.el7.x86_64                            186/221
#     virtualbox-iso:   Verifying  : selinux-policy-3.13.1-192.0.1.el7.noarch                 187/221
#     virtualbox-iso:   Verifying  : 1:NetworkManager-team-1.10.2-13.el7.x86_64               188/221
#     virtualbox-iso:   Verifying  : firewalld-filesystem-0.4.4.4-14.el7.noarch               189/221
#     virtualbox-iso:   Verifying  : rhn-client-tools-2.0.2-21.0.3.el7.noarch                 190/221
#     virtualbox-iso:   Verifying  : 7:lvm2-libs-2.02.177-4.0.1.el7.x86_64                    191/221
#     virtualbox-iso:   Verifying  : kexec-tools-2.0.15-13.0.1.el7.x86_64                     192/221
#     virtualbox-iso:   Verifying  : nspr-4.17.0-1.el7.x86_64                                 193/221
#     virtualbox-iso:   Verifying  : iwl2000-firmware-18.168.6.1-62.el7.noarch                194/221
#     virtualbox-iso:   Verifying  : systemd-libs-219-57.0.1.el7.x86_64                       195/221
#     virtualbox-iso:   Verifying  : iwl2030-firmware-18.168.6.1-62.el7.noarch                196/221
#     virtualbox-iso:   Verifying  : glibc-common-2.17-222.el7.x86_64                         197/221
#     virtualbox-iso:   Verifying  : sudo-1.8.19p2-13.el7.x86_64                              198/221
#     virtualbox-iso:   Verifying  : python-perf-3.10.0-862.el7.x86_64                        199/221
#     virtualbox-iso:   Verifying  : iwl135-firmware-18.168.6.1-62.el7.noarch                 200/221
#     virtualbox-iso:   Verifying  : nss-softokn-3.34.0-2.0.1.el7.x86_64                      201/221
#     virtualbox-iso:   Verifying  : procps-ng-3.3.10-17.el7.x86_64                           202/221
#     virtualbox-iso:   Verifying  : python-2.7.5-68.0.1.el7.x86_64                           203/221
#     virtualbox-iso:   Verifying  : libselinux-utils-2.5-12.el7.x86_64                       204/221
#     virtualbox-iso:   Verifying  : glibc-headers-2.17-222.el7.x86_64                        205/221
#     virtualbox-iso:   Verifying  : libblkid-2.23.2-52.el7.x86_64                            206/221
#     virtualbox-iso:   Verifying  : python-firewall-0.4.4.4-14.el7.noarch                    207/221
#     virtualbox-iso:   Verifying  : nss-tools-3.34.0-4.el7.x86_64                            208/221
#     virtualbox-iso:   Verifying  : krb5-libs-1.15.1-18.el7.x86_64                           209/221
#     virtualbox-iso:   Verifying  : e2fsprogs-1.42.9-11.0.1.el7.x86_64                       210/221
#     virtualbox-iso:   Verifying  : 7:device-mapper-1.02.146-4.0.1.el7.x86_64                211/221
#     virtualbox-iso:   Verifying  : kernel-tools-3.10.0-862.el7.x86_64                       212/221
#     virtualbox-iso:   Verifying  : dracut-network-033-535.0.1.el7.x86_64                    213/221
#     virtualbox-iso:   Verifying  : 12:dhclient-4.2.5-68.0.1.el7.x86_64                      214/221
#     virtualbox-iso:   Verifying  : rhn-check-2.0.2-21.0.3.el7.noarch                        215/221
#     virtualbox-iso:   Verifying  : kpartx-0.4.9-119.el7.x86_64                              216/221
#     virtualbox-iso:   Verifying  : iwl5000-firmware-8.83.5.1_1-62.el7.noarch                217/221
#     virtualbox-iso:   Verifying  : libstdc++-4.8.5-28.0.1.el7.x86_64                        218/221
#     virtualbox-iso:   Verifying  : systemd-sysv-219-57.0.1.el7.x86_64                       219/221
#     virtualbox-iso:   Verifying  : 1:mariadb-libs-5.5.56-2.el7.x86_64                       220/221
#     virtualbox-iso:   Verifying  : 7:lvm2-2.02.177-4.0.1.el7.x86_64                         221/221
#     virtualbox-iso:
#     virtualbox-iso: Installed:
#     virtualbox-iso:   kernel.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-devel.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-uek.x86_64 0:4.1.12-124.20.3.el7uek
#     virtualbox-iso:   kernel-uek-devel.x86_64 0:4.1.12-124.20.3.el7uek
#     virtualbox-iso:   kernel-uek-firmware.noarch 0:4.1.12-124.20.3.el7uek
#     virtualbox-iso:
#     virtualbox-iso: Updated:
#     virtualbox-iso:   NetworkManager.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-config-server.noarch 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-libnm.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-team.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   NetworkManager-tui.x86_64 1:1.10.2-16.el7_5
#     virtualbox-iso:   audit.x86_64 0:2.8.1-3.el7_5.1
#     virtualbox-iso:   audit-libs.x86_64 0:2.8.1-3.el7_5.1
#     virtualbox-iso:   bind-libs-lite.x86_64 32:9.9.4-61.el7_5.1
#     virtualbox-iso:   bind-license.noarch 32:9.9.4-61.el7_5.1
#     virtualbox-iso:   binutils.x86_64 0:2.27-28.base.el7_5.1
#     virtualbox-iso:   ca-certificates.noarch 0:2018.2.22-70.0.el7_5
#     virtualbox-iso:   cpp.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   device-mapper.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   device-mapper-event.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   device-mapper-event-libs.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   device-mapper-libs.x86_64 7:1.02.146-4.0.2.el7
#     virtualbox-iso:   dhclient.x86_64 12:4.2.5-68.0.1.el7_5.1
#     virtualbox-iso:   dhcp-common.x86_64 12:4.2.5-68.0.1.el7_5.1
#     virtualbox-iso:   dhcp-libs.x86_64 12:4.2.5-68.0.1.el7_5.1
#     virtualbox-iso:   dracut.x86_64 0:033-535.0.5.el7_5.1
#     virtualbox-iso:   dracut-config-rescue.x86_64 0:033-535.0.5.el7_5.1
#     virtualbox-iso:   dracut-network.x86_64 0:033-535.0.5.el7_5.1
#     virtualbox-iso:   e2fsprogs.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   e2fsprogs-libs.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   firewalld.noarch 0:0.4.4.4-15.el7_5
#     virtualbox-iso:   firewalld-filesystem.noarch 0:0.4.4.4-15.el7_5
#     virtualbox-iso:   gcc.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   glibc.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   glibc-common.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   glibc-devel.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   glibc-headers.x86_64 0:2.17-222.0.7.el7
#     virtualbox-iso:   gnupg2.x86_64 0:2.0.22-5.el7_5
#     virtualbox-iso:   initscripts.x86_64 0:9.49.41-1.0.4.el7_5.2
#     virtualbox-iso:   iptables.x86_64 0:1.4.21-24.1.el7_5
#     virtualbox-iso:   iwl100-firmware.noarch 0:39.31.5.1-999.el7
#     virtualbox-iso:   iwl1000-firmware.noarch 1:39.31.5.1-999.el7
#     virtualbox-iso:   iwl105-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl135-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl2000-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl2030-firmware.noarch 0:18.168.6.1-999.el7
#     virtualbox-iso:   iwl3160-firmware.noarch 0:22.0.7.0-999.el7
#     virtualbox-iso:   iwl3945-firmware.noarch 0:15.32.2.9-999.el7
#     virtualbox-iso:   iwl4965-firmware.noarch 0:228.61.2.24-999.el7
#     virtualbox-iso:   iwl5000-firmware.noarch 0:8.83.5.1_1-999.el7
#     virtualbox-iso:   iwl5150-firmware.noarch 0:8.24.2.2-999.el7
#     virtualbox-iso:   iwl6000-firmware.noarch 0:9.221.4.1-999.el7
#     virtualbox-iso:   iwl6000g2a-firmware.noarch 0:17.168.5.3-999.el7
#     virtualbox-iso:   iwl6000g2b-firmware.noarch 0:17.168.5.2-999.el7
#     virtualbox-iso:   iwl6050-firmware.noarch 0:41.28.5.1-999.el7
#     virtualbox-iso:   iwl7260-firmware.noarch 0:22.0.7.0-999.el7
#     virtualbox-iso:   iwl7265-firmware.noarch 0:22.0.7.0-999.el7
#     virtualbox-iso:   kernel-headers.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-tools.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kernel-tools-libs.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   kexec-tools.x86_64 0:2.0.15-13.0.1.el7_5.2
#     virtualbox-iso:   kpartx.x86_64 0:0.4.9-119.el7_5.1
#     virtualbox-iso:   krb5-libs.x86_64 0:1.15.1-19.el7
#     virtualbox-iso:   libblkid.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   libcom_err.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   libdtrace-ctf.x86_64 0:0.8.0-1.el7
#     virtualbox-iso:   libgcc.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   libgomp.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   libgudev1.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   libmount.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   libselinux.x86_64 0:2.5-12.0.1.el7
#     virtualbox-iso:   libselinux-python.x86_64 0:2.5-12.0.1.el7
#     virtualbox-iso:   libselinux-utils.x86_64 0:2.5-12.0.1.el7
#     virtualbox-iso:   libss.x86_64 0:1.42.9-12.el7_5
#     virtualbox-iso:   libstdc++.x86_64 0:4.8.5-28.0.1.el7_5.1
#     virtualbox-iso:   libuuid.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   linux-firmware.noarch 0:20180906-999.git85c5d90f.el7
#     virtualbox-iso:   lvm2.x86_64 7:2.02.177-4.0.2.el7
#     virtualbox-iso:   lvm2-libs.x86_64 7:2.02.177-4.0.2.el7
#     virtualbox-iso:   mariadb-libs.x86_64 1:5.5.60-1.el7_5
#     virtualbox-iso:   microcode_ctl.x86_64 2:2.1-29.16.0.1.el7_5
#     virtualbox-iso:   nspr.x86_64 0:4.19.0-1.el7_5
#     virtualbox-iso:   nss.x86_64 0:3.36.0-7.el7_5
#     virtualbox-iso:   nss-softokn.x86_64 0:3.36.0-5.0.1.el7_5
#     virtualbox-iso:   nss-softokn-freebl.x86_64 0:3.36.0-5.0.1.el7_5
#     virtualbox-iso:   nss-sysinit.x86_64 0:3.36.0-7.el7_5
#     virtualbox-iso:   nss-tools.x86_64 0:3.36.0-7.el7_5
#     virtualbox-iso:   nss-util.x86_64 0:3.36.0-1.el7_5
#     virtualbox-iso:   openldap.x86_64 0:2.4.44-15.el7_5
#     virtualbox-iso:   openssl.x86_64 1:1.0.2k-12.0.3.el7
#     virtualbox-iso:   openssl-libs.x86_64 1:1.0.2k-12.0.3.el7
#     virtualbox-iso:   oraclelinux-release.x86_64 7:7.5-1.0.5.el7
#     virtualbox-iso:   polkit.x86_64 0:0.112-14.0.1.el7
#     virtualbox-iso:   procps-ng.x86_64 0:3.3.10-17.el7_5.2
#     virtualbox-iso:   python.x86_64 0:2.7.5-69.0.1.el7_5
#     virtualbox-iso:   python-firewall.noarch 0:0.4.4.4-15.el7_5
#     virtualbox-iso:   python-libs.x86_64 0:2.7.5-69.0.1.el7_5
#     virtualbox-iso:   python-perf.x86_64 0:3.10.0-862.14.4.el7
#     virtualbox-iso:   qemu-guest-agent.x86_64 10:2.8.0-2.el7_5.1
#     virtualbox-iso:   redhat-release-server.x86_64 1:7.5-8.0.5.el7
#     virtualbox-iso:   rhn-check.noarch 0:2.0.2-21.0.9.el7
#     virtualbox-iso:   rhn-client-tools.noarch 0:2.0.2-21.0.9.el7
#     virtualbox-iso:   rhn-setup.noarch 0:2.0.2-21.0.9.el7
#     virtualbox-iso:   rsyslog.x86_64 0:8.24.0-16.el7_5.4
#     virtualbox-iso:   selinux-policy.noarch 0:3.13.1-192.0.6.el7_5.6
#     virtualbox-iso:   selinux-policy-targeted.noarch 0:3.13.1-192.0.6.el7_5.6
#     virtualbox-iso:   sudo.x86_64 0:1.8.19p2-14.el7_5
#     virtualbox-iso:   systemd.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   systemd-libs.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   systemd-sysv.x86_64 0:219-57.0.1.el7_5.3
#     virtualbox-iso:   tuned.noarch 0:2.9.0-1.el7_5.2
#     virtualbox-iso:   tzdata.noarch 0:2018e-3.el7
#     virtualbox-iso:   util-linux.x86_64 0:2.23.2-52.el7_5.1
#     virtualbox-iso:   yum.noarch 0:3.4.3-158.0.2.el7
#     virtualbox-iso:
#     virtualbox-iso: Complete!
#     virtualbox-iso: Loaded plugins: ulninfo
#     virtualbox-iso: Cleaning repos: ol7_UEKR4 ol7_latest
#     virtualbox-iso: Cleaning up everything
#     virtualbox-iso: Maybe you want: rm -rf /var/cache/yum, to also free up space taken by orphaned data from disabled or removed repos
#     virtualbox-iso: useradd: user 'vagrant' already exists
# ==> virtualbox-iso: Pausing 1s before the next provisioner...
# ==> virtualbox-iso: Provisioning with shell script: E:\tmp\packer-shell139546903
#     virtualbox-iso: Verifying archive integrity... All good.
#     virtualbox-iso: Uncompressing VirtualBox 5.1.28 Guest Additions for Linux...........
#     virtualbox-iso: VirtualBox Guest Additions installer
#     virtualbox-iso: Copying additional installer modules ...
#     virtualbox-iso: Installing additional modules ...
#     virtualbox-iso: vboxadd.sh: Starting the VirtualBox Guest Additions.
#     virtualbox-iso:
#     virtualbox-iso: Could not find the X.Org or XFree86 Window System, skipping.
# ==> virtualbox-iso: Gracefully halting virtual machine...
# ==> virtualbox-iso: Preparing to export machine...
#     virtualbox-iso: Deleting forwarded port mapping for the communicator (SSH, WinRM, etc) (host port 3482)
# ==> virtualbox-iso: Exporting virtual machine...
#     virtualbox-iso: Executing: export packer-ol75-1d-base --output output-ol75-base.vzell.de\packer-ol75-1d-base.ovf --manifest --vsys 0 --description Oracle Linux 7 Update 5, with one additional 2TB data disk
#     virtualbox-iso: 
#     virtualbox-iso: prepared by Dr. Volker Zell --version 0.9.1
# ==> virtualbox-iso: Keeping virtual machine registered with VirtualBox host (keep_registered = true)
# Build 'virtualbox-iso' finished.
# 
# ==> Builds finished. The artifacts of successful builds are:
# --> virtualbox-iso: VM files in directory: output-ol75-base.vzell.de
# 
# real	14m48.143s
# user	0m0.000s
# sys	0m0.016s
Resulting filesystem structure
tree -a /misc/packer
# /misc/packer
# └── ol
#     ├── 7.5
#     │   ├── http
#     │   │   └── ks.cfg
#     │   ├── iso
#     │   │   └── iso-info.json
#     │   ├── output-ol75-base.vzell.de
#     │   │   ├── .vagrant
#     │   │   │   └── rgloader
#     │   │   │       └── loader.rb
#     │   │   ├── box.ovf
#     │   │   ├── info.json
#     │   │   ├── metadata.json
#     │   │   ├── packer-ol75-base.mf
#     │   │   ├── packer-ol75-base.vdi
#     │   │   ├── packer-ol75-base-disk001.vmdk
#     │   │   └── Vagrantfile
#     │   ├── packer.json
#     │   └── packer_cache
#     └── 7.5-1d
#         ├── .virtualbox
#         │   └── data1.vdi
#         ├── http
#         │   └── ks.cfg
#         ├── iso
#         │   ├── iso-info.json
#         │   └── V975367-01.iso
#         ├── output-ol75-base.vzell.de
#         │   ├── packer-ol75-1d-base.mf
#         │   ├── packer-ol75-1d-base.ovf
#         │   ├── packer-ol75-1d-base.vdi
#         │   ├── packer-ol75-1d-base-disk001.vmdk
#         │   └── packer-ol75-1d-base-disk002.vmdk
#         ├── packer.json
#         └── packer_cache

Oracle Linux 6.10

export ol_ver=6.10
mkdir -p /misc/packer/ol/${ol_ver}/{iso,http} && cd /misc/packer/ol/${ol_ver}
Download Oracle Linux DVD ISO image from Oracle

Download the Oracle Linux DVD from https://edelivery.oracle.com and place V978757-01.iso into the /misc/packer/ol/6.10/iso folder.
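
Once the download completes, it is worth confirming the ISO really landed where the Packer template below expects it (a quick optional check; output not shown):

ls -lh /misc/packer/ol/${ol_ver}/iso/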

Kickstart file
hostname=ol610-base
domain=vzell.de
swap_size=16000  # in MB (16 GB)
root_size=393096 # in MB (400 GB)
cat > http/ks.cfg <<-_EOF
#version=DEVEL

# Install OS instead of upgrade
install

# Use CDROM installation media
cdrom

# Use text mode install
text

# Use graphical install
# graphical

# Reboot after installation
reboot

# Do NOT Run the Setup Agent on first boot
firstboot --disable

# System language
lang en_US.UTF-8

# Keyboard layouts
keyboard de

# System timezone
timezone --utc Europe/Berlin

# Network information
network --bootproto dhcp --device eth0 --ipv6 auto --onboot yes
network --hostname ${hostname}.${domain}

# System authorization information
auth --enableshadow --passalgo=sha512

# Root password
rootpw vagrant

# Create additional user
user --name=vagrant --plaintext --password=vagrant --gecos="Vagrant"

# Specifies a list of disks for the installation program to use
ignoredisk --only-use=sda

# System bootloader configuration
bootloader --location=mbr --driveorder=sda --append="crashkernel=auto rhgb quiet"

# Clear the Master Boot Record
zerombr

# Partition clearing information
clearpart --all --drives=sda --initlabel

# Disk partitioning information
part pv.008002 --grow --size=1
part /boot --fstype=ext4 --size=500
volgroup ol --pesize=4096 pv.008002
logvol swap --size=${swap_size} --name=swap --vgname=ol
logvol / --fstype=ext4 --size=${root_size} --name=root --vgname=ol

# Firewall configuration
firewall --enabled --service=ssh

# SELinux configuration
selinux --enforcing

# Installation logging level
logging --level=info

# Do not configure the X Window System
skipx

# Additional yum repositories that may be used as sources for package installation
repo --name="UEK4 kernel repo" --baseurl=file://anaconda-addon

# Packages section (minimal + packages needed for building VirtualBox Guest Additions)
%packages --ignoremissing
@core
bzip2
gcc
make
kernel-uek
kernel-uek-devel
perl
# Unnecessary firmware
-bfa-firmware
-ivtv-firmware
-iwl100-firmware
-iwl1000-firmware
-iwl3945-firmware
-iwl4965-firmware
-iwl5000-firmware
-iwl5150-firmware
-iwl6000-firmware
-iwl6000g2a-firmware
-iwl6050-firmware
-kernel-firmware
-libertas-usb8388-firmware
-netxen-firmware
-ql2100-firmware
-ql2200-firmware
-ql23xx-firmware
-ql2400-firmware
-ql2500-firmware
-rt61pci-firmware
-rt73usb-firmware
%end

%post
yum -y update
yum clean all
[ -d /var/cache/yum ] && rm -fr /var/cache/yum
%end
_EOF
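Because the here-document delimiter is unquoted, the shell expands ${hostname}, ${domain}, ${swap_size} and ${root_size} while writing the file. A quick optional check that the generated http/ks.cfg contains the expanded values (output not shown):
grep -E 'hostname|logvol' http/ks.cfg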
Packer JSON script
md5sum /misc/packer/ol/${ol_ver}/iso/V978757-01.iso | awk '{ print $1; }'
# a57fe73747227cc947426cf1d8eb21a1
cat > iso/iso-info.json <<-_EOF
{
  "iso_url": "V978757-01.iso",
  "iso_checksum": "$(md5sum /misc/packer/ol/${ol_ver}/iso/V978757-01.iso | awk '{ print $1; }')",
  "iso_checksum_type": "md5"
}
_EOF
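The _EOF delimiter is unquoted here as well, so the $(md5sum ...) command substitution is resolved while the file is written; the file should contain the literal checksum value, not the command:
cat iso/iso-info.json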
cat > packer.json <<-"_EOF"
{
    "variables": {
        "vm_name": "packer-ol610-base",
        "vm_description": "{{env `vm_description`}}",
        "vm_version": "1.0.0",
        "group_name": "/Oracle Linux/Oracle Linux 6 Update 10",
        "ssh_username": "root",
        "ssh_password": "vagrant",
        "hostname": "ol610-base.vzell.de",
        "compression": "6",
        "vagrantfile": ""
    },
    "builders": [
        {
            "type": "virtualbox-iso",
            "communicator": "ssh",
            "ssh_username": "{{user `ssh_username`}}",
            "ssh_password": "{{user `ssh_password`}}",
            "ssh_timeout": "15m",
            "guest_os_type": "Oracle_64",
            "guest_additions_url": "",
            "guest_additions_sha256": "",
            "guest_additions_path": "",
            "guest_additions_mode": "upload",
            "output_directory": "output-{{user `hostname`}}",
            "iso_url": "iso/{{user `iso_url`}}",
            "iso_checksum": "{{user `iso_checksum`}}",
            "iso_checksum_type": "{{user `iso_checksum_type`}}",
            "http_directory": "http",
            "http_port_min": 8080,
            "http_port_max": 8082,
            "vm_name": "{{user `vm_name`}}",
            "keep_registered": true,
            "export_opts": [
                "--manifest",
                "--vsys",
                "0",
                "--description",
                "{{user `vm_description`}}",
                "--version",
                "{{user `vm_version`}}"
            ],
            "vboxmanage": [
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--groups",
                    "{{user `group_name`}}"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--boot1",
                    "disk",
                    "--boot2",
                    "dvd",
                    "--boot3",
                    "none",
                    "--boot4",
                    "none"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--vram",
                    "32"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--memory",
                    "2048"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--cpus",
                    "2"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--audio",
                    "none"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--vrde",
                    "off"
                ],
                [
                    "modifyvm",
                    "{{.Name}}",
                    "--rtcuseutc",
                    "on"
                ]
            ],
            "hard_drive_interface": "sata",
            "sata_port_count": 4,
            "disk_size": 409600,
            "headless": false,
            "shutdown_command": "shutdown -h now",
            "shutdown_timeout": "30m",
            "boot_wait": "5s",
            "boot_command": [
                "<tab>",
                " text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg ",
                "<enter>"
            ]
        }
    ],
    "provisioners": [
        {
            "type": "shell",
            "execute_command": "sh '{{ .Path }}'",
            "pause_before": "1s",
            "inline": [
                "useradd vagrant",
                "cp /etc/sudoers /etc/sudoers.orig",
                "sed -i -e 's/Defaults\\s*requiretty$/#Defaults\trequiretty/' /etc/sudoers",
                "sed -i -e '/# %wheel\tALL=(ALL)\tNOPASSWD: ALL/a %vagrant\tALL=(ALL)\tNOPASSWD: ALL' /etc/sudoers",
                "mkdir ~vagrant/.ssh",
                "chmod 700 ~vagrant/.ssh",
                "echo 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key' > ~vagrant/.ssh/authorized_keys",
                "chmod 600 ~vagrant/.ssh/authorized_keys",
                "chown -R vagrant: ~vagrant/.ssh"
            ]
        },
        {
            "type": "shell",
            "only": [
                "virtualbox-iso"
            ],
            "execute_command": "sh '{{ .Path }}'",
            "pause_before": "1s",
            "inline": [
                "mkdir -p /media/dvd",
                "mount -o loop,ro VBoxGuestAdditions*.iso /media/dvd",
                "sh /media/dvd/VBoxLinuxAdditions.run --nox11",
                "umount /media/dvd",
                "rm VBoxGuestAdditions*.iso"
            ]
        }
    ]
}
_EOF
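Note how the template ties back to the Kickstart file: http_directory serves the local http/ folder on a port between http_port_min and http_port_max, and the boot_command appends ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg to the installer's boot line, so Anaconda fetches the ks.cfg written above from Packer's built-in HTTP server.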
Packer execution
packer validate -var-file=iso/iso-info.json packer.json
# Template validated successfully.
packer inspect packer.json
# Optional variables and their defaults:
# 
#   compression    = 6
#   group_name     = /Oracle Linux/Oracle Linux 6 Update 10
#   hostname       = ol610-base.vzell.de
#   ssh_password   = vagrant
#   ssh_username   = root
#   vagrantfile    = 
#   vm_description = {{env `vm_description`}}
#   vm_name        = packer-ol610-base
#   vm_version     = 1.0.0
# 
# Builders:
# 
#   virtualbox-iso
# 
# Provisioners:
# 
#   shell
#   shell
# 
# Note: If your build names contain user variables or template
# functions such as 'timestamp', these are processed at build time,
# and therefore only show in their raw form here.
vm_description='Oracle Linux 6 Update 10

prepared by Dr. Volker Zell'
vm_version='0.9.0'
time packer build \
    -var "vm_description=${vm_description}" \
    -var "vm_version=${vm_version}"         \
    -var-file=iso/iso-info.json             \
    packer.json
# virtualbox-iso output will be in this color.
#
# ==> virtualbox-iso: Retrieving Guest additions
#     virtualbox-iso: Using file in-place: file:///C:/Program%20Files/Oracle/VirtualBox/VBoxGuestAdditions.iso
# ==> virtualbox-iso: Retrieving ISO
#     virtualbox-iso: Using file in-place: file:///D:/misc/packer/ol/6.10/iso/V978757-01.iso
# ==> virtualbox-iso: Starting HTTP server on port 8080
# ==> virtualbox-iso: Creating virtual machine...
# ==> virtualbox-iso: Creating hard drive...
# ==> virtualbox-iso: Creating forwarded port mapping for communicator (SSH, WinRM, etc) (host port 3801)
# ==> virtualbox-iso: Executing custom VBoxManage commands...
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --groups /Oracle Linux/Oracle Linux 6 Update 10
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --boot1 disk --boot2 dvd --boot3 none --boot4 none
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --vram 32
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --memory 2048
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --cpus 2
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --audio none
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --vrde off
#     virtualbox-iso: Executing: modifyvm packer-ol610-base --rtcuseutc on
# ==> virtualbox-iso: Starting the virtual machine...
# ==> virtualbox-iso: Waiting 5s for boot...
# ==> virtualbox-iso: Typing the boot command...
# ==> virtualbox-iso: Using ssh communicator to connect: 127.0.0.1
# ==> virtualbox-iso: Waiting for SSH to become available...
# ==> virtualbox-iso: Connected to SSH!
# ==> virtualbox-iso: Uploading VirtualBox version info (5.1.28)
# ==> virtualbox-iso: Uploading VirtualBox guest additions ISO...
# ==> virtualbox-iso: Pausing 1s before the next provisioner...
# ==> virtualbox-iso: Provisioning with shell script: E:\tmp\packer-shell772238767
#     virtualbox-iso: useradd: user 'vagrant' already exists
# ==> virtualbox-iso: Pausing 1s before the next provisioner...
# ==> virtualbox-iso: Provisioning with shell script: E:\tmp\packer-shell168027603
#     virtualbox-iso: Verifying archive integrity... All good.
#     virtualbox-iso: Uncompressing VirtualBox 5.1.28 Guest Additions for Linux...........
#     virtualbox-iso: VirtualBox Guest Additions installer
#     virtualbox-iso: Copying additional installer modules ...
#     virtualbox-iso: Installing additional modules ...
#     virtualbox-iso: vboxadd.sh: Starting the VirtualBox Guest Additions.
#     virtualbox-iso:
#     virtualbox-iso: Could not find the X.Org or XFree86 Window System, skipping.
# ==> virtualbox-iso: Gracefully halting virtual machine...
# ==> virtualbox-iso: Preparing to export machine...
#     virtualbox-iso: Deleting forwarded port mapping for the communicator (SSH, WinRM, etc) (host port 3801)
# ==> virtualbox-iso: Exporting virtual machine...
#     virtualbox-iso: Executing: export packer-ol610-base --output output-ol610-base.vzell.de\packer-ol610-base.ovf --manifest --vsys 0 --description Oracle Linux 6 Update 10
#     virtualbox-iso: 
#     virtualbox-iso: prepared by Dr. Volker Zell --version 0.9.0
# ==> virtualbox-iso: Keeping virtual machine registered with VirtualBox host (keep_registered = true)
# Build 'virtualbox-iso' finished.
# 
# ==> Builds finished. The artifacts of successful builds are:
# --> virtualbox-iso: VM files in directory: output-ol610-base.vzell.de
# 
# real	12m40.205s
# user	0m0.000s
# sys	0m0.015s
Resulting filesystem structure
tree -a /misc/packer
# /misc/packer
# └── ol
#     ├── 6.10
#     │   ├── http
#     │   │   └── ks.cfg
#     │   ├── iso
#     │   │   ├── iso-info.json
#     │   │   └── V978757-01.iso
#     │   ├── output-ol610-base.vzell.de
#     │   │   ├── packer-ol610-base.mf
#     │   │   ├── packer-ol610-base.ovf
#     │   │   ├── packer-ol610-base.vdi
#     │   │   └── packer-ol610-base-disk001.vmdk
#     │   ├── packer.json
#     │   └── packer_cache
#     ├── 7.5
#     │   ├── http
#     │   │   └── ks.cfg
#     │   ├── iso
#     │   │   └── iso-info.json
#     │   ├── output-ol75-base.vzell.de
#     │   │   ├── .vagrant
#     │   │   │   └── rgloader
#     │   │   │       └── loader.rb
#     │   │   ├── box.ovf
#     │   │   ├── info.json
#     │   │   ├── metadata.json
#     │   │   ├── packer-ol75-base.mf
#     │   │   ├── packer-ol75-base.vdi
#     │   │   ├── packer-ol75-base-disk001.vmdk
#     │   │   └── Vagrantfile
#     │   ├── packer.json
#     │   └── packer_cache
#     └── 7.5-1d
#         ├── .virtualbox
#         │   └── data1.vdi
#         ├── http
#         │   └── ks.cfg
#         ├── iso
#         │   ├── iso-info.json
#         │   └── V975367-01.iso
#         ├── output-ol75-base.vzell.de
#         │   ├── packer-ol75-1d-base.mf
#         │   ├── packer-ol75-1d-base.ovf
#         │   ├── packer-ol75-1d-base.vdi
#         │   ├── packer-ol75-1d-base-disk001.vmdk
#         │   └── packer-ol75-1d-base-disk002.vmdk
#         ├── packer.json
#         └── packer_cache

3 Using Ansible-Galaxy

Ansible Galaxy (https://galaxy.ansible.com/) is Ansible’s official hub for sharing Ansible content. It is a free site for finding, downloading, and sharing community-developed roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects.

The ansible-galaxy command comes bundled with Ansible, and you can use it to install roles from Galaxy or directly from a Git-based SCM. You can also use it to create a new role, remove roles, or perform tasks on the Galaxy website.
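
For example, pulling a community role from Galaxy into a roles directory, or installing it straight from a Git repository, looks like this (example role and repository, shown only to illustrate the syntax):

ansible-galaxy install geerlingguy.docker -p /misc/ansible/roles
ansible-galaxy install git+https://github.com/geerlingguy/ansible-role-docker.git -p /misc/ansible/roles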

3.1 Create a role for provisioning an Oracle Linux 7.5 system with GNOME Desktop and Docker support

mkdir -p /misc/ansible/roles && cd /misc/ansible/roles
ansible-galaxy
# Usage: ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ...
# 
# Perform various Role related operations.
# 
# Options:
#   -h, --help            show this help message and exit
#   -c, --ignore-certs    Ignore SSL certificate validation errors.
#   -s API_SERVER, --server=API_SERVER
#                         The API server destination
#   -v, --verbose         verbose mode (-vvv for more, -vvvv to enable connection debugging)
#   --version             show program's version number and exit
# 
#  See 'ansible-galaxy <command> --help' for more information on a specific command.
# ERROR! Missing required action
ansible-galaxy init --help
# Usage: ansible-galaxy init [options] role_name
# 
# Initialize new role with the base structure of a role.
# 
# Options:
#   -f, --force           Force overwriting an existing role
#   -h, --help            show this help message and exit
#   -c, --ignore-certs    Ignore SSL certificate validation errors.
#   --init-path=INIT_PATH
#                         The path in which the skeleton role will be created. The default is the current working directory.
#   --offline             Don't query the galaxy API when creating roles
#   --role-skeleton=ROLE_SKELETON
#                         The path to a role skeleton that the new role should be based upon.
#   -s API_SERVER, --server=API_SERVER
#                         The API server destination
#   --type=ROLE_TYPE      Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.
#   -v, --verbose         verbose mode (-vvv for more, -vvvv to enable connection debugging)
#   --version             show program's version number and exit
# 
#  See 'ansible-galaxy <command> --help' for more information on a specific command.
ansible-galaxy init vzell.yum-gnome
# - vzell.yum-gnome was created successfully
ansible-galaxy list
# - vzell.yum-gnome, (unknown version)
tree -a /misc/ansible
# /misc/ansible
# └── roles
#     └── vzell.yum-gnome
#         ├── defaults
#         │   └── main.yml
#         ├── files
#         ├── handlers
#         │   └── main.yml
#         ├── meta
#         │   └── main.yml
#         ├── README.md
#         ├── tasks
#         │   └── main.yml
#         ├── templates
#         ├── tests
#         │   ├── inventory
#         │   └── test.yml
#         └── vars
#             └── main.yml
# 
# 10 directories, 8 files
cat > /misc/ansible/roles/vzell.yum-gnome/tasks/main.yml <<-"_EOF"
---
# tasks file for vzell.yum-gnome

# https://support.oracle.com/epmos/faces/DocumentDisplay?id=2153562.1
- name: Install the 'Gnome desktop' environment group
  yum:
    name: "@^Server with GUI"
    state: present

- name: Install ansible from EPEL
  yum:
    name: ansible
    enablerepo: ol7_developer_EPEL

- name: Install docker with multiple repos enabled
  yum:
    name: docker-engine
    enablerepo: "ol7_latest,ol7_UEKR4,ol7_addons"

- name: Use systemctl command to enable GUI on system start
  command: systemctl set-default graphical.target
  when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'
  register: enable_gui

- name: Debug enabling GUI on system start
  debug: 
    var: enable_gui.stdout_lines
  when: enable_gui is changed

- name: Enable and start the docker service
  systemd:
    name: docker
    state: started
    enabled: yes
  when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'
  register: docker_started

# The systemd module does not return stdout, so show the whole registered result
- name: Debug docker service configuration
  debug:
    var: docker_started
  when: docker_started is changed
_EOF
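
The role is not yet referenced by any playbook. A minimal sketch of one that would apply it, assuming the playbook lives next to the roles directory in /misc/ansible (hypothetical file name; adjust hosts to your inventory):

cat > /misc/ansible/gnome-docker.yml <<-"_EOF"
---
# Apply the vzell.yum-gnome role with privilege escalation
- hosts: all
  become: yes
  roles:
    - vzell.yum-gnome
_EOF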

4 Using Vagrant

Vagrant is a tool for building and managing virtual machine environments in a single workflow. With an easy-to-use workflow and focus on automation, Vagrant lowers development environment setup time, increases production parity, and makes the "works on my machine" excuse a relic of the past.

Vagrant provides easy-to-configure, reproducible, and portable work environments built on top of industry-standard technology and controlled by a single consistent workflow to help maximize the productivity and flexibility of you and your team.

To achieve its magic, Vagrant stands on the shoulders of giants. Machines are provisioned on top of VirtualBox, VMware, AWS, or any other provider. Then, industry-standard provisioning tools such as shell scripts, Chef, or Puppet can automatically install and configure software on the virtual machine.

4.1 Commands - 2.1.5

vagrant

vagrant
# Usage: vagrant [options] <command> [<args>]
# 
#     -v, --version                    Print the version and exit.
#     -h, --help                       Print this help.
# 
# Common commands:
#      box             manages boxes: installation, removal, etc.
#      destroy         stops and deletes all traces of the vagrant machine
#      global-status   outputs status Vagrant environments for this user
#      halt            stops the vagrant machine
#      help            shows the help for a subcommand
#      init            initializes a new Vagrant environment by creating a Vagrantfile
#      login           log in to HashiCorp's Vagrant Cloud
#      package         packages a running vagrant environment into a box
#      plugin          manages plugins: install, uninstall, update, etc.
#      port            displays information about guest port mappings
#      powershell      connects to machine via powershell remoting
#      provision       provisions the vagrant machine
#      push            deploys code in this environment to a configured destination
#      rdp             connects to machine via RDP
#      reload          restarts vagrant machine, loads new Vagrantfile configuration
#      resume          resume a suspended vagrant machine
#      snapshot        manages snapshots: saving, restoring, etc.
#      ssh             connects to machine via SSH
#      ssh-config      outputs OpenSSH valid configuration to connect to the machine
#      status          outputs status of the vagrant machine
#      suspend         suspends the machine
#      up              starts and provisions the vagrant environment
#      validate        validates the Vagrantfile
#      version         prints current and latest Vagrant version
# 
# For help on any individual command run `vagrant COMMAND -h`
# 
# Additional subcommands are available, but are either more advanced
# or not commonly used. To see all subcommands, run the command
# `vagrant list-commands`.

version

vagrant version
# Installed Version: 2.1.5
# Latest Version: 2.1.5
#  
# You're running an up-to-date version of Vagrant!

global-status

vagrant global-status -h
# Usage: vagrant global-status
# 
#         --prune                      Prune invalid entries.
#     -h, --help                       Print this help
vagrant global-status --prune
# id       name     provider   state    directory                           
# --------------------------------------------------------------------------
# a56a065  default  virtualbox poweroff D:/misc/vagrant/ol75-generic        
# 3800a17  ol75     virtualbox aborted  D:/misc/vagrant/ol75                
# efb816a  ol610    virtualbox running  D:/misc/vagrant/packer-ol610        
# 2f41e27  machine1 virtualbox running  D:/misc/vagrant/machine1            
# 7c0743a  machine2 virtualbox running  D:/misc/vagrant/machine2            
# d554214  server1  virtualbox poweroff D:/misc/vagrant/server1             
# 66a94a8  server2  virtualbox poweroff D:/misc/vagrant/server2             
# 125d011  dbhost   virtualbox poweroff D:/misc/vagrant/dbhost              
#  
# The above shows information about all known Vagrant environments
# on this machine. This data is cached and may not be completely
# up-to-date (use "vagrant global-status --prune" to prune invalid
# entries). To interact with any of the machines, you can go to that
# directory and run Vagrant, or you can use the ID directly with
# Vagrant commands from any directory. For example:
# "vagrant destroy 1a2b3c4d"

4.2 Commands - 2.2.0

vagrant

vagrant
# WARNING: This command has been deprecated in favor of `vagrant cloud auth login`
# Usage: vagrant [options] <command> [<args>]
# 
#     -v, --version                    Print the version and exit.
#     -h, --help                       Print this help.
# 
# Common commands:
#      box             manages boxes: installation, removal, etc.
#      cloud           manages everything related to Vagrant Cloud
#      destroy         stops and deletes all traces of the vagrant machine
#      global-status   outputs status Vagrant environments for this user
#      halt            stops the vagrant machine
#      help            shows the help for a subcommand
#      init            initializes a new Vagrant environment by creating a Vagrantfile
#      login           
#      package         packages a running vagrant environment into a box
#      plugin          manages plugins: install, uninstall, update, etc.
#      port            displays information about guest port mappings
#      powershell      connects to machine via powershell remoting
#      provision       provisions the vagrant machine
#      push            deploys code in this environment to a configured destination
#      rdp             connects to machine via RDP
#      reload          restarts vagrant machine, loads new Vagrantfile configuration
#      resume          resume a suspended vagrant machine
#      snapshot        manages snapshots: saving, restoring, etc.
#      ssh             connects to machine via SSH
#      ssh-config      outputs OpenSSH valid configuration to connect to the machine
#      status          outputs status of the vagrant machine
#      suspend         suspends the machine
#      up              starts and provisions the vagrant environment
#      upload          upload to machine via communicator
#      validate        validates the Vagrantfile
#      version         prints current and latest Vagrant version
#      winrm           executes commands on a machine via WinRM
#      winrm-config    outputs WinRM configuration to connect to the machine
# 
# For help on any individual command run `vagrant COMMAND -h`
# 
# Additional subcommands are available, but are either more advanced
# or not commonly used. To see all subcommands, run the command
# `vagrant list-commands`.

The first time you run vagrant, it creates a directory structure under the directory that the environment variable VAGRANT_HOME points to, in our case /misc/vagrant/.vagrant.d. The VAGRANT_HOME directory does not need to be created beforehand.
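
A minimal sketch of how the variable could be set under Cygwin, assuming you keep it in ~/.bashrc (it only has to be exported before the first vagrant call):

export VAGRANT_HOME=/misc/vagrant/.vagrant.d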

tree -a /misc/vagrant
# /misc/vagrant
# └── .vagrant.d
#     ├── boxes
#     ├── data
#     │   ├── checkpoint_signature
#     │   └── machine-index
#     │       └── index.lock
#     ├── gems
#     │   └── 2.4.4
#     ├── insecure_private_key
#     ├── rgloader
#     │   └── loader.rb
#     ├── setup_version
#     └── tmp
# 
# 8 directories, 5 files

version

vagrant version
# Installed Version: 2.2.0
# Latest Version: 2.2.0
#  
# You're running an up-to-date version of Vagrant!

global-status

vagrant global-status -h
# Usage: vagrant global-status
# 
#         --prune                      Prune invalid entries.
#     -h, --help                       Print this help
vagrant global-status --prune
# id       name   provider state  directory                           
# --------------------------------------------------------------------
# There are no active Vagrant environments on this computer! Or,
# you haven't destroyed and recreated Vagrant environments that were
# started with an older version of Vagrant.

4.3 Building Vagrant base box for Oracle Linux 7.5

Rename the Packer-generated OVF file to box.ovf

cd /misc/packer/ol/7.5/output-ol75-base.vzell.de && mv packer-ol75-base.ovf box.ovf

Create Vagrantfile

grep MACAddress box.ovf
#           <Adapter slot="0" enabled="true" MACAddress="080027126875" type="82540EM">
grep MACAddress box.ovf | awk -F"MACAddress=\"" '{print $2}' | awk -F"\"" '{print $1}'
# 080027126875
cat > Vagrantfile <<-_EOF
Vagrant::Config.run do |config|
  config.vm.base_mac = "$(grep MACAddress box.ovf | awk -F"MACAddress=\"" '{print $2}' | awk -F"\"" '{print $1}')"
end
_EOF
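
The base_mac entry tells Vagrant to give the imported machine's first (NAT) interface the same MAC address the box had when the OS was installed, so the network configuration baked into the guest still matches the hardware it sees on first boot.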

Create Box Info File

cat > info.json <<-"_EOF"
{
  "Author": "Dr. Volker Zell",
  "Homepage": "http://volkerzell.de",
  "Repository": "https://github.com/vzell/ol75/",
  "Description": "This box contains Oracle Linux 7.5 64-bit in a base installation created with packer"
}
_EOF

Create Box File for Provider

cat > metadata.json <<-"_EOF"
{
  "provider": "virtualbox"
}
_EOF

Directory structure

tree -a /misc/packer
# /misc/packer
# └── ol
#     └── 7.5
#         ├── http
#         │   └── ks.cfg
#         ├── iso
#         │   ├── iso-info.json
#         │   └── V975367-01.iso
#         ├── output-ol75-base.vzell.de
#         │   ├── .vagrant
#         │   │   └── rgloader
#         │   │       └── loader.rb
#         │   ├── box.ovf
#         │   ├── info.json
#         │   ├── metadata.json
#         │   ├── packer-ol75-base.mf
#         │   ├── packer-ol75-base.vdi
#         │   ├── packer-ol75-base-disk001.vmdk
#         │   └── Vagrantfile
#         ├── packer.json
#         └── packer_cache
# 
# 8 directories, 12 files

Create Vagrant Box

mkdir -p /misc/vagrant/boxes
tar --exclude='*.vdi' -cvzf /misc/vagrant/boxes/packer-ol75-0.9.0.box *
# box.ovf
# info.json
# metadata.json
# packer-ol75-base.mf
# packer-ol75-base-disk001.vmdk
# Vagrantfile
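
A .box file is nothing more than a gzipped tar archive of the files the provider needs, which is why a plain tar call is sufficient here.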

Create Box Metadata File

sha1sum /misc/vagrant/boxes/packer-ol75-0.9.0.box | awk '{ print $1; }'
# 9fb684f74d055a7ea11228c765d06327f84931c2
cat > /misc/vagrant/boxes/packer-ol75-0.9.0.metadata.json <<-_EOF
{
  "name": "vzell/packer-ol75",
  "description": "This box contains Oracle Linux 7.5 64-bit created with packer",
  "versions": [
    {
      "version": "0.9.0",
      "providers": [
        {
          "name": "virtualbox",
          "url": "file://D:/misc/vagrant/boxes/packer-ol75-0.9.0.box",
          "checksum_type": "sha1",
          "checksum": "$(sha1sum /misc/vagrant/boxes/packer-ol75-0.9.0.box | awk '{print $1}')"
        }
      ]
    }
  ]
}
_EOF

Add the box to the vagrant registry

cd /misc/vagrant
vagrant box -h
# Usage: vagrant box <subcommand> [<args>]
# 
# Available subcommands:
#      add
#      list
#      outdated
#      prune
#      remove
#      repackage
#      update
# 
# For help on any individual subcommand run `vagrant box <subcommand> -h`
vagrant box list
# There are no installed boxes! Use `vagrant box add` to add some.
vagrant box add -h
# Usage: vagrant box add [options] <name, url, or path>
# 
# Options:
# 
#     -c, --clean                      Clean any temporary download files
#     -f, --force                      Overwrite an existing box if it exists
#         --insecure                   Do not validate SSL certificates
#         --cacert FILE                CA certificate for SSL download
#         --capath DIR                 CA certificate directory for SSL download
#         --cert FILE                  A client SSL cert, if needed
#         --location-trusted           Trust 'Location' header from HTTP redirects and use the same credentials for subsequent urls as for the initial one
#         --provider PROVIDER          Provider the box should satisfy
#         --box-version VERSION        Constrain version of the added box
# 
# The box descriptor can be the name of a box on HashiCorp's Vagrant Cloud,
# or a URL, or a local .box file, or a local .json file containing
# the catalog metadata.
# 
# The options below only apply if you're adding a box file directly,
# and not using a Vagrant server or a box structured like 'user/box':
# 
#         --checksum CHECKSUM          Checksum for the box
#         --checksum-type TYPE         Checksum type (md5, sha1, sha256)
#         --name BOX                   Name of the box
#     -h, --help                       Print this help
vagrant box add /misc/vagrant/boxes/packer-ol75-0.9.0.metadata.json
# ==> box: Loading metadata for box '/misc/vagrant/boxes/packer-ol75-0.9.0.metadata.json'
#     box: URL: file://D:/misc/vagrant/boxes/packer-ol75-0.9.0.metadata.json
# ==> box: Adding box 'vzell/packer-ol75' (v0.9.0) for provider: virtualbox
#     box: Unpacking necessary files from: file://D:/misc/vagrant/boxes/packer-ol75-0.9.0.box
#     box: 
#     box: Calculating and comparing box checksum...
# ==> box: Successfully added box 'vzell/packer-ol75' (v0.9.0) for 'virtualbox'!

Directory structure

tree -a /misc/vagrant
# /misc/vagrant
# ├── .vagrant.d
# │   ├── boxes
# │   │   └── vzell-VAGRANTSLASH-packer-ol75
# │   │       ├── 0.9.0
# │   │       │   └── virtualbox
# │   │       │       ├── box.ovf
# │   │       │       ├── info.json
# │   │       │       ├── metadata.json
# │   │       │       ├── packer-ol75-base.mf
# │   │       │       ├── packer-ol75-base-disk001.vmdk
# │   │       │       └── Vagrantfile
# │   │       └── metadata_url
# │   ├── data
# │   │   ├── checkpoint_cache
# │   │   ├── checkpoint_signature
# │   │   └── machine-index
# │   │       └── index.lock
# │   ├── gems
# │   │   └── 2.4.4
# │   ├── insecure_private_key
# │   ├── rgloader
# │   │   └── loader.rb
# │   ├── setup_version
# │   └── tmp
# └── boxes
#     ├── packer-ol75-0.9.0.box
#     └── packer-ol75-0.9.0.metadata.json
# 
# 12 directories, 15 files

Checking the vagrant registry

vagrant box list
# vzell/packer-ol75 (virtualbox, 0.9.0)
vagrant box list -i
# vzell/packer-ol75 (virtualbox, 0.9.0)
#   - Author: Dr. Volker Zell
#   - Homepage: http://volkerzell.de
#   - Repository: https://github.com/vzell/ol75/
#   - Description: This box contains Oracle Linux 7.5 64-bit in a base installation created with packer

4.4 Building Vagrant base box for Oracle Linux 6.10

Rename the packer-generated OVF file to box.ovf

cd /misc/packer/ol/6.10/output-ol610-base.vzell.de && mv packer-ol610-base.ovf box.ovf

Create Vagrantfile

grep MACAddress box.ovf
#           <Adapter slot="0" enabled="true" MACAddress="080027A8639C" type="82540EM">
grep MACAddress box.ovf | awk -F"MACAddress=\"" '{print $2}' | awk -F"\"" '{print $1}'
# 080027A8639C
cat > Vagrantfile <<-_EOF
Vagrant::Config.run do |config|
  config.vm.base_mac = "$(grep MACAddress box.ovf | awk -F"MACAddress=\"" '{print $2}' | awk -F"\"" '{print $1}')"
end
_EOF

Create Box Info File

cat > info.json <<-"_EOF"
{
  "Author": "Dr. Volker Zell",
  "Homepage": "http://volkerzell.de",
  "Repository": "http://volkerzell.de/repo/ol/6.10/",
  "Description": "This box contains Oracle Linux 6.10 64-bit in a base installation created with packer"
}
_EOF

Create Box File for Provider

cat > metadata.json <<-"_EOF"
{
  "provider": "virtualbox"
}
_EOF

Create Vagrant Box

tar --exclude='*.vdi' -cvzf /misc/vagrant/boxes/packer-ol610-0.9.0.box *
# box.ovf
# info.json
# metadata.json
# packer-ol610-base.mf
# packer-ol610-base-disk001.vmdk
# Vagrantfile

Create Box Metadata File

sha1sum /misc/vagrant/boxes/packer-ol610-0.9.0.box | awk '{ print $1; }'
# e773b269160037a47cff525ca9296fa8514d88c3
cat > /misc/vagrant/boxes/packer-ol610-0.9.0.metadata.json <<-_EOF
{
  "name": "vzell/packer-ol610",
  "description": "This box contains Oracle Linux 6.10 64-bit created with packer",
  "versions": [
    {
      "version": "0.9.0",
      "providers": [
        {
          "name": "virtualbox",
          "url": "file://D:/misc/vagrant/boxes/packer-ol610-0.9.0.box",
          "checksum_type": "sha1",
          "checksum": "$(sha1sum /misc/vagrant/boxes/packer-ol610-0.9.0.box | awk '{print $1}')"
        }
      ]
    }
  ]
}
_EOF

Add the box to the vagrant registry

cd /misc/vagrant
vagrant box list
# vzell/packer-ol75 (virtualbox, 0.9.0)
vagrant box add /misc/vagrant/boxes/packer-ol610-0.9.0.metadata.json
# ==> box: Loading metadata for box '/misc/vagrant/boxes/packer-ol610-0.9.0.metadata.json'
#     box: URL: file://D:/misc/vagrant/boxes/packer-ol610-0.9.0.metadata.json
# ==> box: Adding box 'vzell/packer-ol610' (v0.9.0) for provider: virtualbox
#     box: Unpacking necessary files from: file://D:/misc/vagrant/boxes/packer-ol610-0.9.0.box
#     box: 
#     box: Calculating and comparing box checksum...
# ==> box: Successfully added box 'vzell/packer-ol610' (v0.9.0) for 'virtualbox'!

Directory structure

tree -a /misc/vagrant
# /misc/vagrant
# ├── .vagrant.d
# │   ├── boxes
# │   │   ├── vzell-VAGRANTSLASH-packer-ol610
# │   │   │   ├── 0.9.0
# │   │   │   │   └── virtualbox
# │   │   │   │       ├── box.ovf
# │   │   │   │       ├── info.json
# │   │   │   │       ├── metadata.json
# │   │   │   │       ├── packer-ol610-base.mf
# │   │   │   │       ├── packer-ol610-base-disk001.vmdk
# │   │   │   │       └── Vagrantfile
# │   │   │   └── metadata_url
# │   │   └── vzell-VAGRANTSLASH-packer-ol75
# │   │       ├── 0.9.0
# │   │       │   └── virtualbox
# │   │       │       ├── box.ovf
# │   │       │       ├── info.json
# │   │       │       ├── metadata.json
# │   │       │       ├── packer-ol75-base.mf
# │   │       │       ├── packer-ol75-base-disk001.vmdk
# │   │       │       └── Vagrantfile
# │   │       └── metadata_url
# │   ├── data
# │   │   ├── checkpoint_cache
# │   │   ├── checkpoint_signature
# │   │   ├── fp-leases
# │   │   ├── lock.dotlock.lock
# │   │   ├── machine-index
# │   │   │   ├── index
# │   │   │   └── index.lock
# │   │   └── vbox_symlink_create_warning
# │   ├── gems
# │   │   └── 2.4.4
# │   ├── insecure_private_key
# │   ├── rgloader
# │   │   └── loader.rb
# │   ├── setup_version
# │   └── tmp
# └── boxes
#     ├── packer-ol610-0.9.0.box
#     ├── packer-ol610-0.9.0.metadata.json
#     ├── packer-ol75-0.9.0.box
#     └── packer-ol75-0.9.0.metadata.json

Checking the vagrant registry

vagrant box list
# vzell/packer-ol610 (virtualbox, 0.9.0)
# vzell/packer-ol75  (virtualbox, 0.9.0)
vagrant box list -i
# vzell/packer-ol610 (virtualbox, 0.9.0)
#   - Author: Dr. Volker Zell
#   - Homepage: http://volkerzell.de
#   - Repository: http://volkerzell.de/repo/ol/6.10/
#   - Description: This box contains Oracle Linux 6.10 64-bit in a base installation created with packer
# vzell/packer-ol75  (virtualbox, 0.9.0)
#   - Author: Dr. Volker Zell
#   - Homepage: http://volkerzell.de
#   - Repository: https://github.com/vzell/ol75/
#   - Description: This box contains Oracle Linux 7.5 64-bit in a base installation created with packer

4.5 Building a multimachine setup with vagrant and ansible based on a yaml input file

Create the staging area which will be used during provisioning

mkdir -p /misc/vagrant/stage

Clone the vagrant-multihost repository from Opitz Bitbucket

Use your standard Opitz account when prompted to log in, e.g. your three-letter abbreviation as the username.

git clone https://git.opitz-consulting.de/scm/~vze/vagrant-multihost.git
cd /misc/vagrant/vagrant-multihost

Directory Structure

tree -a /misc/vagrant/vagrant-multihost
# /misc/vagrant/vagrant-multihost
# ├── .git                                            (subdirectories NOT displayed)
# ├── .gitignore
# ├── ansible
# │   ├── group_vars
# │   │   └── all.yml
# │   ├── roles
# │   │   ├── vzell.filesystem
# │   │   │   ├── defaults
# │   │   │   │   └── main.yml
# │   │   │   ├── files
# │   │   │   ├── handlers
# │   │   │   │   └── main.yml
# │   │   │   ├── meta
# │   │   │   │   └── main.yml
# │   │   │   ├── README.md
# │   │   │   ├── tasks
# │   │   │   │   └── main.yml
# │   │   │   ├── templates
# │   │   │   ├── tests
# │   │   │   │   ├── inventory
# │   │   │   │   └── test.yml
# │   │   │   └── vars
# │   │   │       └── main.yml
# │   │   └── vzell.yum
# │   │       ├── defaults
# │   │       │   └── main.yml
# │   │       ├── files
# │   │       ├── handlers
# │   │       │   └── main.yml
# │   │       ├── meta
# │   │       │   └── main.yml
# │   │       ├── README.md
# │   │       ├── tasks
# │   │       │   └── main.yml
# │   │       ├── templates
# │   │       ├── tests
# │   │       │   ├── inventory
# │   │       │   └── test.yml
# │   │       └── vars
# │   │           └── main.yml
# │   └── site.yml
# ├── ansible.cfg
# ├── custom-vagrant-hosts.yml
# ├── LICENSE
# ├── README.md
# ├── scripts
# │   └── inventory.py
# ├── test
# │   └── runbats.sh
# ├── Vagrantfile
# └── vagrant-hosts.yml
# 
# 24 directories, 27 files
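
The machines that vagrant up creates are driven by vagrant-hosts.yml (or custom-vagrant-hosts.yml). The exact schema is whatever the repository's Vagrantfile parses; purely as an illustration, an entry for one host might look roughly like this (hypothetical keys and values, only the box name and IP are taken from this tutorial):

- name: ol75-master
  box: vzell/packer-ol75
  ip: 192.168.56.109
  memory: 2048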

Create, start and provision the multimachine setup

vagrant up -h
# Usage: vagrant up [options] [name|id]
# 
# Options:
# 
#         --[no-]provision             Enable or disable provisioning
#         --provision-with x,y,z       Enable only certain provisioners, by type or by name.
#         --[no-]destroy-on-error      Destroy machine if any fatal error happens (default to true)
#         --[no-]parallel              Enable or disable parallelism if provider supports it
#         --provider PROVIDER          Back the machine with a specific provider
#         --[no-]install-provider      If possible, install the provider if it isn't installed
#     -h, --help                       Print this help

When you execute the next command, be prepared to react to the Windows Firewall popup window and grant access to VBoxHeadless.

time vagrant up
# Bringing machine 'ol75-master' up with 'virtualbox' provider...
# Bringing machine 'dbnode' up with 'virtualbox' provider...
# Bringing machine 'fmwnode1' up with 'virtualbox' provider...
# Bringing machine 'fmwnode2' up with 'virtualbox' provider...
# ==> ol75-master: Importing base box 'vzell/packer-ol75'...
# ==> ol75-master: Matching MAC address for NAT networking...
# ==> ol75-master: Checking if box 'vzell/packer-ol75' is up to date...
# ==> ol75-master: Setting the name of the VM: vagrant-multihost_ol75-master_1539973390788_76844
# Vagrant is currently configured to create VirtualBox synced folders with
# the `SharedFoldersEnableSymlinksCreate` option enabled. If the Vagrant
# guest is not trusted, you may want to disable this option. For more
# information on this option, please refer to the VirtualBox manual:
# 
#   https://www.virtualbox.org/manual/ch04.html#sharedfolders
# 
# This option can be disabled globally with an environment variable:
# 
#   VAGRANT_DISABLE_VBOXSYMLINKCREATE=1
# 
# or on a per folder basis within the Vagrantfile:
# 
#   config.vm.synced_folder '/host/path', '/guest/path', SharedFoldersEnableSymlinksCreate: false
# ==> ol75-master: Clearing any previously set network interfaces...
# ==> ol75-master: Preparing network interfaces based on configuration...
#     ol75-master: Adapter 1: nat
#     ol75-master: Adapter 2: hostonly
# ==> ol75-master: Forwarding ports...
#     ol75-master: 22 (guest) => 11119 (host) (adapter 1)
# ==> ol75-master: Running 'pre-boot' VM customizations...
# ==> ol75-master: Booting VM...
# ==> ol75-master: Waiting for machine to boot. This may take a few minutes...
#     ol75-master: SSH address: 127.0.0.1:11119
#     ol75-master: SSH username: vagrant
#     ol75-master: SSH auth method: private key
# ==> ol75-master: Machine booted and ready!
# ==> ol75-master: Checking for guest additions in VM...
#     ol75-master: No guest additions were detected on the base box for this VM! Guest
#     ol75-master: additions are required for forwarded ports, shared folders, host only
#     ol75-master: networking, and more. If SSH fails on this machine, please install
#     ol75-master: the guest additions and repackage the box to continue.
#     ol75-master: 
#     ol75-master: This is not an error message; everything may continue to work properly,
#     ol75-master: in which case you may ignore this message.
# ==> ol75-master: Setting hostname...
# ==> ol75-master: Configuring and enabling network interfaces...
#     ol75-master: SSH address: 127.0.0.1:11119
#     ol75-master: SSH username: vagrant
#     ol75-master: SSH auth method: private key
# ==> ol75-master: Mounting shared folders...
#     ol75-master: /vagrant => D:/misc/vagrant/vagrant-multihost
#     ol75-master: /media/stage => D:/misc/vagrant/stage
# ==> dbnode: Importing base box 'vzell/packer-ol75'...
# ==> dbnode: Matching MAC address for NAT networking...
# ==> dbnode: Checking if box 'vzell/packer-ol75' is up to date...
# ==> dbnode: Setting the name of the VM: vagrant-multihost_dbnode_1539973472701_37626
# ==> dbnode: Clearing any previously set network interfaces...
# ==> dbnode: Preparing network interfaces based on configuration...
#     dbnode: Adapter 1: nat
#     dbnode: Adapter 2: hostonly
# ==> dbnode: Forwarding ports...
#     dbnode: 22 (guest) => 11120 (host) (adapter 1)
# ==> dbnode: Running 'pre-boot' VM customizations...
# ==> dbnode: Booting VM...
# ==> dbnode: Waiting for machine to boot. This may take a few minutes...
#     dbnode: SSH address: 127.0.0.1:11120
#     dbnode: SSH username: vagrant
#     dbnode: SSH auth method: private key
# ==> dbnode: Machine booted and ready!
# ==> dbnode: Checking for guest additions in VM...
#     dbnode: No guest additions were detected on the base box for this VM! Guest
#     dbnode: additions are required for forwarded ports, shared folders, host only
#     dbnode: networking, and more. If SSH fails on this machine, please install
#     dbnode: the guest additions and repackage the box to continue.
#     dbnode: 
#     dbnode: This is not an error message; everything may continue to work properly,
#     dbnode: in which case you may ignore this message.
# ==> dbnode: Setting hostname...
# ==> dbnode: Configuring and enabling network interfaces...
#     dbnode: SSH address: 127.0.0.1:11120
#     dbnode: SSH username: vagrant
#     dbnode: SSH auth method: private key
# ==> dbnode: Mounting shared folders...
#     dbnode: /vagrant => D:/misc/vagrant/vagrant-multihost
#     dbnode: /media/stage => D:/misc/vagrant/stage
# ==> fmwnode1: Importing base box 'vzell/packer-ol75'...
# ==> fmwnode1: Matching MAC address for NAT networking...
# ==> fmwnode1: Checking if box 'vzell/packer-ol75' is up to date...
# ==> fmwnode1: Setting the name of the VM: vagrant-multihost_fmwnode1_1539973553934_68151
# ==> fmwnode1: Clearing any previously set network interfaces...
# ==> fmwnode1: Preparing network interfaces based on configuration...
#     fmwnode1: Adapter 1: nat
#     fmwnode1: Adapter 2: hostonly
# ==> fmwnode1: Forwarding ports...
#     fmwnode1: 22 (guest) => 11121 (host) (adapter 1)
# ==> fmwnode1: Running 'pre-boot' VM customizations...
# ==> fmwnode1: Booting VM...
# ==> fmwnode1: Waiting for machine to boot. This may take a few minutes...
#     fmwnode1: SSH address: 127.0.0.1:11121
#     fmwnode1: SSH username: vagrant
#     fmwnode1: SSH auth method: private key
# ==> fmwnode1: Machine booted and ready!
# ==> fmwnode1: Checking for guest additions in VM...
#     fmwnode1: No guest additions were detected on the base box for this VM! Guest
#     fmwnode1: additions are required for forwarded ports, shared folders, host only
#     fmwnode1: networking, and more. If SSH fails on this machine, please install
#     fmwnode1: the guest additions and repackage the box to continue.
#     fmwnode1: 
#     fmwnode1: This is not an error message; everything may continue to work properly,
#     fmwnode1: in which case you may ignore this message.
# ==> fmwnode1: Setting hostname...
# ==> fmwnode1: Configuring and enabling network interfaces...
#     fmwnode1: SSH address: 127.0.0.1:11121
#     fmwnode1: SSH username: vagrant
#     fmwnode1: SSH auth method: private key
# ==> fmwnode1: Mounting shared folders...
#     fmwnode1: /vagrant => D:/misc/vagrant/vagrant-multihost
#     fmwnode1: /media/stage => D:/misc/vagrant/stage
# ==> fmwnode2: Importing base box 'vzell/packer-ol75'...
# ==> fmwnode2: Matching MAC address for NAT networking...
# ==> fmwnode2: Checking if box 'vzell/packer-ol75' is up to date...
# ==> fmwnode2: Setting the name of the VM: vagrant-multihost_fmwnode2_1539973632789_99666
# ==> fmwnode2: Clearing any previously set network interfaces...
# ==> fmwnode2: Preparing network interfaces based on configuration...
#     fmwnode2: Adapter 1: nat
#     fmwnode2: Adapter 2: hostonly
# ==> fmwnode2: Forwarding ports...
#     fmwnode2: 22 (guest) => 11122 (host) (adapter 1)
# ==> fmwnode2: Running 'pre-boot' VM customizations...
# ==> fmwnode2: Booting VM...
# ==> fmwnode2: Waiting for machine to boot. This may take a few minutes...
#     fmwnode2: SSH address: 127.0.0.1:11122
#     fmwnode2: SSH username: vagrant
#     fmwnode2: SSH auth method: private key
# ==> fmwnode2: Machine booted and ready!
# ==> fmwnode2: Checking for guest additions in VM...
#     fmwnode2: No guest additions were detected on the base box for this VM! Guest
#     fmwnode2: additions are required for forwarded ports, shared folders, host only
#     fmwnode2: networking, and more. If SSH fails on this machine, please install
#     fmwnode2: the guest additions and repackage the box to continue.
#     fmwnode2: 
#     fmwnode2: This is not an error message; everything may continue to work properly,
#     fmwnode2: in which case you may ignore this message.
# ==> fmwnode2: Setting hostname...
# ==> fmwnode2: Configuring and enabling network interfaces...
#     fmwnode2: SSH address: 127.0.0.1:11122
#     fmwnode2: SSH username: vagrant
#     fmwnode2: SSH auth method: private key
# ==> fmwnode2: Mounting shared folders...
#     fmwnode2: /vagrant => D:/misc/vagrant/vagrant-multihost
#     fmwnode2: /media/stage => D:/misc/vagrant/stage
# ==> fmwnode2: Running provisioner: ansible...
# Windows is not officially supported for the Ansible Control Machine.
# Please check https://docs.ansible.com/intro_installation.html#control-machine-requirements
#     fmwnode2: Running ansible-playbook...
# The Ansible software could not be found! Please verify
# that Ansible is correctly installed on your host system.
# 
# If you haven't installed Ansible yet, please install Ansible
# on your host system. Vagrant can't do this for you in a safe and
# automated way.
# Please check https://docs.ansible.com for more information.
# 
# real	5m24.453s
# user	0m0.000s
# sys	0m0.030s
vagrant status
# Current machine states:
# 
# ol75-master               running (virtualbox)
# dbnode                    running (virtualbox)
# fmwnode1                  running (virtualbox)
# fmwnode2                  running (virtualbox)
# 
# This environment represents multiple VMs. The VMs are all listed
# above with their current state. For more information about a specific
# VM, run `vagrant status NAME`.
vagrant global-status
# id       name        provider   state   directory                           
# ----------------------------------------------------------------------------
# 2e8bf66  ol75-master virtualbox running D:/misc/vagrant/vagrant-multihost   
# ff2ab2f  dbnode      virtualbox running D:/misc/vagrant/vagrant-multihost   
# 0e4d3ec  fmwnode1    virtualbox running D:/misc/vagrant/vagrant-multihost   
# 025e7ec  fmwnode2    virtualbox running D:/misc/vagrant/vagrant-multihost   
#  
# The above shows information about all known Vagrant environments
# on this machine. This data is cached and may not be completely
# up-to-date (use "vagrant global-status --prune" to prune invalid
# entries). To interact with any of the machines, you can go to that
# directory and run Vagrant, or you can use the ID directly with
# Vagrant commands from any directory. For example:
# "vagrant destroy 1a2b3c4d"
vagrant up --provision
vagrant halt
# ==> fmwnode2: Attempting graceful shutdown of VM...
# ==> fmwnode1: Attempting graceful shutdown of VM...
# ==> dbnode: Attempting graceful shutdown of VM...
# ==> ol75-master: Attempting graceful shutdown of VM...
vagrant status
# Current machine states:
# 
# ol75-master               poweroff (virtualbox)
# dbnode                    poweroff (virtualbox)
# fmwnode1                  poweroff (virtualbox)
# fmwnode2                  poweroff (virtualbox)
# 
# This environment represents multiple VMs. The VMs are all listed
# above with their current state. For more information about a specific
# VM, run `vagrant status NAME`.
vagrant global-status
# id       name        provider   state    directory                           
# -----------------------------------------------------------------------------
# 2e8bf66  ol75-master virtualbox poweroff D:/misc/vagrant/vagrant-multihost   
# ff2ab2f  dbnode      virtualbox poweroff D:/misc/vagrant/vagrant-multihost   
# 0e4d3ec  fmwnode1    virtualbox poweroff D:/misc/vagrant/vagrant-multihost   
# 025e7ec  fmwnode2    virtualbox poweroff D:/misc/vagrant/vagrant-multihost   
#  
# The above shows information about all known Vagrant environments
# on this machine. This data is cached and may not be completely
# up-to-date (use "vagrant global-status --prune" to prune invalid
# entries). To interact with any of the machines, you can go to that
# directory and run Vagrant, or you can use the ID directly with
# Vagrant commands from any directory. For example:
# "vagrant destroy 1a2b3c4d"

During the next boot the ol75-master node will come up in graphical mode, because this VM has been provisioned with a GNOME Desktop.

vagrant up ol75-master
# Bringing machine 'ol75-master' up with 'virtualbox' provider...
# ==> ol75-master: Checking if box 'vzell/packer-ol75' is up to date...
# ==> ol75-master: Clearing any previously set forwarded ports...
# ==> ol75-master: Clearing any previously set network interfaces...
# ==> ol75-master: Preparing network interfaces based on configuration...
#     ol75-master: Adapter 1: nat
#     ol75-master: Adapter 2: hostonly
# ==> ol75-master: Forwarding ports...
#     ol75-master: 22 (guest) => 11119 (host) (adapter 1)
# ==> ol75-master: Running 'pre-boot' VM customizations...
# ==> ol75-master: Booting VM...
# ==> ol75-master: Waiting for machine to boot. This may take a few minutes...
#     ol75-master: SSH address: 127.0.0.1:11119
#     ol75-master: SSH username: vagrant
#     ol75-master: SSH auth method: private key
# ==> ol75-master: Machine booted and ready!
# ==> ol75-master: Checking for guest additions in VM...
# ==> ol75-master: Setting hostname...
# ==> ol75-master: Configuring and enabling network interfaces...
#     ol75-master: SSH address: 127.0.0.1:11119
#     ol75-master: SSH username: vagrant
#     ol75-master: SSH auth method: private key
# ==> ol75-master: Mounting shared folders...
#     ol75-master: /vagrant => D:/misc/vagrant/vagrant-multihost
#     ol75-master: /media/stage => D:/misc/vagrant/stage
# ==> ol75-master: Machine already provisioned. Run `vagrant provision` or use the `--provision`
# ==> ol75-master: flag to force provisioning. Provisioners marked to run always will still run.

Halting the system

Don't do this now if you want to proceed with the tutorial.

vagrant halt
# ==> ol75-master: Attempting graceful shutdown of VM...

Destroying the system

Don't do this now if you want to proceed with the tutorial.

vagrant destroy -f
# ==> fmwnode2: Destroying VM and associated drives...
# ==> fmwnode1: Destroying VM and associated drives...
# ==> dbnode: Destroying VM and associated drives...
# ==> ol75-master: Destroying VM and associated drives...

5 Using Ansible

Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates.
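
For orientation, a minimal ad-hoc command and a one-task playbook look like this (a sketch; the inventory file hosts.ini and the playbook name are hypothetical and not part of this tutorial's repositories):

ansible all -i hosts.ini -m ping
cat > install-tree.yml <<-"_EOF"
---
- hosts: all
  become: yes
  tasks:
    - name: Ensure the tree package is installed
      yum:
        name: tree
        state: present
_EOF
ansible-playbook -i hosts.ini install-tree.yml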

6 Using Docker

Docker is a computer program that performs operating-system-level virtualization, also known as "containerization".

Docker is used to run software packages called containers. Containers are isolated from each other and bundle their own tools, libraries and configuration files; they can communicate with each other through well-defined channels. All containers are run by a single operating system kernel and are thus more lightweight than virtual machines. Containers are created from images that specify their precise contents. Images are often created by combining and modifying standard images downloaded from public repositories.

6.1 Create a docker account

cygstart https://cloud.docker.com/
cygstart https://hub.docker.com/

6.2 Configure and test Docker installation inside the Linux guest in VirtualBox

Configuration

vagrant ssh ol75-master
# Last login: Fri Oct 19 21:47:19 2018
# /usr/bin/xauth:  file /home/vagrant/.Xauthority does not exist

To confirm that the user is part of the docker group, run the command below and check that it lists the group docker. If it does not (as is the case here), add the user to the group and log in again:

id -Gn
# vagrant
sudo /sbin/usermod -a -G docker vagrant
exit
vagrant ssh ol75-master
docker --version
# Docker version 18.03.1-ol, build 0d51d18
docker version
# Client:
#  Version:      18.03.1-ol
#  API version:  1.37
#  Go version:   go1.9.4
#  Git commit:   0d51d18
#  Built:        Wed Aug 22 21:59:42 2018
#  OS/Arch:      linux/amd64
#  Experimental: false
#  Orchestrator: swarm
# 
# Server:
#  Engine:
#   Version:      18.03.1-ol
#   API version:  1.37 (minimum version 1.12)
#   Go version:   go1.9.4
#   Git commit:   0d51d18
#   Built:        Wed Aug 22 22:03:05 2018
#   OS/Arch:      linux/amd64
#   Experimental: false
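
If the client cannot reach the daemon at this point, it may help to check that the docker service is actually running (a sketch; assumes the systemd unit is named docker, as it is in the Oracle Linux packages):

sudo systemctl status docker
sudo systemctl start docker
sudo systemctl enable docker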

Test

docker run hello-world
# Unable to find image 'hello-world:latest' locally
# Trying to pull repository docker.io/hello-world ... 
# latest: Pulling from docker.io/library/hello-world
# 
# Digest: sha256:0add3ace90ecb4adbf7777e9aacf18357296e799f81cabc9fde470971e499788
# Status: Downloaded newer image for hello-world:latest
# 
# Hello from Docker!
# This message shows that your installation appears to be working correctly.
# 
# To generate this message, Docker took the following steps:
#  1. The Docker client contacted the Docker daemon.
#  2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
#     (amd64)
#  3. The Docker daemon created a new container from that image which runs the
#     executable that produces the output you are currently reading.
#  4. The Docker daemon streamed that output to the Docker client, which sent it
#     to your terminal.
# 
# To try something more ambitious, you can run an Ubuntu container with:
#  $ docker run -it ubuntu bash
# 
# Share images, automate workflows, and more with a free Docker ID:
#  https://hub.docker.com/
# 
# For more examples and ideas, visit:
#  https://docs.docker.com/get-started/
# 
docker run hello-world
# 
# Hello from Docker!
# This message shows that your installation appears to be working correctly.
# 
# To generate this message, Docker took the following steps:
#  1. The Docker client contacted the Docker daemon.
#  2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
#     (amd64)
#  3. The Docker daemon created a new container from that image which runs the
#     executable that produces the output you are currently reading.
#  4. The Docker daemon streamed that output to the Docker client, which sent it
#     to your terminal.
# 
# To try something more ambitious, you can run an Ubuntu container with:
#  $ docker run -it ubuntu bash
# 
# Share images, automate workflows, and more with a free Docker ID:
#  https://hub.docker.com/
# 
# For more examples and ideas, visit:
#  https://docs.docker.com/get-started/
# 

Cleanup

docker rm $(docker ps -a -q)
# f4166ea05562
# a37c22da90e8
docker rmi $(docker images -q)
# Untagged: hello-world:latest
# Untagged: hello-world@sha256:0add3ace90ecb4adbf7777e9aacf18357296e799f81cabc9fde470971e499788
# Deleted: sha256:4ab4c602aa5eed5528a6620ff18a1dc4faef0e1ab3a5eddeddb410714478c67f
# Deleted: sha256:428c97da766c4c13b19088a471de6b622b038f3ae8efa10ec5a37d6d31a2df0b

6.3 Get Started with Docker on Linux guest in VirtualBox

Containers

Define a container with Dockerfile
mkdir -p ~/docker/part2 && cd ~/docker/part2
cat > Dockerfile <<-"_EOF"
# Use an official Python runtime as a parent image
FROM python:2.7-slim

# Set the working directory to /app
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# Install any needed packages specified in requirements.txt
RUN pip install --trusted-host pypi.python.org -r requirements.txt

# Make port 80 available to the world outside this container
EXPOSE 80

# Define environment variable
ENV NAME World

# Run app.py when the container launches
CMD ["python", "app.py"]
_EOF
The app itself
cat > requirements.txt <<-"_EOF"
Flask
Redis
_EOF
cat > app.py <<-"_EOF"
from flask import Flask
from redis import Redis, RedisError
import os
import socket

# Connect to Redis
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)

app = Flask(__name__)

@app.route("/")
def hello():
    try:
        visits = redis.incr("counter")
    except RedisError:
        visits = "<i>cannot connect to Redis, counter disabled</i>"

    html = "<h3>Hello {name}!</h3>" \
           "<b>Hostname:</b> {hostname}<br/>" \
           "<b>Visits:</b> {visits}"
    return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits)

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=80)
_EOF
ls -lt
# total 3
# -rw-rw-r--+ 1 vzell None 665 Oct 20 16:37 app.py
# -rw-rw-r--+ 1 vzell None  12 Oct 20 16:37 requirements.txt
# -rw-rw-r--+ 1 vzell None 513 Oct 20 16:37 Dockerfile
Build the app
docker build -t friendlyhello .
# Sending build context to Docker daemon   5.12kB
# Step 1/7 : FROM python:2.7-slim
# Trying to pull repository docker.io/python ... 
# 2.7-slim: Pulling from docker.io/library/python
# 
# 
# 
# 
# Digest: sha256:3b9c77ba2cdb829f6d41cb64a3e6b3fb7f40a9143648c506864b7fbf272dc77e
# Status: Downloaded newer image for python:2.7-slim
#  ---> 804b0a01ea83
# Step 2/7 : WORKDIR /app
# Removing intermediate container 33c0e81521eb
#  ---> 1e6c6d27d26c
# Step 3/7 : COPY . /app
#  ---> 6481ae3eca89
# Step 4/7 : RUN pip install --trusted-host pypi.python.org -r requirements.txt
#  ---> Running in af855bcd3f32
# Collecting Flask (from -r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/7f/e7/08578774ed4536d3242b14dacb4696386634607af824ea997202cd0edb4b/Flask-1.0.2-py2.py3-none-any.whl (91kB)
# Collecting Redis (from -r requirements.txt (line 2))
#   Downloading https://files.pythonhosted.org/packages/3b/f6/7a76333cf0b9251ecf49efff635015171843d9b977e4ffcf59f9c4428052/redis-2.10.6-py2.py3-none-any.whl (64kB)
# Collecting itsdangerous>=0.24 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/c9/c3/8dadb353944803796515ce68ad3944e6e7acc934f5036c40829cb96e64a1/ItsDangerous-1.0.0-py2.py3-none-any.whl
# Collecting Jinja2>=2.10 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/7f/ff/ae64bacdfc95f27a016a7bed8e8686763ba4d277a78ca76f32659220a731/Jinja2-2.10-py2.py3-none-any.whl (126kB)
# Collecting Werkzeug>=0.14 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/20/c4/12e3e56473e52375aa29c4764e70d1b8f3efa6682bef8d0aae04fe335243/Werkzeug-0.14.1-py2.py3-none-any.whl (322kB)
# Collecting click>=5.1 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/fa/37/45185cb5abbc30d7257104c434fe0b07e5a195a6847506c074527aa599ec/Click-7.0-py2.py3-none-any.whl (81kB)
# Collecting MarkupSafe>=0.23 (from Jinja2>=2.10->Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/4d/de/32d741db316d8fdb7680822dd37001ef7a448255de9699ab4bfcbdf4172b/MarkupSafe-1.0.tar.gz
# Building wheels for collected packages: MarkupSafe
#   Running setup.py bdist_wheel for MarkupSafe: started
#   Running setup.py bdist_wheel for MarkupSafe: finished with status 'done'
#   Stored in directory: /root/.cache/pip/wheels/33/56/20/ebe49a5c612fffe1c5a632146b16596f9e64676768661e4e46
# Successfully built MarkupSafe
# Installing collected packages: itsdangerous, MarkupSafe, Jinja2, Werkzeug, click, Flask, Redis
# Successfully installed Flask-1.0.2 Jinja2-2.10 MarkupSafe-1.0 Redis-2.10.6 Werkzeug-0.14.1 click-7.0 itsdangerous-1.0.0
# Removing intermediate container af855bcd3f32
#  ---> cf6a5205c1ae
# Step 5/7 : EXPOSE 80
#  ---> Running in 055a295ada88
# Removing intermediate container 055a295ada88
#  ---> c438cbde4b82
# Step 6/7 : ENV NAME World
#  ---> Running in 64afe620cb4f
# Removing intermediate container 64afe620cb4f
#  ---> f46110e88b18
# Step 7/7 : CMD ["python", "app.py"]
#  ---> Running in a80bd1e4e4f8
# Removing intermediate container a80bd1e4e4f8
#  ---> 9c6dc6d7d3ed
# Successfully built 9c6dc6d7d3ed
# Successfully tagged friendlyhello:latest
docker image ls
# REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
# friendlyhello       latest              45da216addc7        5 seconds ago       132MB
# python              2.7-slim            804b0a01ea83        4 days ago          120MB
Run and test the app
docker run -p 4000:80 friendlyhello
#  * Serving Flask app "app" (lazy loading)
#  * Environment: production
#    WARNING: Do not use the development server in a production environment.
#    Use a production WSGI server instead.
#  * Debug mode: off
#  * Running on http://0.0.0.0:80/ (Press CTRL+C to quit)

Test from a browser on the guest system: log in as user vagrant on the GNOME Desktop of the ol75-master VM, open a terminal window and enter the following command:

firefox http://localhost:4000 &

Test from your Windows host. Use the IP address that you set in the vagrant-hosts.yml file.

http://192.168.56.109:4000/

#  * Serving Flask app "app" (lazy loading)
#  * Environment: production
#    WARNING: Do not use the development server in a production environment.
#    Use a production WSGI server instead.
#  * Debug mode: off
#  * Running on http://0.0.0.0:80/ (Press CTRL+C to quit)
# 172.17.0.1 - - [17/Oct/2018 16:11:29] "GET / HTTP/1.1" 200 -
# 172.17.0.1 - - [17/Oct/2018 16:11:29] "GET /favicon.ico HTTP/1.1" 404 -

You could also use curl from a terminal on the guest system:

curl http://localhost:4000
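
The response should look roughly like this (a sketch: the hostname is the container ID, shown here as a placeholder, and the visit counter is disabled because no Redis container is running):

# <h3>Hello World!</h3><b>Hostname:</b> 0123456789ab<br/><b>Visits:</b> <i>cannot connect to Redis, counter disabled</i>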

Hit CTRL+C in the terminal to quit the running app.

Run the app in detached mode.

docker run -d -p 4000:80 friendlyhello
# 27c4f6d1b63b159348f2abf01306adad7666f93b69e9abbfb04d0c333e8f320a
docker container ls
# CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                  NAMES
# 27c4f6d1b63b        friendlyhello       "python app.py"     5 seconds ago       Up 4 seconds        0.0.0.0:4000->80/tcp   peaceful_jepsen
docker container ls | sed 1,1d | awk '{ print $1 }'
# 27c4f6d1b63b
docker container stop $(docker container ls | sed 1,1d | awk '{ print $1 }')
# 27c4f6d1b63b
Share your image
  • Create a docker account
    cygstart https://hub.docker.com/
    
  • Log in with your Docker ID
    docker login
    

    Enter your docker username

    vzell
    
    # Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
    # Username: vzell
    # Password: *********************************
    # Login Succeeded
    
  • Tag the image
    docker tag friendlyhello vzell/get-started:part2
    
    docker image ls
    
    # REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
    # vzell/get-started   part2               45da216addc7        11 minutes ago      132MB
    # friendlyhello       latest              45da216addc7        11 minutes ago      132MB
    # python              2.7-slim            804b0a01ea83        4 days ago          120MB
    
  • Publish the image
    docker push vzell/get-started:part2
    
    # The push refers to repository [docker.io/vzell/get-started]
    # 
    # 
    # 
    # 
    # 
    # 
    # 
    # part2: digest: sha256:1f1eda84ee4958b8dae1350d8b0f05591c23a33c091e494f81faff5abec945ff size: 1787
    
  • Pull and run the image from the remote repository

    Let's first delete the current image

    docker image ls | grep "friendlyhello" | awk '{ print $3 }'
    
    docker rmi $(docker image ls | grep "friendlyhello" | awk '{ print $3 }')
    
    # Error response from daemon: conflict: unable to delete 45da216addc7 (must be forced) - image is referenced in multiple repositories
    
    docker rmi $(docker image ls | grep "friendlyhello" | awk '{ print $3 }') --force
    
    docker image ls
    
    # REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
    # python              2.7-slim            804b0a01ea83        4 days ago          120MB
    
    docker run -p 4000:80 vzell/get-started:part2
    
    # Unable to find image 'vzell/get-started:part2' locally
    # Trying to pull repository docker.io/vzell/get-started ... 
    # part2: Pulling from docker.io/vzell/get-started
    # 
    # 
    # 
    # 
    # 
    # 
    # 
    # Digest: sha256:1f1eda84ee4958b8dae1350d8b0f05591c23a33c091e494f81faff5abec945ff
    # Status: Downloaded newer image for vzell/get-started:part2
    #  * Serving Flask app "app" (lazy loading)
    #  * Environment: production
    #    WARNING: Do not use the development server in a production environment.
    #    Use a production WSGI server instead.
    #  * Debug mode: off
    #  * Running on http://0.0.0.0:80/ (Press CTRL+C to quit)
    

    Test on your Windows host. Use the IP address that you set in the vagrant-hosts.yml file.

    http://192.168.56.109:4000/

    Press CTRL+C to quit

    docker image ls
    
    # REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
    # vzell/get-started   part2               9c6dc6d7d3ed        30 hours ago        132MB
    # python              2.7-slim            804b0a01ea83        4 days ago          120MB
    

Cleanup

Kill all running containers
docker ps -q
docker kill $(docker ps -q)
Delete all stopped containers
docker ps -a -q
# f720ea901010
# 27c4f6d1b63b
# 1d0be8573df9
docker rm $(docker ps -a -q)
# f720ea901010
# 27c4f6d1b63b
# 1d0be8573df9
Delete all images
docker images -q
# 9c6dc6d7d3ed
# 804b0a01ea83
docker rmi $(docker images -q)
# Untagged: vzell/get-started:part2
# Untagged: vzell/get-started@sha256:1f1eda84ee4958b8dae1350d8b0f05591c23a33c091e494f81faff5abec945ff
# Deleted: sha256:9c6dc6d7d3ed2830efdccbce73044cd2de0f4e892582e37e349f49493348b397
# Deleted: sha256:606148e7eb56ca5fadd42c2efeb08fde0bfefe0a4099143f7dbdc1abc5ecd456
# Deleted: sha256:13bbac417d32f7dd95c8e6a9d752161ea7f4e1bf218e28180eea0b41f9480372
# Deleted: sha256:4fe2ea1f924e4a5b8896fb54938a4b4b2197eb700a8c0349c46d37a30ee5ece6
# Untagged: python:2.7-slim
# Untagged: python@sha256:3b9c77ba2cdb829f6d41cb64a3e6b3fb7f40a9143648c506864b7fbf272dc77e
# Deleted: sha256:804b0a01ea8339185e3e4eeb9b308d02ac2065a6dcd29e291d2b84ca5d66cd7f
# Deleted: sha256:03771d0f4c3b83587b9057ded5ad72baf52ac505eab67a7afcac020f29051187
# Deleted: sha256:4d7045a899fead0f58e5d6348db0cb3eab6156f8c34b0ced03af477a28cc28a2
# Deleted: sha256:bc1b6dfe593aa351303dbfabb8951c86cabf7c1cc37733fe00aa1217c9a2d44c
# Deleted: sha256:237472299760d6726d376385edd9e79c310fe91d794bc9870d038417d448c2d5
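
Alternatively, Docker versions of this vintage also provide a single cleanup command that removes all stopped containers, unused networks and dangling images in one go; with -a it also removes unused tagged images (it asks for confirmation first):

docker system prune -a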

Exit from your Linux guest

exit

6.4 Get Started with Docker on Cygwin under Windows

From now on we will work again under Cygwin from our Windows host system.

Create two machines (on Windows)

docker-machine --version
# docker-machine.exe version 0.15.0, build b48dc28d
mkdir -p /misc/docker/.docker
export MACHINE_STORAGE_PATH="D:\misc\docker\.docker"
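
Optionally persist this setting so that new Cygwin shells pick it up as well (a sketch; appends to ~/.bashrc):

echo 'export MACHINE_STORAGE_PATH="D:\misc\docker\.docker"' >> ~/.bashrc
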
docker-machine create --help
# Usage: docker-machine create [OPTIONS] [arg...]
# 
# Create a machine
# 
# Description:
#    Run 'C:\ProgramData\chocolatey\lib\docker-machine\bin\docker-machine.exe create --driver name --help' to include the create flags for that driver in the help text.
# 
# Options:
#    
#    --driver, -d "virtualbox"										Driver to create machine with. [$MACHINE_DRIVER]
#    --engine-env [--engine-env option --engine-env option]						Specify environment variables to set in the engine
#    --engine-insecure-registry [--engine-insecure-registry option --engine-insecure-registry option]	Specify insecure registries to allow with the created engine
#    --engine-install-url "https://get.docker.com"							Custom URL to use for engine installation [$MACHINE_DOCKER_INSTALL_URL]
#    --engine-label [--engine-label option --engine-label option]						Specify labels for the created engine
#    --engine-opt [--engine-opt option --engine-opt option]						Specify arbitrary flags to include with the created engine in the form flag=value
#    --engine-registry-mirror [--engine-registry-mirror option --engine-registry-mirror option]		Specify registry mirrors to use [$ENGINE_REGISTRY_MIRROR]
#    --engine-storage-driver 										Specify a storage driver to use with the engine
#    --swarm												Configure Machine to join a Swarm cluster
#    --swarm-addr 											addr to advertise for Swarm (default: detect and use the machine IP)
#    --swarm-discovery 											Discovery service to use with Swarm
#    --swarm-experimental											Enable Swarm experimental features
#    --swarm-host "tcp://0.0.0.0:3376"									ip/socket to listen on for Swarm master
#    --swarm-image "swarm:latest"										Specify Docker image to use for Swarm [$MACHINE_SWARM_IMAGE]
#    --swarm-join-opt [--swarm-join-opt option --swarm-join-opt option]					Define arbitrary flags for Swarm join
#    --swarm-master											Configure Machine to be a Swarm master
#    --swarm-opt [--swarm-opt option --swarm-opt option]							Define arbitrary flags for Swarm master
#    --swarm-strategy "spread"										Define a default scheduling strategy for Swarm
#    --tls-san [--tls-san option --tls-san option]							Support extra SANs for TLS certs
#    --virtualbox-boot2docker-url 									The URL of the boot2docker image. Defaults to the latest available version [$VIRTUALBOX_BOOT2DOCKER_URL]
#    --virtualbox-cpu-count "1"										number of CPUs for the machine (-1 to use the number of CPUs available) [$VIRTUALBOX_CPU_COUNT]
#    --virtualbox-disk-size "20000"									Size of disk for host in MB [$VIRTUALBOX_DISK_SIZE]
#    --virtualbox-host-dns-resolver									Use the host DNS resolver [$VIRTUALBOX_HOST_DNS_RESOLVER]
#    --virtualbox-hostonly-cidr "192.168.99.1/24"								Specify the Host Only CIDR [$VIRTUALBOX_HOSTONLY_CIDR]
#    --virtualbox-hostonly-nicpromisc "deny"								Specify the Host Only Network Adapter Promiscuous Mode [$VIRTUALBOX_HOSTONLY_NIC_PROMISC]
#    --virtualbox-hostonly-nictype "82540EM"								Specify the Host Only Network Adapter Type [$VIRTUALBOX_HOSTONLY_NIC_TYPE]
#    --virtualbox-hostonly-no-dhcp									Disable the Host Only DHCP Server [$VIRTUALBOX_HOSTONLY_NO_DHCP]
#    --virtualbox-import-boot2docker-vm 									The name of a Boot2Docker VM to import [$VIRTUALBOX_BOOT2DOCKER_IMPORT_VM]
#    --virtualbox-memory "1024"										Size of memory for host in MB [$VIRTUALBOX_MEMORY_SIZE]
#    --virtualbox-nat-nictype "82540EM"									Specify the Network Adapter Type [$VIRTUALBOX_NAT_NICTYPE]
#    --virtualbox-no-dns-proxy										Disable proxying all DNS requests to the host [$VIRTUALBOX_NO_DNS_PROXY]
#    --virtualbox-no-share										Disable the mount of your home directory [$VIRTUALBOX_NO_SHARE]
#    --virtualbox-no-vtx-check										Disable checking for the availability of hardware virtualization before the vm is started [$VIRTUALBOX_NO_VTX_CHECK]
#    --virtualbox-share-folder 										Mount the specified directory instead of the default home location. Format: dir:name [$VIRTUALBOX_SHARE_FOLDER]
#    --virtualbox-ui-type "headless"									Specify the UI Type: (gui|sdl|headless|separate) [$VIRTUALBOX_UI_TYPE]

Be prepared for the Windows popups: Windows might ask for permission to create a network adapter, to configure a network adapter, and to configure a DHCP server. Sometimes such confirmation windows are minimized in the taskbar.

docker-machine create --driver virtualbox myvm1
# Creating CA: D:\misc\docker\.docker\certs\ca.pem
# Creating client certificate: D:\misc\docker\.docker\certs\cert.pem
# Running pre-create checks...
# (myvm1) Image cache directory does not exist, creating it at D:\misc\docker\.docker\cache...
# (myvm1) No default Boot2Docker ISO found locally, downloading the latest release...
# (myvm1) Latest release for github.com/boot2docker/boot2docker is v18.06.1-ce
# (myvm1) Downloading D:\misc\docker\.docker\cache\boot2docker.iso from https://github.com/boot2docker/boot2docker/releases/download/v18.06.1-ce/boot2docker.iso...
# (myvm1) 0%....10%....20%....30%....40%....50%....60%....70%....80%....90%....100%
# Creating machine...
# (myvm1) Copying D:\misc\docker\.docker\cache\boot2docker.iso to D:\misc\docker\.docker\machines\myvm1\boot2docker.iso...
# (myvm1) Creating VirtualBox VM...
# (myvm1) Creating SSH key...
# (myvm1) Starting the VM...
# (myvm1) Check network to re-create if needed...
# (myvm1) Windows might ask for the permission to create a network adapter. Sometimes, such confirmation window is minimized in the taskbar.
# (myvm1) Found a new host-only adapter: "VirtualBox Host-Only Ethernet Adapter #10"
# (myvm1) Windows might ask for the permission to configure a network adapter. Sometimes, such confirmation window is minimized in the taskbar.
# (myvm1) Windows might ask for the permission to configure a dhcp server. Sometimes, such confirmation window is minimized in the taskbar.
# (myvm1) Waiting for an IP...
# Waiting for machine to be running, this may take a few minutes...
# Detecting operating system of created instance...
# Waiting for SSH to be available...
# Detecting the provisioner...
# Provisioning with boot2docker...
# Copying certs to the local machine directory...
# Copying certs to the remote machine...
# Setting Docker configuration on the remote daemon...
# Checking connection to Docker...
# Docker is up and running!
# To see how to connect your Docker Client to the Docker Engine running on this virtual machine, run: C:\ProgramData\chocolatey\lib\docker-machine\bin\docker-machine.exe env myvm1
docker-machine create --driver virtualbox myvm2
# Running pre-create checks...
# Creating machine...
# (myvm2) Copying D:\misc\docker\.docker\cache\boot2docker.iso to D:\misc\docker\.docker\machines\myvm2\boot2docker.iso...
# (myvm2) Creating VirtualBox VM...
# (myvm2) Creating SSH key...
# (myvm2) Starting the VM...
# (myvm2) Check network to re-create if needed...
# (myvm2) Windows might ask for the permission to configure a dhcp server. Sometimes, such confirmation window is minimized in the taskbar.
# (myvm2) Waiting for an IP...
# Waiting for machine to be running, this may take a few minutes...
# Detecting operating system of created instance...
# Waiting for SSH to be available...
# Detecting the provisioner...
# Provisioning with boot2docker...
# Copying certs to the local machine directory...
# Copying certs to the remote machine...
# Setting Docker configuration on the remote daemon...
# Checking connection to Docker...
# Docker is up and running!
# To see how to connect your Docker Client to the Docker Engine running on this virtual machine, run: C:\ProgramData\chocolatey\lib\docker-machine\bin\docker-machine.exe env myvm2

Docker machine shell environment on Cygwin

docker-machine env myvm1
# export DOCKER_TLS_VERIFY="1"
# export DOCKER_HOST="tcp://192.168.99.100:2376"
# export DOCKER_CERT_PATH="D:\misc\docker\.docker\machines\myvm1"
# export DOCKER_MACHINE_NAME="myvm1"
# export COMPOSE_CONVERT_WINDOWS_PATHS="true"
# # Run this command to configure your shell: 
# # eval $("C:\ProgramData\chocolatey\lib\docker-machine\bin\docker-machine.exe" env myvm1)
eval $(docker-machine env myvm1)
env | sort | grep DOCKER
# DOCKER_CERT_PATH=D:\misc\docker\.docker\machines\myvm1
# DOCKER_HOST=tcp://192.168.99.100:2376
# DOCKER_MACHINE_NAME=myvm1
# DOCKER_TLS_VERIFY=1
docker-machine ls
# NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
# myvm1   *        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce   
# myvm2   -        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce   
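
To undo this and clear the DOCKER_* variables from the shell again, use the --unset flag:

docker-machine env -u
eval $(docker-machine env -u)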

Containers

Define a container with Dockerfile
mkdir -p /misc/docker/docker/part2 && cd /misc/docker/docker/part2
cat > Dockerfile <<-"_EOF"
# Use an official Python runtime as a parent image
FROM python:2.7-slim

# Set the working directory to /app
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# Install any needed packages specified in requirements.txt
RUN pip install --trusted-host pypi.python.org -r requirements.txt

# Make port 80 available to the world outside this container
EXPOSE 80

# Define environment variable
ENV NAME World

# Run app.py when the container launches
CMD ["python", "app.py"]
_EOF
The app itself
cat > requirements.txt <<-"_EOF"
Flask
Redis
_EOF
cat > app.py <<-"_EOF"
from flask import Flask
from redis import Redis, RedisError
import os
import socket

# Connect to Redis
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)

app = Flask(__name__)

@app.route("/")
def hello():
    try:
        visits = redis.incr("counter")
    except RedisError:
        visits = "<i>cannot connect to Redis, counter disabled</i>"

    html = "<h3>Hello {name}!</h3>" \
           "<b>Hostname:</b> {hostname}<br/>" \
           "<b>Visits:</b> {visits}"
    return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits)

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=80)
_EOF
ls -lt
# total 3
# -rw-rw-r--+ 1 vzell None 665 Oct 20 16:37 app.py
# -rw-rw-r--+ 1 vzell None  12 Oct 20 16:37 requirements.txt
# -rw-rw-r--+ 1 vzell None 513 Oct 20 16:37 Dockerfile
Build the app
docker build -t friendlyhello .
# Sending build context to Docker daemon   5.12kB
# Step 1/7 : FROM python:2.7-slim
# 2.7-slim: Pulling from library/python
# f17d81b4b692: Pulling fs layer
# 7429ec5d1bbc: Pulling fs layer
# 45b34d043e88: Pulling fs layer
# 49d33f4617f3: Pulling fs layer
# 49d33f4617f3: Waiting
# 7429ec5d1bbc: Verifying Checksum
# 7429ec5d1bbc: Download complete
# 49d33f4617f3: Verifying Checksum
# 49d33f4617f3: Download complete
# 45b34d043e88: Verifying Checksum
# 45b34d043e88: Download complete
# f17d81b4b692: Verifying Checksum
# f17d81b4b692: Download complete
# f17d81b4b692: Pull complete
# 7429ec5d1bbc: Pull complete
# 45b34d043e88: Pull complete
# 49d33f4617f3: Pull complete
# Digest: sha256:3b9c77ba2cdb829f6d41cb64a3e6b3fb7f40a9143648c506864b7fbf272dc77e
# Status: Downloaded newer image for python:2.7-slim
#  ---> 804b0a01ea83
# Step 2/7 : WORKDIR /app
#  ---> Running in f5de2dbe095b
# Removing intermediate container f5de2dbe095b
#  ---> e52e6b3b70ed
# Step 3/7 : COPY . /app
#  ---> b23817b88b63
# Step 4/7 : RUN pip install --trusted-host pypi.python.org -r requirements.txt
#  ---> Running in d26b4a6213b8
# Collecting Flask (from -r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/7f/e7/08578774ed4536d3242b14dacb4696386634607af824ea997202cd0edb4b/Flask-1.0.2-py2.py3-none-any.whl (91kB)
# Collecting Redis (from -r requirements.txt (line 2))
#   Downloading https://files.pythonhosted.org/packages/3b/f6/7a76333cf0b9251ecf49efff635015171843d9b977e4ffcf59f9c4428052/redis-2.10.6-py2.py3-none-any.whl (64kB)
# Collecting itsdangerous>=0.24 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/c9/c3/8dadb353944803796515ce68ad3944e6e7acc934f5036c40829cb96e64a1/ItsDangerous-1.0.0-py2.py3-none-any.whl
# Collecting Jinja2>=2.10 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/7f/ff/ae64bacdfc95f27a016a7bed8e8686763ba4d277a78ca76f32659220a731/Jinja2-2.10-py2.py3-none-any.whl (126kB)
# Collecting Werkzeug>=0.14 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/20/c4/12e3e56473e52375aa29c4764e70d1b8f3efa6682bef8d0aae04fe335243/Werkzeug-0.14.1-py2.py3-none-any.whl (322kB)
# Collecting click>=5.1 (from Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/fa/37/45185cb5abbc30d7257104c434fe0b07e5a195a6847506c074527aa599ec/Click-7.0-py2.py3-none-any.whl (81kB)
# Collecting MarkupSafe>=0.23 (from Jinja2>=2.10->Flask->-r requirements.txt (line 1))
#   Downloading https://files.pythonhosted.org/packages/4d/de/32d741db316d8fdb7680822dd37001ef7a448255de9699ab4bfcbdf4172b/MarkupSafe-1.0.tar.gz
# Building wheels for collected packages: MarkupSafe
#   Running setup.py bdist_wheel for MarkupSafe: started
#   Running setup.py bdist_wheel for MarkupSafe: finished with status 'done'
#   Stored in directory: /root/.cache/pip/wheels/33/56/20/ebe49a5c612fffe1c5a632146b16596f9e64676768661e4e46
# Successfully built MarkupSafe
# Installing collected packages: itsdangerous, MarkupSafe, Jinja2, Werkzeug, click, Flask, Redis
# Successfully installed Flask-1.0.2 Jinja2-2.10 MarkupSafe-1.0 Redis-2.10.6 Werkzeug-0.14.1 click-7.0 itsdangerous-1.0.0
# Removing intermediate container d26b4a6213b8
#  ---> bbea6fe9aeb2
# Step 5/7 : EXPOSE 80
#  ---> Running in 4e6488828ace
# Removing intermediate container 4e6488828ace
#  ---> ff7f3e08db42
# Step 6/7 : ENV NAME World
#  ---> Running in 5d877335ab42
# Removing intermediate container 5d877335ab42
#  ---> 94be63704a3b
# Step 7/7 : CMD ["python", "app.py"]
#  ---> Running in 4ce8bc9bf29e
# Removing intermediate container 4ce8bc9bf29e
#  ---> 35350e047d34
# Successfully built 35350e047d34
# Successfully tagged friendlyhello:latest
# SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.
docker image ls
# REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
# friendlyhello       latest              35350e047d34        9 minutes ago       132MB
# python              2.7-slim            804b0a01ea83        4 days ago          120MB
Run the app on the first machine
docker run -p 4000:80 friendlyhello
#  * Serving Flask app "app" (lazy loading)
#  * Environment: production
#    WARNING: Do not use the development server in a production environment.
#    Use a production WSGI server instead.
#  * Debug mode: off
#  * Running on http://0.0.0.0:80/ (Press CTRL+C to quit)

Hit Ctrl+C in your host terminal to quit the running app.

On Windows systems, Ctrl+C does not stop the container, unlike on our Linux guest. So, first type Ctrl+C to get the prompt back (or open another shell), then type docker container ls to list the running containers, followed by docker container stop <Container NAME or ID> to stop the container. Otherwise, you get an error response from the daemon when you try to re-run the container in the next step.

Test from your Windows host. Use the IP address that docker-machine reports via docker-machine ip <Machine-ID>.

http://192.168.99.100:4000

docker container ls
# CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                  NAMES
# 9370db949036        friendlyhello       "python app.py"     5 seconds ago       Up 8 seconds        0.0.0.0:4000->80/tcp   dreamy_pasteur
docker container stop $(docker container ls | grep "friendlyhello" | awk '{ print $1 }')
# 9370db949036
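
As a side note, the same container can be stopped using Docker's built-in filtering instead of the grep/awk pipeline above (a small sketch; the ancestor filter matches containers started from the friendlyhello image):

docker container stop $(docker container ls -q --filter ancestor=friendlyhello)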

Run in detached mode.

docker run -d -p 4000:80 friendlyhello
# 9370db949036a6ce52a33f55acb7760bfde89496b67238622a1c9811281583e4
docker-machine ls
# NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
# myvm1   *        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce   
# myvm2   -        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce   

Test from your Windows host. Use the IP address that docker-machine reports via docker-machine ip <Machine-ID>.

docker-machine ip myvm1
# 192.168.99.100
cygstart http://$(docker-machine ip myvm1):4000
curl http://$(docker-machine ip myvm1):4000
Share your image (NOT needed anymore, as we already did this from inside our Linux guest)
  • Create a docker account
    cygstart https://hub.docker.com/
    
  • Log in with your Docker ID
    docker login
    
    # Error: Cannot perform an interactive login from a non TTY device
    
    winpty docker login
    

    Enter your docker username, e.g. vzell

    vzell
    

    FIXME: This hangs.

  • Tag the image
    docker tag friendlyhello vzell/get-started:part2-win
    
    docker image ls
    
  • Publish the image
    docker push vzell/get-started:part2-win
    
Pull and run the image from the remote repository on the second machine
eval $(docker-machine env myvm2)
env | sort | grep DOCKER
# DOCKER_CERT_PATH=D:\misc\docker\.docker\machines\myvm2
# DOCKER_HOST=tcp://192.168.99.101:2376
# DOCKER_MACHINE_NAME=myvm2
# DOCKER_TLS_VERIFY=1
docker-machine ls
# NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
# myvm1   -        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce   
# myvm2   *        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce   
docker image ls
# REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
docker run -p 4000:80 vzell/get-started:part2
Unable to find image 'vzell/get-started:part2' locally
part2: Pulling from vzell/get-started
f17d81b4b692: Pulling fs layer
7429ec5d1bbc: Pulling fs layer
45b34d043e88: Pulling fs layer
49d33f4617f3: Pulling fs layer
2e1aa3b143dc: Pulling fs layer
d8e29732af4f: Pulling fs layer
913401b9ea98: Pulling fs layer
49d33f4617f3: Waiting
2e1aa3b143dc: Waiting
d8e29732af4f: Waiting
913401b9ea98: Waiting
7429ec5d1bbc: Verifying Checksum
7429ec5d1bbc: Download complete
49d33f4617f3: Verifying Checksum
49d33f4617f3: Download complete
2e1aa3b143dc: Verifying Checksum
2e1aa3b143dc: Download complete
d8e29732af4f: Verifying Checksum
d8e29732af4f: Download complete
45b34d043e88: Verifying Checksum
45b34d043e88: Download complete
913401b9ea98: Verifying Checksum
913401b9ea98: Download complete
f17d81b4b692: Verifying Checksum
f17d81b4b692: Download complete
f17d81b4b692: Pull complete
7429ec5d1bbc: Pull complete
45b34d043e88: Pull complete
49d33f4617f3: Pull complete
2e1aa3b143dc: Pull complete
d8e29732af4f: Pull complete
913401b9ea98: Pull complete
Digest: sha256:1f1eda84ee4958b8dae1350d8b0f05591c23a33c091e494f81faff5abec945ff
Status: Downloaded newer image for vzell/get-started:part2
 * Serving Flask app "app" (lazy loading)
 * Environment: production
   WARNING: Do not use the development server in a production environment.
   Use a production WSGI server instead.
 * Debug mode: off
 * Running on http://0.0.0.0:80/ (Press CTRL+C to quit)

Hit CTRL+C in your host terminal to quit the running app.

On Windows systems, CTRL+C does not stop the container. So, first type CTRL+C to get the prompt back (or open another shell), then type docker container ls to list the running containers, followed by docker container stop <Container NAME or ID> to stop the container. Otherwise, you get an error response from the daemon when you try to re-run the container in the next step.

Test from your Windows host. Use the IP address that docker-machine reports via docker-machine ip <Machine-ID>.

http://192.168.99.101:4000

docker container ls
# CONTAINER ID        IMAGE                     COMMAND             CREATED             STATUS              PORTS                  NAMES
# 1da3829ae6d9        vzell/get-started:part2   "python app.py"     29 seconds ago      Up 31 seconds       0.0.0.0:4000->80/tcp   vigilant_keller
docker container stop $(docker container ls | sed 1,1d | awk '{ print $1 }')
# 1da3829ae6d9

Run in detached mode

docker run -d -p 4000:80 vzell/get-started:part2
# 72a04dfebe7653445a3edba6a34d28db70b88e7295998aff0c9a4697453cfa71

Test from your Windows host. Use the IP address that docker-machine reports via docker-machine ip <Machine-ID>.

docker-machine ip myvm2
# 192.168.99.101
cygstart http://$(docker-machine ip myvm2):4000
curl http://$(docker-machine ip myvm2):4000
docker container ls | sed 1,1d | awk '{ print $1 }'
# 72a04dfebe76
docker container stop $(docker container ls | sed 1,1d | awk '{ print $1 }')
# 72a04dfebe76

Services

Your first docker-compose.yml file
mkdir -p /misc/docker/docker/part3 && cd /misc/docker/docker/part3
cat > docker-compose.yml <<-"_EOF"
version: "3"
services:
  web:
    # replace username/repo:tag with your name and image details
    image: vzell/get-started:part2
    deploy:
      replicas: 5
      resources:
        limits:
          cpus: "0.1"
          memory: 50M
      restart_policy:
        condition: on-failure
    ports:
      - "4000:80"
    networks:
      - webnet
networks:
  webnet:
_EOF
Run your new load-balanced app
docker swarm init
# Error response from daemon: could not choose an IP address to advertise since this system has multiple addresses on different interfaces (10.0.2.15 on enp0s3 and 192.168.56.109 on enp0s8) - specify one with --advertise-addr
docker swarm init --advertise-addr 10.0.2.15
# Swarm initialized: current node (ruiw5gb5nr4seru05tyoic83z) is now a manager.
# 
# To add a worker to this swarm, run the following command:
# 
#     docker swarm join --token SWMTKN-1-47bf7k82f8gj8gb4wqavnd6wvewzcqzq4y8eepc9bx58wpg08d-62fwt1mat1f66ukkwilt8awpn 10.0.2.15:2377
# 
# To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
# 
docker stack deploy -c docker-compose.yml getstartedlab
# Creating network getstartedlab_webnet
# Creating service getstartedlab_web
docker service ls
# ID                  NAME                MODE                REPLICAS            IMAGE                     PORTS
# b5uy14riia4q        getstartedlab_web   replicated          5/5                 vzell/get-started:part2   *:4000->80/tcp
docker service ps getstartedlab_web
# ID                  NAME                  IMAGE                     NODE                DESIRED STATE       CURRENT STATE           ERROR               PORTS
# qxa74s1ghejr        getstartedlab_web.1   vzell/get-started:part2   myvm2               Running             Running 4 seconds ago                       
# sq37nbxyniot        getstartedlab_web.2   vzell/get-started:part2   myvm2               Running             Running 4 seconds ago                       
# pmp5lv7pwyno        getstartedlab_web.3   vzell/get-started:part2   myvm2               Running             Running 4 seconds ago                       
# lwfclazzmp4s        getstartedlab_web.4   vzell/get-started:part2   myvm2               Running             Running 4 seconds ago                       
# sd1hdu0wp2pp        getstartedlab_web.5   vzell/get-started:part2   myvm2               Running             Running 4 seconds ago                       
docker container ls -q
# 68b6202d5b80
# 0ad951f8ed01
# 6233ca9f0f80
# 828b4986eb3b
# cab66ca26fdb
cygstart http://$(docker-machine ip myvm2):4000/

Refresh multiple times and watch the changing Hostname, which corresponds to the container IDs in the above docker container ls -q listing.

curl -4 http://$(docker-machine ip myvm2):4000
# <h3>Hello World!</h3><b>Hostname:</b> f97cea158373<br/><b>Visits:</b> <i>cannot connect to Redis, counter disabled</i>

The container ID changes, demonstrating the load-balancing; with each request, one of the 5 tasks is chosen, in a round-robin fashion, to respond. The container IDs match your output from the previous command (docker container ls -q).
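
To watch the round-robin behaviour from the shell, a small sketch that simply issues several requests in a row (each response should report a different Hostname, cycling through the task containers):

for i in 1 2 3 4 5; do curl -4 -s http://$(docker-machine ip myvm2):4000; echo; done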

Scale the app

You can scale the app by changing the replicas value in docker-compose.yml

cat > docker-compose.yml <<-"_EOF"
version: "3"
services:
  web:
    # replace username/repo:tag with your name and image details
    image: vzell/get-started:part2
    deploy:
      replicas: 7
      resources:
        limits:
          cpus: "0.1"
          memory: 50M
      restart_policy:
        condition: on-failure
    ports:
      - "4000:80"
    networks:
      - webnet
networks:
  webnet:
_EOF
docker stack deploy -c docker-compose.yml getstartedlab
# Updating service getstartedlab_web (id: b5uy14riia4q991jjigspxnme)
docker container ls -q
# eed5f3183df0
# fe31534ce871
# 68b6202d5b80
# 0ad951f8ed01
# 6233ca9f0f80
# 828b4986eb3b
# cab66ca26fdb
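
As an alternative sketch, a running service can also be rescaled directly, without editing and redeploying the Compose file (getstartedlab_web is the service name from the docker service ls output above):

docker service scale getstartedlab_web=7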
Take down the app and the swarm
docker stack rm getstartedlab
# Removing service getstartedlab_web
# Removing network getstartedlab_webnet
docker swarm leave --force
# Node left the swarm.

Swarms

Set up your swarm
  • Create a cluster (on Windows)

    Already done; we created the two machines earlier.

  • List the VMs and get their IP addresses
    eval $(docker-machine env myvm1)
    
    docker-machine ls
    
    # NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
    # myvm1   *        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce   
    # myvm2   -        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce   
    
    docker swarm init --advertise-addr 192.168.99.100
    
    # Swarm initialized: current node (5nusjel5dobkprhcs2ts1ga2l) is now a manager.
    # 
    # To add a worker to this swarm, run the following command:
    # 
    #     docker swarm join --token SWMTKN-1-40l51b82uf1lueq8ibt01ij0vm9hibytsxprhi4fbceov4qrib-4a6rc2xn4quy9vxazkrthdxnk 192.168.99.100:2377
    # 
    # To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
    # 
    
    eval $(docker-machine env myvm2)
    
    docker swarm join --token SWMTKN-1-40l51b82uf1lueq8ibt01ij0vm9hibytsxprhi4fbceov4qrib-4a6rc2xn4quy9vxazkrthdxnk 192.168.99.100:2377
    
    # This node joined a swarm as a worker.
    
    eval $(docker-machine env myvm1)
    
    docker node ls
    
    # ID                            HOSTNAME            STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
    # 5nusjel5dobkprhcs2ts1ga2l *   myvm1               Ready               Active              Leader              18.06.1-ce
    # uxmudairgs7kuauz7ihkncfpy     myvm2               Ready               Active                                  18.06.1-ce
    
Deploy your app on the swarm cluster
  • Deploy the app on the swarm manager
    docker stack deploy -c docker-compose.yml getstartedlab
    
    # Creating network getstartedlab_webnet
    # Creating service getstartedlab_web
    

    Note: If your image is stored on a private registry instead of Docker Hub, you need to be logged in using docker login <your-registry> and add the --with-registry-auth flag to the above command. For example:

    docker login registry.example.com
    docker stack deploy --with-registry-auth -c docker-compose.yml getstartedlab
    
    docker stack ps getstartedlab
    
    # ID                  NAME                  IMAGE                     NODE                DESIRED STATE       CURRENT STATE            ERROR               PORTS
    # eclewyyfec5r        getstartedlab_web.1   vzell/get-started:part2   myvm1               Running             Running 25 seconds ago                       
    # krcu5il70m7g        getstartedlab_web.2   vzell/get-started:part2   myvm2               Running             Running 28 seconds ago                       
    # m60cbxtguxx3        getstartedlab_web.3   vzell/get-started:part2   myvm1               Running             Running 24 seconds ago                       
    # 7i75gfqjq8wg        getstartedlab_web.4   vzell/get-started:part2   myvm2               Running             Running 28 seconds ago                       
    # n5w64tiskebm        getstartedlab_web.5   vzell/get-started:part2   myvm1               Running             Running 25 seconds ago                       
    # lqazi3tbehps        getstartedlab_web.6   vzell/get-started:part2   myvm2               Running             Running 28 seconds ago                       
    # zlnmtwqemftx        getstartedlab_web.7   vzell/get-started:part2   myvm2               Running             Running 28 seconds ago                       
    
  • Accessing your cluster
    docker-machine ls
    
    # NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
    # myvm1   *        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce   
    # myvm2   -        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce   
    
    cygstart http://$(docker-machine ip myvm1):4000
    
    cygstart http://$(docker-machine ip myvm2):4000
    
    curl http://$(docker-machine ip myvm1):4000
    
    curl http://$(docker-machine ip myvm2):4000
    
  • Iterating and scaling your app

    You can scale the app by changing the replicas value in docker-compose.yml

    cat > docker-compose.yml <<-"_EOF"
    version: "3"
    services:
      web:
        # replace username/repo:tag with your name and image details
        image: vzell/get-started:part2
        deploy:
          replicas: 4
          resources:
            limits:
              cpus: "0.1"
              memory: 50M
          restart_policy:
            condition: on-failure
        ports:
          - "4000:80"
        networks:
          - webnet
    networks:
      webnet:
    _EOF
    
    docker stack deploy -c docker-compose.yml getstartedlab
    
    # Updating service getstartedlab_web (id: 3kbq1ire0euhl8r3mhp7m8ll1)
    
    docker container ls -q
    
    # d6b65e97a6ba
    # e5ecd6580ddc
    # fb6e23b33f92
    # 9370db949036
    
  • Cleanup and reboot
    • Stacks and swarms
      docker stack rm getstartedlab
      
      # Removing service getstartedlab_web
      # Removing network getstartedlab_webnet
      
      eval $(docker-machine env myvm2)
      docker swarm leave
      
      # Node left the swarm.
      
      eval $(docker-machine env myvm1)
      docker swarm leave
      
      # Error response from daemon: You are attempting to leave the swarm on a node that is participating as a manager. Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message.
      
      eval $(docker-machine env myvm2)
      docker swarm leave --force
      
      # Node left the swarm.
      
    • Unsetting docker-machine shell variable settings
      eval $(docker-machine env -u)
      
      env | grep DOCKER
      
    • Restarting Docker machines
      docker-machine ls
      
      # NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
      # myvm1   -        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce   
      # myvm2   -        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce   
      
      docker-machine stop myvm2
      
      # Stopping "myvm2"...
      # Machine "myvm2" was stopped.
      
      docker-machine stop myvm1
      
      # Stopping "myvm1"...
      # Machine "myvm1" was stopped.
      
      docker-machine start myvm1
      
      # Starting "myvm1"...
      # (myvm1) Check network to re-create if needed...
      # (myvm1) Windows might ask for the permission to configure a dhcp server. Sometimes, such confirmation window is minimized in the taskbar.
      # (myvm1) Waiting for an IP...
      # Machine "myvm1" was started.
      # Waiting for SSH to be available...
      # Detecting the provisioner...
      # Started machines may have new IP addresses. You may need to re-run the `docker-machine env` command.
      
      docker-machine start myvm2
      
      # Starting "myvm2"...
      # (myvm2) Check network to re-create if needed...
      # (myvm2) Windows might ask for the permission to configure a dhcp server. Sometimes, such confirmation window is minimized in the taskbar.
      # (myvm2) Waiting for an IP...
      # Machine "myvm2" was started.
      # Waiting for SSH to be available...
      # Detecting the provisioner...
      # Started machines may have new IP addresses. You may need to re-run the `docker-machine env` command.
      

Stacks

Prerequisites
eval $(docker-machine env myvm2)
docker swarm leave
eval $(docker-machine env myvm1)
docker swarm leave --force
docker-machine stop myvm1 myvm2
docker-machine start myvm1 myvm2
eval $(docker-machine env myvm1)
docker-machine ls
# NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
# myvm1   *        virtualbox   Running   tcp://192.168.99.100:2376           v18.06.1-ce
# myvm2   -        virtualbox   Running   tcp://192.168.99.101:2376           v18.06.1-ce
docker run -d -p 80:80 vzell/get-started:part2
cygstart http://$(docker-machine ip myvm1)
cygstart http://$(docker-machine ip myvm2)
curl http://$(docker-machine ip myvm1)
curl http://$(docker-machine ip myvm2)
eval $(docker-machine env myvm1)
docker node ls
# Error response from daemon: This node is not a swarm manager. Use "docker swarm init" or "docker swarm join" to connect this node to swarm and try again.
docker swarm init --advertise-addr 192.168.99.100
# Swarm initialized: current node (bt6jvp1s872tc124dh1w7xz8b) is now a manager.
# 
# To add a worker to this swarm, run the following command:
# 
#     docker swarm join --token SWMTKN-1-347kh8m4cuww6kipbhkvcuqjwxk7kiq740t4juc4axkyl8a4ej-61t6r0ryluocs2g8ce3p7h5bv 192.168.99.100:2377
# 
# To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
# 
eval $(docker-machine env myvm2)
docker swarm join --token SWMTKN-1-347kh8m4cuww6kipbhkvcuqjwxk7kiq740t4juc4axkyl8a4ej-61t6r0ryluocs2g8ce3p7h5bv 192.168.99.100:2377
# This node joined a swarm as a worker.
eval $(docker-machine env myvm1)
docker node ls
# ID                            HOSTNAME            STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
# bt6jvp1s872tc124dh1w7xz8b *   myvm1               Ready               Active              Leader              18.06.1-ce
# vmp7yafp4itl54zrsz02ehtyu     myvm2               Ready               Active                                  18.06.1-ce
Add a new service and redeploy
mkdir -p /misc/docker/docker/part4 && cd /misc/docker/docker/part4
cat > docker-compose.yml <<-"_EOF"
version: "3"
services:
  web:
    # replace username/repo:tag with your name and image details
    image: vzell/get-started:part2
    deploy:
      replicas: 5
      restart_policy:
        condition: on-failure
      resources:
        limits:
          cpus: "0.1"
          memory: 50M
    ports:
      - "80:80"
    networks:
      - webnet
  visualizer:
    image: dockersamples/visualizer:stable
    ports:
      - "8080:8080"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
    deploy:
      placement:
        constraints: [node.role == manager]
    networks:
      - webnet
networks:
  webnet:
_EOF
docker stack deploy -c docker-compose.yml getstartedlab
# Creating network getstartedlab_webnet
# Creating service getstartedlab_web
# Creating service getstartedlab_visualizer
cygstart http://$(docker-machine ip myvm1):8080/
docker stack ps getstartedlab
# ID                  NAME                         IMAGE                             NODE                DESIRED STATE       CURRENT STATE                ERROR               PORTS
# qloycjlq8977        getstartedlab_visualizer.1   dockersamples/visualizer:stable   myvm1               Running             Running about a minute ago                       
# k4wh1pz5y1ej        getstartedlab_web.1          vzell/get-started:part2           myvm2               Running             Running about a minute ago                       
# rdzht0ja3xt0        getstartedlab_web.2          vzell/get-started:part2           myvm1               Running             Running about a minute ago                       
# 4z893nqvcynv        getstartedlab_web.3          vzell/get-started:part2           myvm2               Running             Running about a minute ago                       
# l8a4zik3gq5j        getstartedlab_web.4          vzell/get-started:part2           myvm1               Running             Running about a minute ago                       
# fk9whp13lgrq        getstartedlab_web.5          vzell/get-started:part2           myvm2               Running             Running about a minute ago                       
Persist the data
cat > docker-compose.yml <<-"_EOF"
version: "3"
services:
  web:
    # replace username/repo:tag with your name and image details
    image: vzell/get-started:part2
    deploy:
      replicas: 5
      restart_policy:
        condition: on-failure
      resources:
        limits:
          cpus: "0.1"
          memory: 50M
    ports:
      - "80:80"
    networks:
      - webnet
  visualizer:
    image: dockersamples/visualizer:stable
    ports:
      - "8080:8080"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
    deploy:
      placement:
        constraints: [node.role == manager]
    networks:
      - webnet
  redis:
    image: redis
    ports:
      - "6379:6379"
    volumes:
      - "/home/docker/data:/data"
    deploy:
      placement:
        constraints: [node.role == manager]
    command: redis-server --appendonly yes
    networks:
      - webnet
networks:
  webnet:
_EOF
docker-machine ssh myvm1 "mkdir ./data"
docker stack deploy -c docker-compose.yml getstartedlab
# Updating service getstartedlab_visualizer (id: ir57zgo5lavw3v13qqa6iujwu)
# Creating service getstartedlab_redis
# Updating service getstartedlab_web (id: vzg2z8bjif9ievck1nhlg3h25)
docker service ls
# ID                  NAME                       MODE                REPLICAS            IMAGE                             PORTS
# jbxqsnvi564k        getstartedlab_redis        replicated          1/1                 redis:latest                      *:6379->6379/tcp
# ir57zgo5lavw        getstartedlab_visualizer   replicated          1/1                 dockersamples/visualizer:stable   *:8080->8080/tcp
# vzg2z8bjif9i        getstartedlab_web          replicated          5/5                 vzell/get-started:part2           *:80->80/tcp
cygstart http://$(docker-machine ip myvm2):8080/
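
To check that Redis is actually persisting to the bind-mounted directory on the manager, a quick sketch (appendonly.aof is the default Redis append-only file name; it appears once the service has started and written data):

docker-machine ssh myvm1 "ls -l ./data"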

Deploy your app on a cloud provider

Cleanup

docker-machine ls -q
# myvm1
# myvm2
eval $(docker-machine env myvm2)
docker swarm leave
eval $(docker-machine env myvm1)
docker swarm leave --force
docker-machine stop $(docker-machine ls -q)
docker-machine rm $(docker-machine ls -q) --force
# About to remove myvm1, myvm2
# WARNING: This action will delete both local reference and remote instance.
# Successfully removed myvm1
# Successfully removed myvm2

Recap

docker-machine create --driver virtualbox myvm1 # Create a VM (Mac, Win7, Linux)
docker-machine create -d hyperv --hyperv-virtual-switch "myswitch" myvm1 # Win10
docker-machine env myvm1                # View basic information about your node
docker-machine ssh myvm1 "docker node ls"         # List the nodes in your swarm
docker-machine ssh myvm1 "docker node inspect <node ID>"        # Inspect a node
docker-machine ssh myvm1 "docker swarm join-token -q worker"   # View join token
docker-machine ssh myvm1   # Open an SSH session with the VM; type "exit" to end
docker node ls                # View nodes in swarm (while logged on to manager)
docker-machine ssh myvm2 "docker swarm leave"  # Make the worker leave the swarm
docker-machine ssh myvm1 "docker swarm leave -f" # Make master leave, kill swarm
docker-machine ls # list VMs, asterisk shows which VM this shell is talking to
docker-machine start myvm1            # Start a VM that is currently not running
docker-machine env myvm1      # show environment variables and command for myvm1
eval $(docker-machine env myvm1)         # Mac command to connect shell to myvm1
& "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env myvm1 | Invoke-Expression   # Windows command to connect shell to myvm1
docker stack deploy -c <file> <app>  # Deploy an app; command shell must be set to talk to manager (myvm1), uses local Compose file
docker-machine scp docker-compose.yml myvm1:~ # Copy file to node's home dir (only required if you use ssh to connect to manager and deploy the app)
docker-machine ssh myvm1 "docker stack deploy -c <file> <app>"   # Deploy an app using ssh (you must have first copied the Compose file to myvm1)
eval $(docker-machine env -u)     # Disconnect shell from VMs, use native docker
docker-machine stop $(docker-machine ls -q)               # Stop all running VMs
docker-machine rm $(docker-machine ls -q) # Delete all VMs and their disk images

6.5 First steps with provisioning Docker containers using Vagrant as the provider

mkdir -p /misc/docker/vagrant && cd /misc/docker/vagrant
cat > Vagrantfile <<-"_EOF"
ENV['VAGRANT_DEFAULT_PROVIDER'] = 'docker'

Vagrant.configure("2") do |config|

  config.vm.define "my-little-container" do |m|

    m.vm.provider :docker do |d|
      d.force_host_vm = true
      d.name = 'my-little-container'
      d.build_dir = "."
      d.cmd = ["ping", "-c 51", "127.0.0.1"]
      d.remains_running = true
      d.vagrant_machine = "dockerhostvm"
      d.vagrant_vagrantfile = "./DockerHostVagrantfile"
    end
  end
end
_EOF
cat > DockerHostVagrantfile <<-"_EOF"
Vagrant.configure("2") do |config|

  config.vm.provision "docker"

# The following line terminates all ssh connections. Therefore Vagrant will be forced to reconnect.
# That's a workaround for the following error:
#
# Command: "docker" "ps" "-a" "-q" "--no-trunc"
# 
# Stderr: Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock:
# Get http://%2Fvar%2Frun%2Fdocker.sock/v1.38/containers/json?all=1: dial unix /var/run/docker.sock: connect: permission denied

  config.vm.provision "shell", inline:
    "ps aux | grep 'sshd:' | awk '{print $2}' | xargs kill"

  config.vm.define "dockerhostvm"
  config.vm.box = "ubuntu/trusty64"

  config.vm.provider :virtualbox do |vb|
    vb.name = "dockerhostvm"
  end

end
_EOF
cat > Dockerfile <<-"_EOF"
FROM ubuntu:14.04

RUN mkdir /u01 && \
chmod a+xr /u01
COPY /files/readme.txt /u01/
_EOF
mkdir -p files && echo "Hi from Volki" > files/readme.txt
vagrant up
Bringing machine 'my-little-container' up with 'docker' provider...
==> my-little-container: Docker host is required. One will be created if necessary...
    my-little-container: Vagrant will now create or start a local VM to act as the Docker
    my-little-container: host. You'll see the output of the `vagrant up` for this VM below.
    my-little-container:  
    dockerhostvm: Importing base box 'ubuntu/trusty64'...
    dockerhostvm: Matching MAC address for NAT networking...
    dockerhostvm: Checking if box 'ubuntu/trusty64' is up to date...
    dockerhostvm: Setting the name of the VM: dockerhostvm
    dockerhostvm: Clearing any previously set forwarded ports...
    dockerhostvm: Clearing any previously set network interfaces...
    dockerhostvm: Preparing network interfaces based on configuration...
    dockerhostvm: Adapter 1: nat
    dockerhostvm: Forwarding ports...
    dockerhostvm: 22 (guest) => 2222 (host) (adapter 1)
    dockerhostvm: Booting VM...
    dockerhostvm: Waiting for machine to boot. This may take a few minutes...
    dockerhostvm: SSH address: 127.0.0.1:2222
    dockerhostvm: SSH username: vagrant
    dockerhostvm: SSH auth method: private key
    dockerhostvm: 
    dockerhostvm: Vagrant insecure key detected. Vagrant will automatically replace
    dockerhostvm: this with a newly generated keypair for better security.
    dockerhostvm: 
    dockerhostvm: Inserting generated public key within guest...
    dockerhostvm: Removing insecure key from the guest if it's present...
    dockerhostvm: Key inserted! Disconnecting and reconnecting using new SSH key...
    dockerhostvm: Machine booted and ready!
    dockerhostvm: Checking for guest additions in VM...
    dockerhostvm: The guest additions on this VM do not match the installed version of
    dockerhostvm: VirtualBox! In most cases this is fine, but in rare cases it can
    dockerhostvm: prevent things such as shared folders from working properly. If you see
    dockerhostvm: shared folder errors, please make sure the guest additions within the
    dockerhostvm: virtual machine match the version of VirtualBox you have installed on
    dockerhostvm: your host and reload your VM.
    dockerhostvm: 
    dockerhostvm: Guest Additions Version: 4.3.36
    dockerhostvm: VirtualBox Version: 5.2
    dockerhostvm: Mounting shared folders...
    dockerhostvm: /vagrant => D:/misc/docker/vagrant
    dockerhostvm: Running provisioner: docker...
    dockerhostvm: Installing Docker onto machine...
    dockerhostvm: Running provisioner: shell...
    dockerhostvm: Running: inline script
==> my-little-container: Syncing folders to the host VM...
    dockerhostvm: Mounting shared folders...
    dockerhostvm: /var/lib/docker/docker_1539709422_87058 => D:/misc/docker/vagrant
    dockerhostvm: /var/lib/docker/docker_build_1a8d0f70620301b3c04205ba73a3d2c2 => D:/misc/docker/vagrant
==> my-little-container: Building the container from a Dockerfile...
    my-little-container: Sending build context to Docker daemon  24.58kB
    my-little-container: Step 1/3 : FROM ubuntu:14.04
    my-little-container: 14.04: Pulling from library/ubuntu
    my-little-container: 72c01b436656: Pulling fs layer
    my-little-container: 65584f5f70ee: Pulling fs layer
    my-little-container: dc9874b52952: Pulling fs layer
    my-little-container: 86656bbaa6fd: Pulling fs layer
    my-little-container: 7fe6916ab382: Pulling fs layer
    my-little-container: 86656bbaa6fd: Waiting
    my-little-container: 7fe6916ab382: Waiting
    my-little-container: 65584f5f70ee: Verifying Checksum
    my-little-container: 65584f5f70ee: Download complete
    my-little-container: dc9874b52952: Verifying Checksum
    my-little-container: dc9874b52952: Download complete
    my-little-container: 7fe6916ab382: Verifying Checksum
    my-little-container: 7fe6916ab382: Download complete
    my-little-container: 86656bbaa6fd: Verifying Checksum
    my-little-container: 86656bbaa6fd: Download complete
    my-little-container: 72c01b436656: Verifying Checksum
    my-little-container: 72c01b436656: Download complete
    my-little-container: 72c01b436656: Pull complete
    my-little-container: 65584f5f70ee: Pull complete
    my-little-container: dc9874b52952: Pull complete
    my-little-container: 86656bbaa6fd: Pull complete
    my-little-container: 7fe6916ab382: Pull complete
    my-little-container: Digest: sha256:cb96ec8eb632c873d5130053cf5e2548234e5275d8115a39394289d96c9963a6
    my-little-container: Status: Downloaded newer image for ubuntu:14.04
    my-little-container:  ---> c32fae490809
    my-little-container: Step 2/3 : RUN mkdir /u01 && chmod a+xr /u01
    my-little-container:  ---> Running in 6f263b363ab8
    my-little-container: Removing intermediate container 6f263b363ab8
    my-little-container:  ---> e0f927f150ac
    my-little-container: Step 3/3 : COPY /files/readme.txt /u01/
    my-little-container:  ---> 24119281cf25
    my-little-container: Successfully built 24119281cf25
    my-little-container: 
    my-little-container: Image: 24119281cf25
==> my-little-container: Warning: When using a remote Docker host, forwarded ports will NOT be
==> my-little-container: immediately available on your machine. They will still be forwarded on
==> my-little-container: the remote machine, however, so if you have a way to access the remote
==> my-little-container: machine, then you should be able to access those ports there. This is
==> my-little-container: not an error, it is only an informational message.
==> my-little-container: Creating the container...
    my-little-container:   Name: my-little-container
    my-little-container:  Image: 24119281cf25
    my-little-container:    Cmd: ping -c 51 127.0.0.1
    my-little-container: Volume: /var/lib/docker/docker_1539709422_87058:/vagrant
    my-little-container:  
    my-little-container: Container created: 0f29ba9ccf86e0e0
==> my-little-container: Starting container...
==> my-little-container: Provisioners will not be run since container doesn't support SSH.
vagrant docker-logs
# ==> my-little-container: PING 127.0.0.1 (127.0.0.1) 56(84) bytes of data.
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=1 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=2 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=3 ttl=64 time=0.068 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=4 ttl=64 time=0.033 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=5 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=6 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=7 ttl=64 time=0.044 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=8 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=9 ttl=64 time=0.042 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=10 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=11 ttl=64 time=0.041 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=12 ttl=64 time=0.041 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=13 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=14 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=15 ttl=64 time=0.041 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=16 ttl=64 time=0.045 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=17 ttl=64 time=0.040 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=18 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=19 ttl=64 time=0.055 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=20 ttl=64 time=0.060 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=21 ttl=64 time=0.026 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=22 ttl=64 time=0.066 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=23 ttl=64 time=0.033 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=24 ttl=64 time=0.027 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=25 ttl=64 time=0.069 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=26 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=27 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=28 ttl=64 time=0.042 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=29 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=30 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=31 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=32 ttl=64 time=0.036 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=33 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=34 ttl=64 time=0.052 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=35 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=36 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=37 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=38 ttl=64 time=0.045 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=39 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=40 ttl=64 time=0.043 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=41 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=42 ttl=64 time=0.061 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=43 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=44 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=45 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=46 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=47 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=48 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=49 ttl=64 time=0.027 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=50 ttl=64 time=0.024 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=51 ttl=64 time=0.031 ms
# ==> my-little-container:  
# ==> my-little-container: --- 127.0.0.1 ping statistics ---
# ==> my-little-container: 51 packets transmitted, 51 received, 0% packet loss, time 50001ms
# ==> my-little-container: rtt min/avg/max/mdev = 0.024/0.035/0.069/0.013 ms
vagrant global-status
# id       name                provider   state    directory                           
# -------------------------------------------------------------------------------------
# a56a065  default             virtualbox poweroff D:/misc/vagrant/ol75-generic        
# 3800a17  ol75                virtualbox aborted  D:/misc/vagrant/ol75                
# efb816a  ol610               virtualbox poweroff D:/misc/vagrant/packer-ol610        
# 2f41e27  machine1            virtualbox poweroff D:/misc/vagrant/machine1            
# 7c0743a  machine2            virtualbox poweroff D:/misc/vagrant/machine2            
# 66a94a8  server2             virtualbox poweroff D:/misc/vagrant/server2             
# 125d011  dbhost              virtualbox poweroff D:/misc/vagrant/dbhost              
# a60c29a  server1             virtualbox poweroff D:/misc/vagrant/ol75-server1        
# e278ea5  server2             virtualbox poweroff D:/misc/vagrant/ol75-server2        
# 63f7ed1  master              virtualbox poweroff D:/misc/vagrant/multi               
# 137537a  node1               virtualbox poweroff D:/misc/vagrant/multi               
# 99317bc  node2               virtualbox poweroff D:/misc/vagrant/multi               
# 6d1f3a2  ol75-controlhost    virtualbox running  D:/misc/vagrant/ol75-controlhost    
# 7c0b596  dbnode              virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 0bcbcea  fmwnode1            virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 7150346  fmwnode2            virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 40c91f2  dockerhostvm        virtualbox running  D:/misc/docker/vagrant              
# a36b91e  my-little-container docker     stopped  D:/misc/docker/vagrant              
#  
# The above shows information about all known Vagrant environments
# on this machine. This data is cached and may not be completely
# up-to-date (use "vagrant global-status --prune" to prune invalid
# entries). To interact with any of the machines, you can go to that
# directory and run Vagrant, or you can use the ID directly with
# Vagrant commands from any directory. For example:
# "vagrant destroy 1a2b3c4d"
vagrant halt 40c91f2
vagrant up
# Bringing machine 'my-little-container' up with 'docker' provider...
# ==> my-little-container: Docker host is required. One will be created if necessary...
#     my-little-container: Vagrant will now create or start a local VM to act as the Docker
#     my-little-container: host. You'll see the output of the `vagrant up` for this VM below.
#     my-little-container:  
#     dockerhostvm: Checking if box 'ubuntu/trusty64' is up to date...
#     dockerhostvm: Clearing any previously set forwarded ports...
#     dockerhostvm: Clearing any previously set network interfaces...
#     dockerhostvm: Preparing network interfaces based on configuration...
#     dockerhostvm: Adapter 1: nat
#     dockerhostvm: Forwarding ports...
#     dockerhostvm: 22 (guest) => 2222 (host) (adapter 1)
#     dockerhostvm: Booting VM...
#     dockerhostvm: Waiting for machine to boot. This may take a few minutes...
#     dockerhostvm: SSH address: 127.0.0.1:2222
#     dockerhostvm: SSH username: vagrant
#     dockerhostvm: SSH auth method: private key
#     dockerhostvm: Machine booted and ready!
#     dockerhostvm: Checking for guest additions in VM...
#     dockerhostvm: The guest additions on this VM do not match the installed version of
#     dockerhostvm: VirtualBox! In most cases this is fine, but in rare cases it can
#     dockerhostvm: prevent things such as shared folders from working properly. If you see
#     dockerhostvm: shared folder errors, please make sure the guest additions within the
#     dockerhostvm: virtual machine match the version of VirtualBox you have installed on
#     dockerhostvm: your host and reload your VM.
#     dockerhostvm: 
#     dockerhostvm: Guest Additions Version: 4.3.36
#     dockerhostvm: VirtualBox Version: 5.2
#     dockerhostvm: Mounting shared folders...
#     dockerhostvm: /vagrant => D:/misc/docker/vagrant
#     dockerhostvm: Running provisioner: docker...
#     dockerhostvm: Running provisioner: shell...
#     dockerhostvm: Running: inline script
# ==> my-little-container: Syncing folders to the host VM...
#     dockerhostvm: Mounting shared folders...
#     dockerhostvm: /var/lib/docker/docker_1539712203_52767 => D:/misc/docker/vagrant
#     dockerhostvm: /var/lib/docker/docker_build_1a8d0f70620301b3c04205ba73a3d2c2 => D:/misc/docker/vagrant
# ==> my-little-container: Image is already built from the Dockerfile. `vagrant reload` to rebuild.
# ==> my-little-container: Vagrant has noticed that the synced folder definitions have changed.
# ==> my-little-container: With Docker, these synced folder changes won't take effect until you
# ==> my-little-container: destroy the container and recreate it.
# ==> my-little-container: Starting container...
# ==> my-little-container: Provisioners will not be run since container doesn't support SSH.
vagrant docker-logs
# ==> my-little-container: PING 127.0.0.1 (127.0.0.1) 56(84) bytes of data.
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=1 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=2 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=3 ttl=64 time=0.068 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=4 ttl=64 time=0.033 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=5 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=6 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=7 ttl=64 time=0.044 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=8 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=9 ttl=64 time=0.042 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=10 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=11 ttl=64 time=0.041 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=12 ttl=64 time=0.041 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=13 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=14 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=15 ttl=64 time=0.041 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=16 ttl=64 time=0.045 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=17 ttl=64 time=0.040 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=18 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=19 ttl=64 time=0.055 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=20 ttl=64 time=0.060 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=21 ttl=64 time=0.026 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=22 ttl=64 time=0.066 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=23 ttl=64 time=0.033 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=24 ttl=64 time=0.027 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=25 ttl=64 time=0.069 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=26 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=27 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=28 ttl=64 time=0.042 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=29 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=30 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=31 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=32 ttl=64 time=0.036 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=33 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=34 ttl=64 time=0.052 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=35 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=36 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=37 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=38 ttl=64 time=0.045 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=39 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=40 ttl=64 time=0.043 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=41 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=42 ttl=64 time=0.061 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=43 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=44 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=45 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=46 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=47 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=48 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=49 ttl=64 time=0.027 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=50 ttl=64 time=0.024 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=51 ttl=64 time=0.031 ms
# ==> my-little-container:  
# ==> my-little-container: --- 127.0.0.1 ping statistics ---
# ==> my-little-container: 51 packets transmitted, 51 received, 0% packet loss, time 50001ms
# ==> my-little-container: rtt min/avg/max/mdev = 0.024/0.035/0.069/0.013 ms
# ==> my-little-container: PING 127.0.0.1 (127.0.0.1) 56(84) bytes of data.
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=1 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=2 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=3 ttl=64 time=0.026 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=4 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=5 ttl=64 time=0.024 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=6 ttl=64 time=0.050 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=7 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=8 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=9 ttl=64 time=0.026 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=10 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=11 ttl=64 time=0.022 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=12 ttl=64 time=0.054 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=13 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=14 ttl=64 time=0.031 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=15 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=16 ttl=64 time=0.055 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=17 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=18 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=19 ttl=64 time=0.046 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=20 ttl=64 time=0.054 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=21 ttl=64 time=0.027 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=22 ttl=64 time=0.029 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=23 ttl=64 time=0.059 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=24 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=25 ttl=64 time=0.027 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=26 ttl=64 time=0.061 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=27 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=28 ttl=64 time=0.046 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=29 ttl=64 time=0.026 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=30 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=31 ttl=64 time=0.045 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=32 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=33 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=34 ttl=64 time=0.031 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=35 ttl=64 time=0.024 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=36 ttl=64 time=0.045 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=37 ttl=64 time=0.035 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=38 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=39 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=40 ttl=64 time=0.046 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=41 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=42 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=43 ttl=64 time=0.030 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=44 ttl=64 time=0.028 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=45 ttl=64 time=0.025 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=46 ttl=64 time=0.032 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=47 ttl=64 time=0.048 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=48 ttl=64 time=0.062 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=49 ttl=64 time=0.100 ms
# ==> my-little-container: 64 bytes from 127.0.0.1: icmp_seq=50 ttl=64 time=0.087 ms
vagrant global-status
# id       name                provider   state    directory                           
# -------------------------------------------------------------------------------------
# a56a065  default             virtualbox poweroff D:/misc/vagrant/ol75-generic        
# 3800a17  ol75                virtualbox aborted  D:/misc/vagrant/ol75                
# efb816a  ol610               virtualbox poweroff D:/misc/vagrant/packer-ol610        
# 2f41e27  machine1            virtualbox poweroff D:/misc/vagrant/machine1            
# 7c0743a  machine2            virtualbox poweroff D:/misc/vagrant/machine2            
# 66a94a8  server2             virtualbox poweroff D:/misc/vagrant/server2             
# 125d011  dbhost              virtualbox poweroff D:/misc/vagrant/dbhost              
# a60c29a  server1             virtualbox poweroff D:/misc/vagrant/ol75-server1        
# e278ea5  server2             virtualbox poweroff D:/misc/vagrant/ol75-server2        
# 63f7ed1  master              virtualbox poweroff D:/misc/vagrant/multi               
# 137537a  node1               virtualbox poweroff D:/misc/vagrant/multi               
# 99317bc  node2               virtualbox poweroff D:/misc/vagrant/multi               
# 6d1f3a2  ol75-controlhost    virtualbox poweroff D:/misc/vagrant/ol75-controlhost    
# 7c0b596  dbnode              virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 0bcbcea  fmwnode1            virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 7150346  fmwnode2            virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 40c91f2  dockerhostvm        virtualbox running  D:/misc/docker/vagrant              
# a36b91e  my-little-container docker     running  D:/misc/docker/vagrant              
#  
# The above shows information about all known Vagrant environments
# on this machine. This data is cached and may not be completely
# up-to-date (use "vagrant global-status --prune" to prune invalid
# entries). To interact with any of the machines, you can go to that
# directory and run Vagrant, or you can use the ID directly with
# Vagrant commands from any directory. For example:
# "vagrant destroy 1a2b3c4d"

6.6 Java 9 - running a Docker container in a VirtualBox VM on Windows 10 courtesy of Vagrant

mkdir -p /misc/docker/java9 && cd /misc/docker/java9
cat > Vagrantfile <<-"_EOF"
Vagrant.configure("2") do |config|

  config.vm.provision "docker" do |d|
    d.run "j9",
          image: "openjdk:9",
          cmd: "/bin/sh",
          args: "-v '/vagrant:/var/www'"
    d.remains_running = true 
  end

  # The following provisioner terminates all SSH connections, forcing Vagrant to reconnect.
  # This is a workaround so that the freshly installed docker command is found in the PATH
  # (Command: "docker" "ps" "-a" "-q" "--no-trunc").
  # Without it, the run fails with this error:
  # Stderr: Get http:///var/run/docker.sock/v1.19/containers/json?all=1: dial unix /var/run/docker.sock: permission denied.
  # Are you trying to connect to a TLS-enabled daemon without TLS?

  config.vm.provision "shell", inline:
                                 "ps aux | grep 'sshd:' | awk '{print $2}' | xargs kill"

  config.vm.define "dockerhostvm"
  config.vm.box = "ubuntu/trusty64"
  config.vm.network "private_network", ip: "192.168.56.13"

  config.vm.provider :virtualbox do |vb|
    vb.name = "dockerhostvm"
    vb.memory = 4096
    vb.cpus = 2
    vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
    vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
  end

end

# To get a shell in a container based on the same image:
# vagrant ssh
# docker run -it -v /vagrant:/var/www openjdk:9 /bin/sh
_EOF
vagrant up
# Bringing machine 'dockerhostvm' up with 'virtualbox' provider...
# ==> dockerhostvm: Importing base box 'ubuntu/trusty64'...
# ==> dockerhostvm: Matching MAC address for NAT networking...
# ==> dockerhostvm: Checking if box 'ubuntu/trusty64' is up to date...
# ==> dockerhostvm: Setting the name of the VM: dockerhostvm
# ==> dockerhostvm: Clearing any previously set forwarded ports...
# ==> dockerhostvm: Clearing any previously set network interfaces...
# ==> dockerhostvm: Preparing network interfaces based on configuration...
#     dockerhostvm: Adapter 1: nat
#     dockerhostvm: Adapter 2: hostonly
# ==> dockerhostvm: Forwarding ports...
#     dockerhostvm: 22 (guest) => 2222 (host) (adapter 1)
# ==> dockerhostvm: Running 'pre-boot' VM customizations...
# ==> dockerhostvm: Booting VM...
# ==> dockerhostvm: Waiting for machine to boot. This may take a few minutes...
#     dockerhostvm: SSH address: 127.0.0.1:2222
#     dockerhostvm: SSH username: vagrant
#     dockerhostvm: SSH auth method: private key
#     dockerhostvm: 
#     dockerhostvm: Vagrant insecure key detected. Vagrant will automatically replace
#     dockerhostvm: this with a newly generated keypair for better security.
#     dockerhostvm: 
#     dockerhostvm: Inserting generated public key within guest...
#     dockerhostvm: Removing insecure key from the guest if it's present...
#     dockerhostvm: Key inserted! Disconnecting and reconnecting using new SSH key...
# ==> dockerhostvm: Machine booted and ready!
# ==> dockerhostvm: Checking for guest additions in VM...
#     dockerhostvm: The guest additions on this VM do not match the installed version of
#     dockerhostvm: VirtualBox! In most cases this is fine, but in rare cases it can
#     dockerhostvm: prevent things such as shared folders from working properly. If you see
#     dockerhostvm: shared folder errors, please make sure the guest additions within the
#     dockerhostvm: virtual machine match the version of VirtualBox you have installed on
#     dockerhostvm: your host and reload your VM.
#     dockerhostvm: 
#     dockerhostvm: Guest Additions Version: 4.3.36
#     dockerhostvm: VirtualBox Version: 5.2
# ==> dockerhostvm: Configuring and enabling network interfaces...
# ==> dockerhostvm: Mounting shared folders...
#     dockerhostvm: /vagrant => D:/misc/docker/java9
# ==> dockerhostvm: Running provisioner: docker...
#     dockerhostvm: Installing Docker onto machine...
# ==> dockerhostvm: Starting Docker containers...
# ==> dockerhostvm: -- Container: j9
# ==> dockerhostvm: Running provisioner: shell...
#     dockerhostvm: Running: inline script
vagrant ssh
# Welcome to Ubuntu 14.04.5 LTS (GNU/Linux 3.13.0-160-generic x86_64)
# 
#  * Documentation:  https://help.ubuntu.com/
# 
#   System information as of Tue Oct 16 14:26:32 UTC 2018
# 
#   System load:  0.77              Processes:           92
#   Usage of /:   3.6% of 39.34GB   Users logged in:     0
#   Memory usage: 3%                IP address for eth0: 10.0.2.15
#   Swap usage:   0%
# 
#   Graph this data and manage this system at:
#     https://landscape.canonical.com/
# 
#   Get cloud support with Ubuntu Advantage Cloud Guest:
#     http://www.ubuntu.com/business/services/cloud
# 
# 0 packages can be updated.
# 0 updates are security updates.
# 
# New release '16.04.5 LTS' available.
# Run 'do-release-upgrade' to upgrade to it.
docker run -it -v /vagrant:/var/www openjdk:9 /bin/sh
type java
# java is /usr/bin/java
java -version
# openjdk version "9.0.4"
# OpenJDK Runtime Environment (build 9.0.4+12-Debian-4)
# OpenJDK 64-Bit Server VM (build 9.0.4+12-Debian-4, mixed mode)
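As a quick check that the shared-folder chain works end to end (D:/misc/docker/java9 on the host, /vagrant in the VM, /var/www in the container), a minimal sketch follows; the file name HelloJava9.java is only an illustration, and the version string in the sample output may differ. Create the file from a Cygwin shell on the host, then compile and run it inside the container started above:

# on the Cygwin host (the directory is synced into the VM as /vagrant):
cat > /misc/docker/java9/HelloJava9.java <<-"_EOF"
public class HelloJava9 {
    public static void main(String[] args) {
        // Runtime.version() was introduced with Java 9
        System.out.println("Hello from Java " + Runtime.version());
    }
}
_EOF
# inside the container (docker run -it -v /vagrant:/var/www openjdk:9 /bin/sh):
# cd /var/www && javac HelloJava9.java && java HelloJava9
# Hello from Java 9.0.4+12-Debian-4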
# exit the container and the VM shell, then back on the Cygwin host:
cd /misc/docker/java9
vagrant global-status --prune
# id       name             provider   state    directory                           
# ----------------------------------------------------------------------------------
# a56a065  default          virtualbox poweroff D:/misc/vagrant/ol75-generic        
# 3800a17  ol75             virtualbox aborted  D:/misc/vagrant/ol75                
# efb816a  ol610            virtualbox poweroff D:/misc/vagrant/packer-ol610        
# 2f41e27  machine1         virtualbox poweroff D:/misc/vagrant/machine1            
# 7c0743a  machine2         virtualbox poweroff D:/misc/vagrant/machine2            
# 66a94a8  server2          virtualbox poweroff D:/misc/vagrant/server2             
# 125d011  dbhost           virtualbox poweroff D:/misc/vagrant/dbhost              
# a60c29a  server1          virtualbox poweroff D:/misc/vagrant/ol75-server1        
# e278ea5  server2          virtualbox poweroff D:/misc/vagrant/ol75-server2        
# 63f7ed1  master           virtualbox poweroff D:/misc/vagrant/multi               
# 137537a  node1            virtualbox poweroff D:/misc/vagrant/multi               
# 99317bc  node2            virtualbox poweroff D:/misc/vagrant/multi               
# 6d1f3a2  ol75-controlhost virtualbox running  D:/misc/vagrant/ol75-controlhost    
# 7c0b596  dbnode           virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 0bcbcea  fmwnode1         virtualbox poweroff D:/misc/vagrant/fmw-cluster         
# 7150346  fmwnode2         virtualbox poweroff D:/misc/vagrant/fmw-cluster         
#  
# The above shows information about all known Vagrant environments
# on this machine. This data is cached and may not be completely
# up-to-date (use "vagrant global-status --prune" to prune invalid
# entries). To interact with any of the machines, you can go to that
# directory and run Vagrant, or you can use the ID directly with
# Vagrant commands from any directory. For example:
# "vagrant destroy 1a2b3c4d"
vagrant destroy -f

6.7 Vagrant Provisioning with Docker - Provisioning Virtual Systems

Building Docker Image

export WORKAREA=/misc/vagrant/vagrant-docker
WORKAREA=${WORKAREA:-"${HOME}/vagrant-docker"}
mkdir -p ${WORKAREA}/build/public-html
touch ${WORKAREA}/build/{Dockerfile,Vagrantfile}

cat <<-'HTML' > ${WORKAREA}/build/public-html/index.html
<html>
  <body>
    <h1>Hello World!</h1>
  </body>
</html>
HTML

cd ${WORKAREA}
tree
# .
# └── build
#     ├── Dockerfile
#     ├── Vagrantfile
#     └── public-html
#         └── index.html
cd ${WORKAREA}/build
cat > Vagrantfile <<-"_EOF"
Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/xenial64"
  config.vm.network "forwarded_port", guest: 80, host: 8081

  ####### Provision #######
  config.vm.provision "docker" do |docker|
    docker.build_image "/vagrant",
      args: "-t example/hello_web"
    docker.run "hello_web",
      image: "example/hello_web:latest",
      args: "-p 80:80"
  end
end
_EOF
cat > Dockerfile <<-"_EOF"
FROM ubuntu:16.04
RUN apt-get -qq update && \
    apt-get install -y apache2 && \
    apt-get clean
COPY public-html/index.html /var/www/html/
EXPOSE 80
CMD apachectl -D FOREGROUND
_EOF
time vagrant up
# Bringing machine 'default' up with 'virtualbox' provider...
# ==> default: Box 'ubuntu/xenial64' could not be found. Attempting to find and install...
#     default: Box Provider: virtualbox
#     default: Box Version: >= 0
# ==> default: Loading metadata for box 'ubuntu/xenial64'
#     default: URL: https://vagrantcloud.com/ubuntu/xenial64
# ==> default: Adding box 'ubuntu/xenial64' (v20181016.0.0) for provider: virtualbox
#     default: Downloading: https://vagrantcloud.com/ubuntu/boxes/xenial64/versions/20181016.0.0/providers/virtualbox.box
#     default: Download redirected to host: cloud-images.ubuntu.com
#     default: 
# ==> default: Successfully added box 'ubuntu/xenial64' (v20181016.0.0) for 'virtualbox'!
# ==> default: Importing base box 'ubuntu/xenial64'...
# ==> default: Matching MAC address for NAT networking...
# ==> default: Checking if box 'ubuntu/xenial64' is up to date...
# ==> default: Setting the name of the VM: build_default_1539785587755_62964
# ==> default: Clearing any previously set network interfaces...
# ==> default: Preparing network interfaces based on configuration...
#     default: Adapter 1: nat
# ==> default: Forwarding ports...
#     default: 80 (guest) => 8081 (host) (adapter 1)
#     default: 22 (guest) => 2222 (host) (adapter 1)
# ==> default: Running 'pre-boot' VM customizations...
# ==> default: Booting VM...
# ==> default: Waiting for machine to boot. This may take a few minutes...
#     default: SSH address: 127.0.0.1:2222
#     default: SSH username: vagrant
#     default: SSH auth method: private key
#     default: Warning: Connection reset. Retrying...
#     default: 
#     default: Vagrant insecure key detected. Vagrant will automatically replace
#     default: this with a newly generated keypair for better security.
#     default: 
#     default: Inserting generated public key within guest...
#     default: Removing insecure key from the guest if it's present...
#     default: Key inserted! Disconnecting and reconnecting using new SSH key...
# ==> default: Machine booted and ready!
# ==> default: Checking for guest additions in VM...
#     default: The guest additions on this VM do not match the installed version of
#     default: VirtualBox! In most cases this is fine, but in rare cases it can
#     default: prevent things such as shared folders from working properly. If you see
#     default: shared folder errors, please make sure the guest additions within the
#     default: virtual machine match the version of VirtualBox you have installed on
#     default: your host and reload your VM.
#     default: 
#     default: Guest Additions Version: 5.1.38
#     default: VirtualBox Version: 5.2
# ==> default: Mounting shared folders...
#     default: /vagrant => D:/misc/vagrant/vagrant-docker/build
# ==> default: Running provisioner: docker...
#     default: Installing Docker onto machine...
# ==> default: Building Docker images...
# ==> default: -- Path: /vagrant
# ==> default: Sending build context to Docker daemon     64kB
# ==> default: Step 1/5 : FROM ubuntu:16.04
# ==> default: 16.04: Pulling from library/ubuntu
# ==> default: 3b37166ec614: Pulling fs layer
# ==> default: 504facff238f: Pulling fs layer
# ==> default: ebbcacd28e10: Pulling fs layer
# ==> default: c7fb3351ecad: Pulling fs layer
# ==> default: 2e3debadcbf7: Pulling fs layer
# ==> default: c7fb3351ecad: Waiting
# ==> default: 2e3debadcbf7: Waiting
# ==> default: ebbcacd28e10: Verifying Checksum
# ==> default: ebbcacd28e10: Download complete
# ==> default: 504facff238f: Verifying Checksum
# ==> default: 504facff238f: Download complete
# ==> default: c7fb3351ecad: Verifying Checksum
# ==> default: c7fb3351ecad: Download complete
# ==> default: 2e3debadcbf7: Verifying Checksum
# ==> default: 2e3debadcbf7: Download complete
# ==> default: 3b37166ec614: 
# ==> default: Verifying Checksum
# ==> default: 3b37166ec614: 
# ==> default: Download complete
# ==> default: 3b37166ec614: 
# ==> default: Pull complete
# ==> default: 504facff238f: 
# ==> default: Pull complete
# ==> default: ebbcacd28e10: 
# ==> default: Pull complete
# ==> default: c7fb3351ecad: 
# ==> default: Pull complete
# ==> default: 2e3debadcbf7: 
# ==> default: Pull complete
# ==> default: Digest: sha256:45ddfa61744947b0b8f7f20b8de70cbcdd441a6a0532f791fd4c09f5e491a8eb
# ==> default: Status: Downloaded newer image for ubuntu:16.04
# ==> default:  ---> b9e15a5d1e1a
# ==> default: Step 2/5 : RUN apt-get -qq update &&     apt-get install -y apache2 &&     apt-get clean
# ==> default:  ---> Running in 70ec0b27b78c
# ==> default: Reading package lists...
# ==> default: Building dependency tree...
# ==> default: Reading state information...
# ==> default: The following additional packages will be installed:
# ==> default:   apache2-bin apache2-data apache2-utils file ifupdown iproute2
# ==> default:   isc-dhcp-client isc-dhcp-common libapr1 libaprutil1 libaprutil1-dbd-sqlite3
# ==> default:   libaprutil1-ldap libasn1-8-heimdal libatm1 libdns-export162 libexpat1
# ==> default:   libffi6 libgdbm3 libgmp10 libgnutls30 libgssapi3-heimdal libhcrypto4-heimdal
# ==> default:   libheimbase1-heimdal libheimntlm0-heimdal libhogweed4 libhx509-5-heimdal
# ==> default:   libicu55 libidn11 libisc-export160 libkrb5-26-heimdal libldap-2.4-2
# ==> default:   liblua5.1-0 libmagic1 libmnl0 libnettle6 libp11-kit0 libperl5.22
# ==> default:   libroken18-heimdal libsasl2-2 libsasl2-modules libsasl2-modules-db
# ==> default:   libsqlite3-0 libssl1.0.0 libtasn1-6 libwind0-heimdal libxml2 libxtables11
# ==> default:   mime-support netbase openssl perl perl-modules-5.22 rename sgml-base
# ==> default:   ssl-cert xml-core
# ==> default: Suggested packages:
# ==> default:   www-browser apache2-doc apache2-suexec-pristine | apache2-suexec-custom ufw
# ==> default:   ppp rdnssd iproute2-doc resolvconf avahi-autoipd isc-dhcp-client-ddns
# ==> default:   apparmor gnutls-bin libsasl2-modules-otp libsasl2-modules-ldap
# ==> default:   libsasl2-modules-sql libsasl2-modules-gssapi-mit
# ==> default:   | libsasl2-modules-gssapi-heimdal ca-certificates perl-doc
# ==> default:   libterm-readline-gnu-perl | libterm-readline-perl-perl make sgml-base-doc
# ==> default:   openssl-blacklist debhelper
# ==> default: The following NEW packages will be installed:
# ==> default:   apache2 apache2-bin apache2-data apache2-utils file ifupdown iproute2
# ==> default:   isc-dhcp-client isc-dhcp-common libapr1 libaprutil1 libaprutil1-dbd-sqlite3
# ==> default:   libaprutil1-ldap libasn1-8-heimdal libatm1 libdns-export162 libexpat1
# ==> default:   libffi6 libgdbm3 libgmp10 libgnutls30 libgssapi3-heimdal libhcrypto4-heimdal
# ==> default:   libheimbase1-heimdal libheimntlm0-heimdal libhogweed4 libhx509-5-heimdal
# ==> default:   libicu55 libidn11 libisc-export160 libkrb5-26-heimdal libldap-2.4-2
# ==> default:   liblua5.1-0 libmagic1 libmnl0 libnettle6 libp11-kit0 libperl5.22
# ==> default:   libroken18-heimdal libsasl2-2 libsasl2-modules libsasl2-modules-db
# ==> default:   libsqlite3-0 libssl1.0.0 libtasn1-6 libwind0-heimdal libxml2 libxtables11
# ==> default:   mime-support netbase openssl perl perl-modules-5.22 rename sgml-base
# ==> default:   ssl-cert xml-core
# ==> default: 0 upgraded, 57 newly installed, 0 to remove and 1 not upgraded.
# ==> default: Need to get 22.7 MB of archives.
# ==> default: After this operation, 102 MB of additional disk space will be used.
# ==> default: Get:1 http://archive.ubuntu.com/ubuntu xenial/main amd64 libatm1 amd64 1:2.5.1-1.5 [24.2 kB]
# ==> default: Get:2 http://archive.ubuntu.com/ubuntu xenial/main amd64 libmnl0 amd64 1.0.3-5 [12.0 kB]
# ==> default: Get:3 http://archive.ubuntu.com/ubuntu xenial/main amd64 libgdbm3 amd64 1.8.3-13.1 [16.9 kB]
# ==> default: Get:4 http://archive.ubuntu.com/ubuntu xenial/main amd64 sgml-base all 1.26+nmu4ubuntu1 [12.5 kB]
# ==> default: Get:5 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 perl-modules-5.22 all 5.22.1-9ubuntu0.5 [2645 kB]
# ==> default: Get:6 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libperl5.22 amd64 5.22.1-9ubuntu0.5 [3396 kB]
# ==> default: Get:7 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 perl amd64 5.22.1-9ubuntu0.5 [238 kB]
# ==> default: Get:8 http://archive.ubuntu.com/ubuntu xenial/main amd64 mime-support all 3.59ubuntu1 [31.0 kB]
# ==> default: Get:9 http://archive.ubuntu.com/ubuntu xenial/main amd64 libapr1 amd64 1.5.2-3 [86.0 kB]
# ==> default: Get:10 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libexpat1 amd64 2.1.0-7ubuntu0.16.04.3 [71.2 kB]
# ==> default: Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libssl1.0.0 amd64 1.0.2g-1ubuntu4.13 [1083 kB]
# ==> default: Get:12 http://archive.ubuntu.com/ubuntu xenial/main amd64 libaprutil1 amd64 1.5.4-1build1 [77.1 kB]
# ==> default: Get:13 http://archive.ubuntu.com/ubuntu xenial/main amd64 libsqlite3-0 amd64 3.11.0-1ubuntu1 [396 kB]
# ==> default: Get:14 http://archive.ubuntu.com/ubuntu xenial/main amd64 libaprutil1-dbd-sqlite3 amd64 1.5.4-1build1 [10.6 kB]
# ==> default: Get:15 http://archive.ubuntu.com/ubuntu xenial/main amd64 libgmp10 amd64 2:6.1.0+dfsg-2 [240 kB]
# ==> default: Get:16 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnettle6 amd64 3.2-1ubuntu0.16.04.1 [93.5 kB]
# ==> default: Get:17 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libhogweed4 amd64 3.2-1ubuntu0.16.04.1 [136 kB]
# ==> default: Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libidn11 amd64 1.32-3ubuntu1.2 [46.5 kB]
# ==> default: Get:19 http://archive.ubuntu.com/ubuntu xenial/main amd64 libffi6 amd64 3.2.1-4 [17.8 kB]
# ==> default: Get:20 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libp11-kit0 amd64 0.23.2-5~ubuntu16.04.1 [105 kB]
# ==> default: Get:21 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libtasn1-6 amd64 4.7-3ubuntu0.16.04.3 [43.5 kB]
# ==> default: Get:22 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libgnutls30 amd64 3.4.10-4ubuntu1.4 [548 kB]
# ==> default: Get:23 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libroken18-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [41.4 kB]
# ==> default: Get:24 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libasn1-8-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [174 kB]
# ==> default: Get:25 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libhcrypto4-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [85.0 kB]
# ==> default: Get:26 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libheimbase1-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [29.3 kB]
# ==> default: Get:27 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libwind0-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [47.8 kB]
# ==> default: Get:28 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libhx509-5-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [107 kB]
# ==> default: Get:29 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libkrb5-26-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [202 kB]
# ==> default: Get:30 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libheimntlm0-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [15.1 kB]
# ==> default: Get:31 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libgssapi3-heimdal amd64 1.7~git20150920+dfsg-4ubuntu1.16.04.1 [96.1 kB]
# ==> default: Get:32 http://archive.ubuntu.com/ubuntu xenial/main amd64 libsasl2-modules-db amd64 2.1.26.dfsg1-14build1 [14.5 kB]
# ==> default: Get:33 http://archive.ubuntu.com/ubuntu xenial/main amd64 libsasl2-2 amd64 2.1.26.dfsg1-14build1 [48.7 kB]
# ==> default: Get:34 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libldap-2.4-2 amd64 2.4.42+dfsg-2ubuntu3.3 [161 kB]
# ==> default: Get:35 http://archive.ubuntu.com/ubuntu xenial/main amd64 libaprutil1-ldap amd64 1.5.4-1build1 [8720 B]
# ==> default: Get:36 http://archive.ubuntu.com/ubuntu xenial/main amd64 liblua5.1-0 amd64 5.1.5-8ubuntu1 [102 kB]
# ==> default: Get:37 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libicu55 amd64 55.1-7ubuntu0.4 [7646 kB]
# ==> default: Get:38 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libxml2 amd64 2.9.3+dfsg1-1ubuntu0.6 [697 kB]
# ==> default: Get:39 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apache2-bin amd64 2.4.18-2ubuntu3.9 [925 kB]
# ==> default: Get:40 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apache2-utils amd64 2.4.18-2ubuntu3.9 [81.8 kB]
# ==> default: Get:41 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apache2-data all 2.4.18-2ubuntu3.9 [162 kB]
# ==> default: Get:42 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apache2 amd64 2.4.18-2ubuntu3.9 [86.6 kB]
# ==> default: Get:43 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libmagic1 amd64 1:5.25-2ubuntu1.1 [216 kB]
# ==> default: Get:44 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 file amd64 1:5.25-2ubuntu1.1 [21.2 kB]
# ==> default: Get:45 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 iproute2 amd64 4.3.0-1ubuntu3.16.04.3 [522 kB]
# ==> default: Get:46 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 ifupdown amd64 0.8.10ubuntu1.4 [54.9 kB]
# ==> default: Get:47 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisc-export160 amd64 1:9.10.3.dfsg.P4-8ubuntu1.11 [153 kB]
# ==> default: Get:48 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdns-export162 amd64 1:9.10.3.dfsg.P4-8ubuntu1.11 [667 kB]
# ==> default: Get:49 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 isc-dhcp-client amd64 4.3.3-5ubuntu12.10 [224 kB]
# ==> default: Get:50 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 isc-dhcp-common amd64 4.3.3-5ubuntu12.10 [105 kB]
# ==> default: Get:51 http://archive.ubuntu.com/ubuntu xenial/main amd64 libxtables11 amd64 1.6.0-2ubuntu3 [27.2 kB]
# ==> default: Get:52 http://archive.ubuntu.com/ubuntu xenial/main amd64 netbase all 5.3 [12.9 kB]
# ==> default: Get:53 http://archive.ubuntu.com/ubuntu xenial/main amd64 libsasl2-modules amd64 2.1.26.dfsg1-14build1 [47.5 kB]
# ==> default: Get:54 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssl amd64 1.0.2g-1ubuntu4.13 [492 kB]
# ==> default: Get:55 http://archive.ubuntu.com/ubuntu xenial/main amd64 xml-core all 0.13+nmu2 [23.3 kB]
# ==> default: Get:56 http://archive.ubuntu.com/ubuntu xenial/main amd64 rename all 0.20-4 [12.0 kB]
# ==> default: Get:57 http://archive.ubuntu.com/ubuntu xenial/main amd64 ssl-cert all 1.0.37 [16.9 kB]
# ==> default: debconf: delaying package configuration, since apt-utils is not installed
# ==> default: 
# ==> default: Fetched 22.7 MB in 11s (1970 kB/s)
# ==> default: Selecting previously unselected package libatm1:amd64.
# ==> default: (Reading database ... 
# ==> default: (Reading database ... 5%
# ==> default: (Reading database ... 10%
# ==> default: (Reading database ... 15%
# ==> default: (Reading database ... 20%
# ==> default: (Reading database ... 25%
# ==> default: (Reading database ... 30%
# ==> default: (Reading database ... 35%
# ==> default: (Reading database ... 40%
# ==> default: (Reading database ... 45%
# ==> default: (Reading database ... 50%
# ==> default: (Reading database ... 55%
# ==> default: (Reading database ... 60%
# ==> default: (Reading database ... 65%
# ==> default: (Reading database ... 70%
# ==> default: (Reading database ... 75%
# ==> default: (Reading database ... 80%
# ==> default: (Reading database ... 85%
# ==> default: (Reading database ... 90%
# ==> default: (Reading database ... 95%
# ==> default: (Reading database ... 100%
# ==> default: (Reading database ... 
# ==> default: 4768 files and directories currently installed.)
# ==> default: Preparing to unpack .../libatm1_1%3a2.5.1-1.5_amd64.deb ...
# ==> default: Unpacking libatm1:amd64 (1:2.5.1-1.5) ...
# ==> default: Selecting previously unselected package libmnl0:amd64.
# ==> default: Preparing to unpack .../libmnl0_1.0.3-5_amd64.deb ...
# ==> default: Unpacking libmnl0:amd64 (1.0.3-5) ...
# ==> default: Selecting previously unselected package libgdbm3:amd64.
# ==> default: Preparing to unpack .../libgdbm3_1.8.3-13.1_amd64.deb ...
# ==> default: Unpacking libgdbm3:amd64 (1.8.3-13.1) ...
# ==> default: Selecting previously unselected package sgml-base.
# ==> default: Preparing to unpack .../sgml-base_1.26+nmu4ubuntu1_all.deb ...
# ==> default: Unpacking sgml-base (1.26+nmu4ubuntu1) ...
# ==> default: Selecting previously unselected package perl-modules-5.22.
# ==> default: Preparing to unpack .../perl-modules-5.22_5.22.1-9ubuntu0.5_all.deb ...
# ==> default: Unpacking perl-modules-5.22 (5.22.1-9ubuntu0.5) ...
# ==> default: Selecting previously unselected package libperl5.22:amd64.
# ==> default: Preparing to unpack .../libperl5.22_5.22.1-9ubuntu0.5_amd64.deb ...
# ==> default: Unpacking libperl5.22:amd64 (5.22.1-9ubuntu0.5) ...
# ==> default: Selecting previously unselected package perl.
# ==> default: Preparing to unpack .../perl_5.22.1-9ubuntu0.5_amd64.deb ...
# ==> default: Unpacking perl (5.22.1-9ubuntu0.5) ...
# ==> default: Selecting previously unselected package mime-support.
# ==> default: Preparing to unpack .../mime-support_3.59ubuntu1_all.deb ...
# ==> default: Unpacking mime-support (3.59ubuntu1) ...
# ==> default: Selecting previously unselected package libapr1:amd64.
# ==> default: Preparing to unpack .../libapr1_1.5.2-3_amd64.deb ...
# ==> default: Unpacking libapr1:amd64 (1.5.2-3) ...
# ==> default: Selecting previously unselected package libexpat1:amd64.
# ==> default: Preparing to unpack .../libexpat1_2.1.0-7ubuntu0.16.04.3_amd64.deb ...
# ==> default: Unpacking libexpat1:amd64 (2.1.0-7ubuntu0.16.04.3) ...
# ==> default: Selecting previously unselected package libssl1.0.0:amd64.
# ==> default: Preparing to unpack .../libssl1.0.0_1.0.2g-1ubuntu4.13_amd64.deb ...
# ==> default: Unpacking libssl1.0.0:amd64 (1.0.2g-1ubuntu4.13) ...
# ==> default: Selecting previously unselected package libaprutil1:amd64.
# ==> default: Preparing to unpack .../libaprutil1_1.5.4-1build1_amd64.deb ...
# ==> default: Unpacking libaprutil1:amd64 (1.5.4-1build1) ...
# ==> default: Selecting previously unselected package libsqlite3-0:amd64.
# ==> default: Preparing to unpack .../libsqlite3-0_3.11.0-1ubuntu1_amd64.deb ...
# ==> default: Unpacking libsqlite3-0:amd64 (3.11.0-1ubuntu1) ...
# ==> default: Selecting previously unselected package libaprutil1-dbd-sqlite3:amd64.
# ==> default: Preparing to unpack .../libaprutil1-dbd-sqlite3_1.5.4-1build1_amd64.deb ...
# ==> default: Unpacking libaprutil1-dbd-sqlite3:amd64 (1.5.4-1build1) ...
# ==> default: Selecting previously unselected package libgmp10:amd64.
# ==> default: Preparing to unpack .../libgmp10_2%3a6.1.0+dfsg-2_amd64.deb ...
# ==> default: Unpacking libgmp10:amd64 (2:6.1.0+dfsg-2) ...
# ==> default: Selecting previously unselected package libnettle6:amd64.
# ==> default: Preparing to unpack .../libnettle6_3.2-1ubuntu0.16.04.1_amd64.deb ...
# ==> default: Unpacking libnettle6:amd64 (3.2-1ubuntu0.16.04.1) ...
# ==> default: Selecting previously unselected package libhogweed4:amd64.
# ==> default: Preparing to unpack .../libhogweed4_3.2-1ubuntu0.16.04.1_amd64.deb ...
# ==> default: Unpacking libhogweed4:amd64 (3.2-1ubuntu0.16.04.1) ...
# ==> default: Selecting previously unselected package libidn11:amd64.
# ==> default: Preparing to unpack .../libidn11_1.32-3ubuntu1.2_amd64.deb ...
# ==> default: Unpacking libidn11:amd64 (1.32-3ubuntu1.2) ...
# ==> default: Selecting previously unselected package libffi6:amd64.
# ==> default: Preparing to unpack .../libffi6_3.2.1-4_amd64.deb ...
# ==> default: Unpacking libffi6:amd64 (3.2.1-4) ...
# ==> default: Selecting previously unselected package libp11-kit0:amd64.
# ==> default: Preparing to unpack .../libp11-kit0_0.23.2-5~ubuntu16.04.1_amd64.deb ...
# ==> default: Unpacking libp11-kit0:amd64 (0.23.2-5~ubuntu16.04.1) ...
# ==> default: Selecting previously unselected package libtasn1-6:amd64.
# ==> default: Preparing to unpack .../libtasn1-6_4.7-3ubuntu0.16.04.3_amd64.deb ...
# ==> default: Unpacking libtasn1-6:amd64 (4.7-3ubuntu0.16.04.3) ...
# ==> default: Selecting previously unselected package libgnutls30:amd64.
# ==> default: Preparing to unpack .../libgnutls30_3.4.10-4ubuntu1.4_amd64.deb ...
# ==> default: Unpacking libgnutls30:amd64 (3.4.10-4ubuntu1.4) ...
# ==> default: Selecting previously unselected package libroken18-heimdal:amd64.
# ==> default: Preparing to unpack .../libroken18-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libroken18-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libasn1-8-heimdal:amd64.
# ==> default: Preparing to unpack .../libasn1-8-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libasn1-8-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libhcrypto4-heimdal:amd64.
# ==> default: Preparing to unpack .../libhcrypto4-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libhcrypto4-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libheimbase1-heimdal:amd64.
# ==> default: Preparing to unpack .../libheimbase1-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libheimbase1-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libwind0-heimdal:amd64.
# ==> default: Preparing to unpack .../libwind0-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libwind0-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libhx509-5-heimdal:amd64.
# ==> default: Preparing to unpack .../libhx509-5-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libhx509-5-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libkrb5-26-heimdal:amd64.
# ==> default: Preparing to unpack .../libkrb5-26-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libkrb5-26-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libheimntlm0-heimdal:amd64.
# ==> default: Preparing to unpack .../libheimntlm0-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libheimntlm0-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libgssapi3-heimdal:amd64.
# ==> default: Preparing to unpack .../libgssapi3-heimdal_1.7~git20150920+dfsg-4ubuntu1.16.04.1_amd64.deb ...
# ==> default: Unpacking libgssapi3-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Selecting previously unselected package libsasl2-modules-db:amd64.
# ==> default: Preparing to unpack .../libsasl2-modules-db_2.1.26.dfsg1-14build1_amd64.deb ...
# ==> default: Unpacking libsasl2-modules-db:amd64 (2.1.26.dfsg1-14build1) ...
# ==> default: Selecting previously unselected package libsasl2-2:amd64.
# ==> default: Preparing to unpack .../libsasl2-2_2.1.26.dfsg1-14build1_amd64.deb ...
# ==> default: Unpacking libsasl2-2:amd64 (2.1.26.dfsg1-14build1) ...
# ==> default: Selecting previously unselected package libldap-2.4-2:amd64.
# ==> default: Preparing to unpack .../libldap-2.4-2_2.4.42+dfsg-2ubuntu3.3_amd64.deb ...
# ==> default: Unpacking libldap-2.4-2:amd64 (2.4.42+dfsg-2ubuntu3.3) ...
# ==> default: Selecting previously unselected package libaprutil1-ldap:amd64.
# ==> default: Preparing to unpack .../libaprutil1-ldap_1.5.4-1build1_amd64.deb ...
# ==> default: Unpacking libaprutil1-ldap:amd64 (1.5.4-1build1) ...
# ==> default: Selecting previously unselected package liblua5.1-0:amd64.
# ==> default: Preparing to unpack .../liblua5.1-0_5.1.5-8ubuntu1_amd64.deb ...
# ==> default: Unpacking liblua5.1-0:amd64 (5.1.5-8ubuntu1) ...
# ==> default: Selecting previously unselected package libicu55:amd64.
# ==> default: Preparing to unpack .../libicu55_55.1-7ubuntu0.4_amd64.deb ...
# ==> default: Unpacking libicu55:amd64 (55.1-7ubuntu0.4) ...
# ==> default: Selecting previously unselected package libxml2:amd64.
# ==> default: Preparing to unpack .../libxml2_2.9.3+dfsg1-1ubuntu0.6_amd64.deb ...
# ==> default: Unpacking libxml2:amd64 (2.9.3+dfsg1-1ubuntu0.6) ...
# ==> default: Selecting previously unselected package apache2-bin.
# ==> default: Preparing to unpack .../apache2-bin_2.4.18-2ubuntu3.9_amd64.deb ...
# ==> default: Unpacking apache2-bin (2.4.18-2ubuntu3.9) ...
# ==> default: Selecting previously unselected package apache2-utils.
# ==> default: Preparing to unpack .../apache2-utils_2.4.18-2ubuntu3.9_amd64.deb ...
# ==> default: Unpacking apache2-utils (2.4.18-2ubuntu3.9) ...
# ==> default: Selecting previously unselected package apache2-data.
# ==> default: Preparing to unpack .../apache2-data_2.4.18-2ubuntu3.9_all.deb ...
# ==> default: Unpacking apache2-data (2.4.18-2ubuntu3.9) ...
# ==> default: Selecting previously unselected package apache2.
# ==> default: Preparing to unpack .../apache2_2.4.18-2ubuntu3.9_amd64.deb ...
# ==> default: Unpacking apache2 (2.4.18-2ubuntu3.9) ...
# ==> default: Selecting previously unselected package libmagic1:amd64.
# ==> default: Preparing to unpack .../libmagic1_1%3a5.25-2ubuntu1.1_amd64.deb ...
# ==> default: Unpacking libmagic1:amd64 (1:5.25-2ubuntu1.1) ...
# ==> default: Selecting previously unselected package file.
# ==> default: Preparing to unpack .../file_1%3a5.25-2ubuntu1.1_amd64.deb ...
# ==> default: Unpacking file (1:5.25-2ubuntu1.1) ...
# ==> default: Selecting previously unselected package iproute2.
# ==> default: Preparing to unpack .../iproute2_4.3.0-1ubuntu3.16.04.3_amd64.deb ...
# ==> default: Unpacking iproute2 (4.3.0-1ubuntu3.16.04.3) ...
# ==> default: Selecting previously unselected package ifupdown.
# ==> default: Preparing to unpack .../ifupdown_0.8.10ubuntu1.4_amd64.deb ...
# ==> default: Unpacking ifupdown (0.8.10ubuntu1.4) ...
# ==> default: Selecting previously unselected package libisc-export160.
# ==> default: Preparing to unpack .../libisc-export160_1%3a9.10.3.dfsg.P4-8ubuntu1.11_amd64.deb ...
# ==> default: Unpacking libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.11) ...
# ==> default: Selecting previously unselected package libdns-export162.
# ==> default: Preparing to unpack .../libdns-export162_1%3a9.10.3.dfsg.P4-8ubuntu1.11_amd64.deb ...
# ==> default: Unpacking libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.11) ...
# ==> default: Selecting previously unselected package isc-dhcp-client.
# ==> default: Preparing to unpack .../isc-dhcp-client_4.3.3-5ubuntu12.10_amd64.deb ...
# ==> default: Unpacking isc-dhcp-client (4.3.3-5ubuntu12.10) ...
# ==> default: Selecting previously unselected package isc-dhcp-common.
# ==> default: Preparing to unpack .../isc-dhcp-common_4.3.3-5ubuntu12.10_amd64.deb ...
# ==> default: Unpacking isc-dhcp-common (4.3.3-5ubuntu12.10) ...
# ==> default: Selecting previously unselected package libxtables11:amd64.
# ==> default: Preparing to unpack .../libxtables11_1.6.0-2ubuntu3_amd64.deb ...
# ==> default: Unpacking libxtables11:amd64 (1.6.0-2ubuntu3) ...
# ==> default: Selecting previously unselected package netbase.
# ==> default: Preparing to unpack .../archives/netbase_5.3_all.deb ...
# ==> default: Unpacking netbase (5.3) ...
# ==> default: Selecting previously unselected package libsasl2-modules:amd64.
# ==> default: Preparing to unpack .../libsasl2-modules_2.1.26.dfsg1-14build1_amd64.deb ...
# ==> default: Unpacking libsasl2-modules:amd64 (2.1.26.dfsg1-14build1) ...
# ==> default: Selecting previously unselected package openssl.
# ==> default: Preparing to unpack .../openssl_1.0.2g-1ubuntu4.13_amd64.deb ...
# ==> default: Unpacking openssl (1.0.2g-1ubuntu4.13) ...
# ==> default: Selecting previously unselected package xml-core.
# ==> default: Preparing to unpack .../xml-core_0.13+nmu2_all.deb ...
# ==> default: Unpacking xml-core (0.13+nmu2) ...
# ==> default: Selecting previously unselected package rename.
# ==> default: Preparing to unpack .../archives/rename_0.20-4_all.deb ...
# ==> default: Unpacking rename (0.20-4) ...
# ==> default: Selecting previously unselected package ssl-cert.
# ==> default: Preparing to unpack .../ssl-cert_1.0.37_all.deb ...
# ==> default: Unpacking ssl-cert (1.0.37) ...
# ==> default: Processing triggers for libc-bin (2.23-0ubuntu10) ...
# ==> default: Processing triggers for systemd (229-4ubuntu21.4) ...
# ==> default: Setting up libatm1:amd64 (1:2.5.1-1.5) ...
# ==> default: Setting up libmnl0:amd64 (1.0.3-5) ...
# ==> default: Setting up libgdbm3:amd64 (1.8.3-13.1) ...
# ==> default: Setting up sgml-base (1.26+nmu4ubuntu1) ...
# ==> default: Setting up perl-modules-5.22 (5.22.1-9ubuntu0.5) ...
# ==> default: Setting up libperl5.22:amd64 (5.22.1-9ubuntu0.5) ...
# ==> default: Setting up perl (5.22.1-9ubuntu0.5) ...
# ==> default: update-alternatives: 
# ==> default: using /usr/bin/prename to provide /usr/bin/rename (rename) in auto mode
# ==> default: Setting up mime-support (3.59ubuntu1) ...
# ==> default: Setting up libapr1:amd64 (1.5.2-3) ...
# ==> default: Setting up libexpat1:amd64 (2.1.0-7ubuntu0.16.04.3) ...
# ==> default: Setting up libssl1.0.0:amd64 (1.0.2g-1ubuntu4.13) ...
# ==> default: debconf: unable to initialize frontend: Dialog
# ==> default: debconf: (TERM is not set, so the dialog frontend is not usable.)
# ==> default: debconf: falling back to frontend: Readline
# ==> default: Setting up libaprutil1:amd64 (1.5.4-1build1) ...
# ==> default: Setting up libsqlite3-0:amd64 (3.11.0-1ubuntu1) ...
# ==> default: Setting up libaprutil1-dbd-sqlite3:amd64 (1.5.4-1build1) ...
# ==> default: Setting up libgmp10:amd64 (2:6.1.0+dfsg-2) ...
# ==> default: Setting up libnettle6:amd64 (3.2-1ubuntu0.16.04.1) ...
# ==> default: Setting up libhogweed4:amd64 (3.2-1ubuntu0.16.04.1) ...
# ==> default: Setting up libidn11:amd64 (1.32-3ubuntu1.2) ...
# ==> default: Setting up libffi6:amd64 (3.2.1-4) ...
# ==> default: Setting up libp11-kit0:amd64 (0.23.2-5~ubuntu16.04.1) ...
# ==> default: Setting up libtasn1-6:amd64 (4.7-3ubuntu0.16.04.3) ...
# ==> default: Setting up libgnutls30:amd64 (3.4.10-4ubuntu1.4) ...
# ==> default: Setting up libroken18-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libasn1-8-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libhcrypto4-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libheimbase1-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libwind0-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libhx509-5-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libkrb5-26-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libheimntlm0-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libgssapi3-heimdal:amd64 (1.7~git20150920+dfsg-4ubuntu1.16.04.1) ...
# ==> default: Setting up libsasl2-modules-db:amd64 (2.1.26.dfsg1-14build1) ...
# ==> default: Setting up libsasl2-2:amd64 (2.1.26.dfsg1-14build1) ...
# ==> default: Setting up libldap-2.4-2:amd64 (2.4.42+dfsg-2ubuntu3.3) ...
# ==> default: Setting up libaprutil1-ldap:amd64 (1.5.4-1build1) ...
# ==> default: Setting up liblua5.1-0:amd64 (5.1.5-8ubuntu1) ...
# ==> default: Setting up libicu55:amd64 (55.1-7ubuntu0.4) ...
# ==> default: Setting up libxml2:amd64 (2.9.3+dfsg1-1ubuntu0.6) ...
# ==> default: Setting up apache2-bin (2.4.18-2ubuntu3.9) ...
# ==> default: Setting up apache2-utils (2.4.18-2ubuntu3.9) ...
# ==> default: Setting up apache2-data (2.4.18-2ubuntu3.9) ...
# ==> default: Setting up apache2 (2.4.18-2ubuntu3.9) ...
# ==> default: Enabling module mpm_event.
# ==> default: Enabling module authz_core.
# ==> default: Enabling module authz_host.
# ==> default: Enabling module authn_core.
# ==> default: Enabling module auth_basic.
# ==> default: Enabling module access_compat.
# ==> default: Enabling module authn_file.
# ==> default: Enabling module authz_user.
# ==> default: Enabling module alias.
# ==> default: Enabling module dir.
# ==> default: Enabling module autoindex.
# ==> default: Enabling module env.
# ==> default: Enabling module mime.
# ==> default: Enabling module negotiation.
# ==> default: Enabling module setenvif.
# ==> default: Enabling module filter.
# ==> default: Enabling module deflate.
# ==> default: Enabling module status.
# ==> default: Enabling conf charset.
# ==> default: Enabling conf localized-error-pages.
# ==> default: Enabling conf other-vhosts-access-log.
# ==> default: Enabling conf security.
# ==> default: Enabling conf serve-cgi-bin.
# ==> default: Enabling site 000-default.
# ==> default: invoke-rc.d: could not determine current runlevel
# ==> default: invoke-rc.d: policy-rc.d denied execution of start.
# ==> default: Setting up libmagic1:amd64 (1:5.25-2ubuntu1.1) ...
# ==> default: Setting up file (1:5.25-2ubuntu1.1) ...
# ==> default: Setting up iproute2 (4.3.0-1ubuntu3.16.04.3) ...
# ==> default: Setting up ifupdown (0.8.10ubuntu1.4) ...
# ==> default: Creating /etc/network/interfaces.
# ==> default: Setting up libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.11) ...
# ==> default: Setting up libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.11) ...
# ==> default: Setting up isc-dhcp-client (4.3.3-5ubuntu12.10) ...
# ==> default: Setting up isc-dhcp-common (4.3.3-5ubuntu12.10) ...
# ==> default: Setting up libxtables11:amd64 (1.6.0-2ubuntu3) ...
# ==> default: Setting up netbase (5.3) ...
# ==> default: Setting up libsasl2-modules:amd64 (2.1.26.dfsg1-14build1) ...
# ==> default: Setting up openssl (1.0.2g-1ubuntu4.13) ...
# ==> default: Setting up xml-core (0.13+nmu2) ...
# ==> default: Setting up rename (0.20-4) ...
# ==> default: update-alternatives: 
# ==> default: using /usr/bin/file-rename to provide /usr/bin/rename (rename) in auto mode
# ==> default: Setting up ssl-cert (1.0.37) ...
# ==> default: debconf: unable to initialize frontend: Dialog
# ==> default: debconf: (TERM is not set, so the dialog frontend is not usable.)
# ==> default: debconf: falling back to frontend: Readline
# ==> default: Processing triggers for libc-bin (2.23-0ubuntu10) ...
# ==> default: Processing triggers for systemd (229-4ubuntu21.4) ...
# ==> default: Processing triggers for sgml-base (1.26+nmu4ubuntu1) ...
# ==> default: Removing intermediate container 70ec0b27b78c
# ==> default:  ---> ce8d4232671b
# ==> default: Step 3/5 : COPY public-html/index.html /var/www/html/
# ==> default:  ---> 15e5cda3df5a
# ==> default: Step 4/5 : EXPOSE 80
# ==> default:  ---> Running in 6e043fef64c1
# ==> default: Removing intermediate container 6e043fef64c1
# ==> default:  ---> 9ffe5bcde639
# ==> default: Step 5/5 : CMD apachectl -D FOREGROUND
# ==> default:  ---> Running in eafb51a67dcd
# ==> default: Removing intermediate container eafb51a67dcd
# ==> default:  ---> 6d575db6a25b
# ==> default: Successfully built 6d575db6a25b
# ==> default: Successfully tagged example/hello_web:latest
# ==> default: Starting Docker containers...
# ==> default: -- Container: hello_web
# 
# real	7m15.886s
# user	0m0.000s
# sys	0m0.046s
curl -i http://127.0.0.1:8081
# HTTP/1.1 200 OK
# Date: Wed, 17 Oct 2018 14:23:33 GMT
# Server: Apache/2.4.18 (Ubuntu)
# Last-Modified: Wed, 17 Oct 2018 14:04:29 GMT
# ETag: "3c-5786d26117540"
# Accept-Ranges: bytes
# Content-Length: 60
# Content-Type: text/html
# 
# <html>
#   <body>
#     <h1>Hello World!</h1>
#   </body>
# </html>
vagrant halt
# ==> default: Attempting graceful shutdown of VM...
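When only the Dockerfile or the page content changes, the VM does not have to be rebuilt; the docker provisioner can be re-run on its own. A minimal sketch using standard Vagrant options:

vagrant up                                    # boot the existing VM again (provisioners are skipped)
vagrant provision --provision-with docker     # re-run just the docker provisioner, which rebuilds the image
# or rebuild by hand inside the guest:
# vagrant ssh -c "docker build -t example/hello_web /vagrant"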

Using Existing Image

export WORKAREA=/misc/vagrant/vagrant-docker
WORKAREA=${WORKAREA:-"${HOME}/vagrant-docker"}
mkdir -p ${WORKAREA}/image/public-html
touch ${WORKAREA}/image/Vagrantfile

cat <<-'HTML' > ${WORKAREA}/image/public-html/index.html
<html>
  <body>
    <h1>Hello World!</h1>
  </body>
</html>
HTML

cd ${WORKAREA}
tree
# .
# ├── build
# │   ├── Dockerfile
# │   ├── Vagrantfile
# │   └── public-html
# │       └── index.html
# └── image
#     ├── Vagrantfile
#     └── public-html
#         └── index.html
cd ${WORKAREA}/image
cat > Vagrantfile <<-"_EOF"
Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/xenial64"
  config.vm.network "forwarded_port", guest: 80, host: 8081

  ####### Provision #######
  config.vm.provision "docker", images: %w(httpd:2.4) do |docker|
    docker.run "hello_web",
      image: "httpd:2.4",
      args: "-p 80:80 " +
        "-v /vagrant/public-html:/usr/local/apache2/htdocs/"
  end
end
_EOF
time vagrant up
# Bringing machine 'default' up with 'virtualbox' provider...
# ==> default: Importing base box 'ubuntu/xenial64'...
# ==> default: Matching MAC address for NAT networking...
# ==> default: Checking if box 'ubuntu/xenial64' is up to date...
# ==> default: Setting the name of the VM: image_default_1539787124449_92617
# ==> default: Clearing any previously set network interfaces...
# ==> default: Preparing network interfaces based on configuration...
#     default: Adapter 1: nat
# ==> default: Forwarding ports...
#     default: 80 (guest) => 8081 (host) (adapter 1)
#     default: 22 (guest) => 2222 (host) (adapter 1)
# ==> default: Running 'pre-boot' VM customizations...
# ==> default: Booting VM...
# ==> default: Waiting for machine to boot. This may take a few minutes...
#     default: SSH address: 127.0.0.1:2222
#     default: SSH username: vagrant
#     default: SSH auth method: private key
#     default: Warning: Connection reset. Retrying...
#     default: 
#     default: Vagrant insecure key detected. Vagrant will automatically replace
#     default: this with a newly generated keypair for better security.
#     default: 
#     default: Inserting generated public key within guest...
#     default: Removing insecure key from the guest if it's present...
#     default: Key inserted! Disconnecting and reconnecting using new SSH key...
# ==> default: Machine booted and ready!
# ==> default: Checking for guest additions in VM...
#     default: The guest additions on this VM do not match the installed version of
#     default: VirtualBox! In most cases this is fine, but in rare cases it can
#     default: prevent things such as shared folders from working properly. If you see
#     default: shared folder errors, please make sure the guest additions within the
#     default: virtual machine match the version of VirtualBox you have installed on
#     default: your host and reload your VM.
#     default: 
#     default: Guest Additions Version: 5.1.38
#     default: VirtualBox Version: 5.2
# ==> default: Mounting shared folders...
#     default: /vagrant => D:/misc/vagrant/vagrant-docker/image
# ==> default: Running provisioner: docker...
#     default: Installing Docker onto machine...
# ==> default: Pulling Docker images...
# ==> default: -- Image: httpd:2.4
# ==> default: 2.4: Pulling from library/httpd
# ==> default: 61be48634cb9: Pulling fs layer
# ==> default: 8bc097f195a9: Pulling fs layer
# ==> default: ea3629bcb425: Pulling fs layer
# ==> default: 07159bbfdb88: Pulling fs layer
# ==> default: 538471922e0d: Pulling fs layer
# ==> default: 34b734c5da13: Pulling fs layer
# ==> default: 9845bf1bc468: Pulling fs layer
# ==> default: 07159bbfdb88: Waiting
# ==> default: 538471922e0d: Waiting
# ==> default: 9845bf1bc468: Waiting
# ==> default: 34b734c5da13: Waiting
# ==> default: 8bc097f195a9: Verifying Checksum
# ==> default: 8bc097f195a9: Download complete
# ==> default: ea3629bcb425: Download complete
# ==> default: 07159bbfdb88: Verifying Checksum
# ==> default: 07159bbfdb88: Download complete
# ==> default: 34b734c5da13: 
# ==> default: Verifying Checksum
# ==> default: 34b734c5da13: 
# ==> default: Download complete
# ==> default: 9845bf1bc468: 
# ==> default: Verifying Checksum
# ==> default: 9845bf1bc468: 
# ==> default: Download complete
# ==> default: 538471922e0d: 
# ==> default: Verifying Checksum
# ==> default: 538471922e0d: 
# ==> default: Download complete
# ==> default: 61be48634cb9: Verifying Checksum
# ==> default: 61be48634cb9: 
# ==> default: Download complete
# ==> default: 61be48634cb9: Pull complete
# ==> default: 8bc097f195a9: Pull complete
# ==> default: ea3629bcb425: Pull complete
# ==> default: 07159bbfdb88: Pull complete
# ==> default: 538471922e0d: Pull complete
# ==> default: 34b734c5da13: Pull complete
# ==> default: 9845bf1bc468: Pull complete
# ==> default: Digest: sha256:51a0ffaec934e8495ebac738db13a5c906069c8920ca0b460264ce99d15a3688
# ==> default: Status: Downloaded newer image for httpd:2.4
# ==> default: Starting Docker containers...
# ==> default: -- Container: hello_web
# 
# real	3m18.822s
# user	0m0.000s
# sys	0m0.030s
curl -i http://127.0.0.1:8081
# HTTP/1.1 200 OK
# Date: Wed, 17 Oct 2018 14:42:00 GMT
# Server: Apache/2.4.35 (Unix)
# Last-Modified: Wed, 17 Oct 2018 14:26:12 GMT
# ETag: "3c-5786d73c4e6e0"
# Accept-Ranges: bytes
# Content-Length: 60
# Content-Type: text/html
# 
# <html>
#   <body>
#     <h1>Hello World!</h1>
#   </body>
# </html>
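Because the page is bind-mounted from /vagrant/public-html rather than copied into an image, edits made on the Windows host are visible to the running container without a rebuild. A minimal sketch (the "Hello Docker!" text is only an illustration; propagation relies on the VirtualBox shared folder):

sed -i 's/Hello World!/Hello Docker!/' ${WORKAREA}/image/public-html/index.html
curl -s http://127.0.0.1:8081 | grep h1
#     <h1>Hello Docker!</h1>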
vagrant halt
vagrant global-status

The notable difference here is that the Docker image httpd:2.4 gives us Apache 2.4.35, whereas the package installed from the Ubuntu repositories is at 2.4.18.

The advantage of using the image, where Apache is built from upstream source, is that we get more up-to-date versions; the Linux distribution package repositories, at least with Ubuntu and even more so with RHEL, tend to lag behind the current releases.
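To compare the two Apache builds directly rather than via the Server response header, a quick check from the Cygwin host, assuming the image-based VM from above has been brought up again with vagrant up:

vagrant ssh -c "docker exec hello_web httpd -v"
# Server version: Apache/2.4.35 (Unix)
# Server built:   ...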

7 Using Kubernetes

Kubernetes (commonly stylized as K8s) is an open-source container-orchestration system for automating deployment, scaling, and management of containerized applications. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation. It aims to provide a "platform for automating deployment, scaling, and operations of application containers across clusters of hosts". It works with a range of container tools, including Docker.

7.1 Configuring Kubectl

https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/

mkdir -p /misc/kubernetes/.kube && touch /misc/kubernetes/.kube/config
export KUBECONFIG="D:\misc\kubernetes\.kube\config"
kubectl config view
# apiVersion: v1
# clusters: []
# contexts: []
# current-context: ""
# kind: Config
# preferences: {}
# users: []
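minikube start will populate this kubeconfig automatically, but entries can also be created by hand with the kubectl config subcommands. A minimal sketch with placeholder names (devcluster, devuser, devcontext and the server URL are illustrations only, not values from this setup):

kubectl config set-cluster devcluster --server=https://192.168.99.100:8443
kubectl config set-credentials devuser --client-certificate=client.crt --client-key=client.key
kubectl config set-context devcontext --cluster=devcluster --user=devuser
kubectl config use-context devcontext
kubectl config view    # clusters, contexts and users are no longer empty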

7.2 Configuring Minikube

export MINIKUBE_HOME="D:\misc\kubernetes\.minikube"
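Like KUBECONFIG above, this variable only lives in the current shell; to make both stick across new Cygwin sessions, they can be appended to ~/.bashrc (same values as above):

cat >> ~/.bashrc <<-"_EOF"
export KUBECONFIG="D:\misc\kubernetes\.kube\config"
export MINIKUBE_HOME="D:\misc\kubernetes\.minikube"
_EOF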

7.3 Tutorial

Help

minikube --help
# Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.
# 
# Usage:
#   minikube [command]
# 
# Available Commands:
#   addons         Modify minikube's kubernetes addons
#   cache          Add or delete an image from the local cache.
#   completion     Outputs minikube shell completion for the given shell (bash or zsh)
#   config         Modify minikube config
#   dashboard      Access the kubernetes dashboard running within the minikube cluster
#   delete         Deletes a local kubernetes cluster
#   docker-env     Sets up docker env variables; similar to '$(docker-machine env)'
#   help           Help about any command
#   ip             Retrieves the IP address of the running cluster
#   logs           Gets the logs of the running instance, used for debugging minikube, not user code
#   mount          Mounts the specified directory into minikube
#   profile        Profile sets the current minikube profile
#   service        Gets the kubernetes URL(s) for the specified service in your local cluster
#   ssh            Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'
#   ssh-key        Retrieve the ssh identity key path of the specified cluster
#   start          Starts a local kubernetes cluster
#   status         Gets the status of a local kubernetes cluster
#   stop           Stops a running local kubernetes cluster
#   update-check   Print current and latest version number
#   update-context Verify the IP address of the running cluster in kubeconfig.
#   version        Print the version of minikube
# 
# Flags:
#       --alsologtostderr                  log to standard error as well as files
#   -b, --bootstrapper string              The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm")
#   -h, --help                             help for minikube
#       --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
#       --log_dir string                   If non-empty, write log files in this directory
#       --logtostderr                      log to standard error instead of files
#   -p, --profile string                   The name of the minikube VM being used.  
#                                          	This can be modified to allow for multiple minikube instances to be run independently (default "minikube")
#       --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
#   -v, --v Level                          log level for V logs
#       --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
# 
# Use "minikube [command] --help" for more information about a command.

Starting the cluster

minikube start --help
# Starts a local kubernetes cluster using VM. This command
# assumes you have already installed one of the VM drivers: virtualbox/vmwarefusion/kvm/xhyve/hyperv.
# 
# Usage:
#   minikube start [flags]
# 
# Flags:
#       --apiserver-ips ipSlice          A set of apiserver IP Addresses which are used in the generated certificate for kubernetes.  This can be used if you want to make the apiserver available from outside the machine (default [])
#       --apiserver-name string          The apiserver name which is used in the generated certificate for kubernetes.  This can be used if you want to make the apiserver available from outside the machine (default "minikubeCA")
#       --apiserver-names stringArray    A set of apiserver names which are used in the generated certificate for kubernetes.  This can be used if you want to make the apiserver available from outside the machine
#       --cache-images                   If true, cache docker images for the current bootstrapper and load them into the machine.
#       --container-runtime string       The container runtime to be used
#       --cpus int                       Number of CPUs allocated to the minikube VM (default 2)
#       --disable-driver-mounts          Disables the filesystem mounts provided by the hypervisors (vboxfs, xhyve-9p)
#       --disk-size string               Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g) (default "20g")
#       --dns-domain string              The cluster dns domain name used in the kubernetes cluster (default "cluster.local")
#       --docker-env stringArray         Environment variables to pass to the Docker daemon. (format: key=value)
#       --docker-opt stringArray         Specify arbitrary flags to pass to the Docker daemon. (format: key=value)
#       --extra-config ExtraOption       A set of key=value pairs that describe configuration that may be passed to different components.
#                                        		The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
#                                        		Valid components are: kubelet, apiserver, controller-manager, etcd, proxy, scheduler.
#       --feature-gates string           A set of key=value pairs that describe feature gates for alpha/experimental features.
#       --gpu                            Enable experimental NVIDIA GPU support in minikube (works only with kvm2 driver on Linux)
#   -h, --help                           help for start
#       --host-only-cidr string          The CIDR to be used for the minikube VM (only supported with Virtualbox driver) (default "192.168.99.1/24")
#       --hyperkit-vpnkit-sock string    Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock.
#       --hyperkit-vsock-ports strings   List of guest VSock ports that should be exposed as sockets on the host (Only supported on with hyperkit now).
#       --hyperv-virtual-switch string   The hyperv virtual switch name. Defaults to first found. (only supported with HyperV driver)
#       --insecure-registry strings      Insecure Docker registries to pass to the Docker daemon.  The default service CIDR range will automatically be added.
#       --iso-url string                 Location of the minikube iso (default "https://storage.googleapis.com/minikube/iso/minikube-v0.30.0.iso")
#       --keep-context                   This will keep the existing kubectl context and will create a minikube context.
#       --kubernetes-version string      The kubernetes version that the minikube VM will use (ex: v1.2.3) (default "v1.10.0")
#       --kvm-network string             The KVM network name. (only supported with KVM driver) (default "default")
#       --memory int                     Amount of RAM allocated to the minikube VM in MB (default 2048)
#       --mount                          This will start the mount daemon and automatically mount files into minikube
#       --mount-string string            The argument to pass the minikube mount command on start (default "E:\\home\\vzell:/minikube-host")
#       --network-plugin string          The name of the network plugin
#       --nfs-share strings              Local folders to share with Guest via NFS mounts (Only supported on with hyperkit now)
#       --nfs-shares-root string         Where to root the NFS Shares (defaults to /nfsshares, only supported with hyperkit now) (default "/nfsshares")
#       --registry-mirror strings        Registry mirrors to pass to the Docker daemon
#       --uuid string                    Provide VM UUID to restore MAC address (only supported with Hyperkit driver).
#       --vm-driver string               VM driver is one of: [virtualbox vmwarefusion kvm xhyve hyperv hyperkit kvm2 none] (default "virtualbox")
#       --xhyve-disk-driver string       The disk driver to use [ahci-hd|virtio-blk] (only supported with xhyve driver) (default "ahci-hd")
# 
# Global Flags:
#       --alsologtostderr                  log to standard error as well as files
#   -b, --bootstrapper string              The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm")
#       --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
#       --log_dir string                   If non-empty, write log files in this directory
#       --logtostderr                      log to standard error instead of files
#   -p, --profile string                   The name of the minikube VM being used.  
#                                          	This can be modified to allow for multiple minikube instances to be run independently (default "minikube")
#       --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
#   -v, --v Level                          log level for V logs
#       --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
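
The VM is sized when it is first created, so the CPU, memory and disk flags above only take effect on the very first start. A sketch with purely illustrative values (the flags are the ones from the help output above; adjust them to your hardware):

minikube start --vm-driver virtualbox --cpus 4 --memory 4096 --disk-size 40g

Here the defaults (VirtualBox driver, 2 CPUs, 2048 MB, 20g disk) are used:
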
minikube start
# Starting local Kubernetes v1.10.0 cluster...
# Starting VM...
# Downloading Minikube ISO
#  170.78 MB / 170.78 MB  100.00% 0s
# Getting VM IP address...
# Moving files into cluster...
# Downloading kubelet v1.10.0
# Downloading kubeadm v1.10.0
# Finished Downloading kubeadm v1.10.0
# Finished Downloading kubelet v1.10.0
# Setting up certs...
# Connecting to cluster...
# Setting up kubeconfig...
# Starting cluster components...
# Kubectl is now configured to use the cluster.
# Loading cached images from config file.
minikube status
# minikube: Running
# cluster: Running
# kubectl: Correctly Configured: pointing to minikube-vm at 192.168.99.103
kubectl cluster-info
# Kubernetes master is running at https://192.168.99.103:8443
# CoreDNS is running at https://192.168.99.103:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
# 
# To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
kubectl cluster-info dump
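
The dump goes to stdout by default and can be very large; it can also be written into a directory, one file per resource (the path is only an example):

kubectl cluster-info dump --output-directory=/tmp/cluster-state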

Kubernetes Client and Server version

kubectl version
# Client Version: version.Info{Major:"1", Minor:"12", GitVersion:"v1.12.1", GitCommit:"4ed3216f3ec431b140b1d899130a69fc671678f4", GitTreeState:"clean", BuildDate:"2018-10-05T16:46:06Z", GoVersion:"go1.10.4", Compiler:"gc", Platform:"windows/amd64"}
# Server Version: version.Info{Major:"1", Minor:"10", GitVersion:"v1.10.0", GitCommit:"fc32d2f3698e36b93322a3465f63a14e9f0eaead", GitTreeState:"clean", BuildDate:"2018-03-26T16:44:10Z", GoVersion:"go1.9.3", Compiler:"gc", Platform:"linux/amd64"}
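
The client is the Windows kubectl binary, the server is the API server inside the minikube VM. For a more compact comparison of the two:

kubectl version --short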

Cluster IP address

minikube ip
# 192.168.99.103

Kubernetes Dashboard

minikube dashboard
# Opening http://127.0.0.1:51863/api/v1/namespaces/kube-system/services/http:kubernetes-dashboard:/proxy/ in your default browser...
# failed to open browser: exec: "cmd": executable file not found in %!P(MISSING)ATH%!(NOVERB)
minikube dashboard --url=true
http://127.0.0.1:51863/api/v1/namespaces/kube-system/services/http:kubernetes-dashboard:/proxy/
cygstart http://127.0.0.1:51863/api/v1/namespaces/kube-system/services/http:kubernetes-dashboard:/proxy/
kubectl get nodes
# NAME       STATUS   ROLES    AGE   VERSION
# minikube   Ready    master   9m    v1.10.0

Running a workload

kubectl run hello-nginx --image=nginx --port=80
# kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
# deployment.apps/hello-nginx created
The deprecation notice points to kubectl create; the non-deprecated equivalent (not run here, since the deployment already exists) would be

kubectl create deployment hello-nginx --image=nginx

which only takes the image; the container port is published later with kubectl expose.
kubectl get pods
# NAME                           READY   STATUS    RESTARTS   AGE
# hello-nginx-6f9f4fc7dd-bh5cd   1/1     Running   0          1m
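
The pod name carries a generated suffix and changes on every rollout. Instead of copying it by hand, it can be captured with a label selector and jsonpath (run=hello-nginx is the label kubectl run attaches, visible in the describe output below), and $POD could then be used in place of the literal name:

POD=$(kubectl get pods -l run=hello-nginx -o jsonpath='{.items[0].metadata.name}')
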
kubectl describe pod hello-nginx-6f9f4fc7dd-bh5cd
# Name:           hello-nginx-6f9f4fc7dd-bh5cd
# Namespace:      default
# Node:           minikube/10.0.2.15
# Start Time:     Sat, 20 Oct 2018 14:06:41 +0200
# Labels:         pod-template-hash=2959097388
#                 run=hello-nginx
# Annotations:    <none>
# Status:         Running
# IP:             172.17.0.5
# Controlled By:  ReplicaSet/hello-nginx-6f9f4fc7dd
# Containers:
#   hello-nginx:
#     Container ID:   docker://bc2e089a42022ff5b495c6da5bbf04ae3a44a3db8d4864a59d7e03a8b86fc7e6
#     Image:          nginx
#     Image ID:       docker-pullable://nginx@sha256:b73f527d86e3461fd652f62cf47e7b375196063bbbd503e853af5be16597cb2e
#     Port:           80/TCP
#     Host Port:      0/TCP
#     State:          Running
#       Started:      Sat, 20 Oct 2018 14:06:55 +0200
#     Ready:          True
#     Restart Count:  0
#     Environment:    <none>
#     Mounts:
#       /var/run/secrets/kubernetes.io/serviceaccount from default-token-vcnp2 (ro)
# Conditions:
#   Type           Status
#   Initialized    True 
#   Ready          True 
#   PodScheduled   True 
# Volumes:
#   default-token-vcnp2:
#     Type:        Secret (a volume populated by a Secret)
#     SecretName:  default-token-vcnp2
#     Optional:    false
# QoS Class:       BestEffort
# Node-Selectors:  <none>
# Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
#                  node.kubernetes.io/unreachable:NoExecute for 300s
# Events:
#   Type    Reason                 Age    From               Message
#   ----    ------                 ----   ----               -------
#   Normal  Scheduled              4m12s  default-scheduler  Successfully assigned hello-nginx-6f9f4fc7dd-bh5cd to minikube
#   Normal  SuccessfulMountVolume  4m12s  kubelet, minikube  MountVolume.SetUp succeeded for volume "default-token-vcnp2"
#   Normal  Pulling                4m11s  kubelet, minikube  pulling image "nginx"
#   Normal  Pulled                 3m59s  kubelet, minikube  Successfully pulled image "nginx"
#   Normal  Created                3m59s  kubelet, minikube  Created container
#   Normal  Started                3m58s  kubelet, minikube  Started container

Expose a service

kubectl expose deployment hello-nginx --type=NodePort
# service/hello-nginx exposed
kubectl get services
# NAME          TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
# hello-nginx   NodePort    10.96.74.235   <none>        80:32234/TCP   26s
# kubernetes    ClusterIP   10.96.0.1      <none>        443/TCP        25m
minikube service --url=true hello-nginx
# http://192.168.99.103:32234
cygstart http://192.168.99.103:32234
minikube service hello-nginx
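
The service can also be checked from the Cygwin shell without a browser (assuming the Cygwin curl package is installed); the port is the NodePort from the kubectl get services output above, and the request should return the default nginx welcome page:

curl -s http://$(minikube ip):32234
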
kubectl logs hello-nginx-6f9f4fc7dd-bh5cd
# 172.17.0.1 - - [20/Oct/2018:12:18:54 +0000] "GET / HTTP/1.1" 200 612 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0" "-"
# 2018/10/20 12:18:54 [error] 5#5: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 172.17.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "192.168.99.103:32234"
# 172.17.0.1 - - [20/Oct/2018:12:18:54 +0000] "GET /favicon.ico HTTP/1.1" 404 153 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0" "-"
# 2018/10/20 12:18:54 [error] 5#5: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 172.17.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "192.168.99.103:32234"
# 172.17.0.1 - - [20/Oct/2018:12:18:54 +0000] "GET /favicon.ico HTTP/1.1" 404 153 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0" "-"
# 2018/10/20 12:18:54 [error] 5#5: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 172.17.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "192.168.99.103:32234"
# 172.17.0.1 - - [20/Oct/2018:12:18:54 +0000] "GET /favicon.ico HTTP/1.1" 404 153 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0" "-"

Scaling the service

kubectl scale --replicas=3 deployment/hello-nginx
# deployment.extensions/hello-nginx scaled
kubectl get deployment
# NAME          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
# hello-nginx   3         3         3            3           14m
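
To watch the additional replicas come up (same run=hello-nginx label as above; Ctrl-C ends the watch):

kubectl get pods -l run=hello-nginx --watch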

Stopping the Cluster

minikube stop 
# Stopping local Kubernetes cluster...
# Machine stopped.

Deleting the Cluster

minikube delete
# Deleting local Kubernetes cluster...
# Machine deleted.

8 Using Git

8.1 Bitbucket (Opitz)

Create an account

Create a Jira ticket and ask for access to the Opitz Bitbucket server. Once you have access, use a URL of the form https://git.opitz-consulting.de/users/<userid> to reach Bitbucket.

https://git.opitz-consulting.de/users/vze/

Generating a new SSH key

Make sure to use your own e-mail address in the next command.

ssh-keygen -t rsa -b 4096 -C "volker.zell@opitz-consulting.com" -f ~/.ssh/bitbucket-oc
# Generating public/private rsa key pair.
# Enter passphrase (empty for no passphrase): 
# Enter same passphrase again: 
# Your identification has been saved in /home/VZE/.ssh/bitbucket-oc.
# Your public key has been saved in /home/VZE/.ssh/bitbucket-oc.pub.
# The key fingerprint is:
# SHA256:RZzgw9Cmijr0/iX7fe70hALGDYGUKZcwtNGU2jGdadg volker.zell@opitz-consulting.com
# The key's randomart image is:
# +---[RSA 4096]----+
# |  .==o@o+o..     |
# |   .+% E+.o      |
# |   .= +++ .      |
# |   . .o oo       |
# |   . . +S.       |
# | .. . . .   .    |
# |...  . . . o .   |
# |o  .  + . o.o    |
# | ....o.. .+o .   |
# +----[SHA256]-----+

Adding your SSH key to the ssh-agent

eval $(ssh-agent -s)
# Agent pid 18452
ssh-add ~/.ssh/bitbucket-oc
# Identity added: /home/VZE/.ssh/bitbucket-oc (/home/VZE/.ssh/bitbucket-oc)
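
The agent only lives for the current shell session. A more permanent alternative is a host entry in ~/.ssh/config; the host and port below are taken from the clone URL used further down, the rest is a sketch:

cat >> ~/.ssh/config <<'EOF'
Host git.opitz-consulting.de
    Port 7999
    User git
    IdentityFile ~/.ssh/bitbucket-oc
    IdentitiesOnly yes
EOF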

8.2 Working with repositories

Create a repository

Use a URL of the form https://git.opitz-consulting.de/users/<userid>/repos?create

https://git.opitz-consulting.de/users/vze/repos?create

vagrant-multihost

My code is ready to be pushed

cd /misc/vagrant/vagrant-multihost
git init
# Initialized empty Git repository in /misc/vagrant/vagrant-multihost/.git/
git add .
git commit -m "Initial Commit"
# [master (root-commit) a52d5f9] Initial Commit
#  27 files changed, 1265 insertions(+)
#  create mode 100755 .gitignore
#  create mode 100755 LICENSE
#  create mode 100755 README.md
#  create mode 100755 Vagrantfile
#  create mode 100755 ansible.cfg
#  create mode 100755 ansible/group_vars/all.yml
#  create mode 100755 ansible/roles/vzell.filesystem/README.md
#  create mode 100755 ansible/roles/vzell.filesystem/defaults/main.yml
#  create mode 100755 ansible/roles/vzell.filesystem/handlers/main.yml
#  create mode 100755 ansible/roles/vzell.filesystem/meta/main.yml
#  create mode 100755 ansible/roles/vzell.filesystem/tasks/main.yml
#  create mode 100755 ansible/roles/vzell.filesystem/tests/inventory
#  create mode 100755 ansible/roles/vzell.filesystem/tests/test.yml
#  create mode 100755 ansible/roles/vzell.filesystem/vars/main.yml
#  create mode 100755 ansible/roles/vzell.yum/README.md
#  create mode 100755 ansible/roles/vzell.yum/defaults/main.yml
#  create mode 100755 ansible/roles/vzell.yum/handlers/main.yml
#  create mode 100755 ansible/roles/vzell.yum/meta/main.yml
#  create mode 100755 ansible/roles/vzell.yum/tasks/main.yml
#  create mode 100755 ansible/roles/vzell.yum/tests/inventory
#  create mode 100755 ansible/roles/vzell.yum/tests/test.yml
#  create mode 100755 ansible/roles/vzell.yum/vars/main.yml
#  create mode 100755 ansible/site.yml
#  create mode 100755 custom-vagrant-hosts.yml
#  create mode 100755 scripts/inventory.py
#  create mode 100755 test/runbats.sh
#  create mode 100755 vagrant-hosts.yml
git remote add origin ssh://git@git.opitz-consulting.de:7999/~vze/vagrant-multihost.git
git remote show origin
# Warning: Permanently added the RSA host key for IP address '[195.81.211.173]:7999' to the list of known hosts.
# * remote origin
#   Fetch URL: ssh://git@git.opitz-consulting.de:7999/~vze/vagrant-multihost.git
#   Push  URL: ssh://git@git.opitz-consulting.de:7999/~vze/vagrant-multihost.git
#   HEAD branch: (unknown)
git push -u origin master
# Counting objects: 44, done.
# Delta compression using up to 8 threads.
# Compressing objects: 100% (26/26), done.
# Writing objects: 100% (44/44), 16.66 KiB | 775.00 KiB/s, done.
# Total 44 (delta 2), reused 0 (delta 0)
# To ssh://git.opitz-consulting.de:7999/~vze/vagrant-multihost.git
#  * [new branch]      master -> master
# Branch 'master' set up to track remote branch 'master' from 'origin'.
git add README.md
git commit -m "Added cygwin specific documentation"
# [master ae3604d] Added cygwin specific documentation
#  1 file changed, 186 insertions(+), 73 deletions(-)
git push -u origin master
# Counting objects: 3, done.
# Delta compression using up to 8 threads.
# Compressing objects: 100% (3/3), done.
# Writing objects: 100% (3/3), 974 bytes | 487.00 KiB/s, done.
# Total 3 (delta 2), reused 0 (delta 0)
# To ssh://git.opitz-consulting.de:7999/~vze/vagrant-multihost.git
#    a52d5f9..ae3604d  master -> master
# Branch 'master' set up to track remote branch 'master' from 'origin'.
git add README.md
git commit -m "Added download link to Oracle Linux vagrant boxes"
# [master 7a9207d] Added download link to Oracle Linux vagrant boxes
#  1 file changed, 4 insertions(+), 3 deletions(-)
git push -u origin master
# Counting objects: 3, done.
# Delta compression using up to 8 threads.
# Compressing objects: 100% (3/3), done.
# Writing objects: 100% (3/3), 401 bytes | 200.00 KiB/s, done.
# Total 3 (delta 2), reused 0 (delta 0)
# To ssh://git.opitz-consulting.de:7999/~vze/vagrant-multihost.git
#    ae3604d..7a9207d  master -> master
# Branch 'master' set up to track remote branch 'master' from 'origin'.
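
Since -u set up the tracking branch on the first push, the subsequent pushes could simply have been:

git push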

Testing your SSH connection

Click on a repository and select Clone; in addition to the HTTPS URL you should now see an SSH URL in the pulldown menu.
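
The connection can also be tested directly from the Cygwin shell; host and port are the ones from the clone URL above, and the server should answer with a short message instead of a shell prompt:

ssh -T -p 7999 git@git.opitz-consulting.de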

9 Cleanup the multimachine setup

To shut down all VMs of the multimachine setup (they can be brought back later with vagrant up), run

vagrant halt

If you ever want to get rid of all your VMs, just run the following

vagrant destroy -f
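
Afterwards, vagrant global-status can confirm that nothing is left behind and prune stale entries from Vagrant's machine index:

vagrant global-status --prune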

10 Misc

10.1 Switching between Docker for Windows and VirtualBox on Windows 10

When using VirtualBox, disable the Hyper-V hypervisor; the change takes effect after the next reboot.

Run from an elevated prompt (admin privileges)

bcdedit /set hypervisorlaunchtype off

To start using Docker for Windows again, re-enable Hyper-V (again, a reboot is required):

Run from an elevated prompt (admin privileges)

bcdedit /set hypervisorlaunchtype auto
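
To check which mode is currently configured (also from an elevated prompt; quoting the braces is just a precaution under bash, and the line only appears once the setting exists in the boot entry):

bcdedit /enum '{current}' | grep -i hypervisorlaunchtype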

Author: Dr. Volker Zell

Created: 2018-10-21 Sun 07:45
