from .xlate import XLATOR
-__version__ = '1.2.0'
+__version__ = '1.2.1'
_ = XLATOR.gettext
ngettext = XLATOR.ngettext
pass
+# =============================================================================
+class AbortExecution(ExpectedHandlerError):
+ """Indicating an abort of the execution."""
+
+ # -------------------------------------------------------------------------
+ def __init__(self, step=None):
+
+ if step:
+ self.step = step
+ else:
+ self.step = _('<some unknown step>')
+
+ # -------------------------------------------------------------------------
+ def __str__(self):
+
+ return _("Aborting after {!r}.").format(self.step)
+
+
# =============================================================================
if __name__ == "__main__":
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2021 by Frank Brehm, Berlin
-@summary: A handler module for underlaying actions
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import os
-import logging
-import re
-import socket
-import ipaddress
-import shutil
-import stat
-import textwrap
-import copy
-import sys
-
-from pathlib import Path
-
-from subprocess import PIPE
-
-from distutils.version import LooseVersion
-
-from operator import attrgetter
-
-HAS_GETCH = False
-try:
- import getch
- HAS_GETCH = True
-except ImportError:
- pass
-
-# Third party modules
-import pytz
-import yaml
-import six
-
-# Own modules
-from fb_tools.common import pp, to_bool, to_str, RE_DOT_AT_END
-from fb_tools.errors import HandlerError, ExpectedHandlerError, CommandNotFoundError
-from fb_tools.handling_obj import HandlingObject, CalledProcessError
-from fb_tools.handler import BaseHandler
-
-from fb_vmware.errors import VSphereExpectedError
-from fb_vmware.config import VSPhereConfigInfo
-from fb_vmware.connect import VsphereConnection
-
-from fb_pdnstools.server import PowerDNSServer
-from fb_pdnstools.errors import PowerDNSHandlerError
-
-from . import MIN_VERSION_TERRAFORM, MAX_VERSION_TERRAFORM
-from . import MIN_VERSION_VSPHERE_PROVIDER
-
-from .config import CrTfConfiguration
-
-from .terraform.vm import TerraformVm
-
-from .terraform.disk import TerraformDisk
-
-from .xlate import XLATOR
-
-__version__ = '3.8.1'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-def password_input_getch(prompt='', fill_char='*', max_len=64):
- p_s = ''
- proxy_string = ' ' * 64
-
- fch = ' '
- if len(fill_char) >= 1:
- fch = fill_char[0]
-
- while True:
-
- print('\r' + proxy_string, end='', flush=True)
- print('\r' + prompt, end='', flush=True)
-
- c = getch.getch()
- if c == b'\r' or c == b'\n':
- break
- elif c == b'\x08':
- if len(p_s):
- p_s = p_s[:-1]
- continue
-
- p_s += to_str(c)
- if len(p_s) >= max_len:
- break
-
- print('', flush=True)
- return p_s
-
-
-# =============================================================================
-def password_input(prompt='', fill_char='*', max_len=64):
-
- if HAS_GETCH:
- return password_input_getch(prompt=prompt, fill_char=fill_char, max_len=max_len)
-
- import getpass
-
- return getpass.getpass(prompt=prompt)
-
-
-# =============================================================================
-class AbortExecution(ExpectedHandlerError):
- """Indicating an abort of the execution."""
-
- # -------------------------------------------------------------------------
- def __init__(self, step=None):
-
- if step:
- self.step = step
- else:
- self.step = _('<some unknown step>')
-
- # -------------------------------------------------------------------------
- def __str__(self):
-
- return _("Aborting after {!r}.").format(self.step)
-
-
-# =============================================================================
-class CreateTerraformHandler(BaseHandler):
- """
- A handler class for creating the terraform environment
- """
-
- re_default = re.compile(r'^\s*defaults?\s*$', re.IGNORECASE)
- re_vm_key = re.compile(r'^\s*vms?\s*$', re.IGNORECASE)
- re_group = re.compile(r'^\s*groups?\s*$', re.IGNORECASE)
- re_group_name = re.compile(r'^\s*name\s*$', re.IGNORECASE)
- re_doublequote = re.compile(r'"')
-
- re_tf_version = re.compile(r'^\s*Terraform\s+v(\S+)', re.IGNORECASE)
-
- std_file_permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
- std_secure_file_permissions = stat.S_IRUSR | stat.S_IWUSR
-
- open_opts = {}
- if six.PY3:
- open_opts['encoding'] = 'utf-8'
- open_opts['errors'] = 'surrogateescape'
-
- max_groups_depth = 10
-
- tz_name = 'Europe/Berlin'
- tz = pytz.timezone(tz_name)
-
- steps = (
- 'init', 'vmw-init', 'read-yaml', 'pdns-zones', 'vmw-test', 'collect-folders',
- 'vmw-clusters', 'vmw-datastores', 'vmw-ds-clusters', 'vmw-networks', 'vmw-templates',
- 'validate-yaml', 'validate-storage', 'validate-iface', 'validate-dns',
- 'perform-dns', 'project-dir', 'tf-files', 'ensure-vmw-folders',
- )
- step_desc = {
- 'init': _('After initialization of all objects and handlers.'),
- 'vmw-init': _('After initialisation of VSPhere handlers.'),
- 'read-yaml': _('After reading the given YAML file.'),
- 'pdns-zones': _('After retrieving all DNS zones from PowerDNS.'),
- 'vmw-test': _('After testing VSPhere handlers.'),
- 'collect-folders': _('After collecting all VMWare and local folders.'),
- 'vmw-clusters': _('After collecting all VMWare clusters.'),
- 'vmw-datastores': _('After collecting all VMWare datastores.'),
- 'vmw-ds-clusters': _('After collecting all VMWare datastore clusters.'),
- 'vmw-networks': _('After collecting all VMWare networks.'),
- 'vmw-templates': _('After validating all given VMWare templates.'),
- 'validate-yaml': _('After syntax validating of data from loaded YAML file.'),
- 'validate-storage': _('After validating all given storage data.'),
- 'validate-iface': _('After validating all given network interface data.'),
- 'validate-dns': _('After validating all given DNS data.'),
- 'perform-dns': _('After performing all necessary actions in DNS.'),
- 'project-dir': _('After ensuring availability of the project directory.'),
- 'tf-files': _('After creation of the Terraform project files.'),
- 'ensure-vmw-folders': _('After ensuring availability of VM folders in VMWare vSphere.'),
- }
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None,
- config=None, simulate=False, force=False, ignore_existing_dns=False,
- terminal_has_colors=False, initialized=False):
-
- self.pdns = None
- self.vsphere = {}
- self.config = None
-
- self.terraform_cmd = None
-
- self.yaml_data = None
-
- self.default_vm = None
- self.group_default_vms = {}
-
- self.ignore_existing_dns = bool(ignore_existing_dns)
-
- self.vms = []
- self.vsphere_templates = {}
-
- self.vm_names = []
- self.fqdns = {}
- self.addresses = {}
-
- self.vsphere_folders = []
-
- self.vsphere_user = None
- self.vsphere_password = None
-
- self.used_networks = {}
- self.used_dc_clusters = {}
- self.used_datastores = {}
- self.project_dir = None
- self.project_name = None
-
- self._terraform_root_dir = None
-
- self.all_vms = {}
- self.existing_vms = []
-
- self.start_dir = Path(os.getcwd())
-
- self.script_dir = None
- self.script_dir_rel = None
-
- self._stop_at_step = None
-
- self.min_version_terraform = None
- if MIN_VERSION_TERRAFORM:
- self.min_version_terraform = LooseVersion(MIN_VERSION_TERRAFORM)
-
- self.max_version_terraform = None
- if MAX_VERSION_TERRAFORM:
- self.max_version_terraform = LooseVersion(MAX_VERSION_TERRAFORM)
-
- self.min_version_vsphere_provider = None
- if MIN_VERSION_VSPHERE_PROVIDER:
- self.min_version_vsphere_provider = LooseVersion(MIN_VERSION_VSPHERE_PROVIDER)
-
- self.dns_mapping = {
- 'forward': [],
- 'reverse': [],
- }
- self.dns_mappings2create = {
- 'forward': [],
- 'reverse': [],
- }
-
- self.updated_zones = []
-
- self.eval_errors = 0
-
- super(CreateTerraformHandler, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
- initialized=False,
- )
-
- if config:
- self.config = config
- if self.verbose >= 1:
- msg = _("Given configuration:") + '\n' + pp(self.config.as_dict())
- LOG.debug(msg)
-
- self.script_dir = self.base_dir.joinpath('postinstall-scripts')
- LOG.debug(_("Directory for postinstall scripts: {!r}.").format(str(self.script_dir)))
- if not self.script_dir.exists():
- msg = _("Directory for postinstall scripts {!r} does not exists.").format(
- str(self.script_dir))
- raise ExpectedHandlerError(msg)
- if not self.script_dir.is_dir():
- msg = _("Path {!r} for postinstall scripts exists, but is not a directory.").format(
- str(self.script_dir))
- raise ExpectedHandlerError(msg)
-
- if initialized:
- self.initialized = True
-
- # -----------------------------------------------------------
- @HandlingObject.simulate.setter
- def simulate(self, value):
- self._simulate = to_bool(value)
-
- if self.initialized:
- LOG.debug(_("Setting simulate of all subsequent objects to {!r} ...").format(
- self.simulate))
-
- if self.pdns:
- self.pdns.simulate = self.simulate
-
- for vsphere_name in self.vsphere.keys():
- if self.vsphere[vsphere_name]:
- self.vsphere[vsphere_name].simulate = self.simulate
-
- # -----------------------------------------------------------
- @property
- def stop_at_step(self):
- """Step, at which the execution should be interrupted."""
- return self._stop_at_step
-
- @stop_at_step.setter
- def stop_at_step(self, value):
- if value is None:
- self._stop_at_step = None
- return
- v = str(value).strip().lower().replace('_', '-')
- if v == '':
- self._stop_at_step = None
- return
- if v not in self.steps:
- msg = _("Invalid step name {!r} for interrupting execution.").format(value)
- raise ValueError(msg)
- self._stop_at_step = v
-
- # -----------------------------------------------------------
- @property
- def terraform_root_dir(self):
- """Root directory of all terraform directories."""
- if self.is_venv:
- return self.base_dir.parent
- return self._terraform_root_dir
-
- # -----------------------------------------------------------
- @property
- def full_project_name(self):
- """Complete project name with parent paths."""
- if not self.project_name:
- return None
- if not self.project_dir:
- return None
- if not self.terraform_root_dir:
- return self.project_name
- return os.path.relpath(str(self.project_dir), self.terraform_root_dir)
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(CreateTerraformHandler, self).as_dict(short=short)
- res['std_file_permissions'] = "{:04o}".format(self.std_file_permissions)
- res['std_secure_file_permissions'] = "{:04o}".format(self.std_secure_file_permissions)
- res['open_opts'] = self.open_opts
- res['stop_at_step'] = self.stop_at_step
- res['steps'] = copy.copy(self.steps)
- res['tz_name'] = self.tz_name
- res['terraform_root_dir'] = self.terraform_root_dir
- res['full_project_name'] = self.full_project_name
- res['vsphere'] = {}
- for vsphere_name in self.vsphere.keys():
- res['vsphere'][vsphere_name] = self.vsphere[vsphere_name].as_dict(short=short)
-
- return res
-
- # -------------------------------------------------------------------------
- @classmethod
- def set_tz(cls, tz_name):
-
- if not tz_name.strip():
- raise ValueError(_("Invalid time zone name {!r}.").format(tz_name))
- tz_name = tz_name.strip()
- LOG.debug(_("Setting time zone to {!r}.").format(tz_name))
- cls.tz = pytz.timezone(tz_name)
- cls.tz_name = tz_name
-
- # -------------------------------------------------------------------------
- def incr_verbosity(self, diff=1):
-
- new_verbose = self.verbose + int(diff)
- if new_verbose < 0:
- new_verbose = 0
- self.verbose = new_verbose
-
- if self.pdns:
- self.pdns.verbose = self.verbose
-
- for vname in self.vsphere:
- self.vsphere[vname].verbose = self.verbose
-
- # -------------------------------------------------------------------------
- def init_handlers(self):
-
- if not self.config:
- msg = _("No configuration given before initialisation of handlers.")
- raise HandlerError(msg)
-
- if not isinstance(self.config, CrTfConfiguration):
- raise HandlerError(_(
- "{n} is not a {e}-instance, but a {w}-instance instead.").format(
- n='self.config', e='CrTfConfiguration', w=self.config.__class__.__name__))
-
- TerraformDisk.default_size = self.config.disk_size
- TerraformDisk.min_size_gb = self.config.disk_min_size
- TerraformDisk.max_size_gb = self.config.disk_max_size
-
- TerraformVm.min_rootdisk_size = self.config.root_min_size
- TerraformVm.max_rootdisk_size = self.config.root_max_size
-
- LOG.info(_("Initialize some additional handlers."))
-
- self.terraform_cmd = self.get_command('terraform', quiet=True)
- if not self.terraform_cmd:
- raise CommandNotFoundError('terraform')
- self.check_terraform_version()
-
- self.pdns = PowerDNSServer(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- master_server=self.config.pdns_master_server,
- port=self.config.pdns_api_port, key=self.config.pdns_api_key,
- use_https=self.config.pdns_api_use_https, path_prefix=self.config.pdns_api_path_prefix,
- simulate=self.simulate, force=self.force, initialized=True,
- )
-
- if not self.config.no_pdns:
- try:
- api_version = self.pdns.get_api_server_version() # noqa
- except (PowerDNSHandlerError, ConnectionError) as e:
- msg = "{c}: {e}".format(c=e.__class__.__name__, e=str(e))
- raise ExpectedHandlerError(msg)
-
- # -------------------------------------------------------------------------
- def check_terraform_version(self):
- """ Checking, that the called terraform has a minimum version."""
-
- tf_timeout = 10
-
- got_tf_version = None
- LOG.info(_("Checking the terraform version ..."))
-
- cmd = [str(self.terraform_cmd), 'version']
- cmd_str = ' '.join(cmd)
- LOG.debug(_("Executing {!r} ...").format(cmd_str))
- result = self.run(
- cmd, may_simulate=False, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
- LOG.debug(_("Completed process:") + "\n" + str(result))
-
- if not result.stdout:
- msg = _("No output on command {!r}.").format(cmd_str)
- raise ExpectedHandlerError(msg)
- lines = result.stdout.splitlines()
-
- if self.verbose > 2:
- LOG.debug(_("First line:") + '\n' + lines[0])
- match = self.re_tf_version.search(lines[0])
- if not match:
- msg = _("Could not evaluate version output of terraform:") + '\n' + result.stdout
- raise ExpectedHandlerError(msg)
-
- got_tf_version = LooseVersion(match.group(1))
- LOG.info(_("Terraform version: {!r}.").format(str(got_tf_version)))
-
- if self.min_version_terraform:
- LOG.debug(_("Checking for {o}{m!r} ...").format(
- o='>=', m=str(self.min_version_terraform)))
- if got_tf_version < self.min_version_terraform:
- msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
- c=str(got_tf_version), o='>=', m=str(self.min_version_terraform))
- raise ExpectedHandlerError(msg)
-
- if self.max_version_terraform:
- LOG.debug(_("Checking for {o}{m!r} ...").format(
- o='<=', m=str(self.max_version_terraform)))
- if got_tf_version > self.max_version_terraform:
- msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
- c=str(got_tf_version), o='<=', m=str(self.max_version_terraform))
- raise ExpectedHandlerError(msg)
-
- # -------------------------------------------------------------------------
- def __del__(self):
- """Destructor."""
-
- LOG.debug(_("Self destruction."))
-
- if self.pdns:
- self.pdns = None
-
- if self.vsphere:
- self.vsphere = None
-
- # -------------------------------------------------------------------------
- def first_call(self, yaml_file):
- """First steps until reading the YAML file."""
-
- if not self.initialized:
- raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
-
- try:
-
- self.exec_init_run()
-
- LOG.info(_("Go ahead..."))
-
- self.exec_read_yaml(yaml_file)
-
- print()
- LOG.info(_("Initialising VSPhere handlers."))
- self.init_vspheres(yaml_file)
-
- return True
-
- except AbortExecution as e:
- LOG.warn(str(e))
- return False
-
- # -------------------------------------------------------------------------
- def __call__(self, yaml_file):
- """Executing the underlying action."""
-
- if not self.initialized:
- raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
-
- try:
-
- if self.simulate:
- print()
- msg_a = _("Simulation mode")
- msg_b = (
- "* " + _("Necessary DNS records are not created."),
- "* " + _("Terraform files are not created.")
- )
- ll = 4
- if len(msg_a) > ll:
- ll = len(msg_a)
- for msg in msg_b:
- if len(msg) > ll:
- ll = len(msg)
-
- print(self.colored('#' * (ll + 4), 'AQUA'))
- line = self.colored('#', 'AQUA') + ' '
- line += self.colored(msg_a.center(ll), 'YELLOW')
- line += ' ' + self.colored('#', 'AQUA')
- print(line)
- for msg in msg_b:
- line = '# ' + msg.ljust(ll) + ' #'
- print(self.colored(line, 'AQUA'))
- print(self.colored('#' * (ll + 4), 'AQUA'))
- print()
-
- self.exec_pdns_zones()
-
- print()
- LOG.info(_("Cpllecting first information from vSPhere."))
- self.test_vsphere_handlers()
- self.exec_collect_folders(yaml_file)
- self.assign_default_vmw_values()
-
- print()
- LOG.info(_("Retrieving information from vSphere."))
-
- self.exec_vmw_clusters()
- self.exec_vmw_datastores()
- self.exec_vmw_ds_clusters()
- self.exec_vmw_networks()
- self.exec_vmw_templates()
-
- self.exec_validate_yaml()
- self.exec_validate_storage()
- self.exec_validate_iface()
- self.exec_validate_dns()
-
- if self.verbose > 2:
-
- vm_list = []
- for vm in self.vms:
- vm_list.append(vm.as_dict())
- LOG.debug(_("Validated VMs:") + "\n" + pp(vm_list))
-
- if self.existing_vms:
- msg = ngettext(
- "There is one existing virtual machine.",
- "There are {c} existing virtual machines.",
- len(self.existing_vms)).format(c=len(self.existing_vms))
- LOG.warn(msg)
- if self.verbose > 2:
- msg = ngettext(
- "Existing virtual machine:", "Existing virtual machines:",
- len(self.existing_vms))
- LOG.debug(msg + '\n' + pp(self.existing_vms))
- else:
- LOG.info(_("No existing virtual machines found in YAML file."))
-
- self.exec_perform_dns()
- self.exec_project_dir()
-
- self.exec_tf_files()
- self.exec_vsphere_folders()
-
- LOG.info(_("Finished all steps."))
-
- except AbortExecution as e:
- LOG.warn(str(e))
- return
-
- self.exec_terraform()
- if self.simulate:
- print()
- msg = print(self.colored(
- _('And how I said before - it was only a simulation!'), 'AQUA'))
-
- print()
-
- # -------------------------------------------------------------------------·
- def exec_init_run(self):
-
- if self.stop_at_step == 'init':
- self.incr_verbosity()
-
- if self.verbose > 2:
- LOG.debug(_("Current {} object:").format(self.__class__.__name__) + "\n" + str(self))
-
- LOG.info(_("Finished step {!r}.").format('init'))
- if self.stop_at_step == 'init':
- raise AbortExecution('init')
-
- # -------------------------------------------------------------------------·
- def exec_read_yaml(self, yaml_file):
-
- if self.stop_at_step == 'read-yaml':
- self.incr_verbosity()
-
- self.read_yaml_data(yaml_file)
- self.eval_yaml_data()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in evaluation of YAML data of {f!r}.",
- "Found {n} errors in evaluation of YAML data of {f!r}.",
- self.eval_errors).format(n=self.eval_errors, f=str(yaml_file))
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('read-yaml'))
- if self.stop_at_step == 'read-yaml':
- raise AbortExecution('read-yaml')
-
- # -------------------------------------------------------------------------·
- def exec_collect_folders(self, yaml_file):
-
- if self.stop_at_step == 'collect-folders':
- self.incr_verbosity()
-
- LOG.info(_("Collecting all VMWare and local folders ..."))
- LOG.info(_("Get vSphere datacenter ..."))
- for vname in self.vsphere:
- self.vsphere[vname].get_datacenter()
-
- LOG.debug(_("Collecting vSphere folders."))
- self.vsphere_folders = []
- for vm in self.vms:
- if vm.folder:
- if vm.folder not in self.vsphere_folders:
- self.vsphere_folders.append(vm.folder)
- self.vsphere_folders.sort(key=str.lower)
- LOG.debug(_("Collected vSphere folders:") + "\n" + pp(self.vsphere_folders))
-
- # Set project name and directory
- yfile = Path(yaml_file)
- yfile_base = yfile.name
- yfile_dir = yfile.parent.resolve()
- (yfile_stem, yfile_ext) = os.path.splitext(yfile_base)
- self.project_name = yfile_stem
- LOG.info(_("Project name is {!r}.").format(str(self.project_name)))
- self.project_dir = yfile_dir / yfile_stem
- LOG.info(_("Project directory is: {!r}.").format(str(self.project_dir)))
-
- # Evaluating root terraform directory
- if not self.is_venv:
- i = 4
- cdir = copy.copy(self.project_dir).parent
- while i > 0:
- git_dir = cdir / '.git'
- if git_dir.is_dir():
- self._terraform_root_dir = cdir
- break
- i -= 1
- if cdir == cdir.parent:
- break
- cdir = cdir.parent
- if not self._terraform_root_dir:
- msg = _("Did not found root terraform directory above {!r}.").format(
- str(self.project_dir))
- LOG.warn(msg)
-
- LOG.info(_("Full project name: {!r}").format(self.full_project_name))
-
- LOG.info(_("Finished step {!r}.").format('collect-folders'))
- if self.stop_at_step == 'collect-folders':
- raise AbortExecution('collect-folders')
-
- # -------------------------------------------------------------------------·
- def exec_pdns_zones(self):
-
- if self.config.no_pdns:
- return
-
- if self.stop_at_step == 'pdns-zones':
- self.incr_verbosity()
-
- print()
- LOG.info(_("Retrieving informations from PowerDNS ..."))
-
- self.pdns.get_api_zones()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in exploring PowerDNS zones.",
- "Found {n} errors in exploring PowerDNS zones.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('pdns-zones'))
- if self.stop_at_step == 'pdns-zones':
- raise AbortExecution('pdns-zones')
-
- # -------------------------------------------------------------------------·
- def init_vspheres(self, yaml_file):
-
- if self.stop_at_step == 'vmw-init':
- self.incr_verbosity()
-
- # Test for multiple VSphere references
- found_vspheres = []
- for vm in self.vms:
- vname = vm.vsphere
- if vname not in found_vspheres:
- found_vspheres.append(vname)
- if len(found_vspheres) > 1:
- yaml_file_rel = os.path.relpath(str(yaml_file), os.getcwd())
- msg = _("There is only one, unique VSPhere definition allowed in a project file.")
- msg += '\n'
- msg += _("In {f!r} were found {nr} different VSPhere definitions:").format(
- f=yaml_file_rel, nr=len(found_vspheres))
- for vname in sorted(found_vspheres, key=str.lower):
- msg += '\n * {!r}'.format(vname)
- raise ExpectedHandlerError(msg)
-
- self._init_vspheres()
-
- LOG.info(_("Finished step {!r}.").format('vmw-init'))
- if self.stop_at_step == 'vmw-init':
- raise AbortExecution('vmw-init')
-
- # -------------------------------------------------------------------------·
- def _init_vspheres(self):
-
- for vm in self.vms:
- if vm.vsphere in self.vsphere:
- continue
- vname = vm.vsphere
- if vname not in self.config.vsphere:
- msg = _("VSPhere {!r} not defined in configuration.").format(vname)
- raise ExpectedHandlerError(msg)
-
- if not self.vsphere_user and self.config.vsphere[vname].user:
- self.vsphere_user = self.config.vsphere[vname].user
- if not self.vsphere_password and self.config.vsphere[vname].password:
- self.vsphere_password = self.config.vsphere[vname].password
-
- try:
- params = {
- 'appname': self.appname,
- 'verbose': self.verbose,
- 'base_dir': self.base_dir,
- 'simulate': self.simulate,
- 'force': self.force,
- 'terminal_has_colors': self.terminal_has_colors,
- 'initialized': True,
- }
- show_params = copy.copy(params)
-
- connect_info = VSPhereConfigInfo(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- host=self.config.vsphere[vname].host, port=self.config.vsphere[vname].port,
- dc=self.config.vsphere[vname].dc, user=self.vsphere_user,
- password=self.vsphere_password, initialized=True)
-
- params['connect_info'] = connect_info
- show_params['connect_info'] = connect_info.as_dict()
-
- if self.verbose > 1:
- if self.verbose < 5:
- show_params['connect_info']['password'] = '******'
- msg = _("Initialising a {}-object with params:").format('VsphereConnection')
- msg += '\n' + pp(show_params)
- LOG.debug(msg)
-
- vsphere = VsphereConnection(**params)
- self.vsphere[vname] = vsphere
-
- except VSphereExpectedError as e:
- raise ExpectedHandlerError(str(e))
-
- # -------------------------------------------------------------------------·
- def test_vsphere_handlers(self):
-
- if self.stop_at_step == 'vmw-test':
- self.incr_verbosity()
-
- for vname in self.vsphere.keys():
-
- try:
-
- vsphere = self.vsphere[vname]
-
- vsphere.get_about()
- if self.verbose > 2:
- msg = _("Created {}-object:").format('VsphereConnection')
- msg += '\n' + pp(vsphere.as_dict())
- LOG.debug(msg)
-
- except VSphereExpectedError as e:
- raise ExpectedHandlerError(str(e))
-
- LOG.info(_("Finished step {!r}.").format('vmw-test'))
- if self.stop_at_step == 'vmw-test':
- raise AbortExecution('vmw-test')
-
- # -------------------------------------------------------------------------·
- def assign_default_vmw_values(self):
- """Assigning not defined templates and clusters of VMs by their
- appropriate default values."""
-
- LOG.debug(_(
- "Assigning not defined templates and clusters of VMs by their "
- "appropriate default values."))
-
- for vm in self.vms:
-
- if not vm.cluster:
- cl = self.config.vsphere[vm.vsphere].cluster
- if self.verbose > 1:
- LOG.debug(_("Setting cluster of {n!r} to {c!r} ...").format(
- n=vm.name, c=cl))
- vm.cluster = cl
-
- if not vm.vm_template:
- tpl = self.config.vsphere[vm.vsphere].template_name
- if self.verbose > 1:
- LOG.debug(_("Setting template of {n!r} to {t!r} ...").format(
- n=vm.name, t=tpl))
- vm.vm_template = tpl
-
- # -------------------------------------------------------------------------·
- def exec_vmw_clusters(self):
-
- if self.stop_at_step == 'vmw-clusters':
- self.incr_verbosity()
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for clusters in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_clusters()
-
- LOG.info(_("Finished step {!r}.").format('vmw-clusters'))
- if self.stop_at_step == 'vmw-clusters':
- raise AbortExecution('vmw-clusters')
-
- # -------------------------------------------------------------------------·
- def exec_vmw_datastores(self):
-
- if self.stop_at_step == 'vmw-datastores':
- self.incr_verbosity()
-
- nr_total = 0
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for datastores in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_datastores()
- nr_total += len(self.vsphere[vname].datastores.keys())
-
- if nr_total:
- msg = ngettext("Found one datastore.", "Found {n} datastores.", nr_total)
- LOG.debug(msg.format(n=nr_total))
- else:
- LOG.error(_("No VSPhere datastores found."))
-
- LOG.info(_("Finished step {!r}.").format('vmw-datastores'))
- if self.stop_at_step == 'vmw-datastores':
- raise AbortExecution('vmw-datastores')
-
- # -------------------------------------------------------------------------·
- def exec_vmw_ds_clusters(self):
-
- nr_total = 0
-
- if self.stop_at_step == 'vmw-ds-clusters':
- self.incr_verbosity()
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for datastore clusters in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_ds_clusters()
- nr_total += len(self.vsphere[vname].ds_clusters.keys())
-
- if nr_total:
- msg = ngettext("Found one datastore cluster.", "Found {n} datastore clusters.", nr_total)
- LOG.debug(msg.format(n=nr_total))
- else:
- LOG.warn(_("No VSPhere datastore clusters found."))
-
- LOG.info(_("Finished step {!r}.").format('vmw-ds-clusters'))
- if self.stop_at_step == 'vmw-ds-clusters':
- raise AbortExecution('vmw-ds-clusters')
-
- # -------------------------------------------------------------------------·
- def exec_vmw_networks(self):
-
- if self.stop_at_step == 'vmw-networks':
- self.incr_verbosity()
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for networks in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_networks()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in exploring vSphere {v!r} resources.",
- "Found {n} errors in exploring vSphere {v!r} resources.",
- self.eval_errors).format(n=self.eval_errors, v=vname)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('vmw-networks'))
- if self.stop_at_step == 'vmw-networks':
- raise AbortExecution('vmw-networks')
-
- # -------------------------------------------------------------------------·
- def exec_vmw_templates(self):
-
- if self.stop_at_step == 'vmw-templates':
- self.incr_verbosity()
-
- self.explore_vsphere_templates()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in exploring vSphere templates.",
- "Found {n} errors in exploring vSphere templates.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('vmw-templates'))
- if self.stop_at_step == 'vmw-templates':
- raise AbortExecution('vmw-templates')
-
- # -------------------------------------------------------------------------·
- def exec_validate_yaml(self):
-
- if self.stop_at_step == 'validate-yaml':
- self.incr_verbosity()
-
- print()
- LOG.info(_("Validating information from YAML file ..."))
-
- self.validate_clusters()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating vSphere computing clusters.",
- "Found {n} errors in validating vSphere computing clusters.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- self.get_all_vms()
- self.validate_vms()
-
- LOG.info(_("Finished step {!r}.").format('validate-yaml'))
- if self.stop_at_step == 'validate-yaml':
- raise AbortExecution('validate-yaml')
-
- # -------------------------------------------------------------------------·
- def get_all_vms(self):
-
- LOG.info(_("Got a list of all VMs and templates ..."))
- self.all_vms = {}
- re_vm = re.compile(r'.*')
-
- for vs_name in self.vsphere:
-
- if vs_name not in self.all_vms:
- self.all_vms[vs_name] = {}
-
- vm_list = self.vsphere[vs_name].get_vms(re_vm, name_only=True)
- for vm_tuple in vm_list:
- vm_name = vm_tuple[0]
- vm_path = vm_tuple[1]
- if vm_name in self.all_vms[vs_name]:
- self.all_vms[vs_name][vm_name].append(vm_path)
- else:
- self.all_vms[vs_name][vm_name] = [vm_path]
-
- if self.verbose > 2:
- msg = _("All existing VMs and templates:")
- msg += '\n' + pp(self.all_vms)
- LOG.debug(msg)
-
- # -------------------------------------------------------------------------·
- def exec_validate_storage(self):
-
- if self.stop_at_step == 'validate-storage':
- self.incr_verbosity()
-
- self.validate_storages()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating VM storages.",
- "Found {n} errors in validating VM storages.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('validate-storage'))
- if self.stop_at_step == 'validate-storage':
- raise AbortExecution('validate-storage')
-
- # -------------------------------------------------------------------------·
- def exec_validate_iface(self):
-
- if self.stop_at_step == 'validate-iface':
- self.incr_verbosity()
-
- self.validate_interfaces()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating VM interfaces.",
- "Found {n} errors in validating VM interfaces.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('validate-iface'))
- if self.stop_at_step == 'validate-iface':
- raise AbortExecution('validate-iface')
-
- # -------------------------------------------------------------------------·
- def exec_validate_dns(self):
-
- if self.stop_at_step == 'validate-dns':
- self.incr_verbosity()
-
- self.validate_dns_mappings()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating DNS mappings.",
- "Found {n} errors in validating DNS mappings.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('validate-dns'))
- if self.stop_at_step == 'validate-dns':
- raise AbortExecution('validate-dns')
-
- # -------------------------------------------------------------------------·
- def exec_perform_dns(self):
-
- if self.stop_at_step == 'perform-dns':
- self.incr_verbosity()
-
- self.perform_dns()
-
- LOG.info(_("Finished step {!r}.").format('perform-dns'))
- if self.stop_at_step == 'perform-dns':
- raise AbortExecution('perform-dns')
-
- # -------------------------------------------------------------------------·
- def exec_project_dir(self):
-
- if self.stop_at_step == 'project-dir':
- self.incr_verbosity()
-
- self.ensure_project_dir()
- self.clean_project_dir()
-
- LOG.info(_("Finished step {!r}.").format('project-dir'))
- if self.stop_at_step == 'project-dir':
- raise AbortExecution('project-dir')
-
- # -------------------------------------------------------------------------·
- def exec_tf_files(self):
-
- if self.stop_at_step == 'tf-files':
- self.incr_verbosity()
-
- self.create_terraform_files()
-
- LOG.info(_("Finished step {!r}.").format('tf-files'))
- if self.stop_at_step == 'tf-files':
- raise AbortExecution('tf-files')
-
- # -------------------------------------------------------------------------·
- def exec_vsphere_folders(self):
-
- if self.stop_at_step == 'ensure-vmw-folders':
- self.incr_verbosity()
-
- self.ensure_vsphere_folders()
-
- LOG.info(_("Finished step {!r}.").format('ensure-vmw-folders'))
- if self.stop_at_step == 'ensure-vmw-folders':
- raise AbortExecution('ensure-vmw-folders')
-
- # -------------------------------------------------------------------------·
- def read_yaml_data(self, yaml_file):
-
- LOG.info(_("Reading YAML file {!r} ...").format(str(yaml_file)))
-
- open_opts = {}
- if six.PY3 and self.config.encoding:
- open_opts['encoding'] = self.config.encoding
- open_opts['errors'] = 'surrogateescape'
-
- try:
- with open(str(yaml_file), 'r', **open_opts) as fh:
- self.yaml_data = yaml.full_load(fh)
- except yaml.YAMLError as e:
- msg = _("Error in YAML file {f!r}: {e}.").format(
- f=str(yaml_file), e=e)
- if hasattr(e, 'problem_mark'):
- mark = e.problem_mark
- msg += " " + _("Error position: {li}:{c}").format(
- li=mark.line + 1, c=mark.column + 1)
- raise ExpectedHandlerError(msg)
-
- if self.verbose > 2:
- LOG.debug(_("Read data from YAML file:") + "\n" + pp(self.yaml_data))
-
- if not isinstance(self.yaml_data, dict):
- msg = _(
- "Data read from YAML file {f!r} are not a dictionary, "
- "but a {c} object instead.").format(
- f=str(yaml_file), c=self.yaml_data.__class__.__name__)
- raise ExpectedHandlerError(msg)
-
- for key in self.yaml_data.keys():
- if key.lower() == 'simulate':
- self.simulate = to_bool(self.yaml_data[key])
-
- # -------------------------------------------------------------------------·
- def eval_yaml_data(self):
-
- self.vm_names = []
-
- # Searching for default VM definition
- LOG.debug(_("Searching for default VM definition ..."))
- for key in self.yaml_data.keys():
-
- if self.re_default.match(key):
- vm = self._eval_tpl_vm(name='Default VM', vm_def=self.yaml_data[key])
- if vm:
- self.default_vm = vm
-
- # Searching for VM definitions
- LOG.debug(_("Searching for VM definitions ..."))
- for key in self.yaml_data.keys():
- if self.re_vm_key.match(key):
- for vm_def in self.yaml_data[key]:
- vm = self._eval_vm(vm_def, template_vm=self.default_vm)
- if vm:
- self.vms.append(vm)
-
- # Searching for groups
- for key in self.yaml_data.keys():
- if self.re_group.match(key):
- self._eval_vm_groups(self.yaml_data[key], template_vm=self.default_vm, depth=1)
-
- if self.verbose > 2:
- vm_list = []
- for vm in self.vms:
- vm_list.append(vm.as_dict())
- LOG.debug(_("Evaluated VMs:") + "\n" + pp(vm_list))
-
- # -------------------------------------------------------------------------·
- def _eval_tpl_vm(self, name, vm_def, template_vm=None):
-
- try:
- vm = TerraformVm.from_def(
- vm_def, name=name, is_template=True, template_vm=template_vm, appname=self.appname,
- verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
- force=self.force, terminal_has_colors=self.terminal_has_colors)
- except Exception as e:
- if self.verbose > 2:
- self.handle_error(str(e), e.__class__.__name__, True)
- else:
- LOG.error(_("{c} in evaluating template VM: {e}").format(
- c=e.__class__.__name__, e=e))
- self.eval_errors += 1
- return None
-
- if self.verbose > 2:
- LOG.debug(_(
- "Defined Terraform Template VM {n!r}:").format(
- n=vm.name) + "\n" + pp(vm.as_dict()))
-
- return vm
-
- # -------------------------------------------------------------------------·
- def _eval_vm(self, vm_def, template_vm=None):
-
- try:
- vm = TerraformVm.from_def(
- vm_def, is_template=False, template_vm=template_vm, appname=self.appname,
- verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
- force=self.force, terminal_has_colors=self.terminal_has_colors)
- except Exception as e:
- if self.verbose > 2:
- self.handle_error(str(e), e.__class__.__name__, True)
- else:
- LOG.error(_("{c} in evaluating VM: {e}").format(c=e.__class__.__name__, e=e))
- self.eval_errors += 1
- return None
-
- if self.verbose > 3:
- LOG.debug(_(
- "Defined Terraform-VM {n!r}:").format(n=vm.name) + "\n" + pp(vm.as_dict()))
-
- if vm.name in self.vm_names:
- LOG.error(_("VM {!r} is already defined.").format(vm.name))
- self.eval_errors += 1
- return None
-
- return vm
-
- # -------------------------------------------------------------------------·
- def _eval_vm_groups(self, groups_def, template_vm=None, depth=1):
-
- if not isinstance(groups_def, list):
- msg = _("Group definition list is not a list:") + "\n" + pp(groups_def)
- LOG.error(msg)
- self.eval_errors += 1
- return
-
- if depth >= self.max_groups_depth:
- LOG.warn(_("Maximum recursion depth for VM groups of {} reached.").format(depth))
- return
-
- if self.verbose > 2:
- LOG.debug(_("Evaluating group list:") + "\n" + pp(groups_def))
- if self.verbose > 3:
- LOG.debug(_("Used template: {!r}").format(template_vm))
-
- for group_def in groups_def:
- self._eval_vm_group(group_def, template_vm=template_vm, depth=depth)
-
- # -------------------------------------------------------------------------·
- def _eval_vm_group(self, group_def, template_vm=None, depth=1):
-
- if not isinstance(group_def, dict):
- msg = _("VM definition is not a dictionary:") + "\n" + pp(group_def)
- LOG.error(msg)
- self.eval_errors += 1
- return
-
- group_template = template_vm
- group_name = None
-
- # Searching for the group name ..."
- for key in group_def.keys():
- if self.re_group_name.match(key) and str(group_def[key]).strip():
- group_name = str(group_def[key]).strip()
-
- if not group_name:
- LOG.error(_("No group name defined."))
- return
-
- # Searching for group default VM definition
- LOG.debug(_("Searching for group default VM definition in group {!r} ...").format(
- group_name))
- for key in group_def.keys():
-
- if self.re_default.match(key):
- vm_name = 'Default VM group {!r}'.format(group_name)
- vm = self._eval_tpl_vm(
- name=vm_name, vm_def=group_def[key], template_vm=template_vm)
- if vm:
- group_template = vm
- break
-
- n = None
- if group_template:
- n = group_template.name
- LOG.debug(_("Used template for creating VMs in group {g!r}: {n!r}").format(
- g=group_name, n=n))
- if self.verbose > 3:
- LOG.debug(_("Used template structure:") + "\n" + pp(group_template.as_dict()))
-
- # Searching for VM definitions
- LOG.debug(_("Searching for VM definitions in group {!r} ...").format(group_name))
- for key in group_def.keys():
- if self.re_vm_key.match(key):
- for vm_def in group_def[key]:
- vm = self._eval_vm(vm_def, template_vm=group_template)
- if vm:
- self.vms.append(vm)
-
- # Searching for nested groups
- for key in group_def.keys():
- if self.re_group.match(key):
- self._eval_vm_groups(
- group_def[key], template_vm=group_template, depth=depth + 1)
-
- # -------------------------------------------------------------------------·
- def explore_vsphere_templates(self):
-
- LOG.info(_("Exploring all vSphere templates ..."))
-
- for vname in self.vsphere:
-
- if vname not in self.vsphere_templates:
- self.vsphere_templates[vname] = {}
-
- self.config.vsphere[vname].used_templates = []
-
- for vm in self.vms:
- template_name = vm.vm_template
- if template_name:
- if template_name not in self.config.vsphere[vname].used_templates:
- self.config.vsphere[vname].used_templates.append(template_name)
- else:
- LOG.error(_("VM {!r} has not template defined.").format(vm.name))
- self.eval_errors += 1
-
- msg = _("All {} VSPhere templates to explore:").format(vname)
- msg += "\n" + pp(self.config.vsphere[vname].used_templates)
- LOG.debug(msg)
-
- for template_name in self.config.vsphere[vname].used_templates:
-
- if template_name in self.vsphere_templates[vname]:
- continue
-
- LOG.debug(_("Searching for template {t!r} in VSPhere {v!r} ...").format(
- t=template_name, v=vname))
- re_vm = re.compile(r'^' + re.escape(template_name) + r'$', re.IGNORECASE)
- vm_list = self.vsphere[vname].get_vms(re_vm, as_obj=True, stop_at_found=True)
- if vm_list:
- vm = vm_list[0]
- tname = vm.name.lower()
- if tname not in self.vsphere_templates[vname]:
- self.vsphere_templates[vname][template_name] = vm
- else:
- LOG.error(_("Template {t!r} not found in VSPhere {v!r}.").format(
- t=template_name, v=vname))
- self.eval_errors += 1
-
- if self.verbose > 2:
- msg = _("All explored vSphere templates:")
- out_dict = {}
- for vname in self.vsphere_templates:
- out_dict[vname] = {}
- for tname in self.vsphere_templates[vname]:
- out_dict[vname][tname] = self.vsphere_templates[vname][tname].as_dict()
- msg += "\n" + pp(out_dict)
- LOG.debug(msg)
-
- # -------------------------------------------------------------------------·
- def validate_clusters(self):
-
- print()
- LOG.info(_("Validating existence of computing clusters of the VMs."))
-
- clusters = {}
-
- for vm in self.vms:
-
- vname = vm.vsphere
- if vname not in clusters:
- clusters[vname] = {}
-
- if vm.cluster in clusters:
- clusters[vname][vm.cluster].append(vm.name)
- else:
- clusters[vname][vm.cluster] = [vm.name]
-
- for vname in clusters.keys():
- for cluster in clusters[vname].keys():
-
- vms = clusters[vname][cluster]
-
- cl = str(cluster)
- LOG.debug(_(
- "Checking existence of computing cluster {c!r} in VSPhere {v!r} ...").format(
- c=cl, v=vname))
-
- vsphere = self.vsphere[vname]
- vmw_cluster = vsphere.get_cluster_by_name(cl)
- if vmw_cluster:
- if self.verbose > 1:
- LOG.debug(_(
- "Found computing cluster {cl!r} in VSPhere {v!r} (defined for VMs "
- "{vms}).").format(cl=vmw_cluster.name, v=vname, vms=pp(vms)))
- else:
- LOG.error(_(
- "Computing cluster {cl!r} (defined for VMs {vms}) in VSPhere {v!r} not "
- "found.").format(cl=cl, vms=pp(vms), v=vname))
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------·
- def validate_vms(self):
-
- print()
- LOG.info(_("Validating existence of VMs in VMWare."))
- vms2perform = []
-
- for vm in sorted(self.vms, key=attrgetter('tf_name')):
-
- print(" * {} ".format(vm.fqdn), end='', flush=True)
- if self.verbose:
- print()
- vs_name = vm.vsphere
- vsphere = self.vsphere[vs_name]
-
- vm_paths = None
- if vs_name in self.all_vms:
- if vm.fqdn in self.all_vms[vs_name]:
- vm_paths = self.all_vms[vs_name][vm.fqdn]
-
- if vm_paths:
- msg = _('[{m}] - VM is already existing in VSphere {v!r}, path {p!r}.').format(
- m=self.colored('Existing', 'YELLOW'), v=vs_name, p=pp(vm_paths))
- print(msg, end='', flush=True)
- if self.verbose:
- print()
-
- vm_info = vsphere.get_vm(vm.fqdn, vsphere_name=vs_name, as_obj=True)
- if self.verbose > 2:
- LOG.debug(_("VM info:") + "\n" + pp(vm_info.as_dict(bare=True)))
- ds = vm_info.config_path_storage
- LOG.debug(_("Datastore of VM {vm!r}: {ds!r}.").format(vm=vm.name, ds=ds))
- vm.datastore = ds
- vm.already_existing = True
- self.existing_vms.append(vm_info)
-
- else:
-
- print('[{}] '.format(self.colored('OK', 'GREEN')), end='', flush=True)
- vm.already_existing = False
-
- vms2perform.append(vm)
- print()
-
- self.vms = vms2perform
-
- print()
-
- if not len(self.vms):
- print()
- print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
- print(self.colored('* ' + _('CAUTION!'), ('BOLD', 'RED')), file=sys.stderr)
- print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
- print()
- print(
- self.colored(_('Did not found any VM to deploy!'), ('BOLD', 'RED')),
- file=sys.stderr)
- print()
- raise ExpectedHandlerError(_("No VMs to deploy"))
-
- # -------------------------------------------------------------------------·
- def validate_storages(self):
-
- self._validate_ds_clusters()
- self._validate_datastores()
-
- if self.verbose:
- if self.used_dc_clusters:
- out_lines = []
- for vs_name in self.used_dc_clusters:
- for cluster in self.used_dc_clusters[vs_name]:
- out_lines.append(' * VSphere {v!r}: {c}'.format(
- v=vs_name, c=cluster))
- out = '\n'.join(out_lines)
- LOG.debug(_("Used datastore clusters:") + "\n" + out)
- else:
- LOG.debug(_("No datastore clusters are used."))
- if self.used_datastores:
- out_lines = []
- for vs_name in self.used_datastores:
- for ds in self.used_datastores[vs_name]:
- out_lines.append(' * VSphere {v!r}: {ds}'.format(v=vs_name, ds=ds))
- out = '\n'.join(out_lines)
- LOG.debug(_("Used datastors:") + "\n" + out)
- else:
- LOG.debug(_("No datastores are used."))
-
- # -------------------------------------------------------------------------·
- def _validate_ds_clusters(self):
-
- LOG.info(_("Validating given datastore clusters of VMs ..."))
-
- for vm in self.vms:
-
- if not vm.ds_cluster:
- continue
-
- self._validate_dscluster_vm(vm)
-
- # -------------------------------------------------------------------------·
- def _validate_dscluster_vm(self, vm):
-
- needed_gb = 0.0
- if not vm.already_existing:
- for unit_number in vm.disks.keys():
- disk = vm.disks[unit_number]
- needed_gb += disk.size_gb
-
- vs_name = vm.vsphere
- vsphere = self.vsphere[vs_name]
-
- found = False
- for cluster_name in vsphere.ds_clusters.keys():
- if cluster_name.lower() == vm.ds_cluster.lower():
- if self.verbose > 2:
- LOG.debug(_(
- "Found datastore cluster {c!r} in VSphere {v!r} for VM {n!r}.").format(
- n=vm.name, v=vs_name, c=vm.ds_cluster))
- if vm.ds_cluster != cluster_name:
- LOG.debug(_("Setting datastore cluster for VM {n!r} to {c!r} ...").format(
- n=vm.name, c=cluster_name))
- vm.ds_cluster = cluster_name
- ds_cluster = vsphere.ds_clusters[cluster_name]
- if self.verbose > 2:
- LOG.debug(_(
- "Free space of cluster {c!r} in VSphere {v!r} before provisioning: "
- "{a:0.1f} GiB.").format(
- c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
- if ds_cluster.avail_space_gb < needed_gb:
- LOG.error(_(
- "Datastore cluster {d!r} in VSphere {v!r} has not sufficient space for "
- "storage of VM {vm!r} (needed {n:0.1f} GiB, available {a:0.1f} "
- "GiB).").format(
- d=cluster_name, v=vs_name, vm=vm.name, n=needed_gb,
- a=ds_cluster.avail_space_gb))
- self.eval_errors += 1
- else:
- ds_cluster.calculated_usage += needed_gb
- if self.verbose > 1:
- LOG.debug(_(
- "Free space in cluster {c!r} in VSphere {v!r} after provisioning: "
- "{a:0.1f} GiB.").format(
- c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
- found = True
- if vs_name not in self.used_dc_clusters:
- self.used_dc_clusters[vs_name] = []
- if cluster_name not in self.used_dc_clusters[vs_name]:
- self.used_dc_clusters[vs_name].append(cluster_name)
- break
-
- if not found:
- LOG.error(_("Datastore cluster {c!r} of VM {n!r} not found in VSphere {v!r}.").format(
- n=vm.name, c=vm.ds_cluster, v=vs_name))
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------·
- def _validate_datastores(self):
-
- LOG.info(_("Validating given datastores of VMs and assign failing ..."))
-
- for vm in self.vms:
-
- if vm.ds_cluster:
- if vm.datastore:
- LOG.debug(_("Removing defined datastore {d!r} for VM {n!r} ...").format(
- d=vm.datastore, n=vm.name))
- vm.datastore = None
- continue
-
- self._validate_ds_vm(vm)
-
- # -------------------------------------------------------------------------·
- def _validate_ds_vm(self, vm):
-
- needed_gb = 0.0
- if not vm.already_existing:
- for unit_number in vm.disks.keys():
- disk = vm.disks[unit_number]
- needed_gb += disk.size_gb
-
- vs_name = vm.vsphere
- vsphere = self.vsphere[vs_name]
-
- vm_cluster = None
- for cluster in vsphere.clusters:
- if cluster.name.lower() == vm.cluster.lower():
- vm_cluster = cluster
- break
- if not vm_cluster:
- msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
- c=vm.cluster, n=vm.name)
- raise HandlerError(msg)
-
- if vm.datastore:
- found = False
- found_ds_name = None
- for ds_name in vsphere.datastores:
- if ds_name.lower() == vm.datastore.lower():
- if self.verbose > 2:
- LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
- n=vm.name, d=vm.datastore, v=vs_name))
- if ds_name not in vm_cluster.datastores:
- LOG.warn(_("Datastore {d!r} not available in cluster {c!r}.").format(
- d=ds_name, c=vm.cluster))
- break
- if vm.datastore != ds_name:
- LOG.debug(_("Setting datastore for VM {n!r} to {d!r} ...").format(
- n=vm.name, d=ds_name))
- vm.datastore = ds_name
- ds = vsphere.datastores[ds_name]
- if ds.avail_space_gb < needed_gb:
- LOG.error(_(
- "Datastore {d!r} has not sufficient space for storage of VM "
- "{v!r} (needed {n:0.1f} GiB, available {a:0.1f} GiB).").format(
- d=ds_name, v=vm.name, n=needed_gb, a=ds.avail_space_gb))
- self.eval_errors += 1
- else:
- ds.calculated_usage += needed_gb
- found = True
- found_ds_name = ds_name
- break
- if not found:
- LOG.error(_("Datastore {d!r} of VM {n!r} not found in VSPhere {v!r}.").format(
- n=vm.name, d=vm.datastore, v=vs_name))
- self.eval_errors += 1
- if vs_name not in self.used_datastores:
- self.used_datastores[vs_name] = []
- if found_ds_name not in self.used_datastores[vs_name]:
- self.used_datastores[vs_name].append(found_ds_name)
- return
-
- ds_name = vsphere.datastores.find_ds(
- needed_gb, vm.ds_type, use_ds=copy.copy(vm_cluster.datastores), no_k8s=True)
- if ds_name:
- LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
- d=ds_name, n=vm.name, v=vs_name))
- vm.datastore = ds_name
- if vs_name not in self.used_datastores:
- self.used_datastores[vs_name] = []
- if ds_name not in self.used_datastores[vs_name]:
- self.used_datastores[vs_name].append(ds_name)
- else:
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------·
- def validate_interfaces(self):
-
- LOG.info(_("Validating interfaces of VMs and assign networks ..."))
- for vm in self.vms:
- self._validate_interfaces_vm(vm)
-
- if self.verbose > 2:
- LOG.debug(_("Validated FQDNs:") + "\n" + pp(self.fqdns))
- LOG.debug(_("Validated Addresses:") + "\n" + pp(self.addresses))
-
- if self.verbose:
-
- lines = []
- for vs_name in self.used_networks:
- for nw in self.used_networks[vs_name]:
- lines.append(' * VSphere {v!r}: {n}'.format(
- v=vs_name, n=nw))
- out = '\n'.join(lines)
- LOG.debug(_("Used networks:") + "\n" + out)
-
- lines = []
- for pair in self.dns_mapping['forward']:
- line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
- lines.append(line)
- LOG.debug(_("Used forward DNS entries:") + "\n" + '\n'.join(lines))
-
- lines = []
- for pair in self.dns_mapping['reverse']:
- line = ' * {a!r} => {n!r}'.format(n=pair[1], a=str(pair[0]))
- lines.append(line)
- LOG.debug(_("Used reverse DNS entries:") + "\n" + '\n'.join(lines))
-
- # -------------------------------------------------------------------------·
- def _validate_interfaces_vm(self, vm):
-
- vs_name = vm.vsphere
- LOG.debug(_("Checking interfaces of VM {n!r} in VSPhere {v!r} ...").format(
- n=vm.name, v=vs_name))
-
- if not vm.interfaces:
- LOG.error(_("No interfaces defined for VM {!r}.").format(vm.name))
- self.eval_errors += 1
- return
-
- vsphere = self.vsphere[vs_name]
-
- vm_cluster = None
- for cluster in vsphere.clusters:
- if cluster.name.lower() == vm.cluster.lower():
- vm_cluster = cluster
- break
- if not vm_cluster:
- msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
- c=vm.cluster, n=vm.name)
- raise HandlerError(msg)
-
- i = -1
- for iface in vm.interfaces:
- i += 1
- self._validate_interface_of_vm(
- vm_name=vm.name, iface=iface, vs_name=vs_name, vm_cluster=vm_cluster, i=i)
-
- # -------------------------------------------------------------------------·
- def _validate_interface_of_vm(self, vm_name, iface, vs_name, vm_cluster, i=0):
-
- vsphere = self.vsphere[vs_name]
-
- if self.verbose > 1:
- LOG.debug(_("Checking interface {i} of VM {n!r} ...").format(
- i=i, n=vm_name))
-
- if not iface.address:
- LOG.error(_("Interface {i} of VM {n!r} has no defined address.").format(
- i=i, n=vm_name))
- self.eval_errors += 1
- return
-
- if not iface.fqdn:
- LOG.error(_("Interface {i} of VM {n!r} has no defined FQDN.").format(
- i=i, n=vm_name))
- self.eval_errors += 1
- return
-
- if iface.fqdn in self.fqdns:
- LOG.error(_(
- "FQDN {f!r} already defined for VM {va!r}({ia}) should be set "
- "for interface {ib} of {vb!r}.").format(
- f=iface.fqdn, va=self.fqdns[iface.fqdn][0], ia=self.fqdns[iface.fqdn][1],
- ib=i, vb=vm_name))
- self.eval_errors += 1
- return
-
- self.fqdns[iface.fqdn] = (vm_name, i)
-
- if iface.address_v4:
- if iface.address_v4 in self.addresses:
- LOG.error(_(
- "IPv4 address {a} already defined for VM {va!r}({ia}) should be set "
- "for interface {ib} of {vb!r}.").format(
- a=iface.address_v4, va=self.fqdns[iface.fqdn][0],
- ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
- self.eval_errors += 1
- return
- self.addresses[iface.address_v4] = (vm_name, i)
- pair = (iface.fqdn, iface.address_v4)
- self.dns_mapping['forward'].append(pair)
- pair = (iface.address_v4, iface.fqdn)
- self.dns_mapping['reverse'].append(pair)
-
- if iface.address_v6:
- if iface.address_v6 in self.addresses:
- LOG.error(_(
- "IPv6 address {a} already defined for VM {va!r}({ia}) should be set "
- "for interface {ib} of {vb!r}.").format(
- a=iface.address_v6, va=self.fqdns[iface.fqdn][0],
- ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
- self.eval_errors += 1
- return
- self.addresses[iface.address_v6] = (vm_name, i)
- pair = (iface.fqdn, iface.address_v6)
- self.dns_mapping['forward'].append(pair)
- pair = (iface.address_v6, iface.fqdn)
- self.dns_mapping['reverse'].append(pair)
-
- network = iface.network
- if network:
- if network not in vsphere.networks:
- LOG.error(_(
- "Could not find network {n!r} for VM {v!r}, interface {i}.").format(
- n=network, v=vm_name, i=i))
- self.eval_errors += 1
- return
- else:
- network = vsphere.networks.get_network_for_ip(
- iface.address_v4, iface.address_v6)
- if not network:
- self.eval_errors += 1
- return
- iface.network = network
- LOG.debug(_("Found network {n!r} for interface {i} of VM {v!r}.").format(
- n=network, i=i, v=vm_name))
-
- if network not in vm_cluster.networks:
- LOG.error(_(
- "Network {n!r} for interface {i} of VM {v!r} not available in "
- "cluster {c!r}.").format(n=network, v=vm_name, i=i, c=vm_cluster.name))
- self.eval_errors += 1
- return
- LOG.debug(_("Network {n!r} is available in cluster {c!r}.").format(
- n=network, c=vm_cluster.name))
-
- net = vsphere.networks[network]
- if not iface.gateway:
- LOG.debug(_("Setting gateway of interface {i} of VM {v!r} to {g}.").format(
- i=i, v=vm_name, g=net.gateway))
- iface.gateway = net.gateway
-
- if net.network:
- if net.network.version == 4:
- if iface.netmask_v4 is None:
- iface.netmask_v4 = net.network.prefixlen
- else:
- if iface.netmask_v6 is None:
- iface.netmask_v6 = net.network.prefixlen
-
- if vs_name not in self.used_networks:
- self.used_networks[vs_name] = []
- if network not in self.used_networks[vs_name]:
- self.used_networks[vs_name].append(network)
-
- # -------------------------------------------------------------------------·
- def validate_dns_mappings(self):
-
- LOG.info(_("Validating DNS mappings ..."))
- self._validate_forward_dns_mappings()
- self._validate_reverse_dns_mappings()
-
- lines = []
- if self.dns_mappings2create['forward']:
- for pair in self.dns_mappings2create['forward']:
- line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
- lines.append(line)
- else:
- lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
- LOG.info(_("Forward DNS entries to create:") + "\n" + '\n'.join(lines))
-
- lines = []
- if self.dns_mappings2create['reverse']:
- for pair in self.dns_mappings2create['reverse']:
- line = ' * {r} ({a!r}) => {n!r}'.format(
- r=pair[0].reverse_pointer, n=pair[1], a=str(pair[0]))
- lines.append(line)
- else:
- lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
- LOG.info(_("Reverse DNS entries to create:") + "\n" + '\n'.join(lines))
-
- # -------------------------------------------------------------------------·
- def _validate_forward_dns_mappings(self):
-
- if not self.dns_mapping['forward']:
- return
-
- LOG.debug(_("Validating forward DNS mappings ..."))
-
- for (fqdn, address) in self.dns_mapping['forward']:
-
- if self.verbose > 1:
- LOG.debug(_("Validating {f!r} => {a!r}.").format(f=fqdn, a=str(address)))
-
- results_v4 = []
- results_v6 = []
-
- try:
- addr_infos = socket.getaddrinfo(fqdn, 80)
- except socket.gaierror:
- addr_infos = []
-
- for addr_info in addr_infos:
- if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
- continue
- addr = ipaddress.ip_address(addr_info[4][0])
- if addr.version == 4:
- if addr not in results_v4:
- results_v4.append(addr)
- else:
- if addr not in results_v6:
- results_v6.append(addr)
- if self.verbose > 2:
- if results_v4 or results_v6:
- lines = []
- for addr in results_v4 + results_v6:
- lines.append(' * {}'.format(str(addr)))
- out = '\n'.join(lines)
- LOG.debug(_("Found existing addresses for {f!r}:").format(f=fqdn) + '\n' + out)
- else:
- LOG.debug(_("Did not found existing addresses for {!r}.").format(fqdn))
-
- if address.version == 4:
- if not results_v4:
- self.dns_mappings2create['forward'].append((fqdn, address))
- continue
- if address in results_v4:
- LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
- f=fqdn, a=str(address)))
- continue
- else:
- if not results_v6:
- self.dns_mappings2create['forward'].append((fqdn, address))
- continue
- if address in results_v6:
- LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
- f=fqdn, a=str(address)))
- continue
-
- alist = '\n'.join(map(lambda x: ' * {}'.format(str(x)), results_v4 + results_v6))
- msg = (_(
- "FQDN {f!r} has already existing addresses, "
- "but none of them are {a!r}:").format(f=fqdn, a=str(address)) + "\n" + alist)
- if self.ignore_existing_dns:
- LOG.warn(msg)
- self.dns_mappings2create['forward'].append((fqdn, address))
- else:
- LOG.error(msg)
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------·
- def _validate_reverse_dns_mappings(self):
-
- if not self.dns_mapping['reverse']:
- return
-
- LOG.debug(_("Validating reverse DNS mappings ..."))
-
- for (address, fqdn) in self.dns_mapping['reverse']:
-
- if self.verbose > 1:
- LOG.debug(_("Validating {a!r} => {f!r}.").format(f=fqdn, a=str(address)))
-
- try:
- info = socket.gethostbyaddr(str(address))
- except socket.herror:
- info = []
- if self.verbose > 2:
- LOG.debug(_("Got reverse info:") + "\n" + str(info))
- ptr = None
- if info:
- ptr = info[0]
-
- if not ptr:
- if self.verbose > 1:
- LOG.debug(_("Did not found reverse pointer for {!r}.").format(str(address)))
- self.dns_mappings2create['reverse'].append((address, fqdn))
- continue
-
- ptr = RE_DOT_AT_END.sub('', ptr).lower()
- fqdn_canon = RE_DOT_AT_END.sub('', fqdn).lower()
-
- if self.verbose > 1:
- LOG.debug(_("Found reverse pointer {a!r} => {f!r}.").format(f=ptr, a=str(address)))
- if fqdn_canon == ptr:
- if self.verbose > 1:
- LOG.debug(_("Reverse pointer for {!r} was already existing.").format(
- str(address)))
- continue
-
- LOG.error(_("Address {a!r} has already an existing reverse pointer to {p!r}.").format(
- a=str(address), p=ptr))
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------·
- def get_tf_name_network(self, net_name, *args):
-
- default = None
- has_default = False
- if len(args):
- if len(args) > 1:
- msg = ngettext(
- "Method {c}.{m} expected at most one argument, got {n}.",
- "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
- c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
- raise TypeError(msg)
- default = args[0]
- has_default = True
-
- if net_name in self.vsphere.network_mapping:
- return self.vsphere.network_mapping[net_name]
- if has_default:
- return default
- raise KeyError(_("Did not found network {!r}.").format(net_name))
-
- # --------------------------------------------------------------------------
- def get_tf_name_ds_cluster(self, dsc_name, *args):
-
- default = None
- has_default = False
- if len(args):
- if len(args) > 1:
- msg = ngettext(
- "Method {c}.{m} expected at most one argument, got {n}.",
- "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
- c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
- raise TypeError(msg)
- default = args[0]
- has_default = True
-
- if dsc_name in self.vsphere.ds_cluster_mapping:
- return self.vsphere.ds_cluster_mapping[dsc_name]
- if has_default:
- return default
- raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
-
- # --------------------------------------------------------------------------
- def get_tf_name_datastore(self, ds_name, *args):
-
- default = None
- has_default = False
- if len(args):
- if len(args) > 1:
- msg = ngettext(
- "Method {c}.{m} expected at most one argument, got {n}.",
- "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
- c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
- raise TypeError(msg)
- default = args[0]
- has_default = True
-
- if ds_name in self.vsphere.ds_mapping:
- return self.vsphere.ds_mapping[ds_name]
- if has_default:
- return default
- raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
-
- # --------------------------------------------------------------------------
- def perform_dns(self):
-
- if self.config.no_pdns:
- LOG.debug(_("Power DNS actions are not eceuted."))
- return
-
- print()
- LOG.info(_("Performing DNS actions ..."))
- print()
-
- # TODO: Check for simulate and mappings to create
-
- errors = 0
-
- for (fqdn, address) in self.dns_mappings2create['forward']:
- if not self._perform_dns_forward(fqdn, address):
- errors += 1
-
- for (address, fqdn) in self.dns_mappings2create['reverse']:
- if not self._perform_dns_reverse(address, fqdn):
- errors += 1
-
- if errors:
- msg = ngettext(
- "There was one error in creating DNS mappings.",
- "There were {n} errors in creating DNS mappings.", errors).format(n=errors)
- raise ExpectedHandlerError(msg)
- else:
- if self.verbose > 1:
- LOG.debug(_("No errors in creating DNS mappings."))
-
- print()
-
- for zone_name in self.updated_zones:
- self._increase_zone_serial(zone_name)
-
- # --------------------------------------------------------------------------
- def _increase_zone_serial(self, zone_name):
-
- LOG.info(_("Increasing serial of zone {!r}.").format(zone_name))
-
- zone = self.pdns.zones[zone_name]
- zone.increase_serial()
- zone.notify()
-
- # --------------------------------------------------------------------------
- def _perform_dns_forward(self, fqdn, address):
-
- record_type = 'A'
- addr_obj = ipaddress.ip_address(address)
- if addr_obj.version == 6:
- record_type = 'AAAA'
-
- canon_fqdn = self.pdns.canon_name(fqdn)
-
- zone_name = self.pdns.get_zone_for_item(canon_fqdn, is_fqdn=True)
- if zone_name:
- if self.verbose > 1:
- LOG.debug(_("Got zone {z!r} for FQDN {f!r}.").format(
- z=zone_name, f=canon_fqdn))
- else:
- LOG.error(_("Did not found zone to insert {t}-record for {f!r}.").format(
- t=record_type, f=fqdn))
- return False
-
- zone = self.pdns.zones[zone_name]
- if addr_obj.is_private:
- zone.add_address_record(
- fqdn, address, set_ptr=False, comment='local',
- account=self.config.pdns_comment_account, append_comments=True)
- else:
- zone.add_address_record(fqdn, address, set_ptr=False)
- if zone_name not in self.updated_zones:
- self.updated_zones.append(zone_name)
- return True
-
- # --------------------------------------------------------------------------
- def _perform_dns_reverse(self, address, fqdn):
-
- LOG.debug(_("Trying to create PTR-record {a!r} => {f!r}.").format(
- f=fqdn, a=str(address)))
-
- pointer = self.pdns.canon_name(address.reverse_pointer)
- if self.verbose > 1:
- LOG.debug(_("PTR of {a!r}: {p!r}.").format(a=str(address), p=pointer))
-
- zone_name = self.pdns.get_zone_for_item(pointer, is_fqdn=True)
- if zone_name:
- if self.verbose > 1:
- LOG.debug(_("Got reverse zone {z!r} for address {a!r}.").format(
- z=zone_name, a=str(address)))
- else:
- LOG.warn(_("Did not found zone to insert PTR-record {p!r} ({a}).").format(
- p=pointer, a=str(address)))
- return True
-
- zone = self.pdns.zones[zone_name]
- zone.add_ptr_record(pointer, fqdn)
- if zone_name not in self.updated_zones:
- self.updated_zones.append(zone_name)
- return True
-
- # --------------------------------------------------------------------------
- def ensure_project_dir(self):
-
- print()
- LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))
-
- if self.project_dir.exists():
- if self.project_dir.is_dir():
- LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
- else:
- msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
- raise ExpectedHandlerError(msg)
- else:
- LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
- if self.simulate:
- LOG.debug(_("Simulation mode - directory will not be created."))
- else:
- try:
- os.makedirs(str(self.project_dir), mode=0o755)
- except PermissionError as e:
- msg = _("Could not create directory {d!r}: {e}").format(
- d=str(self.project_dir), e=e)
- raise ExpectedHandlerError(msg)
-
- if not self.project_dir.exists():
- if self.simulate:
- return
- else:
- msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
- raise ExpectedHandlerError(msg)
-
- if not os.access(str(self.project_dir), os.W_OK):
- msg = _("No write access to directory {!r}.").format(str(self.project_dir))
- raise ExpectedHandlerError(msg)
-
- LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
- os.chdir(str(self.project_dir))
-
- self.script_dir_rel = Path(os.path.relpath(
- str(self.script_dir), str(self.project_dir)))
- LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))
-
- if self.verbose > 1:
- LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
- str(self.project_dir)))
-
- tf_path = self.project_dir / '.terraform'
- if tf_path.exists() and not tf_path.is_dir():
- msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
- d=str(self.project_dir), w='.terraform')
- raise ExpectedHandlerError(msg)
-
- state_path = self.project_dir / 'terraform.tfstate'
- if state_path.exists() and not state_path.is_file():
- msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
- d=str(self.project_dir), w='terraform.tfstate')
- raise ExpectedHandlerError(msg)
-
- if tf_path.is_dir() and state_path.is_file():
- msg = _(
- "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
- "Is this an old terraform project?").format(
- d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
- raise ExpectedHandlerError(msg)
-
- # --------------------------------------------------------------------------
- def clean_project_dir(self):
-
- print()
- LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
-
- files = []
- for path in self.project_dir.glob('*'):
- files.append(path)
- for path in self.project_dir.glob('.terraform'):
- files.append(path)
-
- if not files:
- LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
- return
- for pfile in files:
- if pfile.exists():
- if pfile.is_dir():
- LOG.debug(_("Removing recursive directory {!r} ...").format(str(pfile)))
- if not self.simulate:
- shutil.rmtree(str(pfile))
- else:
- LOG.debug(_("Removing {!r} ...").format(str(pfile)))
- if not self.simulate:
- pfile.unlink()
-
- # --------------------------------------------------------------------------
- def create_terraform_files(self):
-
- print()
- print()
- msg = _("Creating all necessary files for terraform.")
- ll = 6
- if len(msg) > ll:
- ll = len(msg)
- print(self.colored('#' * (ll + 6), 'AQUA'))
- line = self.colored('#', 'AQUA') + ' '
- line += self.colored(msg.center(ll), 'YELLOW')
- line += ' ' + self.colored('#', 'AQUA')
- print(line)
- print(self.colored('#' * (ll + 6), 'AQUA'))
- print()
- print()
-
- self.create_varfiles()
- self.create_dcfile()
- self.create_backend_file()
- self.create_instance_files()
-
- # --------------------------------------------------------------------------
- def create_varfiles(self):
-
- LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
-
- vs_name = None
- for vs_name in self.vsphere.keys():
- break
- if self.verbose > 1:
- LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
- w='dcfile', v=vs_name))
-
- vs_host = self.config.vsphere[vs_name].host
- vs_user = self.config.vsphere[vs_name].user
- vs_pwd = self.config.vsphere[vs_name].password
- vs_dc = self.config.vsphere[vs_name].dc
-
- content = textwrap.dedent('''\
- ## filename: terraform.tfvars
- ## This file declares the values for the variables to be used in the instance.tf playbook
-
- #
- # ATTENTION!
- #
- # To avoid annoying questions for password and API key
- # create manually a file 'terraform-private.auto.tfvars"
- # with the following content:
- #
- # vsphere_username = "<USERNAME>"
- # vsphere_userpassword = "<PASSWORD>"
- #
- # with the correct values. This file will not be under GIT control
- #
-
- ''')
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('terraform.tfvars', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('terraform.tfvars', self.std_file_permissions)
-
- # Sensible stuff
- if vs_user or vs_pwd:
- content = '# Private sensible information. Please keep this file secret.\n\n'
- if vs_user:
- content += 'vsphere_username = "{}"\n'.format(vs_user)
- if vs_pwd:
- content += 'vsphere_userpassword = "{}"\n'.format(vs_pwd)
- content += '\n'
-
- LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
-
- # File with variable declarations
- content = textwrap.dedent('''\
- # filename: variables.tf
- # definition of the variables to be used in the play
- # declaration happens in the file terraform.tfvars and private.auto.tfvars
-
- ''')
-
- tpl = textwrap.dedent('''\
- variable "vsphere_vcenter" {{
- default = "{}"
- description = "IP or DNS of the vSphere center."
- type = string
- }}
-
- ''')
- content += tpl.format(vs_host)
-
- tpl = textwrap.dedent('''\
- variable "vsphere_username" {
- description = "vSphere accountname to be used."
- type = string
- }
-
- variable "vsphere_userpassword" {
- description = "Password for vSphere accountname."
- type = string
- }
-
- ''')
- content += tpl
-
- tpl = textwrap.dedent('''\
- variable "vsphere_datacenter" {{
- default = "{dc}"
- description = "Name of the vSphere datacenter to use."
- type = string
- }}
-
- ''')
- content += tpl.format(dc=vs_dc)
-
- tpl = textwrap.dedent('''\
- variable "timezone" {{
- default = "{tz}"
- description = "The global timezone used for VMs"
- type = string
- }}
-
- ''')
- content += tpl.format(tz=self.tz_name)
-
- LOG.debug(_("Creating {!r} ...").format('variables.tf'))
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('variables.tf', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('variables.tf', self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def create_dcfile(self):
-
- vs_name = None
- for vs_name in self.vsphere.keys():
- break
- vsphere = self.vsphere[vs_name]
-
- LOG.debug(_("Creating {!r} ...").format('dc.tf'))
- if self.verbose > 1:
- LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
- w='dcfile', v=vs_name))
-
- content = textwrap.dedent('''\
- # filename: dc.tf
- # Configuring the VMware VSphere Provider and some dependend common used objects
-
- provider "vsphere" {
- vsphere_server = var.vsphere_vcenter
- user = var.vsphere_username
- password = var.vsphere_userpassword
- allow_unverified_ssl = true
- ''')
-
-# if self.min_version_vsphere_provider:
-# content += ' version = ">= {}"\n'.format(
-# str(self.min_version_vsphere_provider))
-
- content += textwrap.dedent('''\
- }
-
- data "vsphere_datacenter" "dc" {
- name = var.vsphere_datacenter
- }
-
- ''')
-
- for cluster in vsphere.clusters:
- tpl = textwrap.dedent('''\
- data "vsphere_resource_pool" "{pv}" {{
- name = "{pn}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(
- pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
-
- if self.used_dc_clusters:
- for dsc_name in sorted(self.used_dc_clusters[vs_name], key=str.lower):
- dsc_tf_name = vsphere.ds_cluster_mapping[dsc_name]
- tpl = textwrap.dedent('''\
- data "vsphere_datastore_cluster" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(tn=dsc_tf_name, n=dsc_name)
-
- if self.used_datastores:
- for ds_name in sorted(self.used_datastores[vs_name], key=str.lower):
- ds_tf_name = vsphere.ds_mapping[ds_name]
- tpl = textwrap.dedent('''\
- data "vsphere_datastore" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(tn=ds_tf_name, n=ds_name)
-
- for net_name in sorted(self.used_networks[vs_name], key=str.lower):
- net_tf_name = vsphere.network_mapping[net_name]
- tpl = textwrap.dedent('''\
- data "vsphere_network" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(n=net_name, tn=net_tf_name)
-
- if self.vsphere_templates:
- for tname in sorted(self.vsphere_templates[vs_name].keys(), key=str.lower):
- tpl_tf_name = self.vsphere_templates[vs_name][tname].tf_name
- tpl = textwrap.dedent('''\
- data "vsphere_virtual_machine" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(tn=tpl_tf_name, n=tname)
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('dc.tf', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('dc.tf', self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def create_backend_file(self):
-
- file_name = 'backend.tf'
- LOG.debug(_("Creating {!r} ...").format(file_name))
-
- tpl = textwrap.dedent('''\
- # Configuration of the backend for storing the terraform status information
- # and the minimum required version of terraform
-
- terraform {{
- backend "consul" {{
- address = "{host}"
- scheme = "{scheme}"
- path = "{prefix}/{project}"
- }}
- ''')
-
- project = self.full_project_name
- if not project:
- project = self.project_name
-
- content = tpl.format(
- host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
- prefix=self.config.tf_backend_path_prefix, project=project)
-
- if self.min_version_terraform:
- content += ' required_version = ">= {}"\n'.format(str(self.min_version_terraform))
- else:
- LOG.warn(_("No minimum version of Terraform defined."))
-
- content += '}\n\n'
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open(file_name, 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod(file_name, self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def create_instance_files(self):
-
- LOG.debug(_("Creating terraform files for VM instances."))
-
- for vm in sorted(self.vms, key=lambda x: x.tf_name):
- self.create_instance_file(vm)
-
- # --------------------------------------------------------------------------
- def create_instance_file(self, vm):
-
- vs_name = vm.vsphere
-
- fname = 'instance.' + vm.name + '.tf'
- LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
- f=fname, n=vm.name))
-
- guest_id = self.config.guest_id
- tpl_vm = None
- if vm.vm_template:
- tpl_vm = self.vsphere_templates[vs_name][vm.vm_template]
- if self.verbose > 3:
- LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
- guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(tpl_vm.tf_name)
- else:
- guest_id = '"' + guest_id + '"'
-
- content = self._create_instfile_general(vm, guest_id, tpl_vm)
-
- i = 0
- for iface in vm.interfaces:
- content += self._create_instfile_if(vm, iface, i, tpl_vm)
- i += 1
-
- for unit_id in sorted(vm.disks.keys()):
- content += self._create_instfile_disk(vm, unit_id)
-
- content += textwrap.indent(textwrap.dedent('''\
- cdrom {
- client_device = "true"
- }
-
- '''), ' ')
-
- content += self._create_instfile_custom(vm, tpl_vm)
-
- if self.verbose > 1:
- LOG.debug(_("Writing {!r}").format(fname))
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open(fname, 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod(fname, self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def _create_instfile_general(self, vm, guest_id, tpl_vm):
-
- vs_name = vm.vsphere
-
- # ## General definitions of VM
- if self.verbose > 1:
- LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
- content = textwrap.dedent('''\
- # Definition of the VM instance {!r}.
-
- ''').format(vm.name)
-
- cluster = self.vsphere[vs_name].get_cluster_by_name(vm.cluster)
- if not cluster:
- msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
- vm.cluster)
- raise RuntimeError(msg)
-
- content += textwrap.dedent('''\
- resource "vsphere_virtual_machine" "{tn}" {{
-
- resource_pool_id = data.vsphere_resource_pool.{pv}.id
- name = "{n}"
- ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
-
- if vm.ds_cluster:
- dsc_tf_name = self.vsphere[vs_name].ds_cluster_mapping[vm.ds_cluster]
- tpl = ' datastore_cluster_id = data.vsphere_datastore_cluster.{}.id\n'
- content += tpl.format(dsc_tf_name)
-
- if vm.datastore:
- ds_tf_name = self.vsphere[vs_name].ds_mapping[vm.datastore]
- tpl = ' datastore_id = data.vsphere_datastore.{}.id\n'
- content += tpl.format(ds_tf_name)
-
- content += textwrap.indent(textwrap.dedent('''\
- num_cpus = "{cpu}"
- folder = "{f}"
- num_cores_per_socket = "1"
- cpu_hot_add_enabled = "true"
- cpu_hot_remove_enabled = "true"
- memory = "{m}"
- memory_hot_add_enabled = "true"
- boot_delay = "{b}"
- guest_id = {g}
- '''), ' ').format(
- g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000))
- if vm.vm_template:
- tpl = ' scsi_type = data.vsphere_virtual_machine.{}.scsi_type\n'
- content += tpl.format(tpl_vm.tf_name)
- content += '\n'
-
- content += textwrap.indent(textwrap.dedent('''\
- lifecycle {
- ignore_changes = all
- }
- '''), ' ')
- content += '\n'
-
- return content
-
- # --------------------------------------------------------------------------
- def _create_instfile_if(self, vm, iface, i, tpl_vm):
-
- vs_name = vm.vsphere
-
- # ## Interface definition
-
- if self.verbose > 1:
- LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
- nw = iface.network
- nw_name = self.vsphere[vs_name].network_mapping[nw]
-
- content = textwrap.indent(textwrap.dedent('''\
- network_interface {{
- network_id = data.vsphere_network.{n}.id
- adapter_type = data.{vvm}.{t}.{nit}[0]
- }}
- '''), ' ').format(
- n=nw_name, t=tpl_vm.tf_name,
- vvm='vsphere_virtual_machine', nit='network_interface_types')
- content += '\n'
-
- return content
-
- # --------------------------------------------------------------------------
- def _create_instfile_disk(self, vm, unit_id):
-
- # ## Disk definitions
- if self.verbose > 1:
- LOG.debug(_("Generating disk definition {i} of {v!r}.").format(i=unit_id, v=vm.name))
- disk = vm.disks[unit_id]
- content = textwrap.indent(textwrap.dedent('''\
- disk {{
- label = "disk{i}"
- size = "{s}"
- eagerly_scrub = "false"
- thin_provisioned = "false"
- '''), ' ').format(i=unit_id, s=int(disk.size_gb))
- if unit_id > 0:
- content += ' unit_number = {}\n'.format(unit_id)
- content += ' }\n\n'
-
- return content
-
- # --------------------------------------------------------------------------
- def _create_instfile_custom(self, vm, tpl_vm):
-
- # ## Customization of VM
- if self.verbose > 1:
- LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
-
- content = textwrap.indent(textwrap.dedent('''\
- clone {{
- template_uuid = data.vsphere_virtual_machine.{t}.id
-
- customize {{
- linux_options {{
- host_name = "{h}"
- domain = "{d}"
- time_zone = var.timezone
- }}
-
- '''), ' ').format(
- t=tpl_vm.tf_name, h=vm.hostname, d=vm.domain)
-
- content += self._create_instfile_nw(vm)
- content += ' }\n'
- content += ' }\n\n'
-
- # ## local SSH cleanup before any actions
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "local-exec" {{
- command = "ssh-keygen -R {h} || true"
- }}
-
- provisioner "local-exec" {{
- command = "ssh-keygen -R {i} || true"
- }}
-
- '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
-
- # ## Copying postinstall scripts to VM
-
- files = ['conf-resolver', 'create-motd']
- if vm.has_puppet:
- files.append('init-puppet')
- files.append('update-all-packages')
-
- for sname in files:
-
- if self.verbose > 1:
- LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
- f=sname, v=vm.name))
-
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "file" {{
- source = "{d}/{f}"
- destination = "/tmp/{f}"
- connection {{
- type = "ssh"
- user = "root"
- host = "{h}"
- }}
- }}
-
- '''), ' ').format(
- d=self.script_dir_rel, f=sname, h=vm.fqdn)
-
- # ## Postinstall commands on host
- commands = []
-
- commands.append("usermod -c 'root {}' root".format(vm.fqdn))
-
- commands.append("chmod +x /tmp/conf-resolver")
- cmd = '/tmp/conf-resolver'
- for ns in vm.nameservers:
- cmd += ' --ns {!r}'.format(str(ns))
- for dom in vm.searchdomains:
- cmd += ' --search {!r}'.format(dom)
- if vm.dns_options:
- cmd += ' --options {!r}'.format(vm.dns_options)
- else:
- cmd += ' --options {!r}'.format('')
- commands.append(cmd)
- commands.append("rm -f /tmp/conf-resolver")
-
- purpose = self.re_doublequote.sub('\\\"', vm.purpose)
-
- zone = "{z}/{c}".format(z=vm.vsphere, c=vm.cluster)
-
- commands.append("chmod +x /tmp/create-motd")
- cmd = ("/tmp/create-motd --purpose '{p}' --hardware 'vmware (x86_64)' --owner '{o}' "
- "--location 'VMWare' --zone '{z}' --customer '{c}' --email '{m}' --tier '{t}' "
- "--environment '{e}' --role '{r}'").format( p=purpose, t=vm.puppet_tier,
- o=vm.customer, z=zone, c=vm.puppet_customer, m=vm.puppet_contact,
- e=vm.puppet_env, r=vm.puppet_role)
- if vm.puppet_project:
- cmd += " --project '{pr}'".format(pr=vm.puppet_project)
- cmd += " | tee /etc/motd"
- commands.append(cmd)
- commands.append("rm -f /tmp/create-motd")
-
- # ## Backup - Legato networker
- commands.append("systemctl stop networker.service")
- commands.append("rm -rfv /nsr/tmp /nsr/res")
- if vm.has_backup:
- commands.append("mkdir -pv /nsr/res")
- commands.append(
- "if [ ! -f /nsr/res/servers ] ; then "
- "echo 'legato01.pixelpark.com' > /nsr/res/servers; fi")
- commands.append("systemctl start networker.service; sleep 2")
- commands.append("nsrports -S 7937-7999; sleep 2")
- commands.append("systemctl stop networker.service; sleep 2")
- commands.append(
- "systemctl enable networker.service; systemctl start networker.service; sleep 2")
- commands.append("nsrports; sleep 2")
- else:
- commands.append("systemctl disable networker.service")
-
- # ## Configuring and starting puppet
- if vm.has_puppet:
- commands.append("chmod +x /tmp/init-puppet")
- cmd = "/tmp/init-puppet --environment '{e}' --customer '{c}' "
- if vm.puppet_project:
- cmd += "--project '{pr}' "
- cmd += "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p}' --email '{m}'"
- cmd += " --zone '{z}'"
- if vm.puppet_initial_install:
- cmd += " --initial-install"
- cmd = cmd.format(
- p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer, z=zone,
- pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
- commands.append(cmd)
- commands.append("rm -f /tmp/init-puppet")
-
- content += ' provisioner "remote-exec" {\n'
- content += ' inline = [\n'
- for cmd in commands:
- content += ' "{}",\n'.format(cmd)
- content += ' ]\n'
- content += ' connection {\n'
- content += ' type = "ssh"\n'
- content += ' user = "root"\n'
- content += ' host = "{}"\n'.format(vm.fqdn)
- content += ' }\n'
- content += ' }\n\n'
-
- # ## postconfigure actions with puppet
- if vm.has_puppet:
- content += self._create_instfile_puppet(vm)
-
- # ## local SSH cleanup on destroy
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "local-exec" {{
- command = "ssh-keygen -R {h} || true"
- when = destroy
- }}
-
- provisioner "local-exec" {{
- command = "ssh-keygen -R {i} || true"
- when = destroy
- }}
- '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
-
- content += '}\n\n'
-
- return content
-
- # -------------------------------------------------------------------------·
- def _create_instfile_nw(self, vm):
-
- content = ''
-
- gw4 = None
- gw6 = None
- for iface in vm.interfaces:
-
- content += " network_interface {\n"
- if iface.address_v4:
- content += ' ipv4_address = "{}"\n'.format(iface.address_v4)
- if iface.netmask_v4 is not None:
- content += ' ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
- if iface.address_v6:
- content += ' ipv6_address = "{}"\n'.format(iface.address_v6)
- if iface.netmask_v6 is not None:
- content += ' ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
- content += ' }\n\n'
-
- if not gw4:
- gw4 = iface.gateway_v4
- if not gw6:
- gw6 = iface.gateway_v6
-
- if gw4:
- content += ' ipv4_gateway = "{}"\n'.format(gw4)
- if gw6:
- content += ' ipv6_gateway = "{}"\n'.format(gw6)
-
- ns = ', '.join(map(lambda x: '"{}"'.format(x), vm.nameservers))
- content += ' dns_server_list = [{}]\n'.format(ns)
-
- return content
-
- # -------------------------------------------------------------------------·
- def _create_instfile_puppet(self, vm):
-
- content = textwrap.indent(textwrap.dedent('''\
- provisioner "local-exec" {{
- command = "ssh -o StrictHostKeyChecking=no {ca} 'sudo /opt/puppetlabs/bin/puppetserver ca sign --certname {h} || true'"
- }}
-
- provisioner "remote-exec" {{
- inline = [
- "/opt/puppetlabs/bin/puppet agent --test || true",
- "/usr/bin/systemctl start puppet.service",
- "/usr/bin/systemctl enable puppet.service",
- "chmod +x /tmp/update-all-packages",
- "/tmp/update-all-packages",
- "rm -f /tmp/update-all-packages",
- ]
- connection {{
- type = "ssh"
- user = "root"
- host = "{h}"
- }}
- }}
-
- '''), ' ').format(
- ca=self.config.puppetca, h=vm.fqdn,
- )
-
- # Destroy actions with puppet
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "remote-exec" {{
- inline = [
- "/usr/bin/systemctl stop puppet.service || true",
- ]
- when = destroy
- connection {{
- type = "ssh"
- user = "root"
- host = "{h}"
- }}
- }}
-
- provisioner "local-exec" {{
- command = "ssh -o StrictHostKeyChecking=no {ma} 'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
- when = destroy
- }}
-
- provisioner "local-exec" {{
- command = "ssh -o StrictHostKeyChecking=no {ca} 'sudo /opt/puppetlabs/bin/puppetserver ca clean --certname {h} || true'"
- when = destroy
- }}
-
- '''), ' ').format(
- ca=self.config.puppetca, h=vm.fqdn, ma=self.config.puppetmaster,
- )
-
- return content
-
- # -------------------------------------------------------------------------·
- def ensure_vsphere_folders(self):
-
- vs_name = None
- for vs_name in self.vsphere.keys():
- break
- vsphere = self.vsphere[vs_name]
-
- print()
- LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
- vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
-
- # -------------------------------------------------------------------------·
- def exec_terraform(self):
-
- tf_timeout = 30
-
- print()
- LOG.info(_("Executing {!r} ...").format('terraform init'))
- cmd = [str(self.terraform_cmd), 'init']
- try:
- result = self.run(
- cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
- except CalledProcessError as e:
- if e.stdout:
- print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
- if e.stderr:
- print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
- raise ExpectedHandlerError(str(e))
- LOG.debug(_("Completed process:") + "\n" + str(result))
-
- if self.existing_vms:
- print()
- LOG.info(_("Importing existing virtual machines ..."))
-
- for vm in self.existing_vms:
-
- vs_name = vm.vsphere
- print()
- LOG.info(_("Importing VM {!r}.").format(vm.name))
- vm_obj = 'vsphere_virtual_machine.{}'.format(vm.tf_name)
- path = '/{dc}/{f}/{p}/{n}'.format(
- dc=self.vsphere[vs_name].dc, f=self.vsphere[vs_name].dc_obj.vm_folder,
- p=vm.path, n=vm.name)
- cmd = [str(self.terraform_cmd), 'import', vm_obj, path]
- try:
- result = self.run(
- cmd, may_simulate=True, timeout=tf_timeout,
- stdout=PIPE, stderr=PIPE, check=True)
- except CalledProcessError as e:
- if e.stdout:
- print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
- if e.stderr:
- msg = self.colored("Error message", ('BOLD', 'RED')) + ':\n'
- msg += to_str(e.stderr)
- print(msg)
- LOG.warn(_("Error on importing VM {!r}:").format(vm.name) + ' ' + str(e))
-
- LOG.debug(_("Completed process:") + "\n" + str(result))
-
-# print()
-# LOG.info(_("Executing {!r} ...").format('terraform plan'))
-# cmd = [str(self.terraform_cmd), 'plan']
-# try:
-# result = self.run(
-# cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
-# except CalledProcessError as e:
-# if e.stdout:
-# print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
-# if e.stderr:
-# print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
-# raise ExpectedHandlerError(str(e))
-# LOG.debug(_("Completed process:") + "\n" + str(result))
-
- goto = Path(os.path.relpath(self.project_dir, self.start_dir))
-
- print()
- print()
- print(self.colored(_("Congratulations!"), 'GREEN'))
- print()
- print(_("Now you are ready to deploy the following virtual machines:"))
- for vm in sorted(self.vms, key=lambda x: x.tf_name):
- print(" * {}".format(vm.fqdn))
- print()
- print(_("To start the deployment process change to directory {}").format(
- self.colored(str(goto), 'GREEN')))
- print()
- print(_("and enter: {}").format(self.colored('terraform apply', 'GREEN')))
- print()
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2021 by Frank Brehm, Berlin
+@summary: A handler module for underlying actions
+"""
+from __future__ import absolute_import, print_function
+
+# Standard modules
+import os
+import logging
+import re
+import socket
+import ipaddress
+import shutil
+import stat
+import textwrap
+import copy
+import sys
+
+from pathlib import Path
+
+from subprocess import PIPE
+
+from distutils.version import LooseVersion
+
+from operator import attrgetter
+
+HAS_GETCH = False
+try:
+ import getch
+ HAS_GETCH = True
+except ImportError:
+ pass
+
+# Third party modules
+import pytz
+import yaml
+import six
+
+from fb_tools.common import pp, to_bool, to_str, RE_DOT_AT_END
+from fb_tools.errors import HandlerError, ExpectedHandlerError, CommandNotFoundError
+from fb_tools.handling_obj import HandlingObject, CalledProcessError
+from fb_tools.handler import BaseHandler
+
+from fb_vmware.errors import VSphereExpectedError
+from fb_vmware.config import VSPhereConfigInfo
+from fb_vmware.connect import VsphereConnection
+
+from fb_pdnstools.server import PowerDNSServer
+from fb_pdnstools.errors import PowerDNSHandlerError
+
+# Own modules
+from .. import MIN_VERSION_TERRAFORM, MAX_VERSION_TERRAFORM
+from .. import MIN_VERSION_VSPHERE_PROVIDER
+
+from ..errors import AbortExecution
+
+from ..config import CrTfConfiguration
+
+from ..terraform.vm import TerraformVm
+
+from ..terraform.disk import TerraformDisk
+
+from ..xlate import XLATOR
+
+__version__ = '3.9.0'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+def password_input_getch(prompt='', fill_char='*', max_len=64):
+ p_s = ''
+ proxy_string = ' ' * 64
+
+ # fch = ' '
+ # if len(fill_char) >= 1:
+ # fch = fill_char[0]
+
+ while True:
+
+ print('\r' + proxy_string, end='', flush=True)
+ print('\r' + prompt, end='', flush=True)
+
+ c = getch.getch()
+ if c == b'\r' or c == b'\n':
+ break
+ elif c == b'\x08':
+ if len(p_s):
+ p_s = p_s[:-1]
+ continue
+
+ p_s += to_str(c)
+ if len(p_s) >= max_len:
+ break
+
+ print('', flush=True)
+ return p_s
+
+
+# =============================================================================
+def password_input(prompt='', fill_char='*', max_len=64):
+
+ if HAS_GETCH:
+ return password_input_getch(prompt=prompt, fill_char=fill_char, max_len=max_len)
+
+ import getpass
+
+ return getpass.getpass(prompt=prompt)
+
+
+# =============================================================================
+class CreateTerraformHandler(BaseHandler):
+ """
+ A handler class for creating the terraform environment
+ """
+
+ re_default = re.compile(r'^\s*defaults?\s*$', re.IGNORECASE)
+ re_vm_key = re.compile(r'^\s*vms?\s*$', re.IGNORECASE)
+ re_group = re.compile(r'^\s*groups?\s*$', re.IGNORECASE)
+ re_group_name = re.compile(r'^\s*name\s*$', re.IGNORECASE)
+ re_doublequote = re.compile(r'"')
+
+ re_tf_version = re.compile(r'^\s*Terraform\s+v(\S+)', re.IGNORECASE)
+
+ std_file_permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+ std_secure_file_permissions = stat.S_IRUSR | stat.S_IWUSR
+
+ open_opts = {}
+ if six.PY3:
+ open_opts['encoding'] = 'utf-8'
+ open_opts['errors'] = 'surrogateescape'
+
+ max_groups_depth = 10
+
+ tz_name = 'Europe/Berlin'
+ tz = pytz.timezone(tz_name)
+
+ steps = (
+ 'init', 'vmw-init', 'read-yaml', 'pdns-zones', 'vmw-test', 'collect-folders',
+ 'vmw-clusters', 'vmw-datastores', 'vmw-ds-clusters', 'vmw-networks', 'vmw-templates',
+ 'validate-yaml', 'validate-storage', 'validate-iface', 'validate-dns',
+ 'perform-dns', 'project-dir', 'tf-files', 'ensure-vmw-folders',
+ )
+ step_desc = {
+ 'init': _('After initialization of all objects and handlers.'),
+ 'vmw-init': _('After initialisation of VSPhere handlers.'),
+ 'read-yaml': _('After reading the given YAML file.'),
+ 'pdns-zones': _('After retrieving all DNS zones from PowerDNS.'),
+ 'vmw-test': _('After testing VSPhere handlers.'),
+ 'collect-folders': _('After collecting all VMWare and local folders.'),
+ 'vmw-clusters': _('After collecting all VMWare clusters.'),
+ 'vmw-datastores': _('After collecting all VMWare datastores.'),
+ 'vmw-ds-clusters': _('After collecting all VMWare datastore clusters.'),
+ 'vmw-networks': _('After collecting all VMWare networks.'),
+ 'vmw-templates': _('After validating all given VMWare templates.'),
+ 'validate-yaml': _('After syntax validating of data from loaded YAML file.'),
+ 'validate-storage': _('After validating all given storage data.'),
+ 'validate-iface': _('After validating all given network interface data.'),
+ 'validate-dns': _('After validating all given DNS data.'),
+ 'perform-dns': _('After performing all necessary actions in DNS.'),
+ 'project-dir': _('After ensuring availability of the project directory.'),
+ 'tf-files': _('After creation of the Terraform project files.'),
+ 'ensure-vmw-folders': _('After ensuring availability of VM folders in VMWare vSphere.'),
+ }
+
+ # -------------------------------------------------------------------------
    def __init__(
            self, appname=None, verbose=0, version=__version__, base_dir=None,
            config=None, simulate=False, force=False, ignore_existing_dns=False,
            terminal_has_colors=False, initialized=False):
        """Initialise the CreateTerraformHandler object.

        @param appname: name of the current running application
        @param verbose: verbosity level (0 = quiet)
        @param version: version string of this handler
        @param base_dir: base directory of the application
        @param config: the application configuration (expected to be a
            CrTfConfiguration; enforced later in init_handlers())
        @param simulate: simulation mode - don't execute changing actions
        @param force: force execution of actions
        @param ignore_existing_dns: don't treat already existing DNS
            records as an error
        @param terminal_has_colors: the terminal is able to display colors
        @param initialized: mark the object as initialised at the end

        @raise ExpectedHandlerError: if the directory for postinstall
            scripts does not exist or is not a directory.
        """

        # Sub-handlers, created later in init_handlers() / _init_vspheres().
        self.pdns = None
        self.vsphere = {}
        self.config = None

        # Path of the detected terraform binary.
        self.terraform_cmd = None

        # Raw data structure read from the project YAML file.
        self.yaml_data = None

        # Template VM definitions (global default and per-group defaults).
        self.default_vm = None
        self.group_default_vms = {}

        self.ignore_existing_dns = bool(ignore_existing_dns)

        # Evaluated TerraformVm objects and discovered vSphere templates.
        self.vms = []
        self.vsphere_templates = {}

        # Bookkeeping for uniqueness checks of names, FQDNs and addresses.
        self.vm_names = []
        self.fqdns = {}
        self.addresses = {}

        self.vsphere_folders = []

        # Credentials re-used for all vSphere connections.
        self.vsphere_user = None
        self.vsphere_password = None

        # vSphere resources referenced by the evaluated VMs.
        self.used_networks = {}
        self.used_dc_clusters = {}
        self.used_datastores = {}
        self.project_dir = None
        self.project_name = None

        self._terraform_root_dir = None

        # All VMs found in vSphere, and those from the YAML already existing.
        self.all_vms = {}
        self.existing_vms = []

        # Remember the working directory at start time.
        self.start_dir = Path(os.getcwd())

        self.script_dir = None
        self.script_dir_rel = None

        # Step name at which execution should be aborted (see stop_at_step).
        self._stop_at_step = None

        # Version constraints, taken from module-level constants.
        self.min_version_terraform = None
        if MIN_VERSION_TERRAFORM:
            self.min_version_terraform = LooseVersion(MIN_VERSION_TERRAFORM)

        self.max_version_terraform = None
        if MAX_VERSION_TERRAFORM:
            self.max_version_terraform = LooseVersion(MAX_VERSION_TERRAFORM)

        self.min_version_vsphere_provider = None
        if MIN_VERSION_VSPHERE_PROVIDER:
            self.min_version_vsphere_provider = LooseVersion(MIN_VERSION_VSPHERE_PROVIDER)

        # DNS records found, and those still to create.
        self.dns_mapping = {
            'forward': [],
            'reverse': [],
        }
        self.dns_mappings2create = {
            'forward': [],
            'reverse': [],
        }

        self.updated_zones = []

        # Counter for non-fatal evaluation errors.
        self.eval_errors = 0

        super(CreateTerraformHandler, self).__init__(
            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
            simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
            initialized=False,
        )

        if config:
            self.config = config
            if self.verbose >= 1:
                msg = _("Given configuration:") + '\n' + pp(self.config.as_dict())
                LOG.debug(msg)

        # The postinstall scripts must exist below the base directory.
        self.script_dir = self.base_dir.joinpath('postinstall-scripts')
        LOG.debug(_("Directory for postinstall scripts: {!r}.").format(str(self.script_dir)))
        if not self.script_dir.exists():
            msg = _("Directory for postinstall scripts {!r} does not exists.").format(
                str(self.script_dir))
            raise ExpectedHandlerError(msg)
        if not self.script_dir.is_dir():
            msg = _("Path {!r} for postinstall scripts exists, but is not a directory.").format(
                str(self.script_dir))
            raise ExpectedHandlerError(msg)

        if initialized:
            self.initialized = True
+
+ # -----------------------------------------------------------
    @HandlingObject.simulate.setter
    def simulate(self, value):
        # Override of the inherited setter: store the flag and propagate it
        # to all dependent handler objects (PowerDNS, vSphere connections).
        self._simulate = to_bool(value)

        if self.initialized:
            LOG.debug(_("Setting simulate of all subsequent objects to {!r} ...").format(
                self.simulate))

        if self.pdns:
            self.pdns.simulate = self.simulate

        # Entries may still be None before their connection was created.
        for vsphere_name in self.vsphere.keys():
            if self.vsphere[vsphere_name]:
                self.vsphere[vsphere_name].simulate = self.simulate
+
+ # -----------------------------------------------------------
+ @property
+ def stop_at_step(self):
+ """Step, at which the execution should be interrupted."""
+ return self._stop_at_step
+
+ @stop_at_step.setter
+ def stop_at_step(self, value):
+ if value is None:
+ self._stop_at_step = None
+ return
+ v = str(value).strip().lower().replace('_', '-')
+ if v == '':
+ self._stop_at_step = None
+ return
+ if v not in self.steps:
+ msg = _("Invalid step name {!r} for interrupting execution.").format(value)
+ raise ValueError(msg)
+ self._stop_at_step = v
+
+ # -----------------------------------------------------------
+ @property
+ def terraform_root_dir(self):
+ """Root directory of all terraform directories."""
+ if self.is_venv:
+ return self.base_dir.parent
+ return self._terraform_root_dir
+
+ # -----------------------------------------------------------
+ @property
+ def full_project_name(self):
+ """Complete project name with parent paths."""
+ if not self.project_name:
+ return None
+ if not self.project_dir:
+ return None
+ if not self.terraform_root_dir:
+ return self.project_name
+ return os.path.relpath(str(self.project_dir), self.terraform_root_dir)
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(CreateTerraformHandler, self).as_dict(short=short)
+ res['std_file_permissions'] = "{:04o}".format(self.std_file_permissions)
+ res['std_secure_file_permissions'] = "{:04o}".format(self.std_secure_file_permissions)
+ res['open_opts'] = self.open_opts
+ res['stop_at_step'] = self.stop_at_step
+ res['steps'] = copy.copy(self.steps)
+ res['tz_name'] = self.tz_name
+ res['terraform_root_dir'] = self.terraform_root_dir
+ res['full_project_name'] = self.full_project_name
+ res['vsphere'] = {}
+ for vsphere_name in self.vsphere.keys():
+ res['vsphere'][vsphere_name] = self.vsphere[vsphere_name].as_dict(short=short)
+
+ return res
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def set_tz(cls, tz_name):
+
+ if not tz_name.strip():
+ raise ValueError(_("Invalid time zone name {!r}.").format(tz_name))
+ tz_name = tz_name.strip()
+ LOG.debug(_("Setting time zone to {!r}.").format(tz_name))
+ cls.tz = pytz.timezone(tz_name)
+ cls.tz_name = tz_name
+
+ # -------------------------------------------------------------------------
+ def incr_verbosity(self, diff=1):
+
+ new_verbose = self.verbose + int(diff)
+ if new_verbose < 0:
+ new_verbose = 0
+ self.verbose = new_verbose
+
+ if self.pdns:
+ self.pdns.verbose = self.verbose
+
+ for vname in self.vsphere:
+ self.vsphere[vname].verbose = self.verbose
+
+ # -------------------------------------------------------------------------
    def init_handlers(self):
        """Initialise the additional handlers (terraform, PowerDNS).

        Checks the configuration, propagates configured limits to the
        Terraform model classes, locates and version-checks the terraform
        binary, and creates (and smoke-tests) the PowerDNS API handler.

        @raise HandlerError: if no or an invalid configuration was given.
        @raise CommandNotFoundError: if no terraform binary was found.
        @raise ExpectedHandlerError: if the PowerDNS API is not reachable.
        """

        if not self.config:
            msg = _("No configuration given before initialisation of handlers.")
            raise HandlerError(msg)

        if not isinstance(self.config, CrTfConfiguration):
            raise HandlerError(_(
                "{n} is not a {e}-instance, but a {w}-instance instead.").format(
                n='self.config', e='CrTfConfiguration', w=self.config.__class__.__name__))

        # Propagate configured size limits to the Terraform model classes.
        TerraformDisk.default_size = self.config.disk_size
        TerraformDisk.min_size_gb = self.config.disk_min_size
        TerraformDisk.max_size_gb = self.config.disk_max_size

        TerraformVm.min_rootdisk_size = self.config.root_min_size
        TerraformVm.max_rootdisk_size = self.config.root_max_size

        LOG.info(_("Initialize some additional handlers."))

        self.terraform_cmd = self.get_command('terraform', quiet=True)
        if not self.terraform_cmd:
            raise CommandNotFoundError('terraform')
        self.check_terraform_version()

        self.pdns = PowerDNSServer(
            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
            master_server=self.config.pdns_master_server,
            port=self.config.pdns_api_port, key=self.config.pdns_api_key,
            use_https=self.config.pdns_api_use_https, path_prefix=self.config.pdns_api_path_prefix,
            simulate=self.simulate, force=self.force, initialized=True,
        )

        if not self.config.no_pdns:
            try:
                # Smoke test of the PowerDNS API connection.
                api_version = self.pdns.get_api_server_version()  # noqa
            except (PowerDNSHandlerError, ConnectionError) as e:
                msg = "{c}: {e}".format(c=e.__class__.__name__, e=str(e))
                raise ExpectedHandlerError(msg)
+
+ # -------------------------------------------------------------------------
    def check_terraform_version(self):
        """ Checking, that the called terraform has a minimum version.

        Runs 'terraform version', parses the first output line and compares
        the found version against self.min_version_terraform and
        self.max_version_terraform, when those bounds are set.

        @raise ExpectedHandlerError: if there is no output, the output
            cannot be parsed, or the version is outside the allowed range.
        """

        # Timeout in seconds for the 'terraform version' call.
        tf_timeout = 10

        got_tf_version = None
        LOG.info(_("Checking the terraform version ..."))

        cmd = [str(self.terraform_cmd), 'version']
        cmd_str = ' '.join(cmd)
        LOG.debug(_("Executing {!r} ...").format(cmd_str))
        result = self.run(
            cmd, may_simulate=False, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
        LOG.debug(_("Completed process:") + "\n" + str(result))

        if not result.stdout:
            msg = _("No output on command {!r}.").format(cmd_str)
            raise ExpectedHandlerError(msg)
        lines = result.stdout.splitlines()

        if self.verbose > 2:
            LOG.debug(_("First line:") + '\n' + lines[0])
        # The version is expected in the first line, e.g. 'Terraform v1.2.3'.
        match = self.re_tf_version.search(lines[0])
        if not match:
            msg = _("Could not evaluate version output of terraform:") + '\n' + result.stdout
            raise ExpectedHandlerError(msg)

        got_tf_version = LooseVersion(match.group(1))
        LOG.info(_("Terraform version: {!r}.").format(str(got_tf_version)))

        # Check the lower bound, when configured.
        if self.min_version_terraform:
            LOG.debug(_("Checking for {o}{m!r} ...").format(
                o='>=', m=str(self.min_version_terraform)))
            if got_tf_version < self.min_version_terraform:
                msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
                    c=str(got_tf_version), o='>=', m=str(self.min_version_terraform))
                raise ExpectedHandlerError(msg)

        # Check the upper bound, when configured.
        if self.max_version_terraform:
            LOG.debug(_("Checking for {o}{m!r} ...").format(
                o='<=', m=str(self.max_version_terraform)))
            if got_tf_version > self.max_version_terraform:
                msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
                    c=str(got_tf_version), o='<=', m=str(self.max_version_terraform))
                raise ExpectedHandlerError(msg)
+
+ # -------------------------------------------------------------------------
+ def __del__(self):
+ """Destructor."""
+
+ LOG.debug(_("Self destruction."))
+
+ if self.pdns:
+ self.pdns = None
+
+ if self.vsphere:
+ self.vsphere = None
+
+ # -------------------------------------------------------------------------
+ def first_call(self, yaml_file):
+ """First steps until reading the YAML file."""
+
+ if not self.initialized:
+ raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
+
+ try:
+
+ self.exec_init_run()
+
+ LOG.info(_("Go ahead..."))
+
+ self.exec_read_yaml(yaml_file)
+
+ print()
+ LOG.info(_("Initialising VSPhere handlers."))
+ self.init_vspheres(yaml_file)
+
+ return True
+
+ except AbortExecution as e:
+ LOG.warn(str(e))
+ return False
+
+ # -------------------------------------------------------------------------
+ def __call__(self, yaml_file):
+ """Executing the underlying action."""
+
+ if not self.initialized:
+ raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
+
+ try:
+
+ if self.simulate:
+ print()
+ msg_a = _("Simulation mode")
+ msg_b = (
+ "* " + _("Necessary DNS records are not created."),
+ "* " + _("Terraform files are not created.")
+ )
+ ll = 4
+ if len(msg_a) > ll:
+ ll = len(msg_a)
+ for msg in msg_b:
+ if len(msg) > ll:
+ ll = len(msg)
+
+ print(self.colored('#' * (ll + 4), 'AQUA'))
+ line = self.colored('#', 'AQUA') + ' '
+ line += self.colored(msg_a.center(ll), 'YELLOW')
+ line += ' ' + self.colored('#', 'AQUA')
+ print(line)
+ for msg in msg_b:
+ line = '# ' + msg.ljust(ll) + ' #'
+ print(self.colored(line, 'AQUA'))
+ print(self.colored('#' * (ll + 4), 'AQUA'))
+ print()
+
+ self.exec_pdns_zones()
+
+ print()
+ LOG.info(_("Cpllecting first information from vSPhere."))
+ self.test_vsphere_handlers()
+ self.exec_collect_folders(yaml_file)
+ self.assign_default_vmw_values()
+
+ print()
+ LOG.info(_("Retrieving information from vSphere."))
+
+ self.exec_vmw_clusters()
+ self.exec_vmw_datastores()
+ self.exec_vmw_ds_clusters()
+ self.exec_vmw_networks()
+ self.exec_vmw_templates()
+
+ self.exec_validate_yaml()
+ self.exec_validate_storage()
+ self.exec_validate_iface()
+ self.exec_validate_dns()
+
+ if self.verbose > 2:
+
+ vm_list = []
+ for vm in self.vms:
+ vm_list.append(vm.as_dict())
+ LOG.debug(_("Validated VMs:") + "\n" + pp(vm_list))
+
+ if self.existing_vms:
+ msg = ngettext(
+ "There is one existing virtual machine.",
+ "There are {c} existing virtual machines.",
+ len(self.existing_vms)).format(c=len(self.existing_vms))
+ LOG.warn(msg)
+ if self.verbose > 2:
+ msg = ngettext(
+ "Existing virtual machine:", "Existing virtual machines:",
+ len(self.existing_vms))
+ LOG.debug(msg + '\n' + pp(self.existing_vms))
+ else:
+ LOG.info(_("No existing virtual machines found in YAML file."))
+
+ self.exec_perform_dns()
+ self.exec_project_dir()
+
+ self.exec_tf_files()
+ self.exec_vsphere_folders()
+
+ LOG.info(_("Finished all steps."))
+
+ except AbortExecution as e:
+ LOG.warn(str(e))
+ return
+
+ self.exec_terraform()
+ if self.simulate:
+ print()
+ msg = print(self.colored(
+ _('And how I said before - it was only a simulation!'), 'AQUA'))
+
+ print()
+
+ # -------------------------------------------------------------------------·
+ def exec_init_run(self):
+
+ if self.stop_at_step == 'init':
+ self.incr_verbosity()
+
+ if self.verbose > 2:
+ LOG.debug(_("Current {} object:").format(self.__class__.__name__) + "\n" + str(self))
+
+ LOG.info(_("Finished step {!r}.").format('init'))
+ if self.stop_at_step == 'init':
+ raise AbortExecution('init')
+
+ # -------------------------------------------------------------------------·
+ def exec_read_yaml(self, yaml_file):
+
+ if self.stop_at_step == 'read-yaml':
+ self.incr_verbosity()
+
+ self.read_yaml_data(yaml_file)
+ self.eval_yaml_data()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in evaluation of YAML data of {f!r}.",
+ "Found {n} errors in evaluation of YAML data of {f!r}.",
+ self.eval_errors).format(n=self.eval_errors, f=str(yaml_file))
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('read-yaml'))
+ if self.stop_at_step == 'read-yaml':
+ raise AbortExecution('read-yaml')
+
+ # -------------------------------------------------------------------------·
+ def exec_collect_folders(self, yaml_file):
+
+ if self.stop_at_step == 'collect-folders':
+ self.incr_verbosity()
+
+ LOG.info(_("Collecting all VMWare and local folders ..."))
+ LOG.info(_("Get vSphere datacenter ..."))
+ for vname in self.vsphere:
+ self.vsphere[vname].get_datacenter()
+
+ LOG.debug(_("Collecting vSphere folders."))
+ self.vsphere_folders = []
+ for vm in self.vms:
+ if vm.folder:
+ if vm.folder not in self.vsphere_folders:
+ self.vsphere_folders.append(vm.folder)
+ self.vsphere_folders.sort(key=str.lower)
+ LOG.debug(_("Collected vSphere folders:") + "\n" + pp(self.vsphere_folders))
+
+ # Set project name and directory
+ yfile = Path(yaml_file)
+ yfile_base = yfile.name
+ yfile_dir = yfile.parent.resolve()
+ (yfile_stem, yfile_ext) = os.path.splitext(yfile_base)
+ self.project_name = yfile_stem
+ LOG.info(_("Project name is {!r}.").format(str(self.project_name)))
+ self.project_dir = yfile_dir / yfile_stem
+ LOG.info(_("Project directory is: {!r}.").format(str(self.project_dir)))
+
+ # Evaluating root terraform directory
+ if not self.is_venv:
+ i = 4
+ cdir = copy.copy(self.project_dir).parent
+ while i > 0:
+ git_dir = cdir / '.git'
+ if git_dir.is_dir():
+ self._terraform_root_dir = cdir
+ break
+ i -= 1
+ if cdir == cdir.parent:
+ break
+ cdir = cdir.parent
+ if not self._terraform_root_dir:
+ msg = _("Did not found root terraform directory above {!r}.").format(
+ str(self.project_dir))
+ LOG.warn(msg)
+
+ LOG.info(_("Full project name: {!r}").format(self.full_project_name))
+
+ LOG.info(_("Finished step {!r}.").format('collect-folders'))
+ if self.stop_at_step == 'collect-folders':
+ raise AbortExecution('collect-folders')
+
+ # -------------------------------------------------------------------------·
+ def exec_pdns_zones(self):
+
+ if self.config.no_pdns:
+ return
+
+ if self.stop_at_step == 'pdns-zones':
+ self.incr_verbosity()
+
+ print()
+ LOG.info(_("Retrieving informations from PowerDNS ..."))
+
+ self.pdns.get_api_zones()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in exploring PowerDNS zones.",
+ "Found {n} errors in exploring PowerDNS zones.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('pdns-zones'))
+ if self.stop_at_step == 'pdns-zones':
+ raise AbortExecution('pdns-zones')
+
+ # -------------------------------------------------------------------------·
+ def init_vspheres(self, yaml_file):
+
+ if self.stop_at_step == 'vmw-init':
+ self.incr_verbosity()
+
+ # Test for multiple VSphere references
+ found_vspheres = []
+ for vm in self.vms:
+ vname = vm.vsphere
+ if vname not in found_vspheres:
+ found_vspheres.append(vname)
+ if len(found_vspheres) > 1:
+ yaml_file_rel = os.path.relpath(str(yaml_file), os.getcwd())
+ msg = _("There is only one, unique VSPhere definition allowed in a project file.")
+ msg += '\n'
+ msg += _("In {f!r} were found {nr} different VSPhere definitions:").format(
+ f=yaml_file_rel, nr=len(found_vspheres))
+ for vname in sorted(found_vspheres, key=str.lower):
+ msg += '\n * {!r}'.format(vname)
+ raise ExpectedHandlerError(msg)
+
+ self._init_vspheres()
+
+ LOG.info(_("Finished step {!r}.").format('vmw-init'))
+ if self.stop_at_step == 'vmw-init':
+ raise AbortExecution('vmw-init')
+
+ # -------------------------------------------------------------------------·
    def _init_vspheres(self):
        """Create a VsphereConnection for every VSPhere referenced by a VM.

        The user and password of the first configured VSPhere providing
        them are taken over and re-used for all subsequent connections.

        @raise ExpectedHandlerError: if a referenced VSPhere is not found
            in the configuration, or if the connection setup fails.
        """

        for vm in self.vms:
            if vm.vsphere in self.vsphere:
                # Connection for this VSPhere already created.
                continue
            vname = vm.vsphere
            if vname not in self.config.vsphere:
                msg = _("VSPhere {!r} not defined in configuration.").format(vname)
                raise ExpectedHandlerError(msg)

            # Take over credentials from the first VSPhere defining them.
            if not self.vsphere_user and self.config.vsphere[vname].user:
                self.vsphere_user = self.config.vsphere[vname].user
            if not self.vsphere_password and self.config.vsphere[vname].password:
                self.vsphere_password = self.config.vsphere[vname].password

            try:
                params = {
                    'appname': self.appname,
                    'verbose': self.verbose,
                    'base_dir': self.base_dir,
                    'simulate': self.simulate,
                    'force': self.force,
                    'terminal_has_colors': self.terminal_has_colors,
                    'initialized': True,
                }
                # Separate (shallow) copy only used for masked logging.
                show_params = copy.copy(params)

                connect_info = VSPhereConfigInfo(
                    appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
                    host=self.config.vsphere[vname].host, port=self.config.vsphere[vname].port,
                    dc=self.config.vsphere[vname].dc, user=self.vsphere_user,
                    password=self.vsphere_password, initialized=True)

                params['connect_info'] = connect_info
                show_params['connect_info'] = connect_info.as_dict()

                if self.verbose > 1:
                    if self.verbose < 5:
                        # Don't leak the password into the logs.
                        show_params['connect_info']['password'] = '******'
                    msg = _("Initialising a {}-object with params:").format('VsphereConnection')
                    msg += '\n' + pp(show_params)
                    LOG.debug(msg)

                vsphere = VsphereConnection(**params)
                self.vsphere[vname] = vsphere

            except VSphereExpectedError as e:
                raise ExpectedHandlerError(str(e))
+
+ # -------------------------------------------------------------------------·
+ def test_vsphere_handlers(self):
+
+ if self.stop_at_step == 'vmw-test':
+ self.incr_verbosity()
+
+ for vname in self.vsphere.keys():
+
+ try:
+
+ vsphere = self.vsphere[vname]
+
+ vsphere.get_about()
+ if self.verbose > 2:
+ msg = _("Created {}-object:").format('VsphereConnection')
+ msg += '\n' + pp(vsphere.as_dict())
+ LOG.debug(msg)
+
+ except VSphereExpectedError as e:
+ raise ExpectedHandlerError(str(e))
+
+ LOG.info(_("Finished step {!r}.").format('vmw-test'))
+ if self.stop_at_step == 'vmw-test':
+ raise AbortExecution('vmw-test')
+
+ # -------------------------------------------------------------------------·
+ def assign_default_vmw_values(self):
+ """Assigning not defined templates and clusters of VMs by their
+ appropriate default values."""
+
+ LOG.debug(_(
+ "Assigning not defined templates and clusters of VMs by their "
+ "appropriate default values."))
+
+ for vm in self.vms:
+
+ if not vm.cluster:
+ cl = self.config.vsphere[vm.vsphere].cluster
+ if self.verbose > 1:
+ LOG.debug(_("Setting cluster of {n!r} to {c!r} ...").format(
+ n=vm.name, c=cl))
+ vm.cluster = cl
+
+ if not vm.vm_template:
+ tpl = self.config.vsphere[vm.vsphere].template_name
+ if self.verbose > 1:
+ LOG.debug(_("Setting template of {n!r} to {t!r} ...").format(
+ n=vm.name, t=tpl))
+ vm.vm_template = tpl
+
+ # -------------------------------------------------------------------------·
+ def exec_vmw_clusters(self):
+
+ if self.stop_at_step == 'vmw-clusters':
+ self.incr_verbosity()
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for clusters in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_clusters()
+
+ LOG.info(_("Finished step {!r}.").format('vmw-clusters'))
+ if self.stop_at_step == 'vmw-clusters':
+ raise AbortExecution('vmw-clusters')
+
+ # -------------------------------------------------------------------------·
+ def exec_vmw_datastores(self):
+
+ if self.stop_at_step == 'vmw-datastores':
+ self.incr_verbosity()
+
+ nr_total = 0
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for datastores in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_datastores()
+ nr_total += len(self.vsphere[vname].datastores.keys())
+
+ if nr_total:
+ msg = ngettext("Found one datastore.", "Found {n} datastores.", nr_total)
+ LOG.debug(msg.format(n=nr_total))
+ else:
+ LOG.error(_("No VSPhere datastores found."))
+
+ LOG.info(_("Finished step {!r}.").format('vmw-datastores'))
+ if self.stop_at_step == 'vmw-datastores':
+ raise AbortExecution('vmw-datastores')
+
+ # -------------------------------------------------------------------------·
+ def exec_vmw_ds_clusters(self):
+
+ nr_total = 0
+
+ if self.stop_at_step == 'vmw-ds-clusters':
+ self.incr_verbosity()
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for datastore clusters in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_ds_clusters()
+ nr_total += len(self.vsphere[vname].ds_clusters.keys())
+
+ if nr_total:
+ msg = ngettext(
+ "Found one datastore cluster.",
+ "Found {n} datastore clusters.",
+ nr_total)
+ LOG.debug(msg.format(n=nr_total))
+ else:
+ LOG.warn(_("No VSPhere datastore clusters found."))
+
+ LOG.info(_("Finished step {!r}.").format('vmw-ds-clusters'))
+ if self.stop_at_step == 'vmw-ds-clusters':
+ raise AbortExecution('vmw-ds-clusters')
+
+ # -------------------------------------------------------------------------·
+ def exec_vmw_networks(self):
+
+ if self.stop_at_step == 'vmw-networks':
+ self.incr_verbosity()
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for networks in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_networks()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in exploring vSphere {v!r} resources.",
+ "Found {n} errors in exploring vSphere {v!r} resources.",
+ self.eval_errors).format(n=self.eval_errors, v=vname)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('vmw-networks'))
+ if self.stop_at_step == 'vmw-networks':
+ raise AbortExecution('vmw-networks')
+
+ # -------------------------------------------------------------------------·
+ def exec_vmw_templates(self):
+
+ if self.stop_at_step == 'vmw-templates':
+ self.incr_verbosity()
+
+ self.explore_vsphere_templates()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in exploring vSphere templates.",
+ "Found {n} errors in exploring vSphere templates.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('vmw-templates'))
+ if self.stop_at_step == 'vmw-templates':
+ raise AbortExecution('vmw-templates')
+
+ # -------------------------------------------------------------------------·
+ def exec_validate_yaml(self):
+
+ if self.stop_at_step == 'validate-yaml':
+ self.incr_verbosity()
+
+ print()
+ LOG.info(_("Validating information from YAML file ..."))
+
+ self.validate_clusters()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating vSphere computing clusters.",
+ "Found {n} errors in validating vSphere computing clusters.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ self.get_all_vms()
+ self.validate_vms()
+
+ LOG.info(_("Finished step {!r}.").format('validate-yaml'))
+ if self.stop_at_step == 'validate-yaml':
+ raise AbortExecution('validate-yaml')
+
+ # -------------------------------------------------------------------------·
+ def get_all_vms(self):
+
+ LOG.info(_("Got a list of all VMs and templates ..."))
+ self.all_vms = {}
+ re_vm = re.compile(r'.*')
+
+ for vs_name in self.vsphere:
+
+ if vs_name not in self.all_vms:
+ self.all_vms[vs_name] = {}
+
+ vm_list = self.vsphere[vs_name].get_vms(re_vm, name_only=True)
+ for vm_tuple in vm_list:
+ vm_name = vm_tuple[0]
+ vm_path = vm_tuple[1]
+ if vm_name in self.all_vms[vs_name]:
+ self.all_vms[vs_name][vm_name].append(vm_path)
+ else:
+ self.all_vms[vs_name][vm_name] = [vm_path]
+
+ if self.verbose > 2:
+ msg = _("All existing VMs and templates:")
+ msg += '\n' + pp(self.all_vms)
+ LOG.debug(msg)
+
+ # -------------------------------------------------------------------------·
+ def exec_validate_storage(self):
+
+ if self.stop_at_step == 'validate-storage':
+ self.incr_verbosity()
+
+ self.validate_storages()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating VM storages.",
+ "Found {n} errors in validating VM storages.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('validate-storage'))
+ if self.stop_at_step == 'validate-storage':
+ raise AbortExecution('validate-storage')
+
+ # -------------------------------------------------------------------------·
+ def exec_validate_iface(self):
+
+ if self.stop_at_step == 'validate-iface':
+ self.incr_verbosity()
+
+ self.validate_interfaces()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating VM interfaces.",
+ "Found {n} errors in validating VM interfaces.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('validate-iface'))
+ if self.stop_at_step == 'validate-iface':
+ raise AbortExecution('validate-iface')
+
+ # -------------------------------------------------------------------------·
+ def exec_validate_dns(self):
+
+ if self.stop_at_step == 'validate-dns':
+ self.incr_verbosity()
+
+ self.validate_dns_mappings()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating DNS mappings.",
+ "Found {n} errors in validating DNS mappings.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('validate-dns'))
+ if self.stop_at_step == 'validate-dns':
+ raise AbortExecution('validate-dns')
+
+ # -------------------------------------------------------------------------·
+ def exec_perform_dns(self):
+
+ if self.stop_at_step == 'perform-dns':
+ self.incr_verbosity()
+
+ self.perform_dns()
+
+ LOG.info(_("Finished step {!r}.").format('perform-dns'))
+ if self.stop_at_step == 'perform-dns':
+ raise AbortExecution('perform-dns')
+
+ # -------------------------------------------------------------------------·
+ def exec_project_dir(self):
+
+ if self.stop_at_step == 'project-dir':
+ self.incr_verbosity()
+
+ self.ensure_project_dir()
+ self.clean_project_dir()
+
+ LOG.info(_("Finished step {!r}.").format('project-dir'))
+ if self.stop_at_step == 'project-dir':
+ raise AbortExecution('project-dir')
+
+ # -------------------------------------------------------------------------·
+ def exec_tf_files(self):
+
+ if self.stop_at_step == 'tf-files':
+ self.incr_verbosity()
+
+ self.create_terraform_files()
+
+ LOG.info(_("Finished step {!r}.").format('tf-files'))
+ if self.stop_at_step == 'tf-files':
+ raise AbortExecution('tf-files')
+
+ # -------------------------------------------------------------------------·
+ def exec_vsphere_folders(self):
+
+ if self.stop_at_step == 'ensure-vmw-folders':
+ self.incr_verbosity()
+
+ self.ensure_vsphere_folders()
+
+ LOG.info(_("Finished step {!r}.").format('ensure-vmw-folders'))
+ if self.stop_at_step == 'ensure-vmw-folders':
+ raise AbortExecution('ensure-vmw-folders')
+
+ # -------------------------------------------------------------------------·
+ def read_yaml_data(self, yaml_file):
+
+ LOG.info(_("Reading YAML file {!r} ...").format(str(yaml_file)))
+
+ open_opts = {}
+ if six.PY3 and self.config.encoding:
+ open_opts['encoding'] = self.config.encoding
+ open_opts['errors'] = 'surrogateescape'
+
+ try:
+ with open(str(yaml_file), 'r', **open_opts) as fh:
+ self.yaml_data = yaml.full_load(fh)
+ except yaml.YAMLError as e:
+ msg = _("Error in YAML file {f!r}: {e}.").format(
+ f=str(yaml_file), e=e)
+ if hasattr(e, 'problem_mark'):
+ mark = e.problem_mark
+ msg += " " + _("Error position: {li}:{c}").format(
+ li=mark.line + 1, c=mark.column + 1)
+ raise ExpectedHandlerError(msg)
+
+ if self.verbose > 2:
+ LOG.debug(_("Read data from YAML file:") + "\n" + pp(self.yaml_data))
+
+ if not isinstance(self.yaml_data, dict):
+ msg = _(
+ "Data read from YAML file {f!r} are not a dictionary, "
+ "but a {c} object instead.").format(
+ f=str(yaml_file), c=self.yaml_data.__class__.__name__)
+ raise ExpectedHandlerError(msg)
+
+ for key in self.yaml_data.keys():
+ if key.lower() == 'simulate':
+ self.simulate = to_bool(self.yaml_data[key])
+
+ # -------------------------------------------------------------------------·
+ def eval_yaml_data(self):
+
+ self.vm_names = []
+
+ # Searching for default VM definition
+ LOG.debug(_("Searching for default VM definition ..."))
+ for key in self.yaml_data.keys():
+
+ if self.re_default.match(key):
+ vm = self._eval_tpl_vm(name='Default VM', vm_def=self.yaml_data[key])
+ if vm:
+ self.default_vm = vm
+
+ # Searching for VM definitions
+ LOG.debug(_("Searching for VM definitions ..."))
+ for key in self.yaml_data.keys():
+ if self.re_vm_key.match(key):
+ for vm_def in self.yaml_data[key]:
+ vm = self._eval_vm(vm_def, template_vm=self.default_vm)
+ if vm:
+ self.vms.append(vm)
+
+ # Searching for groups
+ for key in self.yaml_data.keys():
+ if self.re_group.match(key):
+ self._eval_vm_groups(self.yaml_data[key], template_vm=self.default_vm, depth=1)
+
+ if self.verbose > 2:
+ vm_list = []
+ for vm in self.vms:
+ vm_list.append(vm.as_dict())
+ LOG.debug(_("Evaluated VMs:") + "\n" + pp(vm_list))
+
+ # -------------------------------------------------------------------------·
+ def _eval_tpl_vm(self, name, vm_def, template_vm=None):
+
+ try:
+ vm = TerraformVm.from_def(
+ vm_def, name=name, is_template=True, template_vm=template_vm, appname=self.appname,
+ verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
+ force=self.force, terminal_has_colors=self.terminal_has_colors)
+ except Exception as e:
+ if self.verbose > 2:
+ self.handle_error(str(e), e.__class__.__name__, True)
+ else:
+ LOG.error(_("{c} in evaluating template VM: {e}").format(
+ c=e.__class__.__name__, e=e))
+ self.eval_errors += 1
+ return None
+
+ if self.verbose > 2:
+ LOG.debug(_(
+ "Defined Terraform Template VM {n!r}:").format(
+ n=vm.name) + "\n" + pp(vm.as_dict()))
+
+ return vm
+
+ # -------------------------------------------------------------------------·
+ def _eval_vm(self, vm_def, template_vm=None):
+
+ try:
+ vm = TerraformVm.from_def(
+ vm_def, is_template=False, template_vm=template_vm, appname=self.appname,
+ verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
+ force=self.force, terminal_has_colors=self.terminal_has_colors)
+ except Exception as e:
+ if self.verbose > 2:
+ self.handle_error(str(e), e.__class__.__name__, True)
+ else:
+ LOG.error(_("{c} in evaluating VM: {e}").format(c=e.__class__.__name__, e=e))
+ self.eval_errors += 1
+ return None
+
+ if self.verbose > 3:
+ LOG.debug(_(
+ "Defined Terraform-VM {n!r}:").format(n=vm.name) + "\n" + pp(vm.as_dict()))
+
+ if vm.name in self.vm_names:
+ LOG.error(_("VM {!r} is already defined.").format(vm.name))
+ self.eval_errors += 1
+ return None
+
+ return vm
+
+ # -------------------------------------------------------------------------·
+ def _eval_vm_groups(self, groups_def, template_vm=None, depth=1):
+
+ if not isinstance(groups_def, list):
+ msg = _("Group definition list is not a list:") + "\n" + pp(groups_def)
+ LOG.error(msg)
+ self.eval_errors += 1
+ return
+
+ if depth >= self.max_groups_depth:
+ LOG.warn(_("Maximum recursion depth for VM groups of {} reached.").format(depth))
+ return
+
+ if self.verbose > 2:
+ LOG.debug(_("Evaluating group list:") + "\n" + pp(groups_def))
+ if self.verbose > 3:
+ LOG.debug(_("Used template: {!r}").format(template_vm))
+
+ for group_def in groups_def:
+ self._eval_vm_group(group_def, template_vm=template_vm, depth=depth)
+
+ # -------------------------------------------------------------------------·
    def _eval_vm_group(self, group_def, template_vm=None, depth=1):
        """Evaluate a single VM group definition (a dict).

        A group may carry its own name, a group-local default (template) VM,
        VM definitions and nested sub groups. Errors are logged and counted
        in self.eval_errors instead of raising.

        @param group_def: the group definition dict from the YAML data
        @param template_vm: the inherited template VM (may be overridden
            by a group-local default VM)
        @param depth: current recursion depth, passed on incremented
        """
        if not isinstance(group_def, dict):
            msg = _("VM definition is not a dictionary:") + "\n" + pp(group_def)
            LOG.error(msg)
            self.eval_errors += 1
            return

        group_template = template_vm
        group_name = None

        # Searching for the group name - the last matching key wins.
        for key in group_def.keys():
            if self.re_group_name.match(key) and str(group_def[key]).strip():
                group_name = str(group_def[key]).strip()

        if not group_name:
            # NOTE(review): unlike the other error paths this one does not
            # increment self.eval_errors - confirm whether that is intended.
            LOG.error(_("No group name defined."))
            return

        # Searching for a group-local default VM definition; the first
        # matching key wins (break below).
        LOG.debug(_("Searching for group default VM definition in group {!r} ...").format(
            group_name))
        for key in group_def.keys():

            if self.re_default.match(key):
                vm_name = 'Default VM group {!r}'.format(group_name)
                vm = self._eval_tpl_vm(
                    name=vm_name, vm_def=group_def[key], template_vm=template_vm)
                if vm:
                    group_template = vm
                break

        n = None
        if group_template:
            n = group_template.name
        LOG.debug(_("Used template for creating VMs in group {g!r}: {n!r}").format(
            g=group_name, n=n))
        if self.verbose > 3:
            # NOTE(review): this would raise AttributeError if group_template
            # is None - presumably a template is always set here; verify.
            LOG.debug(_("Used template structure:") + "\n" + pp(group_template.as_dict()))

        # Searching for VM definitions of this group.
        LOG.debug(_("Searching for VM definitions in group {!r} ...").format(group_name))
        for key in group_def.keys():
            if self.re_vm_key.match(key):
                for vm_def in group_def[key]:
                    vm = self._eval_vm(vm_def, template_vm=group_template)
                    if vm:
                        self.vms.append(vm)

        # Searching for nested sub groups (bounded recursion).
        for key in group_def.keys():
            if self.re_group.match(key):
                self._eval_vm_groups(
                    group_def[key], template_vm=group_template, depth=depth + 1)
+
+ # -------------------------------------------------------------------------·
    def explore_vsphere_templates(self):
        """Collect and resolve all vSphere templates used by the VMs.

        For every configured vSphere the template names of all VMs are
        collected into config.vsphere[vname].used_templates, then each
        template is looked up in that vSphere and cached in
        self.vsphere_templates. Missing templates are counted in
        self.eval_errors.
        """
        LOG.info(_("Exploring all vSphere templates ..."))

        for vname in self.vsphere:

            if vname not in self.vsphere_templates:
                self.vsphere_templates[vname] = {}

            self.config.vsphere[vname].used_templates = []

            # Collect the distinct template names of all VMs.
            for vm in self.vms:
                template_name = vm.vm_template
                if template_name:
                    if template_name not in self.config.vsphere[vname].used_templates:
                        self.config.vsphere[vname].used_templates.append(template_name)
                else:
                    LOG.error(_("VM {!r} has not template defined.").format(vm.name))
                    self.eval_errors += 1

            msg = _("All {} VSPhere templates to explore:").format(vname)
            msg += "\n" + pp(self.config.vsphere[vname].used_templates)
            LOG.debug(msg)

            for template_name in self.config.vsphere[vname].used_templates:

                # Already resolved earlier - skip.
                if template_name in self.vsphere_templates[vname]:
                    continue

                LOG.debug(_("Searching for template {t!r} in VSPhere {v!r} ...").format(
                    t=template_name, v=vname))
                # Exact, case-insensitive match on the template name.
                re_vm = re.compile(r'^' + re.escape(template_name) + r'$', re.IGNORECASE)
                vm_list = self.vsphere[vname].get_vms(re_vm, as_obj=True, stop_at_found=True)
                if vm_list:
                    vm = vm_list[0]
                    tname = vm.name.lower()
                    # NOTE(review): the guard tests the lowercased VM name,
                    # but the cache is keyed by template_name - these keys
                    # may never match; confirm the intended keying.
                    if tname not in self.vsphere_templates[vname]:
                        self.vsphere_templates[vname][template_name] = vm
                else:
                    LOG.error(_("Template {t!r} not found in VSPhere {v!r}.").format(
                        t=template_name, v=vname))
                    self.eval_errors += 1

        if self.verbose > 2:
            msg = _("All explored vSphere templates:")
            out_dict = {}
            for vname in self.vsphere_templates:
                out_dict[vname] = {}
                for tname in self.vsphere_templates[vname]:
                    out_dict[vname][tname] = self.vsphere_templates[vname][tname].as_dict()
            msg += "\n" + pp(out_dict)
            LOG.debug(msg)
+
+ # -------------------------------------------------------------------------·
+ def validate_clusters(self):
+
+ print()
+ LOG.info(_("Validating existence of computing clusters of the VMs."))
+
+ clusters = {}
+
+ for vm in self.vms:
+
+ vname = vm.vsphere
+ if vname not in clusters:
+ clusters[vname] = {}
+
+ if vm.cluster in clusters:
+ clusters[vname][vm.cluster].append(vm.name)
+ else:
+ clusters[vname][vm.cluster] = [vm.name]
+
+ for vname in clusters.keys():
+ for cluster in clusters[vname].keys():
+
+ vms = clusters[vname][cluster]
+
+ cl = str(cluster)
+ LOG.debug(_(
+ "Checking existence of computing cluster {c!r} in VSPhere {v!r} ...").format(
+ c=cl, v=vname))
+
+ vsphere = self.vsphere[vname]
+ vmw_cluster = vsphere.get_cluster_by_name(cl)
+ if vmw_cluster:
+ if self.verbose > 1:
+ LOG.debug(_(
+ "Found computing cluster {cl!r} in VSPhere {v!r} (defined for VMs "
+ "{vms}).").format(cl=vmw_cluster.name, v=vname, vms=pp(vms)))
+ else:
+ LOG.error(_(
+ "Computing cluster {cl!r} (defined for VMs {vms}) in VSPhere {v!r} not "
+ "found.").format(cl=cl, vms=pp(vms), v=vname))
+ self.eval_errors += 1
+
+ # -------------------------------------------------------------------------·
    def validate_vms(self):
        """Check which of the defined VMs already exist in VMWare.

        Existing VMs are marked (already_existing, datastore taken over from
        the live VM) and remembered in self.existing_vms; self.vms is
        rewritten in tf_name order. Aborts when no VM remains to deploy.

        @raise ExpectedHandlerError: if there are no VMs to deploy
        """
        print()
        LOG.info(_("Validating existence of VMs in VMWare."))
        vms2perform = []

        for vm in sorted(self.vms, key=attrgetter('tf_name')):

            # Progress output on the console, one line per VM.
            print(" * {} ".format(vm.fqdn), end='', flush=True)
            if self.verbose:
                print()
            vs_name = vm.vsphere
            vsphere = self.vsphere[vs_name]

            # Paths of an already existing VM with this FQDN, if any.
            vm_paths = None
            if vs_name in self.all_vms:
                if vm.fqdn in self.all_vms[vs_name]:
                    vm_paths = self.all_vms[vs_name][vm.fqdn]

            if vm_paths:
                msg = _('[{m}] - VM is already existing in VSphere {v!r}, path {p!r}.').format(
                    m=self.colored('Existing', 'YELLOW'), v=vs_name, p=pp(vm_paths))
                print(msg, end='', flush=True)
                if self.verbose:
                    print()

                # Take the datastore over from the live VM.
                vm_info = vsphere.get_vm(vm.fqdn, vsphere_name=vs_name, as_obj=True)
                if self.verbose > 2:
                    LOG.debug(_("VM info:") + "\n" + pp(vm_info.as_dict(bare=True)))
                ds = vm_info.config_path_storage
                LOG.debug(_("Datastore of VM {vm!r}: {ds!r}.").format(vm=vm.name, ds=ds))
                vm.datastore = ds
                vm.already_existing = True
                self.existing_vms.append(vm_info)

            else:

                print('[{}] '.format(self.colored('OK', 'GREEN')), end='', flush=True)
                vm.already_existing = False

            vms2perform.append(vm)
            print()

        # From now on work on the sorted list.
        self.vms = vms2perform

        print()

        if not len(self.vms):
            print()
            print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
            print(self.colored('* ' + _('CAUTION!'), ('BOLD', 'RED')), file=sys.stderr)
            print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
            print()
            print(
                self.colored(_('Did not found any VM to deploy!'), ('BOLD', 'RED')),
                file=sys.stderr)
            print()
            raise ExpectedHandlerError(_("No VMs to deploy"))
+
+ # -------------------------------------------------------------------------·
+ def validate_storages(self):
+
+ self._validate_ds_clusters()
+ self._validate_datastores()
+
+ if self.verbose:
+ if self.used_dc_clusters:
+ out_lines = []
+ for vs_name in self.used_dc_clusters:
+ for cluster in self.used_dc_clusters[vs_name]:
+ out_lines.append(' * VSphere {v!r}: {c}'.format(
+ v=vs_name, c=cluster))
+ out = '\n'.join(out_lines)
+ LOG.debug(_("Used datastore clusters:") + "\n" + out)
+ else:
+ LOG.debug(_("No datastore clusters are used."))
+ if self.used_datastores:
+ out_lines = []
+ for vs_name in self.used_datastores:
+ for ds in self.used_datastores[vs_name]:
+ out_lines.append(' * VSphere {v!r}: {ds}'.format(v=vs_name, ds=ds))
+ out = '\n'.join(out_lines)
+ LOG.debug(_("Used datastors:") + "\n" + out)
+ else:
+ LOG.debug(_("No datastores are used."))
+
+ # -------------------------------------------------------------------------·
+ def _validate_ds_clusters(self):
+
+ LOG.info(_("Validating given datastore clusters of VMs ..."))
+
+ for vm in self.vms:
+
+ if not vm.ds_cluster:
+ continue
+
+ self._validate_dscluster_vm(vm)
+
+ # -------------------------------------------------------------------------·
    def _validate_dscluster_vm(self, vm):
        """Validate the datastore cluster of a single VM.

        Verifies its existence (case-insensitive), checks the free space
        against the summed disk sizes of a new VM, books the needed space
        and registers the cluster in self.used_dc_clusters. Errors are
        counted in self.eval_errors.
        """
        # Disk space is only needed for VMs which do not exist yet.
        needed_gb = 0.0
        if not vm.already_existing:
            for unit_number in vm.disks.keys():
                disk = vm.disks[unit_number]
                needed_gb += disk.size_gb

        vs_name = vm.vsphere
        vsphere = self.vsphere[vs_name]

        found = False
        for cluster_name in vsphere.ds_clusters.keys():
            if cluster_name.lower() == vm.ds_cluster.lower():
                if self.verbose > 2:
                    LOG.debug(_(
                        "Found datastore cluster {c!r} in VSphere {v!r} for VM {n!r}.").format(
                        n=vm.name, v=vs_name, c=vm.ds_cluster))
                # Normalize the spelling to the canonical cluster name.
                if vm.ds_cluster != cluster_name:
                    LOG.debug(_("Setting datastore cluster for VM {n!r} to {c!r} ...").format(
                        n=vm.name, c=cluster_name))
                    vm.ds_cluster = cluster_name
                ds_cluster = vsphere.ds_clusters[cluster_name]
                if self.verbose > 2:
                    LOG.debug(_(
                        "Free space of cluster {c!r} in VSphere {v!r} before provisioning: "
                        "{a:0.1f} GiB.").format(
                        c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
                if ds_cluster.avail_space_gb < needed_gb:
                    LOG.error(_(
                        "Datastore cluster {d!r} in VSphere {v!r} has not sufficient space for "
                        "storage of VM {vm!r} (needed {n:0.1f} GiB, available {a:0.1f} "
                        "GiB).").format(
                        d=cluster_name, v=vs_name, vm=vm.name, n=needed_gb,
                        a=ds_cluster.avail_space_gb))
                    self.eval_errors += 1
                else:
                    # Book the needed space, so later VMs see the reduced capacity.
                    ds_cluster.calculated_usage += needed_gb
                    if self.verbose > 1:
                        LOG.debug(_(
                            "Free space in cluster {c!r} in VSphere {v!r} after provisioning: "
                            "{a:0.1f} GiB.").format(
                            c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
                found = True
                if vs_name not in self.used_dc_clusters:
                    self.used_dc_clusters[vs_name] = []
                if cluster_name not in self.used_dc_clusters[vs_name]:
                    self.used_dc_clusters[vs_name].append(cluster_name)
                break

        if not found:
            LOG.error(_("Datastore cluster {c!r} of VM {n!r} not found in VSphere {v!r}.").format(
                n=vm.name, c=vm.ds_cluster, v=vs_name))
            self.eval_errors += 1
+
+ # -------------------------------------------------------------------------·
+ def _validate_datastores(self):
+
+ LOG.info(_("Validating given datastores of VMs and assign failing ..."))
+
+ for vm in self.vms:
+
+ if vm.ds_cluster:
+ if vm.datastore:
+ LOG.debug(_("Removing defined datastore {d!r} for VM {n!r} ...").format(
+ d=vm.datastore, n=vm.name))
+ vm.datastore = None
+ continue
+
+ self._validate_ds_vm(vm)
+
+ # -------------------------------------------------------------------------·
+ def _validate_ds_vm(self, vm):
+
+ needed_gb = 0.0
+ if not vm.already_existing:
+ for unit_number in vm.disks.keys():
+ disk = vm.disks[unit_number]
+ needed_gb += disk.size_gb
+
+ vs_name = vm.vsphere
+ vsphere = self.vsphere[vs_name]
+
+ vm_cluster = None
+ for cluster in vsphere.clusters:
+ if cluster.name.lower() == vm.cluster.lower():
+ vm_cluster = cluster
+ break
+ if not vm_cluster:
+ msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
+ c=vm.cluster, n=vm.name)
+ raise HandlerError(msg)
+
+ if vm.datastore:
+ found = False
+ found_ds_name = None
+ for ds_name in vsphere.datastores:
+ if ds_name.lower() == vm.datastore.lower():
+ if self.verbose > 2:
+ LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
+ n=vm.name, d=vm.datastore, v=vs_name))
+ if ds_name not in vm_cluster.datastores:
+ LOG.warn(_("Datastore {d!r} not available in cluster {c!r}.").format(
+ d=ds_name, c=vm.cluster))
+ break
+ if vm.datastore != ds_name:
+ LOG.debug(_("Setting datastore for VM {n!r} to {d!r} ...").format(
+ n=vm.name, d=ds_name))
+ vm.datastore = ds_name
+ ds = vsphere.datastores[ds_name]
+ if ds.avail_space_gb < needed_gb:
+ LOG.error(_(
+ "Datastore {d!r} has not sufficient space for storage of VM "
+ "{v!r} (needed {n:0.1f} GiB, available {a:0.1f} GiB).").format(
+ d=ds_name, v=vm.name, n=needed_gb, a=ds.avail_space_gb))
+ self.eval_errors += 1
+ else:
+ ds.calculated_usage += needed_gb
+ found = True
+ found_ds_name = ds_name
+ break
+ if not found:
+ LOG.error(_("Datastore {d!r} of VM {n!r} not found in VSPhere {v!r}.").format(
+ n=vm.name, d=vm.datastore, v=vs_name))
+ self.eval_errors += 1
+ if vs_name not in self.used_datastores:
+ self.used_datastores[vs_name] = []
+ if found_ds_name not in self.used_datastores[vs_name]:
+ self.used_datastores[vs_name].append(found_ds_name)
+ return
+
+ ds_name = vsphere.datastores.find_ds(
+ needed_gb, vm.ds_type, use_ds=copy.copy(vm_cluster.datastores), no_k8s=True)
+ if ds_name:
+ LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
+ d=ds_name, n=vm.name, v=vs_name))
+ vm.datastore = ds_name
+ if vs_name not in self.used_datastores:
+ self.used_datastores[vs_name] = []
+ if ds_name not in self.used_datastores[vs_name]:
+ self.used_datastores[vs_name].append(ds_name)
+ else:
+ self.eval_errors += 1
+
+ # -------------------------------------------------------------------------·
+ def validate_interfaces(self):
+
+ LOG.info(_("Validating interfaces of VMs and assign networks ..."))
+ for vm in self.vms:
+ self._validate_interfaces_vm(vm)
+
+ if self.verbose > 2:
+ LOG.debug(_("Validated FQDNs:") + "\n" + pp(self.fqdns))
+ LOG.debug(_("Validated Addresses:") + "\n" + pp(self.addresses))
+
+ if self.verbose:
+
+ lines = []
+ for vs_name in self.used_networks:
+ for nw in self.used_networks[vs_name]:
+ lines.append(' * VSphere {v!r}: {n}'.format(
+ v=vs_name, n=nw))
+ out = '\n'.join(lines)
+ LOG.debug(_("Used networks:") + "\n" + out)
+
+ lines = []
+ for pair in self.dns_mapping['forward']:
+ line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
+ lines.append(line)
+ LOG.debug(_("Used forward DNS entries:") + "\n" + '\n'.join(lines))
+
+ lines = []
+ for pair in self.dns_mapping['reverse']:
+ line = ' * {a!r} => {n!r}'.format(n=pair[1], a=str(pair[0]))
+ lines.append(line)
+ LOG.debug(_("Used reverse DNS entries:") + "\n" + '\n'.join(lines))
+
+ # -------------------------------------------------------------------------·
+ def _validate_interfaces_vm(self, vm):
+
+ vs_name = vm.vsphere
+ LOG.debug(_("Checking interfaces of VM {n!r} in VSPhere {v!r} ...").format(
+ n=vm.name, v=vs_name))
+
+ if not vm.interfaces:
+ LOG.error(_("No interfaces defined for VM {!r}.").format(vm.name))
+ self.eval_errors += 1
+ return
+
+ vsphere = self.vsphere[vs_name]
+
+ vm_cluster = None
+ for cluster in vsphere.clusters:
+ if cluster.name.lower() == vm.cluster.lower():
+ vm_cluster = cluster
+ break
+ if not vm_cluster:
+ msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
+ c=vm.cluster, n=vm.name)
+ raise HandlerError(msg)
+
+ i = -1
+ for iface in vm.interfaces:
+ i += 1
+ self._validate_interface_of_vm(
+ vm_name=vm.name, iface=iface, vs_name=vs_name, vm_cluster=vm_cluster, i=i)
+
+ # -------------------------------------------------------------------------·
+ def _validate_interface_of_vm(self, vm_name, iface, vs_name, vm_cluster, i=0):
+
+ vsphere = self.vsphere[vs_name]
+
+ if self.verbose > 1:
+ LOG.debug(_("Checking interface {i} of VM {n!r} ...").format(
+ i=i, n=vm_name))
+
+ if not iface.address:
+ LOG.error(_("Interface {i} of VM {n!r} has no defined address.").format(
+ i=i, n=vm_name))
+ self.eval_errors += 1
+ return
+
+ if not iface.fqdn:
+ LOG.error(_("Interface {i} of VM {n!r} has no defined FQDN.").format(
+ i=i, n=vm_name))
+ self.eval_errors += 1
+ return
+
+ if iface.fqdn in self.fqdns:
+ LOG.error(_(
+ "FQDN {f!r} already defined for VM {va!r}({ia}) should be set "
+ "for interface {ib} of {vb!r}.").format(
+ f=iface.fqdn, va=self.fqdns[iface.fqdn][0], ia=self.fqdns[iface.fqdn][1],
+ ib=i, vb=vm_name))
+ self.eval_errors += 1
+ return
+
+ self.fqdns[iface.fqdn] = (vm_name, i)
+
+ if iface.address_v4:
+ if iface.address_v4 in self.addresses:
+ LOG.error(_(
+ "IPv4 address {a} already defined for VM {va!r}({ia}) should be set "
+ "for interface {ib} of {vb!r}.").format(
+ a=iface.address_v4, va=self.fqdns[iface.fqdn][0],
+ ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
+ self.eval_errors += 1
+ return
+ self.addresses[iface.address_v4] = (vm_name, i)
+ pair = (iface.fqdn, iface.address_v4)
+ self.dns_mapping['forward'].append(pair)
+ pair = (iface.address_v4, iface.fqdn)
+ self.dns_mapping['reverse'].append(pair)
+
+ if iface.address_v6:
+ if iface.address_v6 in self.addresses:
+ LOG.error(_(
+ "IPv6 address {a} already defined for VM {va!r}({ia}) should be set "
+ "for interface {ib} of {vb!r}.").format(
+ a=iface.address_v6, va=self.fqdns[iface.fqdn][0],
+ ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
+ self.eval_errors += 1
+ return
+ self.addresses[iface.address_v6] = (vm_name, i)
+ pair = (iface.fqdn, iface.address_v6)
+ self.dns_mapping['forward'].append(pair)
+ pair = (iface.address_v6, iface.fqdn)
+ self.dns_mapping['reverse'].append(pair)
+
+ network = iface.network
+ if network:
+ if network not in vsphere.networks:
+ LOG.error(_(
+ "Could not find network {n!r} for VM {v!r}, interface {i}.").format(
+ n=network, v=vm_name, i=i))
+ self.eval_errors += 1
+ return
+ else:
+ network = vsphere.networks.get_network_for_ip(
+ iface.address_v4, iface.address_v6)
+ if not network:
+ self.eval_errors += 1
+ return
+ iface.network = network
+ LOG.debug(_("Found network {n!r} for interface {i} of VM {v!r}.").format(
+ n=network, i=i, v=vm_name))
+
+ if network not in vm_cluster.networks:
+ LOG.error(_(
+ "Network {n!r} for interface {i} of VM {v!r} not available in "
+ "cluster {c!r}.").format(n=network, v=vm_name, i=i, c=vm_cluster.name))
+ self.eval_errors += 1
+ return
+ LOG.debug(_("Network {n!r} is available in cluster {c!r}.").format(
+ n=network, c=vm_cluster.name))
+
+ net = vsphere.networks[network]
+ if not iface.gateway:
+ LOG.debug(_("Setting gateway of interface {i} of VM {v!r} to {g}.").format(
+ i=i, v=vm_name, g=net.gateway))
+ iface.gateway = net.gateway
+
+ if net.network:
+ if net.network.version == 4:
+ if iface.netmask_v4 is None:
+ iface.netmask_v4 = net.network.prefixlen
+ else:
+ if iface.netmask_v6 is None:
+ iface.netmask_v6 = net.network.prefixlen
+
+ if vs_name not in self.used_networks:
+ self.used_networks[vs_name] = []
+ if network not in self.used_networks[vs_name]:
+ self.used_networks[vs_name].append(network)
+
+ # -------------------------------------------------------------------------·
+ def validate_dns_mappings(self):
+
+ LOG.info(_("Validating DNS mappings ..."))
+ self._validate_forward_dns_mappings()
+ self._validate_reverse_dns_mappings()
+
+ lines = []
+ if self.dns_mappings2create['forward']:
+ for pair in self.dns_mappings2create['forward']:
+ line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
+ lines.append(line)
+ else:
+ lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
+ LOG.info(_("Forward DNS entries to create:") + "\n" + '\n'.join(lines))
+
+ lines = []
+ if self.dns_mappings2create['reverse']:
+ for pair in self.dns_mappings2create['reverse']:
+ line = ' * {r} ({a!r}) => {n!r}'.format(
+ r=pair[0].reverse_pointer, n=pair[1], a=str(pair[0]))
+ lines.append(line)
+ else:
+ lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
+ LOG.info(_("Reverse DNS entries to create:") + "\n" + '\n'.join(lines))
+
+ # -------------------------------------------------------------------------·
+ def _validate_forward_dns_mappings(self):
+
+ if not self.dns_mapping['forward']:
+ return
+
+ LOG.debug(_("Validating forward DNS mappings ..."))
+
+ for (fqdn, address) in self.dns_mapping['forward']:
+
+ if self.verbose > 1:
+ LOG.debug(_("Validating {f!r} => {a!r}.").format(f=fqdn, a=str(address)))
+
+ results_v4 = []
+ results_v6 = []
+
+ try:
+ addr_infos = socket.getaddrinfo(fqdn, 80)
+ except socket.gaierror:
+ addr_infos = []
+
+ for addr_info in addr_infos:
+ if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
+ continue
+ addr = ipaddress.ip_address(addr_info[4][0])
+ if addr.version == 4:
+ if addr not in results_v4:
+ results_v4.append(addr)
+ else:
+ if addr not in results_v6:
+ results_v6.append(addr)
+ if self.verbose > 2:
+ if results_v4 or results_v6:
+ lines = []
+ for addr in results_v4 + results_v6:
+ lines.append(' * {}'.format(str(addr)))
+ out = '\n'.join(lines)
+ LOG.debug(_("Found existing addresses for {f!r}:").format(f=fqdn) + '\n' + out)
+ else:
+ LOG.debug(_("Did not found existing addresses for {!r}.").format(fqdn))
+
+ if address.version == 4:
+ if not results_v4:
+ self.dns_mappings2create['forward'].append((fqdn, address))
+ continue
+ if address in results_v4:
+ LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
+ f=fqdn, a=str(address)))
+ continue
+ else:
+ if not results_v6:
+ self.dns_mappings2create['forward'].append((fqdn, address))
+ continue
+ if address in results_v6:
+ LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
+ f=fqdn, a=str(address)))
+ continue
+
+ alist = '\n'.join(map(lambda x: ' * {}'.format(str(x)), results_v4 + results_v6))
+ msg = (_(
+ "FQDN {f!r} has already existing addresses, "
+ "but none of them are {a!r}:").format(f=fqdn, a=str(address)) + "\n" + alist)
+ if self.ignore_existing_dns:
+ LOG.warn(msg)
+ self.dns_mappings2create['forward'].append((fqdn, address))
+ else:
+ LOG.error(msg)
+ self.eval_errors += 1
+
+ # -------------------------------------------------------------------------·
    def _validate_reverse_dns_mappings(self):
        """Check which reverse (PTR) DNS records still must be created.

        Pointers already referring to the expected FQDN are skipped;
        pointers to a different FQDN are counted in self.eval_errors.
        """
        if not self.dns_mapping['reverse']:
            return

        LOG.debug(_("Validating reverse DNS mappings ..."))

        for (address, fqdn) in self.dns_mapping['reverse']:

            if self.verbose > 1:
                LOG.debug(_("Validating {a!r} => {f!r}.").format(f=fqdn, a=str(address)))

            # Resolve the currently existing PTR record, if any.
            try:
                info = socket.gethostbyaddr(str(address))
            except socket.herror:
                info = []
            if self.verbose > 2:
                LOG.debug(_("Got reverse info:") + "\n" + str(info))
            ptr = None
            if info:
                ptr = info[0]

            if not ptr:
                if self.verbose > 1:
                    LOG.debug(_("Did not found reverse pointer for {!r}.").format(str(address)))
                self.dns_mappings2create['reverse'].append((address, fqdn))
                continue

            # Compare case-insensitively and without a trailing dot.
            ptr = RE_DOT_AT_END.sub('', ptr).lower()
            fqdn_canon = RE_DOT_AT_END.sub('', fqdn).lower()

            if self.verbose > 1:
                LOG.debug(_("Found reverse pointer {a!r} => {f!r}.").format(f=ptr, a=str(address)))
            if fqdn_canon == ptr:
                if self.verbose > 1:
                    LOG.debug(_("Reverse pointer for {!r} was already existing.").format(
                        str(address)))
                continue

            LOG.error(_("Address {a!r} has already an existing reverse pointer to {p!r}.").format(
                a=str(address), p=ptr))
            self.eval_errors += 1
+
+ # -------------------------------------------------------------------------·
    def get_tf_name_network(self, net_name, *args):
        """Map a network name to its terraform name.

        An optional single positional argument serves as default value,
        analogous to dict.get(); without it a KeyError is raised for an
        unknown network name.

        @raise TypeError: if more than one default argument is given
        @raise KeyError: if the network is unknown and no default was given
        """
        default = None
        has_default = False
        if len(args):
            if len(args) > 1:
                # NOTE(review): ngettext() is called with a fixed count of 2,
                # so the plural text claiming 'at most {e} arguments' (e=2) is
                # always chosen although only one default argument is
                # accepted - confirm the intended wording.
                msg = ngettext(
                    "Method {c}.{m} expected at most one argument, got {n}.",
                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
                    c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
                raise TypeError(msg)
            default = args[0]
            has_default = True

        # NOTE(review): elsewhere self.vsphere is indexed by vsphere name
        # (self.vsphere[vname]); here attributes are accessed on it directly -
        # confirm that self.vsphere itself provides network_mapping.
        if net_name in self.vsphere.network_mapping:
            return self.vsphere.network_mapping[net_name]
        if has_default:
            return default
        raise KeyError(_("Did not found network {!r}.").format(net_name))
+
+ # --------------------------------------------------------------------------
+ def get_tf_name_ds_cluster(self, dsc_name, *args):
+
+ default = None
+ has_default = False
+ if len(args):
+ if len(args) > 1:
+ msg = ngettext(
+ "Method {c}.{m} expected at most one argument, got {n}.",
+ "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+ c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
+ raise TypeError(msg)
+ default = args[0]
+ has_default = True
+
+ if dsc_name in self.vsphere.ds_cluster_mapping:
+ return self.vsphere.ds_cluster_mapping[dsc_name]
+ if has_default:
+ return default
+ raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
+
+ # --------------------------------------------------------------------------
    def get_tf_name_datastore(self, ds_name, *args):
        """Map a datastore name to its terraform name.

        An optional single positional argument serves as default value,
        analogous to dict.get(); without it a KeyError is raised for an
        unknown datastore name.

        @raise TypeError: if more than one default argument is given
        @raise KeyError: if the datastore is unknown and no default was given
        """
        default = None
        has_default = False
        if len(args):
            if len(args) > 1:
                # NOTE(review): ngettext() with a fixed count of 2 always
                # selects the plural message - see get_tf_name_network().
                msg = ngettext(
                    "Method {c}.{m} expected at most one argument, got {n}.",
                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
                    c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
                raise TypeError(msg)
            default = args[0]
            has_default = True

        if ds_name in self.vsphere.ds_mapping:
            return self.vsphere.ds_mapping[ds_name]
        if has_default:
            return default
        raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
+
+ # --------------------------------------------------------------------------
+ def perform_dns(self):
+
+ if self.config.no_pdns:
+ LOG.debug(_("Power DNS actions are not eceuted."))
+ return
+
+ print()
+ LOG.info(_("Performing DNS actions ..."))
+ print()
+
+ # TODO: Check for simulate and mappings to create
+
+ errors = 0
+
+ for (fqdn, address) in self.dns_mappings2create['forward']:
+ if not self._perform_dns_forward(fqdn, address):
+ errors += 1
+
+ for (address, fqdn) in self.dns_mappings2create['reverse']:
+ if not self._perform_dns_reverse(address, fqdn):
+ errors += 1
+
+ if errors:
+ msg = ngettext(
+ "There was one error in creating DNS mappings.",
+ "There were {n} errors in creating DNS mappings.", errors).format(n=errors)
+ raise ExpectedHandlerError(msg)
+ else:
+ if self.verbose > 1:
+ LOG.debug(_("No errors in creating DNS mappings."))
+
+ print()
+
+ for zone_name in self.updated_zones:
+ self._increase_zone_serial(zone_name)
+
+ # --------------------------------------------------------------------------
+ def _increase_zone_serial(self, zone_name):
+
+ LOG.info(_("Increasing serial of zone {!r}.").format(zone_name))
+
+ zone = self.pdns.zones[zone_name]
+ zone.increase_serial()
+ zone.notify()
+
+ # --------------------------------------------------------------------------
    def _perform_dns_forward(self, fqdn, address):
        """Create an A or AAAA record for the given FQDN.

        Returns True on success, False if no suitable zone was found.
        Modified zones are remembered in self.updated_zones.
        """
        record_type = 'A'
        addr_obj = ipaddress.ip_address(address)
        if addr_obj.version == 6:
            record_type = 'AAAA'

        canon_fqdn = self.pdns.canon_name(fqdn)

        zone_name = self.pdns.get_zone_for_item(canon_fqdn, is_fqdn=True)
        if zone_name:
            if self.verbose > 1:
                LOG.debug(_("Got zone {z!r} for FQDN {f!r}.").format(
                    z=zone_name, f=canon_fqdn))
        else:
            LOG.error(_("Did not found zone to insert {t}-record for {f!r}.").format(
                t=record_type, f=fqdn))
            return False

        zone = self.pdns.zones[zone_name]
        if addr_obj.is_private:
            # Private addresses get an extra comment for later identification.
            zone.add_address_record(
                fqdn, address, set_ptr=False, comment='local',
                account=self.config.pdns_comment_account, append_comments=True)
        else:
            zone.add_address_record(fqdn, address, set_ptr=False)
        if zone_name not in self.updated_zones:
            self.updated_zones.append(zone_name)
        return True
+
+ # --------------------------------------------------------------------------
+ def _perform_dns_reverse(self, address, fqdn):
+
+ LOG.debug(_("Trying to create PTR-record {a!r} => {f!r}.").format(
+ f=fqdn, a=str(address)))
+
+ pointer = self.pdns.canon_name(address.reverse_pointer)
+ if self.verbose > 1:
+ LOG.debug(_("PTR of {a!r}: {p!r}.").format(a=str(address), p=pointer))
+
+ zone_name = self.pdns.get_zone_for_item(pointer, is_fqdn=True)
+ if zone_name:
+ if self.verbose > 1:
+ LOG.debug(_("Got reverse zone {z!r} for address {a!r}.").format(
+ z=zone_name, a=str(address)))
+ else:
+ LOG.warn(_("Did not found zone to insert PTR-record {p!r} ({a}).").format(
+ p=pointer, a=str(address)))
+ return True
+
+ zone = self.pdns.zones[zone_name]
+ zone.add_ptr_record(pointer, fqdn)
+ if zone_name not in self.updated_zones:
+ self.updated_zones.append(zone_name)
+ return True
+
+ # --------------------------------------------------------------------------
    def ensure_project_dir(self):
        """Ensure existence and usability of the project directory.

        Creates the directory if necessary (honouring simulate mode),
        changes into it and refuses to run in a directory that already
        contains a terraform project (.terraform plus terraform.tfstate).

        @raise ExpectedHandlerError: on any unusable project directory
        """
        print()
        LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))

        if self.project_dir.exists():
            if self.project_dir.is_dir():
                LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
            else:
                msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
                raise ExpectedHandlerError(msg)
        else:
            LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
            if self.simulate:
                LOG.debug(_("Simulation mode - directory will not be created."))
            else:
                try:
                    os.makedirs(str(self.project_dir), mode=0o755)
                except PermissionError as e:
                    msg = _("Could not create directory {d!r}: {e}").format(
                        d=str(self.project_dir), e=e)
                    raise ExpectedHandlerError(msg)

        # In simulate mode the directory may legitimately still not exist.
        if not self.project_dir.exists():
            if self.simulate:
                return
            else:
                msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
                raise ExpectedHandlerError(msg)

        if not os.access(str(self.project_dir), os.W_OK):
            msg = _("No write access to directory {!r}.").format(str(self.project_dir))
            raise ExpectedHandlerError(msg)

        LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
        os.chdir(str(self.project_dir))

        # Remember the script dir relative to the (new) working directory.
        self.script_dir_rel = Path(os.path.relpath(
            str(self.script_dir), str(self.project_dir)))
        LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))

        if self.verbose > 1:
            LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
                str(self.project_dir)))

        tf_path = self.project_dir / '.terraform'
        if tf_path.exists() and not tf_path.is_dir():
            msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
                d=str(self.project_dir), w='.terraform')
            raise ExpectedHandlerError(msg)

        state_path = self.project_dir / 'terraform.tfstate'
        if state_path.exists() and not state_path.is_file():
            msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
                d=str(self.project_dir), w='terraform.tfstate')
            raise ExpectedHandlerError(msg)

        # Refuse to overwrite an already existing terraform project.
        if tf_path.is_dir() and state_path.is_file():
            msg = _(
                "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
                "Is this an old terraform project?").format(
                d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
            raise ExpectedHandlerError(msg)
+
+ # --------------------------------------------------------------------------
+ def clean_project_dir(self):
+ """Remove every file and directory found in self.project_dir.
+ Honours self.simulate - in dry-run mode removals are only logged."""
+
+ print()
+ LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
+
+ files = []
+ for path in self.project_dir.glob('*'):
+ files.append(path)
+ # '.terraform' is a hidden entry, so the '*' glob above misses it.
+ for path in self.project_dir.glob('.terraform'):
+ files.append(path)
+
+ if not files:
+ LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
+ return
+ for pfile in files:
+ # Re-check existence: an earlier rmtree may have removed children.
+ if pfile.exists():
+ if pfile.is_dir():
+ LOG.debug(_("Removing recursive directory {!r} ...").format(str(pfile)))
+ if not self.simulate:
+ shutil.rmtree(str(pfile))
+ else:
+ LOG.debug(_("Removing {!r} ...").format(str(pfile)))
+ if not self.simulate:
+ pfile.unlink()
+
+ # --------------------------------------------------------------------------
+ def create_terraform_files(self):
+ """Print a colored banner, then generate all terraform files for the
+ project by delegating to the create_*() helper methods."""
+
+ print()
+ print()
+ msg = _("Creating all necessary files for terraform.")
+ # ll is the banner inner width; at least 6, widened to fit the message.
+ ll = 6
+ if len(msg) > ll:
+ ll = len(msg)
+ print(self.colored('#' * (ll + 6), 'AQUA'))
+ line = self.colored('#', 'AQUA') + ' '
+ line += self.colored(msg.center(ll), 'YELLOW')
+ line += ' ' + self.colored('#', 'AQUA')
+ print(line)
+ print(self.colored('#' * (ll + 6), 'AQUA'))
+ print()
+ print()
+
+ self.create_varfiles()
+ self.create_dcfile()
+ self.create_backend_file()
+ self.create_instance_files()
+
+ # --------------------------------------------------------------------------
+ def create_varfiles(self):
+ """Write 'terraform.tfvars', optionally 'private.auto.tfvars' (vSphere
+ credentials, secured permissions) and 'variables.tf' (variable
+ declarations) into the current directory. In simulate mode the file
+ contents are only printed (when verbose), nothing is written."""
+
+ LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
+
+ # Grab the first (and presumably only) configured vSphere name.
+ vs_name = None
+ for vs_name in self.vsphere.keys():
+ break
+ if self.verbose > 1:
+ LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
+ w='dcfile', v=vs_name))
+
+ vs_host = self.config.vsphere[vs_name].host
+ vs_user = self.config.vsphere[vs_name].user
+ vs_pwd = self.config.vsphere[vs_name].password
+ vs_dc = self.config.vsphere[vs_name].dc
+
+ content = textwrap.dedent('''\
+ ## filename: terraform.tfvars
+ ## This file declares the values for the variables to be used in the instance.tf playbook
+
+ #
+ # ATTENTION!
+ #
+ # To avoid annoying questions for password and API key
+ # create manually a file 'terraform-private.auto.tfvars"
+ # with the following content:
+ #
+ # vsphere_username = "<USERNAME>"
+ # vsphere_userpassword = "<PASSWORD>"
+ #
+ # with the correct values. This file will not be under GIT control
+ #
+
+ ''')
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('terraform.tfvars', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('terraform.tfvars', self.std_file_permissions)
+
+ # Sensible stuff
+ # NOTE(review): "Sensible" presumably means "sensitive" here - the
+ # credentials file gets the restrictive std_secure_file_permissions.
+ if vs_user or vs_pwd:
+ content = '# Private sensible information. Please keep this file secret.\n\n'
+ if vs_user:
+ content += 'vsphere_username = "{}"\n'.format(vs_user)
+ if vs_pwd:
+ content += 'vsphere_userpassword = "{}"\n'.format(vs_pwd)
+ content += '\n'
+
+ LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
+
+ # File with variable declarations
+ content = textwrap.dedent('''\
+ # filename: variables.tf
+ # definition of the variables to be used in the play
+ # declaration happens in the file terraform.tfvars and private.auto.tfvars
+
+ ''')
+
+ # Doubled braces in the templates below are literal braces for HCL.
+ tpl = textwrap.dedent('''\
+ variable "vsphere_vcenter" {{
+ default = "{}"
+ description = "IP or DNS of the vSphere center."
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(vs_host)
+
+ tpl = textwrap.dedent('''\
+ variable "vsphere_username" {
+ description = "vSphere accountname to be used."
+ type = string
+ }
+
+ variable "vsphere_userpassword" {
+ description = "Password for vSphere accountname."
+ type = string
+ }
+
+ ''')
+ content += tpl
+
+ tpl = textwrap.dedent('''\
+ variable "vsphere_datacenter" {{
+ default = "{dc}"
+ description = "Name of the vSphere datacenter to use."
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(dc=vs_dc)
+
+ tpl = textwrap.dedent('''\
+ variable "timezone" {{
+ default = "{tz}"
+ description = "The global timezone used for VMs"
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(tz=self.tz_name)
+
+ LOG.debug(_("Creating {!r} ...").format('variables.tf'))
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('variables.tf', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('variables.tf', self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def create_dcfile(self):
+ """Write 'dc.tf': the vSphere provider configuration plus terraform
+ data sources for the datacenter, resource pools, datastore clusters,
+ datastores, networks and VM templates actually used by this project.
+ In simulate mode the content is only printed (when verbose)."""
+
+ # Grab the first configured vSphere name.
+ vs_name = None
+ for vs_name in self.vsphere.keys():
+ break
+ vsphere = self.vsphere[vs_name]
+
+ LOG.debug(_("Creating {!r} ...").format('dc.tf'))
+ if self.verbose > 1:
+ LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
+ w='dcfile', v=vs_name))
+
+ content = textwrap.dedent('''\
+ # filename: dc.tf
+ # Configuring the VMware VSphere Provider and some dependend common used objects
+
+ provider "vsphere" {
+ vsphere_server = var.vsphere_vcenter
+ user = var.vsphere_username
+ password = var.vsphere_userpassword
+ allow_unverified_ssl = true
+ ''')
+
+# if self.min_version_vsphere_provider:
+# content += ' version = ">= {}"\n'.format(
+# str(self.min_version_vsphere_provider))
+
+ content += textwrap.dedent('''\
+ }
+
+ data "vsphere_datacenter" "dc" {
+ name = var.vsphere_datacenter
+ }
+
+ ''')
+
+ # One resource-pool data source per cluster.
+ for cluster in vsphere.clusters:
+ tpl = textwrap.dedent('''\
+ data "vsphere_resource_pool" "{pv}" {{
+ name = "{pn}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(
+ pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
+
+ # Data sources only for datastore clusters actually referenced.
+ if self.used_dc_clusters:
+ for dsc_name in sorted(self.used_dc_clusters[vs_name], key=str.lower):
+ dsc_tf_name = vsphere.ds_cluster_mapping[dsc_name]
+ tpl = textwrap.dedent('''\
+ data "vsphere_datastore_cluster" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(tn=dsc_tf_name, n=dsc_name)
+
+ # Data sources only for datastores actually referenced.
+ if self.used_datastores:
+ for ds_name in sorted(self.used_datastores[vs_name], key=str.lower):
+ ds_tf_name = vsphere.ds_mapping[ds_name]
+ tpl = textwrap.dedent('''\
+ data "vsphere_datastore" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(tn=ds_tf_name, n=ds_name)
+
+ for net_name in sorted(self.used_networks[vs_name], key=str.lower):
+ net_tf_name = vsphere.network_mapping[net_name]
+ tpl = textwrap.dedent('''\
+ data "vsphere_network" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(n=net_name, tn=net_tf_name)
+
+ # One virtual-machine data source per used VM template.
+ if self.vsphere_templates:
+ for tname in sorted(self.vsphere_templates[vs_name].keys(), key=str.lower):
+ tpl_tf_name = self.vsphere_templates[vs_name][tname].tf_name
+ tpl = textwrap.dedent('''\
+ data "vsphere_virtual_machine" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(tn=tpl_tf_name, n=tname)
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('dc.tf', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('dc.tf', self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def create_backend_file(self):
+ """Write 'backend.tf' configuring the consul backend for the terraform
+ state (host, scheme and path come from self.config) and, if defined,
+ the minimum required terraform version."""
+
+ file_name = 'backend.tf'
+ LOG.debug(_("Creating {!r} ...").format(file_name))
+
+ tpl = textwrap.dedent('''\
+ # Configuration of the backend for storing the terraform status information
+ # and the minimum required version of terraform
+
+ terraform {{
+ backend "consul" {{
+ address = "{host}"
+ scheme = "{scheme}"
+ path = "{prefix}/{project}"
+ }}
+ ''')
+
+ # Prefer the full project name; fall back to the short one.
+ project = self.full_project_name
+ if not project:
+ project = self.project_name
+
+ content = tpl.format(
+ host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
+ prefix=self.config.tf_backend_path_prefix, project=project)
+
+ if self.min_version_terraform:
+ content += ' required_version = ">= {}"\n'.format(str(self.min_version_terraform))
+ else:
+ LOG.warn(_("No minimum version of Terraform defined."))
+
+ # Close the 'terraform' block opened in the template above.
+ content += '}\n\n'
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open(file_name, 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod(file_name, self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def create_instance_files(self):
+ """Create one 'instance.<name>.tf' file per VM, in a stable order
+ sorted by the VM's terraform name."""
+
+ LOG.debug(_("Creating terraform files for VM instances."))
+
+ for vm in sorted(self.vms, key=lambda x: x.tf_name):
+ self.create_instance_file(vm)
+
+ # --------------------------------------------------------------------------
+ def create_instance_file(self, vm):
+ """Write 'instance.<vm.name>.tf' for one VM by concatenating the
+ sections produced by the _create_instfile_*() helpers (general
+ resource attributes, network interfaces, disks, cdrom, clone and
+ provisioner customization). Simulate mode only prints the content."""
+
+ vs_name = vm.vsphere
+
+ fname = 'instance.' + vm.name + '.tf'
+ LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
+ f=fname, n=vm.name))
+
+ # guest_id: either a reference to the template's guest_id or the
+ # quoted configured default.
+ guest_id = self.config.guest_id
+ tpl_vm = None
+ if vm.vm_template:
+ tpl_vm = self.vsphere_templates[vs_name][vm.vm_template]
+ if self.verbose > 3:
+ LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
+ guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(tpl_vm.tf_name)
+ else:
+ guest_id = '"' + guest_id + '"'
+
+ content = self._create_instfile_general(vm, guest_id, tpl_vm)
+
+ i = 0
+ for iface in vm.interfaces:
+ content += self._create_instfile_if(vm, iface, i, tpl_vm)
+ i += 1
+
+ for unit_id in sorted(vm.disks.keys()):
+ content += self._create_instfile_disk(vm, unit_id)
+
+ content += textwrap.indent(textwrap.dedent('''\
+ cdrom {
+ client_device = "true"
+ }
+
+ '''), ' ')
+
+ content += self._create_instfile_custom(vm, tpl_vm)
+
+ if self.verbose > 1:
+ LOG.debug(_("Writing {!r}").format(fname))
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open(fname, 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod(fname, self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_general(self, vm, guest_id, tpl_vm):
+ """Return the opening section of a VM instance file: the
+ 'vsphere_virtual_machine' resource header with resource pool,
+ datastore (cluster), CPU/memory settings and lifecycle block.
+ Raises RuntimeError if the VM's cluster is unknown."""
+
+ vs_name = vm.vsphere
+
+ # ## General definitions of VM
+ if self.verbose > 1:
+ LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
+ content = textwrap.dedent('''\
+ # Definition of the VM instance {!r}.
+
+ ''').format(vm.name)
+
+ cluster = self.vsphere[vs_name].get_cluster_by_name(vm.cluster)
+ if not cluster:
+ msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
+ vm.cluster)
+ raise RuntimeError(msg)
+
+ content += textwrap.dedent('''\
+ resource "vsphere_virtual_machine" "{tn}" {{
+
+ resource_pool_id = data.vsphere_resource_pool.{pv}.id
+ name = "{n}"
+ ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
+
+ # Datastore cluster and plain datastore are both optional.
+ if vm.ds_cluster:
+ dsc_tf_name = self.vsphere[vs_name].ds_cluster_mapping[vm.ds_cluster]
+ tpl = ' datastore_cluster_id = data.vsphere_datastore_cluster.{}.id\n'
+ content += tpl.format(dsc_tf_name)
+
+ if vm.datastore:
+ ds_tf_name = self.vsphere[vs_name].ds_mapping[vm.datastore]
+ tpl = ' datastore_id = data.vsphere_datastore.{}.id\n'
+ content += tpl.format(ds_tf_name)
+
+ content += textwrap.indent(textwrap.dedent('''\
+ num_cpus = "{cpu}"
+ folder = "{f}"
+ num_cores_per_socket = "1"
+ cpu_hot_add_enabled = "true"
+ cpu_hot_remove_enabled = "true"
+ memory = "{m}"
+ memory_hot_add_enabled = "true"
+ boot_delay = "{b}"
+ guest_id = {g}
+ '''), ' ').format(
+ g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000))
+ if vm.vm_template:
+ tpl = ' scsi_type = data.vsphere_virtual_machine.{}.scsi_type\n'
+ content += tpl.format(tpl_vm.tf_name)
+ content += '\n'
+
+ # Ignore all post-creation drift on the resource.
+ content += textwrap.indent(textwrap.dedent('''\
+ lifecycle {
+ ignore_changes = all
+ }
+ '''), ' ')
+ content += '\n'
+
+ return content
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_if(self, vm, iface, i, tpl_vm):
+ """Return a 'network_interface' block for interface number i of the
+ VM, referencing the network data source and the adapter type of the
+ template VM. NOTE(review): tpl_vm is dereferenced unconditionally
+ here, so this assumes the VM was built from a template - confirm."""
+
+ vs_name = vm.vsphere
+
+ # ## Interface definition
+
+ if self.verbose > 1:
+ LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
+ nw = iface.network
+ nw_name = self.vsphere[vs_name].network_mapping[nw]
+
+ content = textwrap.indent(textwrap.dedent('''\
+ network_interface {{
+ network_id = data.vsphere_network.{n}.id
+ adapter_type = data.{vvm}.{t}.{nit}[0]
+ }}
+ '''), ' ').format(
+ n=nw_name, t=tpl_vm.tf_name,
+ vvm='vsphere_virtual_machine', nit='network_interface_types')
+ content += '\n'
+
+ return content
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_disk(self, vm, unit_id):
+ """Return a 'disk' block for the disk with the given unit_id of the
+ VM; the unit_number attribute is emitted only for non-first disks."""
+
+ # ## Disk definitions
+ if self.verbose > 1:
+ LOG.debug(_("Generating disk definition {i} of {v!r}.").format(i=unit_id, v=vm.name))
+ disk = vm.disks[unit_id]
+ content = textwrap.indent(textwrap.dedent('''\
+ disk {{
+ label = "disk{i}"
+ size = "{s}"
+ eagerly_scrub = "false"
+ thin_provisioned = "false"
+ '''), ' ').format(i=unit_id, s=int(disk.size_gb))
+ if unit_id > 0:
+ content += ' unit_number = {}\n'.format(unit_id)
+ content += ' }\n\n'
+
+ return content
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_custom(self, vm, tpl_vm):
+ """Return the customization part of a VM instance file: the clone /
+ customize block, local ssh-keygen cleanup provisioners, file
+ provisioners for the postinstall scripts, a remote-exec provisioner
+ running resolver/motd/backup/puppet setup commands, optional puppet
+ postconfiguration and the on-destroy SSH cleanup provisioners."""
+
+ # ## Customization of VM
+ if self.verbose > 1:
+ LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
+
+ content = textwrap.indent(textwrap.dedent('''\
+ clone {{
+ template_uuid = data.vsphere_virtual_machine.{t}.id
+
+ customize {{
+ linux_options {{
+ host_name = "{h}"
+ domain = "{d}"
+ time_zone = var.timezone
+ }}
+
+ '''), ' ').format(
+ t=tpl_vm.tf_name, h=vm.hostname, d=vm.domain)
+
+ content += self._create_instfile_nw(vm)
+ content += ' }\n'
+ content += ' }\n\n'
+
+ # ## local SSH cleanup before any actions
+ # Drop stale known_hosts entries for both FQDN and first IP.
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {h} || true"
+ }}
+
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {i} || true"
+ }}
+
+ '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+ # ## Copying postinstall scripts to VM
+
+ files = ['conf-resolver', 'create-motd']
+ if vm.has_puppet:
+ files.append('init-puppet')
+ files.append('update-all-packages')
+
+ for sname in files:
+
+ if self.verbose > 1:
+ LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
+ f=sname, v=vm.name))
+
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "file" {{
+ source = "{d}/{f}"
+ destination = "/tmp/{f}"
+ connection {{
+ type = "ssh"
+ user = "root"
+ host = "{h}"
+ }}
+ }}
+
+ '''), ' ').format(
+ d=self.script_dir_rel, f=sname, h=vm.fqdn)
+
+ # ## Postinstall commands on host
+ commands = []
+
+ commands.append("usermod -c 'root {}' root".format(vm.fqdn))
+
+ # Configure /etc/resolv.conf via the uploaded conf-resolver script.
+ commands.append("chmod +x /tmp/conf-resolver")
+ cmd = '/tmp/conf-resolver'
+ for ns in vm.nameservers:
+ cmd += ' --ns {!r}'.format(str(ns))
+ for dom in vm.searchdomains:
+ cmd += ' --search {!r}'.format(dom)
+ if vm.dns_options:
+ cmd += ' --options {!r}'.format(vm.dns_options)
+ else:
+ cmd += ' --options {!r}'.format('')
+ commands.append(cmd)
+ commands.append("rm -f /tmp/conf-resolver")
+
+ # Escape double quotes in the purpose text for safe shell embedding.
+ purpose = self.re_doublequote.sub('\\\"', vm.purpose)
+
+ zone = "{z}/{c}".format(z=vm.vsphere, c=vm.cluster)
+
+ # Generate /etc/motd via the uploaded create-motd script.
+ commands.append("chmod +x /tmp/create-motd")
+ cmd = (
+ "/tmp/create-motd --purpose '{p}' --hardware 'vmware (x86_64)' --owner '{o}' "
+ "--location 'VMWare' --zone '{z}' --customer '{c}' --email '{m}' --tier '{t}' "
+ "--environment '{e}' --role '{r}'").format(
+ p=purpose, t=vm.puppet_tier, o=vm.customer, z=zone, c=vm.puppet_customer,
+ m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
+ if vm.puppet_project:
+ cmd += " --project '{pr}'".format(pr=vm.puppet_project)
+ cmd += " | tee /etc/motd"
+ commands.append(cmd)
+ commands.append("rm -f /tmp/create-motd")
+
+ # ## Backup - Legato networker
+ # Reset networker state; enable the service only for VMs with backup.
+ commands.append("systemctl stop networker.service")
+ commands.append("rm -rfv /nsr/tmp /nsr/res")
+ if vm.has_backup:
+ commands.append("mkdir -pv /nsr/res")
+ commands.append(
+ "if [ ! -f /nsr/res/servers ] ; then "
+ "echo 'legato01.pixelpark.com' > /nsr/res/servers; fi")
+ commands.append("systemctl start networker.service; sleep 2")
+ commands.append("nsrports -S 7937-7999; sleep 2")
+ commands.append("systemctl stop networker.service; sleep 2")
+ commands.append(
+ "systemctl enable networker.service; systemctl start networker.service; sleep 2")
+ commands.append("nsrports; sleep 2")
+ else:
+ commands.append("systemctl disable networker.service")
+
+ # ## Configuring and starting puppet
+ if vm.has_puppet:
+ commands.append("chmod +x /tmp/init-puppet")
+ cmd = "/tmp/init-puppet --environment '{e}' --customer '{c}' "
+ if vm.puppet_project:
+ cmd += "--project '{pr}' "
+ cmd += "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p}' --email '{m}'"
+ cmd += " --zone '{z}'"
+ if vm.puppet_initial_install:
+ cmd += " --initial-install"
+ cmd = cmd.format(
+ p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer, z=zone,
+ pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
+ commands.append(cmd)
+ commands.append("rm -f /tmp/init-puppet")
+
+ # Emit all collected commands as one remote-exec provisioner.
+ content += ' provisioner "remote-exec" {\n'
+ content += ' inline = [\n'
+ for cmd in commands:
+ content += ' "{}",\n'.format(cmd)
+ content += ' ]\n'
+ content += ' connection {\n'
+ content += ' type = "ssh"\n'
+ content += ' user = "root"\n'
+ content += ' host = "{}"\n'.format(vm.fqdn)
+ content += ' }\n'
+ content += ' }\n\n'
+
+ # ## postconfigure actions with puppet
+ if vm.has_puppet:
+ content += self._create_instfile_puppet(vm)
+
+ # ## local SSH cleanup on destroy
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {h} || true"
+ when = destroy
+ }}
+
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {i} || true"
+ when = destroy
+ }}
+ '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+ # Close the 'resource' block opened in _create_instfile_general().
+ content += '}\n\n'
+
+ return content
+
+ # -------------------------------------------------------------------------·
+ def _create_instfile_nw(self, vm):
+ """Return the customize-level 'network_interface' blocks (IPv4/IPv6
+ address and netmask per interface) plus the gateway lines and the
+ dns_server_list, taking the gateways from the first interface that
+ defines them."""
+
+ content = ''
+
+ gw4 = None
+ gw6 = None
+ for iface in vm.interfaces:
+
+ content += " network_interface {\n"
+ if iface.address_v4:
+ content += ' ipv4_address = "{}"\n'.format(iface.address_v4)
+ if iface.netmask_v4 is not None:
+ content += ' ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
+ if iface.address_v6:
+ content += ' ipv6_address = "{}"\n'.format(iface.address_v6)
+ if iface.netmask_v6 is not None:
+ content += ' ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
+ content += ' }\n\n'
+
+ # First defined gateway wins.
+ if not gw4:
+ gw4 = iface.gateway_v4
+ if not gw6:
+ gw6 = iface.gateway_v6
+
+ if gw4:
+ content += ' ipv4_gateway = "{}"\n'.format(gw4)
+ if gw6:
+ content += ' ipv6_gateway = "{}"\n'.format(gw6)
+
+ ns = ', '.join(map(lambda x: '"{}"'.format(x), vm.nameservers))
+ content += ' dns_server_list = [{}]\n'.format(ns)
+
+ return content
+
+ # -------------------------------------------------------------------------·
+ def _create_instfile_puppet(self, vm):
+ """Return the puppet-related provisioners: sign the VM's certificate
+ on the puppet CA, run the agent and package update on the VM, and on
+ destroy stop the agent, deactivate the node on the puppetmaster and
+ clean its certificate on the CA."""
+
+ # Sign the new node's certificate on the puppet CA host.
+ cmd = (
+ "ssh -o StrictHostKeyChecking=no {ca} "
+ "'sudo /opt/puppetlabs/bin/puppetserver ca sign --certname {h} || true'").format(
+ ca=self.config.puppetca, h=vm.fqdn)
+
+ content = textwrap.indent(textwrap.dedent('''\
+ provisioner "local-exec" {{
+ command = "{cmd}"
+ }}
+
+ provisioner "remote-exec" {{
+ inline = [
+ "/opt/puppetlabs/bin/puppet agent --test || true",
+ "/usr/bin/systemctl start puppet.service",
+ "/usr/bin/systemctl enable puppet.service",
+ "chmod +x /tmp/update-all-packages",
+ "/tmp/update-all-packages",
+ "rm -f /tmp/update-all-packages",
+ ]
+ connection {{
+ type = "ssh"
+ user = "root"
+ host = "{h}"
+ }}
+ }}
+
+ '''), ' ').format(cmd=cmd, h=vm.fqdn,)
+
+ # Destroy actions with puppet
+ cmd1 = "ssh -o StrictHostKeyChecking=no {ma} "
+ cmd1 += "'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
+ cmd1 = cmd1.format(ma=self.config.puppetmaster, h=vm.fqdn)
+
+ cmd2 = "ssh -o StrictHostKeyChecking=no {ca} "
+ cmd2 += "'sudo /opt/puppetlabs/bin/puppetserver ca clean --certname {h} || true'"
+ cmd2 = cmd2.format(ca=self.config.puppetca, h=vm.fqdn)
+
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "remote-exec" {{
+ inline = [
+ "/usr/bin/systemctl stop puppet.service || true",
+ ]
+ when = destroy
+ connection {{
+ type = "ssh"
+ user = "root"
+ host = "{h}"
+ }}
+ }}
+
+ provisioner "local-exec" {{
+ command = "{cmd1}"
+ when = destroy
+ }}
+
+ provisioner "local-exec" {{
+ command = "{cmd2}"
+ when = destroy
+ }}
+
+ '''), ' ').format(cmd1=cmd1, cmd2=cmd2, h=vm.fqdn)
+
+ return content
+
+ # -------------------------------------------------------------------------·
+ def ensure_vsphere_folders(self):
+ """Ensure all VM folders in self.vsphere_folders exist on the first
+ configured vSphere (a copy is passed so the list stays untouched)."""
+
+ # Grab the first configured vSphere name.
+ vs_name = None
+ for vs_name in self.vsphere.keys():
+ break
+ vsphere = self.vsphere[vs_name]
+
+ print()
+ LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
+ vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
+
+ # -------------------------------------------------------------------------·
+ def exec_terraform(self):
+ """Run 'terraform init' (fatal on failure), import any pre-existing
+ VMs into the terraform state (failures are only warnings) and print
+ final instructions for running 'terraform apply' manually.
+ Raises ExpectedHandlerError when 'terraform init' fails."""
+
+ # Timeout in seconds for each terraform subprocess.
+ tf_timeout = 30
+
+ print()
+ LOG.info(_("Executing {!r} ...").format('terraform init'))
+ cmd = [str(self.terraform_cmd), 'init']
+ try:
+ result = self.run(
+ cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+ except CalledProcessError as e:
+ if e.stdout:
+ print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
+ if e.stderr:
+ print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
+ raise ExpectedHandlerError(str(e))
+ LOG.debug(_("Completed process:") + "\n" + str(result))
+
+ if self.existing_vms:
+ print()
+ LOG.info(_("Importing existing virtual machines ..."))
+
+ for vm in self.existing_vms:
+
+ vs_name = vm.vsphere
+ print()
+ LOG.info(_("Importing VM {!r}.").format(vm.name))
+ vm_obj = 'vsphere_virtual_machine.{}'.format(vm.tf_name)
+ path = '/{dc}/{f}/{p}/{n}'.format(
+ dc=self.vsphere[vs_name].dc, f=self.vsphere[vs_name].dc_obj.vm_folder,
+ p=vm.path, n=vm.name)
+ cmd = [str(self.terraform_cmd), 'import', vm_obj, path]
+ try:
+ result = self.run(
+ cmd, may_simulate=True, timeout=tf_timeout,
+ stdout=PIPE, stderr=PIPE, check=True)
+ except CalledProcessError as e:
+ # Import failures are tolerated - log and continue.
+ if e.stdout:
+ print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
+ if e.stderr:
+ msg = self.colored("Error message", ('BOLD', 'RED')) + ':\n'
+ msg += to_str(e.stderr)
+ print(msg)
+ LOG.warn(_("Error on importing VM {!r}:").format(vm.name) + ' ' + str(e))
+
+ LOG.debug(_("Completed process:") + "\n" + str(result))
+
+# print()
+# LOG.info(_("Executing {!r} ...").format('terraform plan'))
+# cmd = [str(self.terraform_cmd), 'plan']
+# try:
+# result = self.run(
+# cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+# except CalledProcessError as e:
+# if e.stdout:
+# print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
+# if e.stderr:
+# print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
+# raise ExpectedHandlerError(str(e))
+# LOG.debug(_("Completed process:") + "\n" + str(result))
+
+ # Path to the project dir relative to where the user started.
+ goto = Path(os.path.relpath(self.project_dir, self.start_dir))
+
+ print()
+ print()
+ print(self.colored(_("Congratulations!"), 'GREEN'))
+ print()
+ print(_("Now you are ready to deploy the following virtual machines:"))
+ for vm in sorted(self.vms, key=lambda x: x.tf_name):
+ print(" * {}".format(vm.fqdn))
+ print()
+ print(_("To start the deployment process change to directory {}").format(
+ self.colored(str(goto), 'GREEN')))
+ print()
+ print(_("and enter: {}").format(self.colored('terraform apply', 'GREEN')))
+ print()
+
+
+# =============================================================================
+
+# This module is a library - executing it directly is deliberately a no-op.
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list