Frank Brehm's Git Trees - pixelpark/create-terraform.git/commitdiff
Moving some methods into mixin module lib/cr_tf/handler/files.py
author    Frank Brehm <frank.brehm@pixelpark.com>
Fri, 13 Oct 2023 14:31:27 +0000 (16:31 +0200)
committer Frank Brehm <frank.brehm@pixelpark.com>
Fri, 13 Oct 2023 14:31:27 +0000 (16:31 +0200)
lib/cr_tf/handler/__init__.py
lib/cr_tf/handler/files.py [new file with mode: 0644]
lib/cr_tf/handler/vmware.py
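
The change itself is mechanical: the methods shown in the diff are cut out of lib/cr_tf/handler/__init__.py and pasted unchanged into the new mixin module lib/cr_tf/handler/files.py, and CrTfHandlerFilesMixin is added to the base-class list of CreateTerraformHandler, so the handler keeps the same interface. The following is a minimal, self-contained sketch of the mixin composition this relies on; all names here are illustrative stand-ins, not the project's real classes.

# Illustrative sketch (hypothetical stand-in names): a mixin contributes only
# methods, while state such as self.simulate is supplied by the handler class
# it is mixed into.

class DemoBaseHandler:
    """Stand-in for fb_tools.handler.BaseHandler."""

    def __init__(self, simulate=False):
        self.simulate = simulate


class DemoFilesMixin:
    """Stand-in for CrTfHandlerFilesMixin: no __init__ and no state of its own."""

    def exec_tf_files(self):
        # Uses an attribute provided by the composing handler class.
        if self.simulate:
            print("Simulation mode - terraform files will not be written.")
            return
        print("Creating terraform files ...")


class DemoHandler(DemoBaseHandler, DemoFilesMixin):
    """Stand-in for CreateTerraformHandler, composing base class and mixin."""


if __name__ == '__main__':
    DemoHandler(simulate=True).exec_tf_files()

Because the method is resolved through the class's MRO, existing calls such as self.exec_tf_files() keep working after the move without any change at the call sites.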

index 878d57e9fd63b92f55f1b488b0d1d221363a6b06..3c54520acfc3cd644715c170e74b8906a35225ce 100644
--- a/lib/cr_tf/handler/__init__.py
+++ b/lib/cr_tf/handler/__init__.py
@@ -12,9 +12,7 @@ from __future__ import absolute_import, print_function
 import os
 import logging
 import re
-import shutil
 import stat
-import textwrap
 import copy
 
 from pathlib import Path
@@ -34,6 +32,7 @@ from fb_tools.handler import BaseHandler
 
 # Own modules
 from .dns import CrTfHandlerDnsMixin
+from .files import CrTfHandlerFilesMixin
 from .first import CrTfHandlerFirstMixin
 from .read import CrTfHandlerReadMixin
 from .vmware import CrTfHandlerVmwMixin
@@ -47,7 +46,7 @@ from ..errors import AbortExecution
 
 from ..xlate import XLATOR
 
-__version__ = '3.9.5'
+__version__ = '3.9.6'
 LOG = logging.getLogger(__name__)
 
 _ = XLATOR.gettext
@@ -57,7 +56,7 @@ ngettext = XLATOR.ngettext
 # =============================================================================
 class CreateTerraformHandler(
         BaseHandler, CrTfHandlerFirstMixin, CrTfHandlerReadMixin, CrTfHandlerDnsMixin,
-        CrTfHandlerVmwMixin):
+        CrTfHandlerVmwMixin, CrTfHandlerFilesMixin):
     """
     A handler class for creating the terraform environment
     """
@@ -414,917 +413,6 @@ class CreateTerraformHandler(
 
         print()
 
-    # -------------------------------------------------------------------------·
-    def exec_project_dir(self):
-
-        if self.stop_at_step == 'project-dir':
-            self.incr_verbosity()
-
-        self.ensure_project_dir()
-        self.clean_project_dir()
-
-        LOG.info(_("Finished step {!r}.").format('project-dir'))
-        if self.stop_at_step == 'project-dir':
-            raise AbortExecution('project-dir')
-
-    # -------------------------------------------------------------------------·
-    def exec_tf_files(self):
-
-        if self.stop_at_step == 'tf-files':
-            self.incr_verbosity()
-
-        self.create_terraform_files()
-
-        LOG.info(_("Finished step {!r}.").format('tf-files'))
-        if self.stop_at_step == 'tf-files':
-            raise AbortExecution('tf-files')
-
-    # -------------------------------------------------------------------------·
-    def get_tf_name_network(self, net_name, *args):
-
-        default = None
-        has_default = False
-        if len(args):
-            if len(args) > 1:
-                msg = ngettext(
-                    "Method {c}.{m} expected at most one argument, got {n}.",
-                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
-                    c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
-                raise TypeError(msg)
-            default = args[0]
-            has_default = True
-
-        if net_name in self.vsphere.network_mapping:
-            return self.vsphere.network_mapping[net_name]
-        if has_default:
-            return default
-        raise KeyError(_("Did not found network {!r}.").format(net_name))
-
-    # --------------------------------------------------------------------------
-    def get_tf_name_ds_cluster(self, dsc_name, *args):
-
-        default = None
-        has_default = False
-        if len(args):
-            if len(args) > 1:
-                msg = ngettext(
-                    "Method {c}.{m} expected at most one argument, got {n}.",
-                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
-                    c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
-                raise TypeError(msg)
-            default = args[0]
-            has_default = True
-
-        if dsc_name in self.vsphere.ds_cluster_mapping:
-            return self.vsphere.ds_cluster_mapping[dsc_name]
-        if has_default:
-            return default
-        raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
-
-    # --------------------------------------------------------------------------
-    def get_tf_name_datastore(self, ds_name, *args):
-
-        default = None
-        has_default = False
-        if len(args):
-            if len(args) > 1:
-                msg = ngettext(
-                    "Method {c}.{m} expected at most one argument, got {n}.",
-                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
-                    c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
-                raise TypeError(msg)
-            default = args[0]
-            has_default = True
-
-        if ds_name in self.vsphere.ds_mapping:
-            return self.vsphere.ds_mapping[ds_name]
-        if has_default:
-            return default
-        raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
-
-    # --------------------------------------------------------------------------
-    def ensure_project_dir(self):
-
-        print()
-        LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))
-
-        if self.project_dir.exists():
-            if self.project_dir.is_dir():
-                LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
-            else:
-                msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
-                raise ExpectedHandlerError(msg)
-        else:
-            LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
-            if self.simulate:
-                LOG.debug(_("Simulation mode - directory will not be created."))
-            else:
-                try:
-                    os.makedirs(str(self.project_dir), mode=0o755)
-                except PermissionError as e:
-                    msg = _("Could not create directory {d!r}: {e}").format(
-                        d=str(self.project_dir), e=e)
-                    raise ExpectedHandlerError(msg)
-
-        if not self.project_dir.exists():
-            if self.simulate:
-                return
-            else:
-                msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
-                raise ExpectedHandlerError(msg)
-
-        if not os.access(str(self.project_dir), os.W_OK):
-            msg = _("No write access to directory {!r}.").format(str(self.project_dir))
-            raise ExpectedHandlerError(msg)
-
-        LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
-        os.chdir(str(self.project_dir))
-
-        self.script_dir_rel = Path(os.path.relpath(
-            str(self.script_dir), str(self.project_dir)))
-        LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))
-
-        if self.verbose > 1:
-            LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
-                str(self.project_dir)))
-
-        tf_path = self.project_dir / '.terraform'
-        if tf_path.exists() and not tf_path.is_dir():
-            msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
-                d=str(self.project_dir), w='.terraform')
-            raise ExpectedHandlerError(msg)
-
-        state_path = self.project_dir / 'terraform.tfstate'
-        if state_path.exists() and not state_path.is_file():
-            msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
-                d=str(self.project_dir), w='terraform.tfstate')
-            raise ExpectedHandlerError(msg)
-
-        if tf_path.is_dir() and state_path.is_file():
-            msg = _(
-                "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
-                "Is this an old terraform project?").format(
-                    d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
-            raise ExpectedHandlerError(msg)
-
-    # --------------------------------------------------------------------------
-    def clean_project_dir(self):
-
-        print()
-        LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
-
-        files = []
-        for path in self.project_dir.glob('*'):
-            files.append(path)
-        for path in self.project_dir.glob('.terraform'):
-            files.append(path)
-
-        if not files:
-            LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
-            return
-        for pfile in files:
-            if pfile.exists():
-                if pfile.is_dir():
-                    LOG.debug(_("Removing recursive directory {!r} ...").format(str(pfile)))
-                    if not self.simulate:
-                        shutil.rmtree(str(pfile))
-                else:
-                    LOG.debug(_("Removing {!r} ...").format(str(pfile)))
-                    if not self.simulate:
-                        pfile.unlink()
-
-    # --------------------------------------------------------------------------
-    def create_terraform_files(self):
-
-        print()
-        print()
-        msg = _("Creating all necessary files for terraform.")
-        ll = 6
-        if len(msg) > ll:
-            ll = len(msg)
-        print(self.colored('#' * (ll + 6), 'AQUA'))
-        line = self.colored('#', 'AQUA') + '  '
-        line += self.colored(msg.center(ll), 'YELLOW')
-        line += '  ' + self.colored('#', 'AQUA')
-        print(line)
-        print(self.colored('#' * (ll + 6), 'AQUA'))
-        print()
-        print()
-
-        self.create_varfiles()
-        self.create_dcfile()
-        self.create_backend_file()
-        self.create_instance_files()
-
-    # --------------------------------------------------------------------------
-    def create_varfiles(self):
-
-        LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
-
-        vs_name = None
-        for vs_name in self.vsphere.keys():
-            break
-        if self.verbose > 1:
-            LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
-                w='dcfile', v=vs_name))
-
-        vs_host = self.config.vsphere[vs_name].host
-        vs_user = self.config.vsphere[vs_name].user
-        vs_pwd = self.config.vsphere[vs_name].password
-        vs_dc = self.config.vsphere[vs_name].dc
-
-        content = textwrap.dedent('''\
-        ## filename: terraform.tfvars
-        ## This file declares the values for the variables to be used in the instance.tf playbook
-
-        #
-        # ATTENTION!
-        #
-        # To avoid annoying questions for password and API key
-        # create manually a file 'terraform-private.auto.tfvars"
-        # with the following content:
-        #
-        #   vsphere_username     = "<USERNAME>"
-        #   vsphere_userpassword = "<PASSWORD>"
-        #
-        # with the correct values. This file will not be under GIT control
-        #
-
-        ''')
-
-        if self.simulate:
-            if self.verbose:
-                print(content)
-        else:
-            with open('terraform.tfvars', 'w', **self.open_opts) as fh:
-                fh.write(content)
-            os.chmod('terraform.tfvars', self.std_file_permissions)
-
-        # Sensible stuff
-        if vs_user or vs_pwd:
-            content = '# Private sensible information. Please keep this file secret.\n\n'
-            if vs_user:
-                content += 'vsphere_username     = "{}"\n'.format(vs_user)
-            if vs_pwd:
-                content += 'vsphere_userpassword = "{}"\n'.format(vs_pwd)
-            content += '\n'
-
-            LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
-            if self.simulate:
-                if self.verbose:
-                    print(content)
-            else:
-                with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
-                    fh.write(content)
-                os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
-
-        # File with variable declarations
-        content = textwrap.dedent('''\
-        # filename: variables.tf
-        # definition of the variables to be used in the play
-        # declaration happens in the file terraform.tfvars and private.auto.tfvars
-
-        ''')
-
-        tpl = textwrap.dedent('''\
-        variable "vsphere_vcenter" {{
-          default     = "{}"
-          description = "IP or DNS of the vSphere center."
-          type        = string
-        }}
-
-        ''')
-        content += tpl.format(vs_host)
-
-        tpl = textwrap.dedent('''\
-        variable "vsphere_username" {
-          description = "vSphere accountname to be used."
-          type        = string
-        }
-
-        variable "vsphere_userpassword" {
-          description = "Password for vSphere accountname."
-          type        = string
-        }
-
-        ''')
-        content += tpl
-
-        tpl = textwrap.dedent('''\
-        variable "vsphere_datacenter" {{
-          default     = "{dc}"
-          description = "Name of the vSphere datacenter to use."
-          type        = string
-        }}
-
-        ''')
-        content += tpl.format(dc=vs_dc)
-
-        tpl = textwrap.dedent('''\
-        variable "timezone" {{
-          default     = "{tz}"
-          description = "The global timezone used for VMs"
-          type        = string
-        }}
-
-        ''')
-        content += tpl.format(tz=self.tz_name)
-
-        LOG.debug(_("Creating {!r} ...").format('variables.tf'))
-        if self.simulate:
-            if self.verbose:
-                print(content)
-        else:
-            with open('variables.tf', 'w', **self.open_opts) as fh:
-                fh.write(content)
-            os.chmod('variables.tf', self.std_file_permissions)
-
-    # --------------------------------------------------------------------------
-    def create_dcfile(self):
-
-        vs_name = None
-        for vs_name in self.vsphere.keys():
-            break
-        vsphere = self.vsphere[vs_name]
-
-        LOG.debug(_("Creating {!r} ...").format('dc.tf'))
-        if self.verbose > 1:
-            LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
-                w='dcfile', v=vs_name))
-
-        content = textwrap.dedent('''\
-        # filename: dc.tf
-        # Configuring the VMware VSphere Provider and some dependend common used objects
-
-        provider "vsphere" {
-          vsphere_server       = var.vsphere_vcenter
-          user                 = var.vsphere_username
-          password             = var.vsphere_userpassword
-          allow_unverified_ssl = true
-        ''')
-
-#        if self.min_version_vsphere_provider:
-#            content += '  version              = ">= {}"\n'.format(
-#                str(self.min_version_vsphere_provider))
-
-        content += textwrap.dedent('''\
-        }
-
-        data "vsphere_datacenter" "dc" {
-          name = var.vsphere_datacenter
-        }
-
-        ''')
-
-        for cluster in vsphere.clusters:
-            tpl = textwrap.dedent('''\
-            data "vsphere_resource_pool" "{pv}" {{
-              name          = "{pn}"
-              datacenter_id = data.vsphere_datacenter.dc.id
-            }}
-
-            ''')
-            content += tpl.format(
-                pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
-
-        if self.used_dc_clusters:
-            for dsc_name in sorted(self.used_dc_clusters[vs_name], key=str.lower):
-                dsc_tf_name = vsphere.ds_cluster_mapping[dsc_name]
-                tpl = textwrap.dedent('''\
-                data "vsphere_datastore_cluster" "{tn}" {{
-                  name          = "{n}"
-                  datacenter_id = data.vsphere_datacenter.dc.id
-                }}
-
-                ''')
-                content += tpl.format(tn=dsc_tf_name, n=dsc_name)
-
-        if self.used_datastores:
-            for ds_name in sorted(self.used_datastores[vs_name], key=str.lower):
-                ds_tf_name = vsphere.ds_mapping[ds_name]
-                tpl = textwrap.dedent('''\
-                data "vsphere_datastore" "{tn}" {{
-                  name          = "{n}"
-                  datacenter_id = data.vsphere_datacenter.dc.id
-                }}
-
-                ''')
-                content += tpl.format(tn=ds_tf_name, n=ds_name)
-
-        for net_name in sorted(self.used_networks[vs_name], key=str.lower):
-            net_tf_name = vsphere.network_mapping[net_name]
-            tpl = textwrap.dedent('''\
-            data "vsphere_network" "{tn}" {{
-              name          = "{n}"
-              datacenter_id = data.vsphere_datacenter.dc.id
-            }}
-
-            ''')
-            content += tpl.format(n=net_name, tn=net_tf_name)
-
-        if self.vsphere_templates:
-            for tname in sorted(self.vsphere_templates[vs_name].keys(), key=str.lower):
-                tpl_tf_name = self.vsphere_templates[vs_name][tname].tf_name
-                tpl = textwrap.dedent('''\
-                data "vsphere_virtual_machine" "{tn}" {{
-                  name          = "{n}"
-                  datacenter_id = data.vsphere_datacenter.dc.id
-                }}
-
-                ''')
-                content += tpl.format(tn=tpl_tf_name, n=tname)
-
-        if self.simulate:
-            if self.verbose:
-                print(content)
-        else:
-            with open('dc.tf', 'w', **self.open_opts) as fh:
-                fh.write(content)
-            os.chmod('dc.tf', self.std_file_permissions)
-
-    # --------------------------------------------------------------------------
-    def create_backend_file(self):
-
-        file_name = 'backend.tf'
-        LOG.debug(_("Creating {!r} ...").format(file_name))
-
-        tpl = textwrap.dedent('''\
-        # Configuration of the backend for storing the terraform status information
-        # and the minimum required version of terraform
-
-        terraform {{
-          backend "consul" {{
-            address = "{host}"
-            scheme  = "{scheme}"
-            path    = "{prefix}/{project}"
-          }}
-        ''')
-
-        project = self.full_project_name
-        if not project:
-            project = self.project_name
-
-        content = tpl.format(
-            host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
-            prefix=self.config.tf_backend_path_prefix, project=project)
-
-        if self.min_version_terraform:
-            content += '  required_version = ">= {}"\n'.format(str(self.min_version_terraform))
-        else:
-            LOG.warn(_("No minimum version of Terraform defined."))
-
-        content += '}\n\n'
-
-        if self.simulate:
-            if self.verbose:
-                print(content)
-        else:
-            with open(file_name, 'w', **self.open_opts) as fh:
-                fh.write(content)
-            os.chmod(file_name, self.std_file_permissions)
-
-    # --------------------------------------------------------------------------
-    def create_instance_files(self):
-
-        LOG.debug(_("Creating terraform files for VM instances."))
-
-        for vm in sorted(self.vms, key=lambda x: x.tf_name):
-            self.create_instance_file(vm)
-
-    # --------------------------------------------------------------------------
-    def create_instance_file(self, vm):
-
-        vs_name = vm.vsphere
-
-        fname = 'instance.' + vm.name + '.tf'
-        LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
-            f=fname, n=vm.name))
-
-        guest_id = self.config.guest_id
-        tpl_vm = None
-        if vm.vm_template:
-            tpl_vm = self.vsphere_templates[vs_name][vm.vm_template]
-            if self.verbose > 3:
-                LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
-            guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(tpl_vm.tf_name)
-        else:
-            guest_id = '"' + guest_id + '"'
-
-        content = self._create_instfile_general(vm, guest_id, tpl_vm)
-
-        i = 0
-        for iface in vm.interfaces:
-            content += self._create_instfile_if(vm, iface, i, tpl_vm)
-            i += 1
-
-        for unit_id in sorted(vm.disks.keys()):
-            content += self._create_instfile_disk(vm, unit_id)
-
-        content += textwrap.indent(textwrap.dedent('''\
-        cdrom {
-          client_device = "true"
-        }
-
-        '''), '  ')
-
-        content += self._create_instfile_custom(vm, tpl_vm)
-
-        if self.verbose > 1:
-            LOG.debug(_("Writing {!r}").format(fname))
-
-        if self.simulate:
-            if self.verbose:
-                print(content)
-        else:
-            with open(fname, 'w', **self.open_opts) as fh:
-                fh.write(content)
-            os.chmod(fname, self.std_file_permissions)
-
-    # --------------------------------------------------------------------------
-    def _create_instfile_general(self, vm, guest_id, tpl_vm):
-
-        vs_name = vm.vsphere
-
-        # ## General definitions of VM
-        if self.verbose > 1:
-            LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
-        content = textwrap.dedent('''\
-        # Definition of the VM instance {!r}.
-
-        ''').format(vm.name)
-
-        cluster = self.vsphere[vs_name].get_cluster_by_name(vm.cluster)
-        if not cluster:
-            msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
-                vm.cluster)
-            raise RuntimeError(msg)
-
-        content += textwrap.dedent('''\
-        resource "vsphere_virtual_machine" "{tn}" {{
-
-          resource_pool_id       = data.vsphere_resource_pool.{pv}.id
-          name                   = "{n}"
-        ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
-
-        if vm.ds_cluster:
-            dsc_tf_name = self.vsphere[vs_name].ds_cluster_mapping[vm.ds_cluster]
-            tpl = '  datastore_cluster_id   = data.vsphere_datastore_cluster.{}.id\n'
-            content += tpl.format(dsc_tf_name)
-
-        if vm.datastore:
-            ds_tf_name = self.vsphere[vs_name].ds_mapping[vm.datastore]
-            tpl = '  datastore_id           = data.vsphere_datastore.{}.id\n'
-            content += tpl.format(ds_tf_name)
-
-        content += textwrap.indent(textwrap.dedent('''\
-          num_cpus               = "{cpu}"
-          folder                 = "{f}"
-          num_cores_per_socket   = "1"
-          cpu_hot_add_enabled    = "true"
-          cpu_hot_remove_enabled = "true"
-          memory                 = "{m}"
-          memory_hot_add_enabled = "true"
-          boot_delay             = "{b}"
-          guest_id               = {g}
-        '''), '  ').format(
-            g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000))
-        if vm.vm_template:
-            tpl = '  scsi_type              = data.vsphere_virtual_machine.{}.scsi_type\n'
-            content += tpl.format(tpl_vm.tf_name)
-        content += '\n'
-
-        content += textwrap.indent(textwrap.dedent('''\
-        lifecycle {
-          ignore_changes = all
-        }
-        '''), '  ')
-        content += '\n'
-
-        return content
-
-    # --------------------------------------------------------------------------
-    def _create_instfile_if(self, vm, iface, i, tpl_vm):
-
-        vs_name = vm.vsphere
-
-        # ## Interface definition
-
-        if self.verbose > 1:
-            LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
-        nw = iface.network
-        nw_name = self.vsphere[vs_name].network_mapping[nw]
-
-        content = textwrap.indent(textwrap.dedent('''\
-        network_interface {{
-          network_id   = data.vsphere_network.{n}.id
-          adapter_type = data.{vvm}.{t}.{nit}[0]
-        }}
-        '''), '  ').format(
-            n=nw_name, t=tpl_vm.tf_name,
-            vvm='vsphere_virtual_machine', nit='network_interface_types')
-        content += '\n'
-
-        return content
-
-    # --------------------------------------------------------------------------
-    def _create_instfile_disk(self, vm, unit_id):
-
-        # ## Disk definitions
-        if self.verbose > 1:
-            LOG.debug(_("Generating disk definition {i} of {v!r}.").format(i=unit_id, v=vm.name))
-        disk = vm.disks[unit_id]
-        content = textwrap.indent(textwrap.dedent('''\
-        disk {{
-          label            = "disk{i}"
-          size             = "{s}"
-          eagerly_scrub    = "false"
-          thin_provisioned = "false"
-        '''), '  ').format(i=unit_id, s=int(disk.size_gb))
-        if unit_id > 0:
-            content += '    unit_number      = {}\n'.format(unit_id)
-        content += '  }\n\n'
-
-        return content
-
-    # --------------------------------------------------------------------------
-    def _create_instfile_custom(self, vm, tpl_vm):
-
-        # ## Customization of VM
-        if self.verbose > 1:
-            LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
-
-        content = textwrap.indent(textwrap.dedent('''\
-        clone {{
-          template_uuid = data.vsphere_virtual_machine.{t}.id
-
-          customize {{
-            linux_options {{
-              host_name = "{h}"
-              domain    = "{d}"
-              time_zone = var.timezone
-            }}
-
-        '''), '  ').format(
-            t=tpl_vm.tf_name, h=vm.hostname, d=vm.domain)
-
-        content += self._create_instfile_nw(vm)
-        content += '    }\n'
-        content += '  }\n\n'
-
-        # ## local SSH cleanup before any actions
-        content += textwrap.indent(textwrap.dedent('''\
-        provisioner "local-exec" {{
-          command = "ssh-keygen -R {h} || true"
-        }}
-
-        provisioner "local-exec" {{
-          command = "ssh-keygen -R {i} || true"
-        }}
-
-        '''), '  ').format(h=vm.fqdn, i=vm.interfaces[0].address)
-
-        # ## Copying postinstall scripts to VM
-
-        files = ['conf-resolver', 'create-motd']
-        if vm.has_puppet:
-            files.append('init-puppet')
-            files.append('update-all-packages')
-
-        for sname in files:
-
-            if self.verbose > 1:
-                LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
-                    f=sname, v=vm.name))
-
-            content += textwrap.indent(textwrap.dedent('''\
-            provisioner "file" {{
-              source      = "{d}/{f}"
-              destination = "/tmp/{f}"
-              connection {{
-                type = "ssh"
-                user = "root"
-                host = "{h}"
-              }}
-            }}
-
-            '''), '  ').format(
-                d=self.script_dir_rel, f=sname, h=vm.fqdn)
-
-        # ## Postinstall commands on host
-        commands = []
-
-        commands.append("usermod -c 'root {}' root".format(vm.fqdn))
-
-        commands.append("chmod +x /tmp/conf-resolver")
-        cmd = '/tmp/conf-resolver'
-        for ns in vm.nameservers:
-            cmd += ' --ns {!r}'.format(str(ns))
-        for dom in vm.searchdomains:
-            cmd += ' --search {!r}'.format(dom)
-        if vm.dns_options:
-            cmd += ' --options {!r}'.format(vm.dns_options)
-        else:
-            cmd += ' --options {!r}'.format('')
-        commands.append(cmd)
-        commands.append("rm -f /tmp/conf-resolver")
-
-        purpose = self.re_doublequote.sub('\\\"', vm.purpose)
-
-        zone = "{z}/{c}".format(z=vm.vsphere, c=vm.cluster)
-
-        commands.append("chmod +x /tmp/create-motd")
-        cmd = (
-            "/tmp/create-motd --purpose '{p}' --hardware 'vmware (x86_64)' --owner '{o}' "
-            "--location 'VMWare' --zone '{z}' --customer '{c}'  --email '{m}' --tier '{t}' "
-            "--environment '{e}' --role '{r}'").format(
-            p=purpose, t=vm.puppet_tier, o=vm.customer, z=zone, c=vm.puppet_customer,
-            m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
-        if vm.puppet_project:
-            cmd += " --project '{pr}'".format(pr=vm.puppet_project)
-        cmd += " | tee /etc/motd"
-        commands.append(cmd)
-        commands.append("rm -f /tmp/create-motd")
-
-        # ## Backup - Legato networker
-        commands.append("systemctl stop networker.service")
-        commands.append("rm -rfv /nsr/tmp /nsr/res")
-        if vm.has_backup:
-            commands.append("mkdir -pv /nsr/res")
-            commands.append(
-                "if [ ! -f /nsr/res/servers ] ; then "
-                "echo 'legato01.pixelpark.com' > /nsr/res/servers; fi")
-            commands.append("systemctl start networker.service; sleep 2")
-            commands.append("nsrports -S 7937-7999; sleep 2")
-            commands.append("systemctl stop networker.service; sleep 2")
-            commands.append(
-                "systemctl enable networker.service; systemctl start networker.service; sleep 2")
-            commands.append("nsrports; sleep 2")
-        else:
-            commands.append("systemctl disable networker.service")
-
-        # ## Configuring and starting puppet
-        if vm.has_puppet:
-            commands.append("chmod +x /tmp/init-puppet")
-            cmd = "/tmp/init-puppet --environment '{e}' --customer '{c}' "
-            if vm.puppet_project:
-                cmd += "--project '{pr}' "
-            cmd += "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p}' --email '{m}'"
-            cmd += " --zone '{z}'"
-            if vm.puppet_initial_install:
-                cmd += " --initial-install"
-            cmd = cmd.format(
-                p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer, z=zone,
-                pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
-            commands.append(cmd)
-            commands.append("rm -f /tmp/init-puppet")
-
-        content += '  provisioner "remote-exec" {\n'
-        content += '    inline = [\n'
-        for cmd in commands:
-            content += '      "{}",\n'.format(cmd)
-        content += '    ]\n'
-        content += '    connection {\n'
-        content += '      type = "ssh"\n'
-        content += '      user = "root"\n'
-        content += '      host = "{}"\n'.format(vm.fqdn)
-        content += '    }\n'
-        content += '  }\n\n'
-
-        # ## postconfigure actions with puppet
-        if vm.has_puppet:
-            content += self._create_instfile_puppet(vm)
-
-        # ## local SSH cleanup on destroy
-        content += textwrap.indent(textwrap.dedent('''\
-        provisioner "local-exec" {{
-          command = "ssh-keygen -R {h} || true"
-          when    = destroy
-        }}
-
-        provisioner "local-exec" {{
-          command = "ssh-keygen -R {i} || true"
-          when    = destroy
-        }}
-        '''), '  ').format(h=vm.fqdn, i=vm.interfaces[0].address)
-
-        content += '}\n\n'
-
-        return content
-
-    # -------------------------------------------------------------------------·
-    def _create_instfile_nw(self, vm):
-
-        content = ''
-
-        gw4 = None
-        gw6 = None
-        for iface in vm.interfaces:
-
-            content += "      network_interface {\n"
-            if iface.address_v4:
-                content += '        ipv4_address = "{}"\n'.format(iface.address_v4)
-                if iface.netmask_v4 is not None:
-                    content += '        ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
-            if iface.address_v6:
-                content += '        ipv6_address = "{}"\n'.format(iface.address_v6)
-                if iface.netmask_v6 is not None:
-                    content += '        ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
-            content += '      }\n\n'
-
-            if not gw4:
-                gw4 = iface.gateway_v4
-            if not gw6:
-                gw6 = iface.gateway_v6
-
-        if gw4:
-            content += '      ipv4_gateway    = "{}"\n'.format(gw4)
-        if gw6:
-            content += '      ipv6_gateway    = "{}"\n'.format(gw6)
-
-        ns = ', '.join(map(lambda x: '"{}"'.format(x), vm.nameservers))
-        content += '      dns_server_list = [{}]\n'.format(ns)
-
-        return content
-
-    # -------------------------------------------------------------------------·
-    def _create_instfile_puppet(self, vm):
-
-        cmd = (
-            "ssh -o StrictHostKeyChecking=no {ca} "
-            "'sudo /opt/puppetlabs/bin/puppetserver ca sign --certname {h} || true'").format(
-            ca=self.config.puppetca, h=vm.fqdn)
-
-        content = textwrap.indent(textwrap.dedent('''\
-        provisioner "local-exec" {{
-          command = "{cmd}"
-        }}
-
-        provisioner "remote-exec" {{
-          inline = [
-            "/opt/puppetlabs/bin/puppet agent --test || true",
-            "/usr/bin/systemctl start puppet.service",
-            "/usr/bin/systemctl enable puppet.service",
-            "chmod +x /tmp/update-all-packages",
-            "/tmp/update-all-packages",
-            "rm -f /tmp/update-all-packages",
-          ]
-          connection {{
-            type = "ssh"
-            user = "root"
-            host = "{h}"
-          }}
-        }}
-
-        '''), '  ').format(cmd=cmd, h=vm.fqdn,)
-
-        # Destroy actions with puppet
-        cmd1 = "ssh -o StrictHostKeyChecking=no {ma} "
-        cmd1 += "'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
-        cmd1 = cmd1.format(ma=self.config.puppetmaster, h=vm.fqdn)
-
-        cmd2 = "ssh -o StrictHostKeyChecking=no {ca} "
-        cmd2 += "'sudo /opt/puppetlabs/bin/puppetserver ca clean --certname {h} || true'"
-        cmd2 = cmd2.format(ca=self.config.puppetca, h=vm.fqdn)
-
-        content += textwrap.indent(textwrap.dedent('''\
-        provisioner "remote-exec" {{
-          inline = [
-            "/usr/bin/systemctl stop puppet.service || true",
-          ]
-          when = destroy
-          connection {{
-            type = "ssh"
-            user = "root"
-            host = "{h}"
-          }}
-        }}
-
-        provisioner "local-exec" {{
-          command = "{cmd1}"
-          when    = destroy
-        }}
-
-        provisioner "local-exec" {{
-          command = "{cmd2}"
-          when    = destroy
-        }}
-
-        '''), '  ').format(cmd1=cmd1, cmd2=cmd2, h=vm.fqdn)
-
-        return content
-
-    # -------------------------------------------------------------------------·
-    def ensure_vsphere_folders(self):
-
-        vs_name = None
-        for vs_name in self.vsphere.keys():
-            break
-        vsphere = self.vsphere[vs_name]
-
-        print()
-        LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
-        vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
-
     # -------------------------------------------------------------------------·
     def exec_terraform(self):
 
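
The new file added below follows the same module layout as the other mixin modules under lib/cr_tf/handler/: shebang and encoding line, module docstring, __future__ import, standard and third-party imports, __version__, a module-level logger plus the XLATOR translation helpers, and finally the mixin class. A stripped-down, self-contained skeleton of that layout with generic placeholder names (no project-specific imports) might look like this:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Skeleton of a handler mixin module in this layout (illustrative only)."""
from __future__ import absolute_import, print_function

# Standard modules
import logging

__version__ = '0.1.0'
LOG = logging.getLogger(__name__)


# =============================================================================
class DemoHandlerMixin():
    """A mixin class carrying one group of related handler methods."""

    # -------------------------------------------------------------------------
    def exec_demo_step(self):
        """Placeholder for a step method such as exec_tf_files()."""
        LOG.info("Finished step %r.", 'demo-step')
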
diff --git a/lib/cr_tf/handler/files.py b/lib/cr_tf/handler/files.py
new file mode 100644
index 0000000..10057ea
--- /dev/null
+++ b/lib/cr_tf/handler/files.py
@@ -0,0 +1,949 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2023 by Frank Brehm, Berlin
+@summary: A mixin module for the handler for methods for creating terraform project files.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import logging
+import os
+import shutil
+import textwrap
+
+from pathlib import Path
+
+# Third party modules
+
+from fb_tools.common import pp
+from fb_tools.errors import ExpectedHandlerError
+
+# Own modules
+from ..errors import AbortExecution
+
+
+from ..xlate import XLATOR
+
+__version__ = '0.1.0'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfHandlerFilesMixin():
+    """A mixin module for the handler module for reading and evaluation."""
+
+    # -------------------------------------------------------------------------·
+    def exec_project_dir(self):
+
+        if self.stop_at_step == 'project-dir':
+            self.incr_verbosity()
+
+        self.ensure_project_dir()
+        self.clean_project_dir()
+
+        LOG.info(_("Finished step {!r}.").format('project-dir'))
+        if self.stop_at_step == 'project-dir':
+            raise AbortExecution('project-dir')
+
+    # -------------------------------------------------------------------------·
+    def exec_tf_files(self):
+
+        if self.stop_at_step == 'tf-files':
+            self.incr_verbosity()
+
+        self.create_terraform_files()
+
+        LOG.info(_("Finished step {!r}.").format('tf-files'))
+        if self.stop_at_step == 'tf-files':
+            raise AbortExecution('tf-files')
+
+    # -------------------------------------------------------------------------·
+    def get_tf_name_network(self, net_name, *args):
+
+        default = None
+        has_default = False
+        if len(args):
+            if len(args) > 1:
+                msg = ngettext(
+                    "Method {c}.{m} expected at most one argument, got {n}.",
+                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+                    c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
+                raise TypeError(msg)
+            default = args[0]
+            has_default = True
+
+        if net_name in self.vsphere.network_mapping:
+            return self.vsphere.network_mapping[net_name]
+        if has_default:
+            return default
+        raise KeyError(_("Did not found network {!r}.").format(net_name))
+
+    # --------------------------------------------------------------------------
+    def get_tf_name_ds_cluster(self, dsc_name, *args):
+
+        default = None
+        has_default = False
+        if len(args):
+            if len(args) > 1:
+                msg = ngettext(
+                    "Method {c}.{m} expected at most one argument, got {n}.",
+                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+                    c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
+                raise TypeError(msg)
+            default = args[0]
+            has_default = True
+
+        if dsc_name in self.vsphere.ds_cluster_mapping:
+            return self.vsphere.ds_cluster_mapping[dsc_name]
+        if has_default:
+            return default
+        raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
+
+    # --------------------------------------------------------------------------
+    def get_tf_name_datastore(self, ds_name, *args):
+
+        default = None
+        has_default = False
+        if len(args):
+            if len(args) > 1:
+                msg = ngettext(
+                    "Method {c}.{m} expected at most one argument, got {n}.",
+                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+                    c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
+                raise TypeError(msg)
+            default = args[0]
+            has_default = True
+
+        if ds_name in self.vsphere.ds_mapping:
+            return self.vsphere.ds_mapping[ds_name]
+        if has_default:
+            return default
+        raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
+
+    # --------------------------------------------------------------------------
+    def ensure_project_dir(self):
+
+        print()
+        LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))
+
+        if self.project_dir.exists():
+            if self.project_dir.is_dir():
+                LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
+            else:
+                msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
+                raise ExpectedHandlerError(msg)
+        else:
+            LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
+            if self.simulate:
+                LOG.debug(_("Simulation mode - directory will not be created."))
+            else:
+                try:
+                    os.makedirs(str(self.project_dir), mode=0o755)
+                except PermissionError as e:
+                    msg = _("Could not create directory {d!r}: {e}").format(
+                        d=str(self.project_dir), e=e)
+                    raise ExpectedHandlerError(msg)
+
+        if not self.project_dir.exists():
+            if self.simulate:
+                return
+            else:
+                msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
+                raise ExpectedHandlerError(msg)
+
+        if not os.access(str(self.project_dir), os.W_OK):
+            msg = _("No write access to directory {!r}.").format(str(self.project_dir))
+            raise ExpectedHandlerError(msg)
+
+        LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
+        os.chdir(str(self.project_dir))
+
+        self.script_dir_rel = Path(os.path.relpath(
+            str(self.script_dir), str(self.project_dir)))
+        LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))
+
+        if self.verbose > 1:
+            LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
+                str(self.project_dir)))
+
+        tf_path = self.project_dir / '.terraform'
+        if tf_path.exists() and not tf_path.is_dir():
+            msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
+                d=str(self.project_dir), w='.terraform')
+            raise ExpectedHandlerError(msg)
+
+        state_path = self.project_dir / 'terraform.tfstate'
+        if state_path.exists() and not state_path.is_file():
+            msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
+                d=str(self.project_dir), w='terraform.tfstate')
+            raise ExpectedHandlerError(msg)
+
+        if tf_path.is_dir() and state_path.is_file():
+            msg = _(
+                "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
+                "Is this an old terraform project?").format(
+                    d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
+            raise ExpectedHandlerError(msg)
+
+    # --------------------------------------------------------------------------
+    def clean_project_dir(self):
+
+        print()
+        LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
+
+        files = []
+        for path in self.project_dir.glob('*'):
+            files.append(path)
+        for path in self.project_dir.glob('.terraform'):
+            files.append(path)
+
+        if not files:
+            LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
+            return
+        for pfile in files:
+            if pfile.exists():
+                if pfile.is_dir():
+                    LOG.debug(_("Removing recursive directory {!r} ...").format(str(pfile)))
+                    if not self.simulate:
+                        shutil.rmtree(str(pfile))
+                else:
+                    LOG.debug(_("Removing {!r} ...").format(str(pfile)))
+                    if not self.simulate:
+                        pfile.unlink()
+
+    # --------------------------------------------------------------------------
+    def create_terraform_files(self):
+
+        print()
+        print()
+        msg = _("Creating all necessary files for terraform.")
+        ll = 6
+        if len(msg) > ll:
+            ll = len(msg)
+        print(self.colored('#' * (ll + 6), 'AQUA'))
+        line = self.colored('#', 'AQUA') + '  '
+        line += self.colored(msg.center(ll), 'YELLOW')
+        line += '  ' + self.colored('#', 'AQUA')
+        print(line)
+        print(self.colored('#' * (ll + 6), 'AQUA'))
+        print()
+        print()
+
+        self.create_varfiles()
+        self.create_dcfile()
+        self.create_backend_file()
+        self.create_instance_files()
+
+    # --------------------------------------------------------------------------
+    def create_varfiles(self):
+
+        LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
+
+        vs_name = None
+        for vs_name in self.vsphere.keys():
+            break
+        if self.verbose > 1:
+            LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
+                w='dcfile', v=vs_name))
+
+        vs_host = self.config.vsphere[vs_name].host
+        vs_user = self.config.vsphere[vs_name].user
+        vs_pwd = self.config.vsphere[vs_name].password
+        vs_dc = self.config.vsphere[vs_name].dc
+
+        content = textwrap.dedent('''\
+        ## filename: terraform.tfvars
+        ## This file declares the values for the variables to be used in the instance.tf playbook
+
+        #
+        # ATTENTION!
+        #
+        # To avoid annoying questions for password and API key
+        # create manually a file 'terraform-private.auto.tfvars"
+        # with the following content:
+        #
+        #   vsphere_username     = "<USERNAME>"
+        #   vsphere_userpassword = "<PASSWORD>"
+        #
+        # with the correct values. This file will not be under GIT control
+        #
+
+        ''')
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('terraform.tfvars', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('terraform.tfvars', self.std_file_permissions)
+
+        # Sensible stuff
+        if vs_user or vs_pwd:
+            content = '# Private sensible information. Please keep this file secret.\n\n'
+            if vs_user:
+                content += 'vsphere_username     = "{}"\n'.format(vs_user)
+            if vs_pwd:
+                content += 'vsphere_userpassword = "{}"\n'.format(vs_pwd)
+            content += '\n'
+
+            LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
+            if self.simulate:
+                if self.verbose:
+                    print(content)
+            else:
+                with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
+                    fh.write(content)
+                os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
+
+        # File with variable declarations
+        content = textwrap.dedent('''\
+        # filename: variables.tf
+        # definition of the variables to be used in the play
+        # declaration happens in the file terraform.tfvars and private.auto.tfvars
+
+        ''')
+
+        tpl = textwrap.dedent('''\
+        variable "vsphere_vcenter" {{
+          default     = "{}"
+          description = "IP or DNS of the vSphere center."
+          type        = string
+        }}
+
+        ''')
+        content += tpl.format(vs_host)
+
+        tpl = textwrap.dedent('''\
+        variable "vsphere_username" {
+          description = "vSphere accountname to be used."
+          type        = string
+        }
+
+        variable "vsphere_userpassword" {
+          description = "Password for vSphere accountname."
+          type        = string
+        }
+
+        ''')
+        content += tpl
+
+        tpl = textwrap.dedent('''\
+        variable "vsphere_datacenter" {{
+          default     = "{dc}"
+          description = "Name of the vSphere datacenter to use."
+          type        = string
+        }}
+
+        ''')
+        content += tpl.format(dc=vs_dc)
+
+        tpl = textwrap.dedent('''\
+        variable "timezone" {{
+          default     = "{tz}"
+          description = "The global timezone used for VMs"
+          type        = string
+        }}
+
+        ''')
+        content += tpl.format(tz=self.tz_name)
+
+        LOG.debug(_("Creating {!r} ...").format('variables.tf'))
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('variables.tf', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('variables.tf', self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def create_dcfile(self):
+
+        vs_name = None
+        for vs_name in self.vsphere.keys():
+            break
+        vsphere = self.vsphere[vs_name]
+
+        LOG.debug(_("Creating {!r} ...").format('dc.tf'))
+        if self.verbose > 1:
+            LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
+                w='dcfile', v=vs_name))
+
+        content = textwrap.dedent('''\
+        # filename: dc.tf
+        # Configuring the VMware VSphere Provider and some dependend common used objects
+
+        provider "vsphere" {
+          vsphere_server       = var.vsphere_vcenter
+          user                 = var.vsphere_username
+          password             = var.vsphere_userpassword
+          allow_unverified_ssl = true
+        ''')
+
+#        if self.min_version_vsphere_provider:
+#            content += '  version              = ">= {}"\n'.format(
+#                str(self.min_version_vsphere_provider))
+
+        content += textwrap.dedent('''\
+        }
+
+        data "vsphere_datacenter" "dc" {
+          name = var.vsphere_datacenter
+        }
+
+        ''')
+
+        for cluster in vsphere.clusters:
+            tpl = textwrap.dedent('''\
+            data "vsphere_resource_pool" "{pv}" {{
+              name          = "{pn}"
+              datacenter_id = data.vsphere_datacenter.dc.id
+            }}
+
+            ''')
+            content += tpl.format(
+                pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
+
+        if self.used_dc_clusters:
+            for dsc_name in sorted(self.used_dc_clusters[vs_name], key=str.lower):
+                dsc_tf_name = vsphere.ds_cluster_mapping[dsc_name]
+                tpl = textwrap.dedent('''\
+                data "vsphere_datastore_cluster" "{tn}" {{
+                  name          = "{n}"
+                  datacenter_id = data.vsphere_datacenter.dc.id
+                }}
+
+                ''')
+                content += tpl.format(tn=dsc_tf_name, n=dsc_name)
+
+        if self.used_datastores:
+            for ds_name in sorted(self.used_datastores[vs_name], key=str.lower):
+                ds_tf_name = vsphere.ds_mapping[ds_name]
+                tpl = textwrap.dedent('''\
+                data "vsphere_datastore" "{tn}" {{
+                  name          = "{n}"
+                  datacenter_id = data.vsphere_datacenter.dc.id
+                }}
+
+                ''')
+                content += tpl.format(tn=ds_tf_name, n=ds_name)
+
+        for net_name in sorted(self.used_networks[vs_name], key=str.lower):
+            net_tf_name = vsphere.network_mapping[net_name]
+            tpl = textwrap.dedent('''\
+            data "vsphere_network" "{tn}" {{
+              name          = "{n}"
+              datacenter_id = data.vsphere_datacenter.dc.id
+            }}
+
+            ''')
+            content += tpl.format(n=net_name, tn=net_tf_name)
+
+        if self.vsphere_templates:
+            for tname in sorted(self.vsphere_templates[vs_name].keys(), key=str.lower):
+                tpl_tf_name = self.vsphere_templates[vs_name][tname].tf_name
+                tpl = textwrap.dedent('''\
+                data "vsphere_virtual_machine" "{tn}" {{
+                  name          = "{n}"
+                  datacenter_id = data.vsphere_datacenter.dc.id
+                }}
+
+                ''')
+                content += tpl.format(tn=tpl_tf_name, n=tname)
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('dc.tf', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('dc.tf', self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def create_backend_file(self):
+
+        file_name = 'backend.tf'
+        LOG.debug(_("Creating {!r} ...").format(file_name))
+
+        tpl = textwrap.dedent('''\
+        # Configuration of the backend for storing the terraform status information
+        # and the minimum required version of terraform
+
+        terraform {{
+          backend "consul" {{
+            address = "{host}"
+            scheme  = "{scheme}"
+            path    = "{prefix}/{project}"
+          }}
+        ''')
+
+        project = self.full_project_name
+        if not project:
+            project = self.project_name
+
+        content = tpl.format(
+            host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
+            prefix=self.config.tf_backend_path_prefix, project=project)
+
+        if self.min_version_terraform:
+            content += '  required_version = ">= {}"\n'.format(str(self.min_version_terraform))
+        else:
+            LOG.warn(_("No minimum version of Terraform defined."))
+
+        content += '}\n\n'
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open(file_name, 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod(file_name, self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def create_instance_files(self):
+
+        LOG.debug(_("Creating terraform files for VM instances."))
+
+        for vm in sorted(self.vms, key=lambda x: x.tf_name):
+            self.create_instance_file(vm)
+
+    # --------------------------------------------------------------------------
+    def create_instance_file(self, vm):
+
+        vs_name = vm.vsphere
+
+        fname = 'instance.' + vm.name + '.tf'
+        LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
+            f=fname, n=vm.name))
+
+        guest_id = self.config.guest_id
+        tpl_vm = None
+        if vm.vm_template:
+            tpl_vm = self.vsphere_templates[vs_name][vm.vm_template]
+            if self.verbose > 3:
+                LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
+            guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(tpl_vm.tf_name)
+        else:
+            guest_id = '"' + guest_id + '"'
+
+        content = self._create_instfile_general(vm, guest_id, tpl_vm)
+
+        i = 0
+        for iface in vm.interfaces:
+            content += self._create_instfile_if(vm, iface, i, tpl_vm)
+            i += 1
+
+        for unit_id in sorted(vm.disks.keys()):
+            content += self._create_instfile_disk(vm, unit_id)
+
+        content += textwrap.indent(textwrap.dedent('''\
+        cdrom {
+          client_device = "true"
+        }
+
+        '''), '  ')
+
+        content += self._create_instfile_custom(vm, tpl_vm)
+
+        if self.verbose > 1:
+            LOG.debug(_("Writing {!r}").format(fname))
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open(fname, 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod(fname, self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_general(self, vm, guest_id, tpl_vm):
+
+        vs_name = vm.vsphere
+
+        # ## General definitions of VM
+        if self.verbose > 1:
+            LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
+        content = textwrap.dedent('''\
+        # Definition of the VM instance {!r}.
+
+        ''').format(vm.name)
+
+        cluster = self.vsphere[vs_name].get_cluster_by_name(vm.cluster)
+        if not cluster:
+            msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
+                vm.cluster)
+            raise RuntimeError(msg)
+
+        content += textwrap.dedent('''\
+        resource "vsphere_virtual_machine" "{tn}" {{
+
+          resource_pool_id       = data.vsphere_resource_pool.{pv}.id
+          name                   = "{n}"
+        ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
+
+        if vm.ds_cluster:
+            dsc_tf_name = self.vsphere[vs_name].ds_cluster_mapping[vm.ds_cluster]
+            tpl = '  datastore_cluster_id   = data.vsphere_datastore_cluster.{}.id\n'
+            content += tpl.format(dsc_tf_name)
+
+        if vm.datastore:
+            ds_tf_name = self.vsphere[vs_name].ds_mapping[vm.datastore]
+            tpl = '  datastore_id           = data.vsphere_datastore.{}.id\n'
+            content += tpl.format(ds_tf_name)
+
+        content += textwrap.indent(textwrap.dedent('''\
+          num_cpus               = "{cpu}"
+          folder                 = "{f}"
+          num_cores_per_socket   = "1"
+          cpu_hot_add_enabled    = "true"
+          cpu_hot_remove_enabled = "true"
+          memory                 = "{m}"
+          memory_hot_add_enabled = "true"
+          boot_delay             = "{b}"
+          guest_id               = {g}
+        '''), '  ').format(
+            g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000))
+        if vm.vm_template:
+            tpl = '  scsi_type              = data.vsphere_virtual_machine.{}.scsi_type\n'
+            content += tpl.format(tpl_vm.tf_name)
+        content += '\n'
+
+        content += textwrap.indent(textwrap.dedent('''\
+        lifecycle {
+          ignore_changes = all
+        }
+        '''), '  ')
+        content += '\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_if(self, vm, iface, i, tpl_vm):
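+        """Generate the 'network_interface' block for one interface of the VM."""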
+
+        vs_name = vm.vsphere
+
+        # ## Interface definition
+
+        if self.verbose > 1:
+            LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
+        nw = iface.network
+        nw_name = self.vsphere[vs_name].network_mapping[nw]
+
+        content = textwrap.indent(textwrap.dedent('''\
+        network_interface {{
+          network_id   = data.vsphere_network.{n}.id
+          adapter_type = data.{vvm}.{t}.{nit}[0]
+        }}
+        '''), '  ').format(
+            n=nw_name, t=tpl_vm.tf_name,
+            vvm='vsphere_virtual_machine', nit='network_interface_types')
+        content += '\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_disk(self, vm, unit_id):
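+        """Generate the 'disk' block for the disk with the given unit id."""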
+
+        # ## Disk definitions
+        if self.verbose > 1:
+            LOG.debug(_("Generating disk definition {i} of {v!r}.").format(i=unit_id, v=vm.name))
+        disk = vm.disks[unit_id]
+        content = textwrap.indent(textwrap.dedent('''\
+        disk {{
+          label            = "disk{i}"
+          size             = "{s}"
+          eagerly_scrub    = "false"
+          thin_provisioned = "false"
+        '''), '  ').format(i=unit_id, s=int(disk.size_gb))
+        if unit_id > 0:
+            content += '    unit_number      = {}\n'.format(unit_id)
+        content += '  }\n\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_custom(self, vm, tpl_vm):
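+        """Generate the 'clone' block and all provisioners of the VM."""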
+
+        # ## Customization of VM
+        if self.verbose > 1:
+            LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
+
+        content = textwrap.indent(textwrap.dedent('''\
+        clone {{
+          template_uuid = data.vsphere_virtual_machine.{t}.id
+
+          customize {{
+            linux_options {{
+              host_name = "{h}"
+              domain    = "{d}"
+              time_zone = var.timezone
+            }}
+
+        '''), '  ').format(
+            t=tpl_vm.tf_name, h=vm.hostname, d=vm.domain)
+
+        content += self._create_instfile_nw(vm)
+        content += '    }\n'
+        content += '  }\n\n'
+
+        # ## local SSH cleanup before any actions
+        content += textwrap.indent(textwrap.dedent('''\
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {h} || true"
+        }}
+
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {i} || true"
+        }}
+
+        '''), '  ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+        # ## Copying postinstall scripts to VM
+
+        files = ['conf-resolver', 'create-motd']
+        if vm.has_puppet:
+            files.append('init-puppet')
+            files.append('update-all-packages')
+
+        for sname in files:
+
+            if self.verbose > 1:
+                LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
+                    f=sname, v=vm.name))
+
+            content += textwrap.indent(textwrap.dedent('''\
+            provisioner "file" {{
+              source      = "{d}/{f}"
+              destination = "/tmp/{f}"
+              connection {{
+                type = "ssh"
+                user = "root"
+                host = "{h}"
+              }}
+            }}
+
+            '''), '  ').format(
+                d=self.script_dir_rel, f=sname, h=vm.fqdn)
+
+        # ## Postinstall commands on host
+        commands = []
+
+        commands.append("usermod -c 'root {}' root".format(vm.fqdn))
+
+        commands.append("chmod +x /tmp/conf-resolver")
+        cmd = '/tmp/conf-resolver'
+        for ns in vm.nameservers:
+            cmd += ' --ns {!r}'.format(str(ns))
+        for dom in vm.searchdomains:
+            cmd += ' --search {!r}'.format(dom)
+        if vm.dns_options:
+            cmd += ' --options {!r}'.format(vm.dns_options)
+        else:
+            cmd += ' --options {!r}'.format('')
+        commands.append(cmd)
+        commands.append("rm -f /tmp/conf-resolver")
+
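+        # Escape double quotes - the commands are later embedded in double quoted
+        # strings inside the 'inline' list of the remote-exec provisioner.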
+        purpose = self.re_doublequote.sub('\\\"', vm.purpose)
+
+        zone = "{z}/{c}".format(z=vm.vsphere, c=vm.cluster)
+
+        commands.append("chmod +x /tmp/create-motd")
+        cmd = (
+            "/tmp/create-motd --purpose '{p}' --hardware 'vmware (x86_64)' --owner '{o}' "
+            "--location 'VMWare' --zone '{z}' --customer '{c}'  --email '{m}' --tier '{t}' "
+            "--environment '{e}' --role '{r}'").format(
+            p=purpose, t=vm.puppet_tier, o=vm.customer, z=zone, c=vm.puppet_customer,
+            m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
+        if vm.puppet_project:
+            cmd += " --project '{pr}'".format(pr=vm.puppet_project)
+        cmd += " | tee /etc/motd"
+        commands.append(cmd)
+        commands.append("rm -f /tmp/create-motd")
+
+        # ## Backup - Legato networker
+        commands.append("systemctl stop networker.service")
+        commands.append("rm -rfv /nsr/tmp /nsr/res")
+        if vm.has_backup:
+            commands.append("mkdir -pv /nsr/res")
+            commands.append(
+                "if [ ! -f /nsr/res/servers ] ; then "
+                "echo 'legato01.pixelpark.com' > /nsr/res/servers; fi")
+            commands.append("systemctl start networker.service; sleep 2")
+            commands.append("nsrports -S 7937-7999; sleep 2")
+            commands.append("systemctl stop networker.service; sleep 2")
+            commands.append(
+                "systemctl enable networker.service; systemctl start networker.service; sleep 2")
+            commands.append("nsrports; sleep 2")
+        else:
+            commands.append("systemctl disable networker.service")
+
+        # ## Configuring and starting puppet
+        if vm.has_puppet:
+            commands.append("chmod +x /tmp/init-puppet")
+            cmd = "/tmp/init-puppet --environment '{e}' --customer '{c}' "
+            if vm.puppet_project:
+                cmd += "--project '{pr}' "
+            cmd += "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p}' --email '{m}'"
+            cmd += " --zone '{z}'"
+            if vm.puppet_initial_install:
+                cmd += " --initial-install"
+            cmd = cmd.format(
+                p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer, z=zone,
+                pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
+            commands.append(cmd)
+            commands.append("rm -f /tmp/init-puppet")
+
+        content += '  provisioner "remote-exec" {\n'
+        content += '    inline = [\n'
+        for cmd in commands:
+            content += '      "{}",\n'.format(cmd)
+        content += '    ]\n'
+        content += '    connection {\n'
+        content += '      type = "ssh"\n'
+        content += '      user = "root"\n'
+        content += '      host = "{}"\n'.format(vm.fqdn)
+        content += '    }\n'
+        content += '  }\n\n'
+
+        # ## postconfigure actions with puppet
+        if vm.has_puppet:
+            content += self._create_instfile_puppet(vm)
+
+        # ## local SSH cleanup on destroy
+        content += textwrap.indent(textwrap.dedent('''\
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {h} || true"
+          when    = destroy
+        }}
+
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {i} || true"
+          when    = destroy
+        }}
+        '''), '  ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+        content += '}\n\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_nw(self, vm):
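+        """Generate the network customization (addresses, gateways, DNS) of the VM."""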
+
+        content = ''
+
+        gw4 = None
+        gw6 = None
+        for iface in vm.interfaces:
+
+            content += "      network_interface {\n"
+            if iface.address_v4:
+                content += '        ipv4_address = "{}"\n'.format(iface.address_v4)
+                if iface.netmask_v4 is not None:
+                    content += '        ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
+            if iface.address_v6:
+                content += '        ipv6_address = "{}"\n'.format(iface.address_v6)
+                if iface.netmask_v6 is not None:
+                    content += '        ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
+            content += '      }\n\n'
+
+            if not gw4:
+                gw4 = iface.gateway_v4
+            if not gw6:
+                gw6 = iface.gateway_v6
+
+        if gw4:
+            content += '      ipv4_gateway    = "{}"\n'.format(gw4)
+        if gw6:
+            content += '      ipv6_gateway    = "{}"\n'.format(gw6)
+
+        ns = ', '.join('"{}"'.format(x) for x in vm.nameservers)
+        content += '      dns_server_list = [{}]\n'.format(ns)
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_puppet(self, vm):
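+        """Generate the provisioners for signing, starting and cleaning up Puppet."""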
+
+        cmd = (
+            "ssh -o StrictHostKeyChecking=no {ca} "
+            "'sudo /opt/puppetlabs/bin/puppetserver ca sign --certname {h} || true'").format(
+            ca=self.config.puppetca, h=vm.fqdn)
+
+        content = textwrap.indent(textwrap.dedent('''\
+        provisioner "local-exec" {{
+          command = "{cmd}"
+        }}
+
+        provisioner "remote-exec" {{
+          inline = [
+            "/opt/puppetlabs/bin/puppet agent --test || true",
+            "/usr/bin/systemctl start puppet.service",
+            "/usr/bin/systemctl enable puppet.service",
+            "chmod +x /tmp/update-all-packages",
+            "/tmp/update-all-packages",
+            "rm -f /tmp/update-all-packages",
+          ]
+          connection {{
+            type = "ssh"
+            user = "root"
+            host = "{h}"
+          }}
+        }}
+
+        '''), '  ').format(cmd=cmd, h=vm.fqdn,)
+
+        # Destroy actions with puppet
+        cmd1 = "ssh -o StrictHostKeyChecking=no {ma} "
+        cmd1 += "'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
+        cmd1 = cmd1.format(ma=self.config.puppetmaster, h=vm.fqdn)
+
+        cmd2 = "ssh -o StrictHostKeyChecking=no {ca} "
+        cmd2 += "'sudo /opt/puppetlabs/bin/puppetserver ca clean --certname {h} || true'"
+        cmd2 = cmd2.format(ca=self.config.puppetca, h=vm.fqdn)
+
+        content += textwrap.indent(textwrap.dedent('''\
+        provisioner "remote-exec" {{
+          inline = [
+            "/usr/bin/systemctl stop puppet.service || true",
+          ]
+          when = destroy
+          connection {{
+            type = "ssh"
+            user = "root"
+            host = "{h}"
+          }}
+        }}
+
+        provisioner "local-exec" {{
+          command = "{cmd1}"
+          when    = destroy
+        }}
+
+        provisioner "local-exec" {{
+          command = "{cmd2}"
+          when    = destroy
+        }}
+
+        '''), '  ').format(cmd1=cmd1, cmd2=cmd2, h=vm.fqdn)
+
+        return content
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
index 78bd2837fd31e6044a9ad7323061cd71a407186d..aaad32410753d1dea3ef033434c1dc68c6e902a0 100644 (file)
@@ -31,7 +31,7 @@ from ..errors import AbortExecution
 
 from ..xlate import XLATOR
 
-__version__ = '0.1.0'
+__version__ = '0.1.1'
 LOG = logging.getLogger(__name__)
 
 _ = XLATOR.gettext
@@ -934,6 +934,18 @@ class CrTfHandlerVmwMixin():
         if network not in self.used_networks[vs_name]:
             self.used_networks[vs_name].append(network)
 
+    # --------------------------------------------------------------------------
+    def ensure_vsphere_folders(self):
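+        """Ensure the existence of all necessary VM folders on the first vSphere instance."""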
+
+        # Use the first configured vSphere instance.
+        vs_name = next(iter(self.vsphere))
+        vsphere = self.vsphere[vs_name]
+
+        print()
+        LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
+        vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
+
 
 # =============================================================================