--- /dev/null
+[General]
+
+; type of operations of this provisioning daemon
+; possible values: 'pserver', 'storage', 'loadbalancer', 'gw'
+;operation_type = pserver
+
+; Base directory for all operations in file system
+base_dir = /var/lib/py-provisioning-daemon
+
+; directory containing all trace files
+trace_dir = %(base_dir)s/trace
+
+; whether to perform tracing of jobs
+do_trace = yes
+
+; where to find provisioning plugins
+plugin_dir = /usr/share/py-provisioning-daemon/plugins
+
+; The log file for stderr messages
+error_log = %(base_dir)s/error.log
+
+; the port number, where the daemon is listening
+;port = 8073
+
+; an IP address (IPv4 or IPv6) to use for socket bind to listen on a port,
+; if not given or '*', the listening socket will be bound to all
+; available IP addresses (0.0.0.0 and ::)
+;listen_address = *
+
+; The client port to use for broadcasting messages to peers
+; it must not be the same as port
+;udp_client_port = 8074
+
+; the maximum number of child processes (must be between 1 and 999)
+;max_children = 20
+
+; maximum timeout on collecting finished children per stage
+; if there are too many child processes
+; on every stage an increased level of signal is sent
+; to all child processes (None -> SIGHUP -> SIGTERM -> SIGKILL)
+;timeout_collect_childs = 10
+
+; the user the daemon should run as
+;user = ppd
+
+; the group the daemon should run as
+;group = users
+
+; flag for logging all XML traffic under base_dir
+;xml_log = False
+
+; buffer_size: The size of the buffer for receiving data from sockets
+;buffer_size = 8192
+
+; the name of the current node if it should be different
+; to socket.gethostname()
+;node_name = Pserver22
+
+; the broadcast address, where to send all UDP peer request
+; and publishing messages
+; IT MUST BE SET
+broadcast_address = 10.33.255.255
+
+; an own address to publish as the IP of the current node
+; to publish to the other peers
+; IT MUST BE SET
+mgmt_address = 10.33.64.52
+
+; request_timeout: timeout in communication with requestor (if working
+; as a socket client)
+;request_timeout = 30
+
+; the standard time interval for looking for new requests
+;polling_interval = 0.2
+
+; the maximum number of queued connections (between 0 and 5)
+;request_queue_size = 5
+
+; interval in seconds between publications of the current node to the peers
+; NOTE(review): was ';minimum_publishing_interval = 3600', which duplicated
+; the key documented further below -- per this comment the regular
+; publishing interval was presumably meant; verify the real option name.
+;publishing_interval = 3600
+
+; maximum number of seconds to randomly vary the publishing interval
+; from one publication to the next
+;publishing_jitter = 300
+
+; the minimum interval in seconds from one publication of itself
+; to the next publication, to avoid broadcast storms
+;minimum_publishing_interval = 60
+
+; timeout for receiving UDP responses
+;udp_rcv_timeout = 2
+
+; number of tries to get a response to UDP requests
+;udp_rcv_tries = 3
+
+; a maybe additional storage server configuration file
+;add_storage_server_config =
+
+; flag, whether to get the information from configuration files
+; instead of from the SOAP PdtService
+;info_from_config = False
+
+[ProvWebService]
+
+; The URL of the provisioning web service for sending status messages
+url = https://appserver/ProvisioningService/ProvisioningServiceWSService
+
+; the user name for authentication to the web service
+user = profitbricks-vcb
+
+; the password for authentication to the web service
+pwd = ProfitBricks-VCB
+
+; timeout on connection to the webservice
+timeout = 30
+
+[PdtService]
+
+; The URL of the WSDL of the PDT Web service
+;url = https://appserver/PDTService/PDTService?wsdl
+
+; the user name for authentication to the web service
+;user = profitbricks-dms
+
+; the password for authentication to the web service
+;pwd = ProfitBricks-DMS
+
+[Handler]
+; some parameters valid for all handlers (both pserver and storage)
+
+; maximum tries to wait for existence of a device
+;max_tries_device_wait = 6
+
+; initial delay to wait for existence of a device in seconds
+;init_delay_device_wait = 1
+
+; increasing time in seconds for delay to wait for existence of a device
+;inc_delay_device_wait = 2
+
+[Pserver]
+; put here options only valid on a pserver
+
+; lockfile for locking iSCSI refresh (shared with pb_storage_vol_refresh)
+;iscsi_refresh_lockfile = /tmp/iscsi-refresh.lock
+
+; flag for using Device-Mapper things
+;perform_dmsetup = False
+
+[Storage]
+
+; Enable compression on newly created ZFS volumes
+;compress_volume = false
+
+; a local IP address (or hostname), what could be used as target
+; address for ZFS send
+;sync_address = storageXY-ib0
+
+; Name of the ZFS Pool as the root of all ZFS volumes
+;zfs_pool = storage/volumes
+
+; name of a mounted filesystem for temporary purposes
+; (e.g. template files to convert). It must exist.
+;temp_fs = /storage/templates
+
+; the name of the iSCSI target group
+;iscsi_target_group = cloudstorage
+
+; enable replication in creating storage volumes
+; Note: Disable on a backup host
+;enable_replication = True
+
+; hostname or IP address of the master host for sndradm (replication)
+;host_master = storage01-ib
+
+; hostname or IP address of the backup host for sndradm (replication)
+;host_backup = storage02-ib
+
+; URI of the backup host to send the
+; commands to create the backup volume
+;host_backup_mgmt = vcb://storage02:8073
+
+; defines the role in the replication system (master or backup)
+;replication_role = master
+
+; Should a view to the standard targets (host group = all,
+; target group = all) be created for a newly created iSCSI LUN?
+;add_lun_std_view = False
+
+; the delay in seconds between logically removing a volume (setting the
+; remove property) and really destroying it. Default: 48 hours
+;remove_delay = 172800
+
+; path to the dump-device.sh script for dumping a device to STDOUT
+;dump_device_bin = /opt/profitbricks/bin/dump-device.sh
+
+[FtpServer]
+
+; some data to get information about template images and
+; to stream them to STDOUT via SSH
+
+Host = de-dc1-c1-ftp
+; hostname or IP address of the FTP server, where the SSH daemon is accessible
+
+User = ppd
+; the username on the FTP server to execute the image scripts as root by sudo
+
+ImageInfo = /home/ppd/work/image-info
+; complete path name of the image info script to get the real size of the image
+
+ImageStream = /home/ppd/work/image-stream
+; complete path name of the image stream script to stream the converted image to STDOUT
+
+[StorageServer]
+
+; this section contains a list of all available storage servers;
+; keys are freely choosable, but unique, names of storage servers,
+; values are the URIs of the appropriate machines.
+; Currently the only supported URI scheme is vcb://,
+; which looks like 'vcb://ip-address[:port]', where the standard port
+; is 8073
+
+Storage01 = vcb://10.1.65.10:8073
+Storage02 = vcb://10.1.65.20:8073
+Storage16 = vcb://10.1.64.116:8073
+Storage17 = vcb://10.1.64.117:8073
+
+
+
+
+; vim: filetype=cfg fileencoding=utf-8 ts=4 expandtab
mkdir -p './texmf/dvips.d'
mkdir -p './unixODBC/ODBCDataSources'
maybe chmod 0755 '.'
+maybe chmod 0644 './._cfg0000_ppd.cfg.default'
maybe chmod 0700 './.etckeeper'
maybe chmod 0600 './.gitignore'
maybe chmod 0600 './.pwd.lock'
maybe chmod 0644 './conf.d/openvpn'
maybe chmod 0644 './conf.d/pciparm'
maybe chmod 0644 './conf.d/postgresql-9.1'
+maybe chmod 0644 './conf.d/ppd'
maybe chmod 0644 './conf.d/pydoc-2.7'
maybe chmod 0644 './conf.d/pydoc-3.2'
maybe chmod 0644 './conf.d/rfcomm'
maybe chmod 0660 './courier/authlib/authpgsqlrc.dist'
maybe chmod 0755 './cron.d'
maybe chmod 0644 './cron.d/.keep_sys-process_vixie-cron-0'
+maybe chmod 0644 './cron.d/py-provisioning-daemon'
maybe chmod 0750 './cron.daily'
maybe chmod 0644 './cron.daily/.keep_sys-process_cronbase-0'
maybe chmod 0755 './cron.daily/00-logwatch'
maybe chmod 0755 './init.d/php-fpm'
maybe chmod 0755 './init.d/postfix'
maybe chmod 0755 './init.d/postgresql-9.1'
+maybe chmod 0755 './init.d/ppd'
maybe chmod 0755 './init.d/procfs'
maybe chmod 0755 './init.d/pwcheck'
maybe chmod 0755 './init.d/pydoc-2.7'
maybe chmod 0644 './mtab'
maybe chmod 0755 './mtools'
maybe chmod 0644 './mtools/mtools.conf'
+maybe chmod 0644 './multipath.conf'
maybe chmod 0755 './mysql'
maybe chmod 0644 './mysql/my.cnf'
maybe chmod 0644 './mysql/mysqlaccess.conf'
maybe chmod 0600 './postfix/saslpass'
maybe chmod 0755 './postgresql-9.1'
maybe chmod 0644 './postgresql-9.1/.keep_dev-db_postgresql-base-9.1'
+maybe chmod 0644 './ppd.cfg'
maybe chmod 0644 './ppd.cfg.default'
maybe chmod 0755 './ppp'
maybe chmod 0600 './ppp/chap-secrets'
maybe chmod 0700 './ssl/private'
maybe chmod 0644 './ssl/private/.keep_dev-libs_openssl-0'
maybe chmod 0440 './sudoers'
+maybe chmod 0755 './sudoers.d'
+maybe chmod 0644 './sudoers.d/ppd'
maybe chmod 0644 './sysctl.conf'
maybe chmod 0755 './sysctl.d'
maybe chmod 0644 './sysctl.d/libvirtd.conf'
maybe chmod 0755 './udev'
maybe chmod 0755 './udev/rules.d'
maybe chmod 0644 './udev/rules.d/.keep_sys-fs_udev-0'
+maybe chmod 0644 './udev/rules.d/60-kpartx.rules'
+maybe chmod 0644 './udev/rules.d/60-persistent-storage-dm.rules'
+maybe chmod 0644 './udev/rules.d/70-iscsi.rules'
maybe chmod 0644 './udev/rules.d/70-persistent-cd.rules'
maybe chmod 0644 './udev/rules.d/70-persistent-net.rules'
maybe chmod 0644 './udev/rules.d/77-mm-ericsson-mbm.rules'
--- /dev/null
+# Defaults for py-provisioning-daemon initscript
+# sourced by /etc/init.d/ppd and /usr/sbin/ppd-watchdog
+# installed at /etc/default/ppd by the maintainer scripts
+
+#
+# This is a POSIX shell fragment
+#
+
+# Verbosity of the init script
+VERBOSE=yes
+
+# Path to the executable ppd
+#PPD="/usr/sbin/ppd"
+
+# Path to the watchdog executable
+#PPD_WATCHDOG="/usr/sbin/ppd-watchdog"
+
+# Working directory of ppd
+#PPD_DIR="/var/lib/py-provisioning-daemon"
+
+# Log file of the PPD watchdog process
+#WATCHDOG_LOG="/var/log/ppd-watchdog.log"
+
+# Watchdog PID file
+#WATCHDOG_PID_FILE="/var/run/ppd-watchdog.pid"
+
+# Start-stop-logfile
+#START_LOG="/var/log/ppd-start.log"
+
+# PID file of PPD itself
+#PPD_PIDFILE="${PPD_DIR}/ppd.pid"
+
+# Debug mode for the PPD watchdog process (0 or 1)
+#DEBUG=1
+
+# Syslog facility for the PPD watchdog process
+#LOG_FACILITY="daemon"
+
+# if this file exists, no watchdog will be started, only PPD itself
+#NO_WATCHDOG_FILE="${PPD_DIR}/no-watchdog"
+
+# enable external watchdog
+#ENABLE_EXTERNAL_WATCHDOG=yes
+
+# Main configuration file of ppd
+#PPD_CONFIG="/etc/ppd.cfg"
+
+# Additional arguments that are passed to the Daemon.
+#DAEMON_ARGS="-v -F ${LOG_FACILITY}"
+#DAEMON_ARGS="-v"
+#DAEMON_ARGS=""
+
+# vim: ts=4 expandtab fileencoding=utf-8 filetype=sh
--- /dev/null
+# /etc/cron.d/py-provisioning-daemon: crontab entries for the py-provisioning-daemon
+
+SHELL=/bin/sh
+PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
+# Run the trace-data housekeeping script every 10 minutes (at minutes
+# 1,11,...,51) as user ppd, if it is installed and executable.
+# NOTE: '&>' is a bash-only shorthand; with SHELL=/bin/sh it would
+# background the command instead of redirecting its output, so the
+# POSIX form '>/dev/null 2>&1' is used here.
+1-59/10 * * * * ppd test -x /opt/profitbricks/bin/perform-ppd-trace-data.sh && /opt/profitbricks/bin/perform-ppd-trace-data.sh >/dev/null 2>&1
+
+# vim: filetype=crontab ts=4
--- /dev/null
+#!/sbin/runscript
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# OpenRC init script for the PB python provisioning daemon (ppd) and
+# its external watchdog process.
+# NOTE(review): the settings below duplicate those documented in
+# /etc/default/ppd -- keep both files in sync.
+
+extra_started_commands="reload watchdogrestart watchdogcrashrestart"
+
+# Path to the daemon
+PPD="/usr/sbin/ppd"
+
+# Path to the watchdog executable
+PPD_WATCHDOG="/usr/sbin/ppd-watchdog"
+
+# Introduce a short description here
+DESC="PB python provisioning daemon"
+
+# Working directory
+PPD_DIR="/var/lib/py-provisioning-daemon"
+
+# enable external watchdog
+ENABLE_EXTERNAL_WATCHDOG=yes
+
+# Watchdog PID file
+WATCHDOG_PID_FILE="/var/run/ppd-watchdog.pid"
+
+# PID file of PPD itself
+PPD_PIDFILE="${PPD_DIR}/ppd.pid"
+
+# Process name used by start-stop-daemon to find the watchdog
+WATCHDOG_NAME="ppd-watchdog"
+# Syslog facility used when logging watchdog-triggered restarts
+LOG_FACILITY="daemon"
+
+#-------------------------------------------------------------------
+# OpenRC dependencies: network, network mounts and a system logger
+# must be up before ppd starts.
+depend() {
+    need net netmount logger
+}
+
+#-------------------------------------------------------------------
+# Poll once per second (via 'kill -0') until the process with the given
+# PID has terminated. Used by reload() to wait for the daemon to exit.
+wait_on_pid() {
+
+    local pid="${1}"
+
+    # NOTE(review): 'log' is not defined in this script and is not a
+    # standard OpenRC helper -- einfo/elog was presumably intended; verify.
+    log "Waiting for PID ${pid} for ending"
+
+    running=0
+    if kill -0 "${pid}" 2>/dev/null ; then
+        running=1
+    fi
+
+    # Re-probe every second until the signal check fails (process gone).
+    while [ "${running}" = "1" ] ; do
+
+        sleep 1
+        if kill -0 "${pid}" 2>/dev/null ; then
+            running=1
+        else
+            running=0
+        fi
+
+    done
+
+}
+
+#-------------------------------------------------------------------
+# Start the external watchdog in the background (unless disabled via
+# ENABLE_EXTERNAL_WATCHDOG) and record its PID in WATCHDOG_PID_FILE.
+start_watchdog() {
+    if [ "${ENABLE_EXTERNAL_WATCHDOG}" != "yes" ] ; then
+        return 0
+    fi
+    ebegin "Starting ${DESC} watchdog"
+    start-stop-daemon \
+        --start \
+        --background \
+        --make-pidfile \
+        --pidfile "${WATCHDOG_PID_FILE}" \
+        --exec "${PPD_WATCHDOG}" \
+        --name "${WATCHDOG_NAME}"
+    eend $? "Failed to Start ${DESC} watchdog"
+}
+
+#-------------------------------------------------------------------
+# Start the provisioning daemon itself.
+# NOTE(review): DAEMON_ARGS is not set anywhere in this script (it only
+# appears as commented examples in /etc/default/ppd) -- confirm it is
+# provided by the environment before relying on it.
+start_ppd() {
+    ebegin "Starting ${DESC}"
+    start-stop-daemon --start --pidfile ${PPD_PIDFILE} --exec ${PPD} ${DAEMON_ARGS}
+    eend $? "Failed to Start ${DESC}"
+}
+
+#-------------------------------------------------------------------
+# Stop the daemon: send SIGTERM, or SIGHUP when called as
+# 'stop_ppd graceful'. The PID is read from the PID file if present,
+# otherwise looked up via pidof.
+stop_ppd() {
+
+    local signal="-s TERM"
+    if [ "${1}" = "graceful" ] ; then
+        signal="-s HUP"
+    fi
+
+    ebegin "Stopping ${DESC}"
+    if [ -f "${PPD_PIDFILE}" ] ; then
+        PID=$(cat "${PPD_PIDFILE}")
+    else
+        # NOTE(review): pidof matches program names; the quoted
+        # 'python /usr/sbin/ppd' is a single name containing a space
+        # and will most likely never match -- verify this fallback.
+        PID=$(pidof 'python /usr/sbin/ppd')
+    fi
+
+    if [ -n "${PID}" ]; then
+        kill ${signal} ${PID}
+        eend $? "Failed to Stop ${DESC}"
+    else
+        ewarn "${DESC} not running"
+    fi
+
+}
+
+#-------------------------------------------------------------------
+# Stop the watchdog. When the watchdog is disabled, still attempt to
+# stop a possibly leftover instance, but quietly and without messages.
+stop_watchdog() {
+
+    if [ "${ENABLE_EXTERNAL_WATCHDOG}" = "yes" ] ; then
+        ebegin "Stopping ${DESC} watchdog"
+        start-stop-daemon \
+            --stop \
+            --pidfile "${WATCHDOG_PID_FILE}" \
+            --name "${WATCHDOG_NAME}"
+        eend $? "Failed to Stop ${DESC} watchdog"
+    else
+        start-stop-daemon \
+            --stop \
+            --pidfile "${WATCHDOG_PID_FILE}" \
+            --name "${WATCHDOG_NAME}" \
+            --quiet
+    fi
+}
+
+#-------------------------------------------------------------------
+# Restart the daemon (the watchdog itself is left running). With the
+# argument 'noisy', additionally log a warning to syslog that the
+# daemon died and was restarted.
+watchdogrestart() {
+
+    local noisy="n"
+    if [ "${1}" = "noisy" ] ; then
+        noisy="y"
+    fi
+
+    stop_ppd
+    start_ppd
+    if [ "${noisy}" = "y" ]; then
+        logger -i -t ppd-init \
+            -p "${LOG_FACILITY}".warning \
+            "${DESC} died, restarted"
+    fi
+
+}
+
+#-------------------------------------------------------------------
+# Entry point used after a daemon crash: restart and log loudly.
+watchdogcrashrestart() {
+    watchdogrestart noisy
+}
+
+#-------------------------------------------------------------------
+# OpenRC 'start': bring up the daemon first, then its watchdog.
+start() {
+    start_ppd || return 1
+    start_watchdog || return 2
+}
+
+#-------------------------------------------------------------------
+# OpenRC 'stop': stop the watchdog first so it cannot restart the
+# daemon we are about to terminate.
+stop() {
+    stop_watchdog || return 2
+    stop_ppd
+}
+
+#-------------------------------------------------------------------
+# OpenRC 'restart': full stop (watchdog first), then start again.
+restart() {
+    stop_watchdog
+    stop_ppd
+    start_ppd || return 1
+    start_watchdog || return 2
+}
+
+#-------------------------------------------------------------------
+# OpenRC 'reload': if the watchdog is running, forward SIGHUP to it;
+# otherwise wait for the daemon process to exit and start a fresh one.
+# NOTE(review): in the else branch nothing signals the daemon
+# ('stop_ppd graceful' is commented out), so wait_on_pid may block
+# indefinitely -- confirm the intended reload semantics.
+reload() {
+    #stop_ppd graceful
+    local PID
+    if [ -f "${WATCHDOG_PID_FILE}" ] ; then
+        PID=$(cat "${WATCHDOG_PID_FILE}")
+        kill -s HUP "${PID}"
+    else
+        PID=$(pidof 'python /usr/sbin/ppd')
+        wait_on_pid "${PID}"
+        start_ppd
+    fi
+}
+
+# vim: ts=4 filetype=gentoo-init-d fileencoding=utf-8
--- /dev/null
+blacklist {
+}
+
+defaults {
+# # name : failback
+# # scope : multipathd
+# # desc : tell the daemon to manage path group failback, or not to.
+# #           0 means immediate failback, values >0 mean deferred
+# # failback expressed in seconds.
+# # values : manual|immediate|n > 0
+# # default : manual
+# #
+ failback 10
+
+# # name : selector
+# # scope : multipath
+# # desc : the default path selector algorithm to use
+# # these algorithms are offered by the kernel multipath target
+# # values : "round-robin 0"
+# # default : "round-robin 0"
+# #
+# selector "round-robin 0"
+
+# # name : path_grouping_policy
+# # scope : multipath
+# # desc : the default path grouping policy to apply to unspecified
+# # multipaths
+# # values : failover = 1 path per priority group
+# # multibus = all valid paths in 1 priority group
+# # group_by_serial = 1 priority group per detected serial
+# # number
+# # group_by_prio = 1 priority group per path priority
+# # w value
+# # group_by_node_name = 1 priority group per target node name
+# # default : failover
+# #
+# path_grouping_policy failover
+
+# # name : user_friendly_names
+# # scope : multipath
+# # desc : If set to "yes", using the bindings file
+# # /var/lib/multipath/bindings to assign a persistent and
+# # unique alias to the multipath, in the form of mpath<n>.
+# # If set to "no" use the WWID as the alias. In either case
+# #             this will be overridden by any specific aliases in this
+# # file.
+# # values : yes|no
+# # default : no
+# user_friendly_names no
+
+# # name : path_checker, checker
+# # scope : multipath & multipathd
+# # desc : the default method used to determine the paths' state
+# # values : readsector0|tur|emc_clariion|hp_sw|directio|rdac|cciss_tur
+# # default : directio
+# path_checker directio
+
+# # name : no_path_retry
+# # scope : multipath & multipathd
+# # desc : tell the number of retries until disable queueing, or
+# # "fail" means immediate failure (no queueing),
+# # "queue" means never stop queueing
+# # values : queue|fail|n (>0)
+# # default : (null)
+ no_path_retry queue
+
+}
--- /dev/null
+[General]
+
+operation_type = storage
+
+base_dir = /var/lib/py-provisioning-daemon
+plugin_dir = /usr/share/py-provisioning-daemon/plugins
+error_log = %(base_dir)s/error.log
+trace_dir = %(base_dir)s/trace
+do_trace = yes
+;port = 8072
+;port = 8073
+request_timeout = 90
+xml_log = True
+max_children = 50
+
+node_name = ${HOSTNAME}
+broadcast_address = 10.1.255.255
+mgmt_address = ${HOST_IP}
+
+;request_timeout = 30
+;polling_interval = 0.2
+;request_queue_size = 5
+
+user = ppd
+group = staff
+
+[Storage]
+
+zfs_pool = storage/volumes
+;iscsi_target_group = cloudstorage
+enable_replication = True
+host_master = storage01-ib
+host_backup = storage02-ib
+host_backup_mgmt = vcb://storage02:8073
+; receive_port_dir = /var/lib/py-provisioning-daemon/zfs-receive
+
+replication_role = master
+
+[ProvWebService]
+url = https://appserver/ProvisioningService/ProvisioningServiceWSService
+user = profitbricks-vcb
+pwd = ProfitBricks-VCB
+timeout = 60
+
+[PdtService]
+url = https://appserver/PDTService/PDTService?wsdl
+user = profitbricks-dms
+pwd = ProfitBricks-DMS
+
+; vim: ts=4 expandtab fileencoding=utf-8 filetype=cfg
--- /dev/null
+# Enable commands for ppd (user of the ProfitBricks Python provisioning daemon)
+# NOTE(review): the active rule below grants unrestricted passwordless
+# root ('NOPASSWD: ALL'), superseding the restricted command list kept
+# commented out here -- confirm this is intended.
+#ppd ALL=(root) NOPASSWD: /sbin/dmsetup, /usr/bin/iscsiadm, /sbin/udevadm, /sbin/blockdev, /bin/chown, /sbin/parted, /bin/mount, /bin/umount, /sbin/kpartx, /sbin/losetup, /usr/bin/mkpasswd, /bin/chmod, /sbin/blkid, /sbin/mkswap, /sbin/resize2fs, /sbin/partprobe, /sbin/modprobe, /bin/rm, /usr/bin/ssh-keygen, /usr/share/py-provisioning-daemon/delete_blockdevice.sh, /usr/share/py-provisioning-daemon/rescan_iscsi_bus.sh, /lib/udev/scsi_id
+ppd ALL=(root) NOPASSWD: ALL
--- /dev/null
+#
+# persistent links for device-mapper devices
+# only hardware-backed device-mapper devices (ie multipath, dmraid,
+# and kpartx) have meaningful persistent device names
+#
+
+KERNEL!="dm-*", GOTO="kpartx_end"
+ACTION=="remove", GOTO="kpartx_end"
+
+# This is a temporary hack until Debian's dmsetup properly supports "dmsetup
+# export". For more information see: #434241, #487881, #493078
+IMPORT{program}="dmsetup_env %M %m"
+
+ENV{DM_TABLE_STATE}!="LIVE", GOTO="kpartx_end"
+
+ENV{DM_UUID}=="?*", IMPORT{program}=="kpartx_id %M %m $env{DM_UUID}"
+
+OPTIONS="link_priority=50"
+
+# Create persistent links for multipath tables
+ENV{DM_UUID}=="mpath-*", \
+ SYMLINK+="disk/by-id/$env{DM_TYPE}-$env{DM_NAME}"
+
+# Create persistent links for dmraid tables
+ENV{DM_UUID}=="dmraid-*", \
+ SYMLINK+="disk/by-id/$env{DM_TYPE}-$env{DM_NAME}"
+
+# Create persistent links for partitions
+ENV{DM_PART}=="?*", \
+ SYMLINK+="disk/by-id/$env{DM_TYPE}-$env{DM_NAME}-part$env{DM_PART}"
+
+# Create dm tables for partitions
+
+# Deactivated creating of partition devices for DM devices
+# (Frank + Michael, 2012-01-17)
+#ENV{DM_STATE}=="ACTIVE", ENV{DM_UUID}=="mpath-*", \
+# RUN+="/sbin/kpartx -a -p -part /dev/$name"
+ENV{DM_STATE}=="ACTIVE", ENV{DM_UUID}=="dmraid-*", \
+ RUN+="/sbin/kpartx -a -p -part /dev/$name"
+
+LABEL="kpartx_end"
+
--- /dev/null
+# Udev rules for device-mapper devices.
+# See /usr/share/doc/dmsetup/README.udev for further information.
+#
+#ENV{DM_UDEV_RULES}=="", GOTO="persistent_storage_dm_end"
+#
+#OPTIONS="link_priority=-100"
+#ENV{DM_UUID}=="DMRAID-*", OPTIONS="link_priority=100"
+#
+#SYMLINK+="disk/by-id/dm-name-$env{DM_NAME}"
+#ENV{DM_UUID}=="?*", SYMLINK+="disk/by-id/dm-uuid-$env{DM_UUID}"
+#
+#ENV{DM_UDEV_DISABLE_DISK_RULES_FLAG}!="", GOTO="persistent_storage_dm_end"
+#
+#IMPORT{program}="/sbin/blkid -o udev -p $tempnode"
+#ENV{ID_FS_USAGE}=="filesystem|other|crypto", ENV{ID_FS_UUID_ENC}=="?*", SYMLINK+="disk/by-uuid/$env{ID_FS_UUID_ENC}"
+#ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_FS_LABEL_ENC}=="?*", SYMLINK+="disk/by-label/$env{ID_FS_LABEL_ENC}"
+#
+#LABEL="persistent_storage_dm_end"
--- /dev/null
+# /etc/udev/rules.d/70-iscsi.rules
+# Local rules for COMSTAR iSCSI LUNs; earlier symlink variants kept
+# commented out for reference:
+# ENV{ID_MODEL}=="COMSTAR", ENV{ID_SERIAL_SHORT}=="600144f0*", ENV{DEVTYPE}=="disk", SYMLINK="iscsi-$env{ID_SERIAL_SHORT}"
+# ENV{ID_MODEL}=="COMSTAR", ENV{ID_SERIAL_SHORT}=="600144f0*", ENV{DEVTYPE}=="disk", SYMLINK="mapper/$env{ID_SERIAL}"
+# ENV{DM_TYPE}=="scsi", ENV{DM_NAME}=="3600144f0*", ENV{DEVTYPE}=="disk", SYMLINK="iscsi-dm-$env{DM_NAME}"
+
+# NOTE(review): the rule below only matches and assigns nothing, so it
+# has no effect as written -- confirm whether an assignment is missing.
+ENV{ID_MODEL}=="COMSTAR", ENV{ID_SERIAL_SHORT}=="600144f0*"
+