author     Scott Moser <smoser@ubuntu.com>  2016-08-10 09:06:15 -0600
committer  Scott Moser <smoser@ubuntu.com>  2016-08-10 09:06:15 -0600
commit     c3c3dc693c14175e110b5fe125d4d5f98ace9700 (patch)
tree       8858702c2c8a6ad4bf1bb861a4565e0a9c28e588
parent     5bd3493d732e5b1902872958e8681f17cbc81ce5 (diff)
download   cloud-init-trunk.tar.gz

README: Mention move of revision control to git. (HEAD, trunk)

cloud-init development has moved its revision control to git. It is
available at

    https://code.launchpad.net/cloud-init

Clone with

    git clone https://git.launchpad.net/cloud-init

or

    git clone git+ssh://git.launchpad.net/cloud-init

For more information see

    https://git.launchpad.net/cloud-init/tree/HACKING.rst
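As a quick sanity check that the new repository is live, the published refs can be listed without cloning anything (a minimal sketch; it assumes only that a local git is installed):

    # list the refs published on the new git remote, without cloning
    git ls-remote https://git.launchpad.net/cloud-init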
-rw-r--r--  .bzrignore  4
-rw-r--r--  ChangeLog  776
-rw-r--r--  HACKING.rst  48
-rw-r--r--  LICENSE  674
-rw-r--r--  MANIFEST.in  8
-rw-r--r--  Makefile  82
-rw-r--r--  README  11
-rw-r--r--  TODO.rst  43
-rw-r--r--  cloudinit/__init__.py  21
-rw-r--r--  cloudinit/cloud.py  109
-rw-r--r--  cloudinit/cmd/__init__.py  21
-rw-r--r--  cloudinit/cmd/main.py  685
-rw-r--r--  cloudinit/config/__init__.py  58
-rw-r--r--  cloudinit/config/cc_apt_configure.py  319
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py  57
-rw-r--r--  cloudinit/config/cc_bootcmd.py  54
-rw-r--r--  cloudinit/config/cc_byobu.py  80
-rw-r--r--  cloudinit/config/cc_ca_certs.py  104
-rw-r--r--  cloudinit/config/cc_chef.py  342
-rw-r--r--  cloudinit/config/cc_debug.py  109
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py  36
-rw-r--r--  cloudinit/config/cc_disk_setup.py  863
-rw-r--r--  cloudinit/config/cc_emit_upstart.py  69
-rw-r--r--  cloudinit/config/cc_fan.py  101
-rw-r--r--  cloudinit/config/cc_final_message.py  73
-rw-r--r--  cloudinit/config/cc_foo.py  52
-rw-r--r--  cloudinit/config/cc_growpart.py  300
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py  73
-rw-r--r--  cloudinit/config/cc_keys_to_console.py  62
-rw-r--r--  cloudinit/config/cc_landscape.py  99
-rw-r--r--  cloudinit/config/cc_locale.py  37
-rw-r--r--  cloudinit/config/cc_lxd.py  177
-rw-r--r--  cloudinit/config/cc_mcollective.py  106
-rw-r--r--  cloudinit/config/cc_migrator.py  85
-rw-r--r--  cloudinit/config/cc_mounts.py  405
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py  99
-rw-r--r--  cloudinit/config/cc_phone_home.py  122
-rw-r--r--  cloudinit/config/cc_power_state_change.py  223
-rw-r--r--  cloudinit/config/cc_puppet.py  118
-rw-r--r--  cloudinit/config/cc_resizefs.py  185
-rw-r--r--  cloudinit/config/cc_resolv_conf.py  116
-rw-r--r--  cloudinit/config/cc_rh_subscription.py  408
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py  102
-rw-r--r--  cloudinit/config/cc_rsyslog.py  366
-rw-r--r--  cloudinit/config/cc_runcmd.py  38
-rw-r--r--  cloudinit/config/cc_salt_minion.py  59
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py  41
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py  41
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py  41
-rw-r--r--  cloudinit/config/cc_scripts_user.py  42
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py  43
-rw-r--r--  cloudinit/config/cc_seed_random.py  94
-rw-r--r--  cloudinit/config/cc_set_hostname.py  37
-rw-r--r--  cloudinit/config/cc_set_passwords.py  167
-rw-r--r--  cloudinit/config/cc_snappy.py  304
-rw-r--r--  cloudinit/config/cc_ssh.py  142
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py  105
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py  99
-rw-r--r--  cloudinit/config/cc_timezone.py  39
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py  162
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py  60
-rw-r--r--  cloudinit/config/cc_update_hostname.py  43
-rw-r--r--  cloudinit/config/cc_users_groups.py  34
-rw-r--r--  cloudinit/config/cc_write_files.py  105
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py  107
-rw-r--r--  cloudinit/cs_utils.py  106
-rw-r--r--  cloudinit/distros/__init__.py  980
-rw-r--r--  cloudinit/distros/arch.py  201
-rw-r--r--  cloudinit/distros/debian.py  236
-rw-r--r--  cloudinit/distros/fedora.py  31
-rw-r--r--  cloudinit/distros/freebsd.py  417
-rw-r--r--  cloudinit/distros/gentoo.py  160
-rw-r--r--  cloudinit/distros/net_util.py  182
-rw-r--r--  cloudinit/distros/parsers/__init__.py  28
-rw-r--r--  cloudinit/distros/parsers/hostname.py  88
-rw-r--r--  cloudinit/distros/parsers/hosts.py  92
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py  169
-rw-r--r--  cloudinit/distros/parsers/sys_conf.py  113
-rw-r--r--  cloudinit/distros/rhel.py  230
-rw-r--r--  cloudinit/distros/rhel_util.py  89
-rw-r--r--  cloudinit/distros/sles.py  179
-rw-r--r--  cloudinit/distros/ubuntu.py  31
-rw-r--r--  cloudinit/ec2_utils.py  201
-rw-r--r--  cloudinit/filters/__init__.py  21
-rw-r--r--  cloudinit/filters/launch_index.py  75
-rw-r--r--  cloudinit/gpg.py  74
-rw-r--r--  cloudinit/handlers/__init__.py  274
-rw-r--r--  cloudinit/handlers/boot_hook.py  70
-rw-r--r--  cloudinit/handlers/cloud_config.py  163
-rw-r--r--  cloudinit/handlers/shell_script.py  55
-rw-r--r--  cloudinit/handlers/upstart_job.py  119
-rw-r--r--  cloudinit/helpers.py  460
-rw-r--r--  cloudinit/importer.py  58
-rw-r--r--  cloudinit/log.py  155
-rw-r--r--  cloudinit/mergers/__init__.py  166
-rw-r--r--  cloudinit/mergers/m_dict.py  88
-rw-r--r--  cloudinit/mergers/m_list.py  89
-rw-r--r--  cloudinit/mergers/m_str.py  46
-rw-r--r--  cloudinit/net/__init__.py  371
-rw-r--r--  cloudinit/net/cmdline.py  203
-rw-r--r--  cloudinit/net/eni.py  504
-rw-r--r--  cloudinit/net/network_state.py  454
-rw-r--r--  cloudinit/net/renderer.py  48
-rw-r--r--  cloudinit/net/sysconfig.py  400
-rw-r--r--  cloudinit/net/udev.py  54
-rw-r--r--  cloudinit/netinfo.py  249
-rw-r--r--  cloudinit/patcher.py  58
-rw-r--r--  cloudinit/registry.py  37
-rw-r--r--  cloudinit/reporting/__init__.py  42
-rw-r--r--  cloudinit/reporting/events.py  248
-rw-r--r--  cloudinit/reporting/handlers.py  91
-rw-r--r--  cloudinit/safeyaml.py  32
-rw-r--r--  cloudinit/serial.py  50
-rw-r--r--  cloudinit/settings.py  68
-rw-r--r--  cloudinit/signal_handler.py  71
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py  292
-rw-r--r--  cloudinit/sources/DataSourceAzure.py  651
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py  57
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py  132
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py  253
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py  278
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py  110
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  211
-rw-r--r--  cloudinit/sources/DataSourceGCE.py  167
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py  353
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py  323
-rw-r--r--  cloudinit/sources/DataSourceNone.py  57
-rw-r--r--  cloudinit/sources/DataSourceOVF.py  429
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py  429
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py  168
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py  781
-rw-r--r--  cloudinit/sources/__init__.py  371
-rw-r--r--  cloudinit/sources/helpers/__init__.py  13
-rw-r--r--  cloudinit/sources/helpers/azure.py  279
-rw-r--r--  cloudinit/sources/helpers/openstack.py  648
-rw-r--r--  cloudinit/sources/helpers/vmware/__init__.py  13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/__init__.py  13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py  95
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py  129
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py  247
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py  23
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py  24
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py  27
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py  128
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py  45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py  147
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py  154
-rw-r--r--  cloudinit/ssh_util.py  314
-rw-r--r--  cloudinit/stages.py  890
-rw-r--r--  cloudinit/templater.py  155
-rw-r--r--  cloudinit/type_utils.py  52
-rw-r--r--  cloudinit/url_helper.py  509
-rw-r--r--  cloudinit/user_data.py  356
-rw-r--r--  cloudinit/util.py  2246
-rw-r--r--  cloudinit/version.py  27
-rw-r--r--  config/cloud.cfg  115
-rw-r--r--  config/cloud.cfg-freebsd  88
-rw-r--r--  config/cloud.cfg.d/05_logging.cfg  66
-rw-r--r--  config/cloud.cfg.d/README  3
-rw-r--r--  doc/README  4
-rw-r--r--  doc/examples/cloud-config-add-apt-repos.txt  34
-rw-r--r--  doc/examples/cloud-config-archive-launch-index.txt  30
-rw-r--r--  doc/examples/cloud-config-archive.txt  16
-rw-r--r--  doc/examples/cloud-config-boot-cmds.txt  15
-rw-r--r--  doc/examples/cloud-config-ca-certs.txt  31
-rw-r--r--  doc/examples/cloud-config-chef-oneiric.txt  90
-rw-r--r--  doc/examples/cloud-config-chef.txt  95
-rw-r--r--  doc/examples/cloud-config-datasources.txt  73
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt  251
-rw-r--r--  doc/examples/cloud-config-final-message.txt  7
-rw-r--r--  doc/examples/cloud-config-gluster.txt  18
-rw-r--r--  doc/examples/cloud-config-growpart.txt  31
-rw-r--r--  doc/examples/cloud-config-install-packages.txt  15
-rw-r--r--  doc/examples/cloud-config-landscape.txt  22
-rw-r--r--  doc/examples/cloud-config-launch-index.txt  23
-rw-r--r--  doc/examples/cloud-config-lxd.txt  55
-rw-r--r--  doc/examples/cloud-config-mcollective.txt  49
-rw-r--r--  doc/examples/cloud-config-mount-points.txt  46
-rw-r--r--  doc/examples/cloud-config-phone-home.txt  14
-rw-r--r--  doc/examples/cloud-config-power-state.txt  40
-rw-r--r--  doc/examples/cloud-config-puppet.txt  51
-rw-r--r--  doc/examples/cloud-config-reporting.txt  17
-rw-r--r--  doc/examples/cloud-config-resolv-conf.txt  20
-rw-r--r--  doc/examples/cloud-config-rh_subscription.txt  49
-rw-r--r--  doc/examples/cloud-config-rsyslog.txt  46
-rw-r--r--  doc/examples/cloud-config-run-cmds.txt  22
-rw-r--r--  doc/examples/cloud-config-salt-minion.txt  53
-rw-r--r--  doc/examples/cloud-config-seed-random.txt  32
-rw-r--r--  doc/examples/cloud-config-ssh-keys.txt  46
-rw-r--r--  doc/examples/cloud-config-update-apt.txt  7
-rw-r--r--  doc/examples/cloud-config-update-packages.txt  8
-rw-r--r--  doc/examples/cloud-config-user-groups.txt  109
-rw-r--r--  doc/examples/cloud-config-vendor-data.txt  16
-rw-r--r--  doc/examples/cloud-config-write-files.txt  33
-rw-r--r--  doc/examples/cloud-config-yum-repo.txt  20
-rw-r--r--  doc/examples/cloud-config.txt  752
-rw-r--r--  doc/examples/include-once.txt  7
-rw-r--r--  doc/examples/include.txt  5
-rw-r--r--  doc/examples/kernel-cmdline.txt  18
-rw-r--r--  doc/examples/part-handler-v2.txt  38
-rw-r--r--  doc/examples/part-handler.txt  23
-rw-r--r--  doc/examples/plain-ignored.txt  2
-rw-r--r--  doc/examples/seed/README  22
-rw-r--r--  doc/examples/seed/meta-data  30
-rw-r--r--  doc/examples/seed/user-data  3
-rw-r--r--  doc/examples/upstart-cloud-config.txt  12
-rw-r--r--  doc/examples/upstart-rclocal.txt  12
-rw-r--r--  doc/examples/user-script.txt  8
-rw-r--r--  doc/merging.rst  194
-rw-r--r--  doc/rtd/conf.py  77
-rw-r--r--  doc/rtd/index.rst  31
-rw-r--r--  doc/rtd/static/logo.png  bin 12751 -> 0 bytes
-rw-r--r--  doc/rtd/static/logo.svg  89
-rw-r--r--  doc/rtd/topics/availability.rst  20
-rw-r--r--  doc/rtd/topics/capabilities.rst  24
-rw-r--r--  doc/rtd/topics/datasources.rst  200
-rw-r--r--  doc/rtd/topics/dir_layout.rst  81
-rw-r--r--  doc/rtd/topics/examples.rst  133
-rw-r--r--  doc/rtd/topics/format.rst  159
-rw-r--r--  doc/rtd/topics/hacking.rst  1
-rw-r--r--  doc/rtd/topics/merging.rst  5
-rw-r--r--  doc/rtd/topics/modules.rst  342
-rw-r--r--  doc/rtd/topics/moreinfo.rst  12
-rw-r--r--  doc/sources/altcloud/README.rst  87
-rw-r--r--  doc/sources/azure/README.rst  134
-rw-r--r--  doc/sources/cloudsigma/README.rst  38
-rw-r--r--  doc/sources/cloudstack/README.rst  29
-rw-r--r--  doc/sources/configdrive/README.rst  123
-rw-r--r--  doc/sources/digitalocean/README.rst  21
-rw-r--r--  doc/sources/kernel-cmdline.txt  48
-rw-r--r--  doc/sources/nocloud/README.rst  71
-rw-r--r--  doc/sources/opennebula/README.rst  142
-rw-r--r--  doc/sources/openstack/README.rst  24
-rw-r--r--  doc/sources/ovf/README  83
-rw-r--r--  doc/sources/ovf/example/ovf-env.xml  46
-rw-r--r--  doc/sources/ovf/example/ubuntu-server.ovf  130
-rwxr-xr-x  doc/sources/ovf/make-iso  156
-rw-r--r--  doc/sources/ovf/ovf-env.xml.tmpl  28
-rw-r--r--  doc/sources/ovf/ovfdemo.pem  27
-rw-r--r--  doc/sources/ovf/user-data  7
-rw-r--r--  doc/sources/smartos/README.rst  149
-rw-r--r--  doc/status.txt  53
-rw-r--r--  doc/userdata.txt  79
-rw-r--r--  doc/var-lib-cloud.txt  63
-rw-r--r--  doc/vendordata.txt  53
-rwxr-xr-x  packages/bddeb  267
-rwxr-xr-x  packages/brpm  280
-rw-r--r--  packages/debian/changelog.in  6
-rw-r--r--  packages/debian/cloud-init.postinst  16
-rw-r--r--  packages/debian/cloud-init.preinst  20
-rw-r--r--  packages/debian/compat  1
-rw-r--r--  packages/debian/control.in  29
-rw-r--r--  packages/debian/copyright  29
-rw-r--r--  packages/debian/dirs  6
-rwxr-xr-x  packages/debian/rules.in  23
-rw-r--r--  packages/debian/watch  2
-rw-r--r--  packages/redhat/cloud-init.spec.in  204
-rw-r--r--  packages/suse/cloud-init.spec.in  163
-rw-r--r--  requirements.txt  40
-rwxr-xr-x  setup.py  217
-rw-r--r--  systemd/cloud-config.service  16
-rw-r--r--  systemd/cloud-config.target  11
-rw-r--r--  systemd/cloud-final.service  17
-rwxr-xr-x  systemd/cloud-init-generator  130
-rw-r--r--  systemd/cloud-init-local.service  22
-rw-r--r--  systemd/cloud-init.service  18
-rw-r--r--  systemd/cloud-init.target  6
-rw-r--r--  sysvinit/debian/cloud-config  64
-rw-r--r--  sysvinit/debian/cloud-final  66
-rwxr-xr-x  sysvinit/debian/cloud-init  64
-rw-r--r--  sysvinit/debian/cloud-init-local  63
-rwxr-xr-x  sysvinit/freebsd/cloudconfig  35
-rwxr-xr-x  sysvinit/freebsd/cloudfinal  35
-rwxr-xr-x  sysvinit/freebsd/cloudinit  35
-rwxr-xr-x  sysvinit/freebsd/cloudinitlocal  35
-rw-r--r--  sysvinit/gentoo/cloud-config  13
-rw-r--r--  sysvinit/gentoo/cloud-final  11
-rw-r--r--  sysvinit/gentoo/cloud-init  12
-rw-r--r--  sysvinit/gentoo/cloud-init-local  13
-rwxr-xr-x  sysvinit/redhat/cloud-config  121
-rwxr-xr-x  sysvinit/redhat/cloud-final  121
-rwxr-xr-x  sysvinit/redhat/cloud-init  121
-rwxr-xr-x  sysvinit/redhat/cloud-init-local  124
-rw-r--r--  templates/chef_client.rb.tmpl  58
-rw-r--r--  templates/hosts.debian.tmpl  26
-rw-r--r--  templates/hosts.freebsd.tmpl  24
-rw-r--r--  templates/hosts.redhat.tmpl  24
-rw-r--r--  templates/hosts.suse.tmpl  26
-rw-r--r--  templates/resolv.conf.tmpl  30
-rw-r--r--  templates/sources.list.debian.tmpl  32
-rw-r--r--  templates/sources.list.ubuntu.tmpl  113
-rw-r--r--  test-requirements.txt  18
-rw-r--r--  tests/__init__.py  0
-rw-r--r--  tests/configs/sample1.yaml  52
-rw-r--r--  tests/data/filter_cloud_multipart.yaml  30
-rw-r--r--  tests/data/filter_cloud_multipart_1.email  11
-rw-r--r--  tests/data/filter_cloud_multipart_2.email  39
-rw-r--r--  tests/data/filter_cloud_multipart_header.email  11
-rw-r--r--  tests/data/merge_sources/expected1.yaml  1
-rw-r--r--  tests/data/merge_sources/expected10.yaml  7
-rw-r--r--  tests/data/merge_sources/expected11.yaml  5
-rw-r--r--  tests/data/merge_sources/expected12.yaml  5
-rw-r--r--  tests/data/merge_sources/expected2.yaml  3
-rw-r--r--  tests/data/merge_sources/expected3.yaml  1
-rw-r--r--  tests/data/merge_sources/expected4.yaml  2
-rw-r--r--  tests/data/merge_sources/expected5.yaml  7
-rw-r--r--  tests/data/merge_sources/expected6.yaml  9
-rw-r--r--  tests/data/merge_sources/expected7.yaml  38
-rw-r--r--  tests/data/merge_sources/expected8.yaml  7
-rw-r--r--  tests/data/merge_sources/expected9.yaml  5
-rw-r--r--  tests/data/merge_sources/source1-1.yaml  3
-rw-r--r--  tests/data/merge_sources/source1-2.yaml  5
-rw-r--r--  tests/data/merge_sources/source10-1.yaml  6
-rw-r--r--  tests/data/merge_sources/source10-2.yaml  6
-rw-r--r--  tests/data/merge_sources/source11-1.yaml  5
-rw-r--r--  tests/data/merge_sources/source11-2.yaml  3
-rw-r--r--  tests/data/merge_sources/source11-3.yaml  3
-rw-r--r--  tests/data/merge_sources/source12-1.yaml  8
-rw-r--r--  tests/data/merge_sources/source12-2.yaml  5
-rw-r--r--  tests/data/merge_sources/source2-1.yaml  6
-rw-r--r--  tests/data/merge_sources/source2-2.yaml  5
-rw-r--r--  tests/data/merge_sources/source3-1.yaml  4
-rw-r--r--  tests/data/merge_sources/source3-2.yaml  4
-rw-r--r--  tests/data/merge_sources/source4-1.yaml  3
-rw-r--r--  tests/data/merge_sources/source4-2.yaml  6
-rw-r--r--  tests/data/merge_sources/source5-1.yaml  6
-rw-r--r--  tests/data/merge_sources/source5-2.yaml  8
-rw-r--r--  tests/data/merge_sources/source6-1.yaml  5
-rw-r--r--  tests/data/merge_sources/source6-2.yaml  8
-rw-r--r--  tests/data/merge_sources/source7-1.yaml  27
-rw-r--r--  tests/data/merge_sources/source7-2.yaml  17
-rw-r--r--  tests/data/merge_sources/source8-1.yaml  7
-rw-r--r--  tests/data/merge_sources/source8-2.yaml  6
-rw-r--r--  tests/data/merge_sources/source9-1.yaml  5
-rw-r--r--  tests/data/merge_sources/source9-2.yaml  6
-rw-r--r--  tests/data/mountinfo_precise_ext4.txt  24
-rw-r--r--  tests/data/mountinfo_raring_btrfs.txt  13
-rw-r--r--  tests/data/roots/simple_ubuntu/etc/networks/interfaces  3
-rw-r--r--  tests/data/user_data.1.txt  15
-rw-r--r--  tests/data/vmware/cust-dhcp-2nic.cfg  34
-rw-r--r--  tests/data/vmware/cust-static-2nic.cfg  39
-rw-r--r--  tests/unittests/__init__.py  0
-rw-r--r--  tests/unittests/helpers.py  291
-rw-r--r--  tests/unittests/test__init__.py  211
-rw-r--r--  tests/unittests/test_builtin_handlers.py  73
-rw-r--r--  tests/unittests/test_cli.py  34
-rw-r--r--  tests/unittests/test_cs_util.py  63
-rw-r--r--  tests/unittests/test_data.py  576
-rw-r--r--  tests/unittests/test_datasource/__init__.py  0
-rw-r--r--  tests/unittests/test_datasource/test_altcloud.py  452
-rw-r--r--  tests/unittests/test_datasource/test_azure.py  640
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py  412
-rw-r--r--  tests/unittests/test_datasource/test_cloudsigma.py  99
-rw-r--r--  tests/unittests/test_datasource/test_cloudstack.py  78
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py  597
-rw-r--r--  tests/unittests/test_datasource/test_digitalocean.py  127
-rw-r--r--  tests/unittests/test_datasource/test_gce.py  166
-rw-r--r--  tests/unittests/test_datasource/test_maas.py  163
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py  178
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py  300
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py  347
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py  543
-rw-r--r--  tests/unittests/test_distros/__init__.py  0
-rw-r--r--  tests/unittests/test_distros/test_generic.py  233
-rw-r--r--  tests/unittests/test_distros/test_hostname.py  38
-rw-r--r--  tests/unittests/test_distros/test_hosts.py  41
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py  381
-rw-r--r--  tests/unittests/test_distros/test_resolv.py  67
-rw-r--r--  tests/unittests/test_distros/test_sysconfig.py  82
-rw-r--r--  tests/unittests/test_distros/test_user_data_normalize.py  297
-rw-r--r--  tests/unittests/test_ec2_util.py  139
-rw-r--r--  tests/unittests/test_filters/__init__.py  0
-rw-r--r--  tests/unittests/test_filters/test_launch_index.py  132
-rw-r--r--  tests/unittests/test_handler/__init__.py  0
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure.py  109
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list.py  180
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source.py  516
-rw-r--r--  tests/unittests/test_handler/test_handler_ca_certs.py  271
-rw-r--r--  tests/unittests/test_handler/test_handler_chef.py  192
-rw-r--r--  tests/unittests/test_handler/test_handler_debug.py  81
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py  30
-rw-r--r--  tests/unittests/test_handler/test_handler_growpart.py  220
-rw-r--r--  tests/unittests/test_handler/test_handler_locale.py  67
-rw-r--r--  tests/unittests/test_handler/test_handler_lxd.py  134
-rw-r--r--  tests/unittests/test_handler/test_handler_mcollective.py  148
-rw-r--r--  tests/unittests/test_handler/test_handler_mounts.py  133
-rw-r--r--  tests/unittests/test_handler/test_handler_power_state.py  127
-rw-r--r--  tests/unittests/test_handler/test_handler_rsyslog.py  174
-rw-r--r--  tests/unittests/test_handler/test_handler_seed_random.py  227
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py  72
-rw-r--r--  tests/unittests/test_handler/test_handler_snappy.py  306
-rw-r--r--  tests/unittests/test_handler/test_handler_timezone.py  76
-rw-r--r--  tests/unittests/test_handler/test_handler_write_files.py  112
-rw-r--r--  tests/unittests/test_handler/test_handler_yum_add_repo.py  68
-rw-r--r--  tests/unittests/test_helpers.py  33
-rw-r--r--  tests/unittests/test_merging.py  257
-rw-r--r--  tests/unittests/test_net.py  689
-rw-r--r--  tests/unittests/test_pathprefix2dict.py  44
-rw-r--r--  tests/unittests/test_registry.py  28
-rw-r--r--  tests/unittests/test_reporting.py  371
-rw-r--r--  tests/unittests/test_rh_subscription.py  226
-rw-r--r--  tests/unittests/test_runs/__init__.py  0
-rw-r--r--  tests/unittests/test_runs/test_merge_run.py  54
-rw-r--r--  tests/unittests/test_runs/test_simple_run.py  81
-rw-r--r--  tests/unittests/test_sshutil.py  171
-rw-r--r--  tests/unittests/test_templating.py  119
-rw-r--r--  tests/unittests/test_util.py  489
-rw-r--r--  tests/unittests/test_vmware_config_file.py  103
-rw-r--r--  tools/21-cloudinit.conf  6
-rwxr-xr-x  tools/Z99-cloud-locale-test.sh  98
-rwxr-xr-x  tools/build-on-freebsd  66
-rwxr-xr-x  tools/ccfg-merge-debug  90
-rwxr-xr-x  tools/cloud-init-per  60
-rwxr-xr-x  tools/hacking.py  170
-rwxr-xr-x  tools/make-dist-tarball  21
-rwxr-xr-x  tools/make-mime.py  60
-rwxr-xr-x  tools/make-tarball  39
-rwxr-xr-x  tools/mock-meta.py  454
-rwxr-xr-x  tools/motd-hook  35
-rwxr-xr-x  tools/read-dependencies  29
-rwxr-xr-x  tools/read-version  26
-rwxr-xr-x  tools/run-pep8  21
-rwxr-xr-x  tools/run-pyflakes  18
-rwxr-xr-x  tools/run-pyflakes3  2
-rwxr-xr-x  tools/tox-venv  42
-rwxr-xr-x  tools/uncloud-init  141
-rwxr-xr-x  tools/validate-yaml.py  25
-rwxr-xr-x  tools/write-ssh-key-fingerprints  38
-rw-r--r--  tox.ini  30
-rw-r--r--  udev/66-azure-ephemeral.rules  18
-rw-r--r--  upstart/cloud-config.conf  9
-rw-r--r--  upstart/cloud-final.conf  10
-rw-r--r--  upstart/cloud-init-blocknet.conf  83
-rw-r--r--  upstart/cloud-init-container.conf  57
-rw-r--r--  upstart/cloud-init-local.conf  16
-rw-r--r--  upstart/cloud-init-nonet.conf  66
-rw-r--r--  upstart/cloud-init.conf  9
-rw-r--r--  upstart/cloud-log-shutdown.conf  19
441 files changed, 11 insertions, 53183 deletions
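The listing above is git's per-file diffstat for this commit. Assuming a local clone of the repository, the same summary can be regenerated from the commit hash shown in the header:

    # reproduce the file list and change counts for this commit
    git show --stat c3c3dc693c14175e110b5fe125d4d5f98ace9700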
diff --git a/.bzrignore b/.bzrignore
deleted file mode 100644
index 926e4581..00000000
--- a/.bzrignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.tox
-dist
-cloud_init.egg-info
-__pycache__
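The deleted .bzrignore carried bzr's ignore patterns; the natural git counterpart would be a .gitignore with the same entries. This commit does not add one, so the following is only a hypothetical sketch:

    # hypothetical .gitignore carrying over the same ignore patterns
    cat > .gitignore <<'EOF'
    .tox
    dist
    cloud_init.egg-info
    __pycache__
    EOF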
diff --git a/ChangeLog b/ChangeLog
deleted file mode 100644
index bae982e3..00000000
--- a/ChangeLog
+++ /dev/null
@@ -1,776 +0,0 @@
-0.7.7:
- - open 0.7.7
- - Digital Ocean: add datasource for Digital Ocean. [Neal Shrader]
- - expose uses_systemd as a distro function (fix rhel7)
- - fix broken 'output' config (LP: #1387340)
- - begin adding cloud config module docs to config modules (LP: #1383510)
- - retain trailing eol from template files (sources.list) when
- rendered with jinja (LP: #1355343)
- - Only use datafiles and initsys addon outside virtualenvs
- - Fix the digital ocean test case on python 2.6
- - Increase the usefulness, robustness, and configurability of the chef module
- so that it is more thoroughly documented and better for users
- - Fix handling of '=' signs in ssh_utils (LP: #1391303)
- - Be more tolerant of ssh keys passed into 'ssh_authorized_keys'; allowing
- for list, tuple, set, dict, string types and warning on other unexpected
- types
- - Update to use newer/better OMNIBUS_URL for chef module
- - GCE: Allow base64 encoded user-data (LP: #1404311) [Wayne Witzell III]
- - GCE: use short hostname rather than fqdn (LP: #1383794) [Ben Howard]
- - systemd: make init stage run before login prompts shown [Steve Langasek]
- - hostname: on first boot apply hostname to be same as is written for
- persistent hostname. (LP: #1246485)
- - remove usage of dmidecode on linux in favor of /sys interface [Ben Howard]
- - python3 support [Barry Warsaw, Daniel Watkins, Josh Harlow] (LP: #1247132)
- - support managing gpt partitions in disk config [Daniel Watkins]
- - Azure: utilize gpt support for ephemeral formatting [Daniel Watkins]
- - CloudStack: support fetching password from virtual router [Daniel Watkins]
- (LP: #1422388)
- - readurl, read_file_or_url returns bytes, user must convert as necessary
- - SmartOS: use v2 metadata service (LP: #1436417) [Daniel Watkins]
- - NoCloud: fix local datasource claiming found without explicit dsmode
- - Snappy: add support for installing snappy packages and configuring.
- - systemd: use network-online instead of network.target (LP: #1440180)
- [Steve Langasek]
- - Add functionality to fixate the uid of a newly added user.
- - Don't overwrite the hostname if the user has changed it after we set it.
- - GCE datasource does not handle instance ssh keys (LP: #1403617)
- - sysvinit: make cloud-init-local run before network (LP: #1275098)
- [Surojit Pathak]
- - Azure: do not re-set hostname if user has changed it (LP: #1375252)
- - Fix exception when running with no arguments on Python 3. [Daniel Watkins]
- - Centos: detect/expect use of systemd on centos 7. [Brian Rak]
- - Azure: remove dependency on walinux-agent [Daniel Watkins]
- - EC2: know about eu-central-1 availability-zone (LP: #1456684)
- - Azure: remove password from on-disk ovf-env.xml (LP: #1443311) [Ben Howard]
- - Doc: include information on user-data in OpenStack [Daniel Watkins]
- - Systemd: check for systemd using sd_booted semantics (LP: #1461201)
- [Lars Kellogg-Stedman]
- - Add an rh_subscription module to handle registration of Red Hat instances.
- [Brent Baude]
- - cc_apt_configure: fix importing keys under python3 (LP: #1463373)
- - cc_growpart: fix specification of 'devices' list (LP: #1465436)
- - CloudStack: fix password setting on cloudstack > 4.5.1 (LP: #1464253)
- - GCE: fix determination of availability zone (LP: #1470880)
- - ssh: generate ed25519 host keys (LP: #1461242)
- - distro mirrors: provide datasource to mirror selection code to support
- GCE regional mirrors. (LP: #1470890)
- - add udev rules that identify ephemeral device on Azure (LP: #1411582)
- - _read_dmi_syspath: fix bad log message causing unintended exception
- - rsyslog: add additional configuration mode (LP: #1478103)
- - status_wrapper in main: fix use of print_exc when handling exception
- - reporting: add reporting module for web hook or logging of events.
- - NoCloud: fix consumption of vendordata (LP: #1493453)
- - power_state_change: support 'condition' to disable or enable poweroff
- - ubuntu fan: support for config and installing of ubuntu fan (LP: #1504604)
- - Azure: support extracting SSH key values from ovf-env.xml (LP: #1506244)
- - AltCloud: fix call to udevadm settle (LP: #1507526)
- - Ubuntu templates: modify sources.list template to provide same sources
- as install from server or desktop ISO. (LP: #1177432)
- - cc_mounts: use 'nofail' if system uses systemd. (LP: #1514485)
- - Azure: get instance id from dmi instead of SharedConfig (LP: #1506187)
- - systemd/power_state: fix power_state to work even if cloud-final
- exited non-zero (LP: #1449318)
- - SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965)
- [Robert C Jennings]
- - systemd: support using systemd-detect-virt to detect container
- (LP: #1539016) [Martin Pitt]
- - docs: fix lock_passwd documentation [Robert C Jennings]
- - Azure: Handle escaped quotes in WALinuxAgentShim.find_endpoint.
- (LP: #1488891) [Dan Watkins]
- - lxd: add support for setting up lxd using 'lxd init' (LP: #1522879)
- - Add Image Customization Parser for VMware vSphere Hypervisor
- Support. [Sankar Tanguturi]
- - timezone: use a symlink rather than copy for /etc/localtime
- unless it is already a file (LP: #1543025).
- - Enable password changing via a hashed string [Alex Sirbu]
- - Added BigStep datasource [Alex Sirbu]
- - No longer run pollinate in seed_random (LP: #1554152)
- - groups: add default user to 'lxd' group. Create groups listed
- for a user if they do not exist. (LP: #1539317)
- - dmi data: fix failure of reading dmi data for unset dmi values
- - doc: mention label for nocloud datasource must be 'cidata' [Peter Hurley]
- - ssh_pwauth: fix module to support 'unchanged' and match behavior
- described in documentation [Chris Cosby]
- - quickly check to see if the previous instance id is still valid to
- avoid dependency on network metadata service on every boot (LP: #1553815)
- - support network configuration in cloud-init --local with support for
- device naming via systemd.link.
- - FreeBSD: add support for installing packages, setting password and
- timezone. Change default user to 'freebsd'. [Ben Arblaster]
- - locale: list unsupported environment settings in warning (LP: #1558069)
- - disk_setup: correctly send --force to mkfs on block devices (LP: #1548772)
- - chef: fix chef install from gems (LP: #1553345)
- - systemd: do not specify After of obsolete syslog.target (LP: #1536964)
- - centos: Ensure that resolve conf object is written as a str (LP: #1479988)
- - chef: straighten out validation_cert and validation_key (LP: #1568940)
- - phone_home: allow usage of fqdn (LP: #1566824) [Ollie Armstrong]
- - cloudstack: Only use DHCPv4 lease files as a datasource (LP: #1576273)
- [Wido den Hollander]
- - Paths: fix instance path if datasource's id has a '/'. (LP: #1575938)
- [Robert Jennings]
- - Ec2: do not retry requests for user-data path on 404.
- - settings on the kernel command line (cc:) override all local settings
- rather than only those in /etc/cloud/cloud.cfg (LP: #1582323)
- - Improve merging documentation [Daniel Watkins]
- - apt sources: support inserting key/key-id only, custom sources.list,
- long gpg key fingerprints with spaces, and dictionary format (LP: #1574113)
- - SmartOS: datasource improvements and support for metadata service
- providing networking information.
- - Datasources: centrally handle 'dsmode' and no longer require datasources
- to "pass" if modules_init should be executed with network access.
- - ConfigDrive: improved support for networking information from
- a network_data.json or older interfaces-formatted network_config.
- - Change missing Cheetah log warning to debug [Andrew Jorgensen]
- - Remove trailing dot from GCE metadata URL (LP: #1581200) [Phil Roche]
- - support network rendering to sysconfig (for centos and RHEL)
- - write_files: if no permissions are given, just use default without warning.
- - user_data: fix error when user-data is not utf-8 decodable (LP: #1532072)
- - fix mcollective module with python3 (LP: #1597699) [Sergii Golovatiuk]
-
-0.7.6:
- - open 0.7.6
- - Enable vendordata on CloudSigma datasource (LP: #1303986)
- - Poll on /dev/ttyS1 in CloudSigma datasource only if dmidecode says
- we're running on cloudsigma (LP: #1316475) [Kiril Vladimiroff]
- - SmartOS test: do not require existence of /dev/ttyS1. (LP: #1316597)
- - doc: fix user-groups doc to reference plural ssh-authorized-keys
- (LP: #1327065) [Joern Heissler]
- - fix 'make test' in python 2.6
- - support jinja2 as a templating engine. Drop the hard requirement on
- cheetah. This helps in python3 effort. (LP: #1219223)
- - change install path for systemd files to /lib/systemd/system
- [Dimitri John Ledkov]
- - change trunk debian packaging to use pybuild and drop cdbs.
- [Dimitri John Ledkov]
- - SeLinuxGuard: remove invalid check that looked for stat.st_mode in os.lstat.
- - do not write comments in /etc/timezone (LP: #1341710)
- - ubuntu: provide 'ubuntu-init-switch' module to aid in systemd testing.
- - status/result json: remove 'end' entry which was always null
- - systemd: make cloud-init block ssh service startup to guarantee keys
- are generated. [Jordan Evans] (LP: #1333920)
- - default settings: fix typo resulting in OpenStack and GCE not working
- unless config explicitly provided (LP: #1329583) [Garrett Holmstrom]
- - fix rendering resolv.conf if no 'options' are provided (LP: #1328953)
- - docs: fix disk-setup to reference 'table_type' [Rail Aliiev] (LP: #1313114)
- - ssh_authkey_fingerprints: fix bug that prevented disabling the module.
- (LP: #1340903) [Patrick Lucas]
- - no longer use pylint as a checker, fix pep8 [Jay Faulkner].
- - Openstack: do not load some urls twice.
- - FreeBsd: fix initscripts and add working config file [Harm Weites]
- - Datasource: fix broken logic to provide hostname if datasource does not
- provide one
- - Improved and less verbose logging.
- - resizefs: first check that device is writable.
- - configdrive: fix reading of vendor data to be like metadata service reader.
- [Jay Faulkner]
- - resizefs: fix broken background resizing [Jay Faulkner] (LP: #1338614)
- - cc_grub_dpkg: fix EC2 hvm instances to avoid prompt on grub update.
- (LP: #1336855)
- - FreeBsd: support config drive datasource [Joseph bajin]
- - cc_mounts: support creating a swap file
- - DigitalOcean & GCE: fix get_hostname consistency
-0.7.5:
- - open 0.7.5
- - Add a debug log message around import failures
- - add a 'debug' module for easily printing out some information about
- datasource and cloud-init [Shraddha Pandhe]
- - support running apt with 'eatmydata' via configuration token
- apt_get_wrapper (LP: #1236531).
- - convert paths provided in config-drive 'files' to string before writing
- (LP: #1260072).
- - Azure: minor changes in logging output. ensure filenames are strings (not
- unicode).
- - config/cloud.cfg.d/05_logging.cfg: provide a default 'output' setting, to
- redirect cloud-init stderr and stdout to /var/log/cloud-init-output.log.
- - drop support for resizing partitions with parted entirely (LP: #1212492).
- It was already broken anyway.
- - add support for vendordata in SmartOS and NoCloud datasources.
- - drop dependency on boto for crawling ec2 metadata service.
- - add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and
- 'Recommends' in the debian/control.in [Vlastimil Holer]
- - if mount_info reports /dev/root is a device path for /, then convert
- that to a device with the help of the kernel cmdline.
- - configdrive: consider partitions as possible datasources if they have
- the correct filesystem label. [Paul Querna]
- - initial freebsd support [Harm Weites]
- - fix in is_ipv4 to accept IP addresses with a '0' in them.
- - Azure: fix issue when stale data in /var/lib/waagent (LP: #1269626)
- - skip config_modules that declare themselves only verified on a set of
- distros. Add them to 'unverified_modules' list to run anyway.
- - Add CloudSigma datasource [Kiril Vladimiroff]
- - Add initial support for Gentoo and Arch distributions [Nate House]
- - Add GCE datasource [Vaidas Jablonskis]
- - Add native Openstack datasource which reads openstack metadata
- rather than relying on EC2 data in openstack metadata service.
- - SmartOS, AltCloud: disable running on arm systems due to bug
- (LP: #1243287, #1285686) [Oleg Strikov]
- - Allow running a command to seed random, default is 'pollinate -q'
- (LP: #1286316) [Dustin Kirkland]
- - Write status to /run/cloud-init/status.json for consumption by
- other programs (LP: #1284439)
- - Azure: if a reboot causes ephemeral storage to be re-provisioned
- then we need to re-format it. (LP: #1292648)
- - OpenNebula: support base64 encoded user-data
- [Enol Fernandez, Peter Kotcauer]
-0.7.4:
- - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
- partitioned block device with target filesystem on ephemeral0.1.
- (LP: #1236594)
- - fix DataSourceAzure incompatibility with 2.6 (LP: #1232175)
- - fix power_state_change config module so that example works. Improve
- its documentation and add reference to 'timeout'
- - support apt-add-archive with 'cloud-archive:' format. (LP: #1244355)
- - Change SmartOS verb for availability zone (LP: #1249124)
- - documentation fix for boothooks to use 'cloud-init-per'
- - fix resizefs module by supporting kernels that do not have
- /proc/PID/mountinfo. (LP: #1248625) [Tim Daly Jr.]
- - fix 'make rpm' by removing 0.6.4 entry from ChangeLog (LP: #1241834)
-0.7.3:
- - fix omnibus chef installer (LP: #1182265) [Chris Wing]
- - small fix for OVF datasource for iso transport on non-iso9660 filesystem
- - determine if upstart version is suitable for
- 'initctl reload-configuration' (LP: #1124384). If so, then invoke it.
- - support setting up instance-store disk with partition table and filesystem.
- - add Azure datasource.
- - add support for SuSE / SLES [Juerg Haefliger]
- - add a trailing carriage return to chpasswd input, which reportedly
- caused a problem on rhel5 if missing.
- - support individual MIME segments to be gzip compressed (LP: #1203203)
- - always finalize handlers even if processing failed (LP: #1203368)
- - support merging into cloud-config via jsonp. (LP: #1200476)
- - add datasource 'SmartOS' for Joyent Cloud. Adds a dependency on serial.
- - add 'log_time' helper to util for timing how long things take
- which also reads from uptime. uptime is useful as clock may change during
- boot due to ntp.
- - prefer growpart resizer to 'parted resizepart' (LP: #1212492)
- - support random data seed from config drive or azure, and a module
- 'seed_random' to read that and write it to /dev/urandom.
- - add OpenNebula Datasource [Vlastimil Holer]
- - add 'cc_disk_setup' config module for partitioning disks and creating
- filesystems. Useful if attached disks are not formatted (LP: #1218506)
- - Fix usage of libselinux-python when selinux is disabled. [Garrett Holmstrom]
- - multi_log: only write to /dev/console if it exists [Garrett Holmstrom]
- - config/cloud.cfg: add 'sudo' to list groups for the default user
- (LP: #1228228)
- - documentation fix for use of 'mkpasswd' [Eric Nordlund]
- - respect /etc/growroot-disabled file (LP: #1234331)
-0.7.2:
- - add a debian watch file
- - add 'sudo' entry to ubuntu's default user (LP: #1080717)
- - fix resizefs module when 'noblock' was provided (LP: #1080985)
- - make sure there are no blank lines before the cloud-init entry in
- /etc/ca-certificates.conf (LP: #1077020)
- - fix sudoers writing when entry is a string (LP: #1079002)
- - tools/write-ssh-key-fingerprints: use '-s' rather than '--stderr'
- option (LP: #1083715)
- - make install of puppet configurable (LP: #1090205) [Craig Tracey]
- - support omnibus installer for chef [Anatoliy Dobrosynets]
- - fix bug where cloud-config in user-data could not modify system_info
- settings (LP: #1090482)
- - fix CloudStack DataSource to use Virtual Router as described by
- CloudStack documentation if it is available by searching through dhclient
- lease files. If it is not available, then fall back to the default
- gateway. (LP: #1089989)
- - fix redaction of password field in log (LP: #1096417)
- - fix to cloud-config user setup. Previously, lock_passwd was broken and
- all accounts would be locked unless 'system' was given (LP: #1096423).
- - Allow 'sr0' (or sr[0-9]) to be specified without /dev/ as a source for
- mounts. [Vlastimil Holer]
- - allow config-drive-data to come from a CD device by more correctly
- filtering out partitions. (LP: #1100545)
- - setup docs to be available on read-the-docs
- https://cloudinit.readthedocs.org/en/latest/ (LP: #1093039)
- - add HACKING file for information on contributing
- - handle the legacy 'user:' configuration better, making it affect the
- configured OS default user (LP: #1100920)
- - Adding a resolv.conf configuration module (LP: #1100434). Currently only
- working on redhat systems (no support for resolvconf)
- - support grouping linux distros into "os_families". This allows a module
- to operate on the family (redhat or debian) rather than the distro (ubuntu,
- debian, fedora, rhel) (LP: #1100029)
- - fix /etc/hosts writing when templates are used (LP: #1100036)
- - add package versioning logic to package installation
- functionality (LP: #1108047)
- - fix documentation for write_files to correctly list 'permissions'
- rather than 'perms' (LP: #1111205)
- - cloud-init-container.conf: ensure /run/network exists before running ifquery
- - DataSourceNoCloud: allow user-data and meta-data to be specified
- in config (LP: #1115833).
- - improve debian support in sysvinit scripts, package build scripts, and
- split sources.list template to be distro specific.
- - support for resizing btrfs root filesystems [Blair Zajac]
- - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343)
- - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other
- output does not get a 'killed by TERM signal' message.
- - support resizing partitions via growpart or parted (LP: #1136936)
- - allow specifying apt-get command in distro config ('apt_get_command')
- - support different and user-suppliable merging algorithms for cloud-config
- (LP: #1023179)
- - use python-requests rather than urllib2. By using recent versions of
- python-requests, we get https support (LP: #1067888).
- - make apt-get invoke 'dist-upgrade' rather than 'upgrade' for
- package_upgrade. (LP: #1164147)
- - improvements for systemd with Fedora 18
- - workaround 2.6 kernel issue that stopped blkid from showing /dev/sr0
- - add new, backwards compatible merging syntax so merging of cloud-config
- can be more useful.
-
-0.7.1:
- - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6
- - config-drive: map hostname to local-hostname (LP: #1061964)
- - landscape: install landscape-client package if not installed.
- only take action if cloud-config is present (LP: #1066115)
- - cc_landscape: restart landscape after install or config (LP: #1070345)
- - multipart/archive: do not fail on unknown headers in multipart
- mime or cloud-archive config (LP: #1065116).
- - tools/Z99-cloud-locale-test.sh: avoid warning when user's shell is
- zsh (LP: #1073077)
- - fix stack trace when unknown user-data input had unicode (LP: #1075756)
- - split 'apt-update-upgrade' config module into 'apt-configure' and
- 'package-update-upgrade-install'. The 'package-update-upgrade-install'
- will be a cross distro module.
- - Cleanups:
- - Remove usage of paths.join, as all code should run through util helpers
- - Fix pylint complaining about tests folder 'helpers.py' not being found
- - Add a pylintrc file that is used instead of options hidden in 'run_pylint'
- - fix bug where cloud-config from user-data could not affect system_info
- settings [revno 703] (LP: #1076811)
- - write fqdn to system config for rh/fedora [revno 704]
- - add yaml/cloud config examples checking tool [revno 706]
- - Fix the merging of group configuration when that group configuration is a
- dict => members. [revno 707]
- - add yum_add_repo configuration module for adding additional yum repos
- - fix public key importing with config-drive-v2 datasource (LP: #1077700)
- - handle renaming and fixing up of marker names (LP: #1075980) [revno 710]
- this relieves that burden from the distro/packaging.
- - group config: fix how group members weren't being translated correctly
- when the group: [member, member...] format was used (LP: #1077245)
- - sysconfig: fix how the /etc/sysconfig/network should be using the fully
- qualified domain name instead of the partially qualified domain name
- which is used in the ubuntu/debian case (LP: #1076759)
- - fix how string escaping was not working when the string was a unicode
- string which was causing the warning message not to be written
- out (LP: #1075756)
- - for boto > 0.6.0, metadata is lazily loaded. When cloud-init runs, the
- usage of this lazy loading is hidden, and since that lazy loading will be
- performed on future attribute access we must traverse the lazy loaded
- dictionary and force it to fully expand, so that if cloud-init blocks the
- ec2 metadata port the lazy loaded dictionary will continue working
- properly instead of trying to make additional url calls which will
- fail (LP: #1068801)
- - use a set of helper/parsing classes to perform system configuration
- for easier test. (/etc/sysconfig, /etc/hostname, resolv.conf, /etc/hosts)
- - add power_state_change config module for shutting down system after
- cloud-init finishes. (LP: #1064665)
-0.7.0:
- - add a 'exception_cb' argument to 'wait_for_url'. If provided, this
- method will be called back with the exception received and the message.
- - utilize the 'exception_cb' above to modify the oauth timestamp in
- DataSourceMAAS requests if a 401 or 403 is received. (LP: #978127)
- - catch signals and exit rather than stack tracing
- - if logging fails, enable a fallback logger by patching the logging module
- - do not 'start networking' in cloud-init-nonet, but add
- cloud-init-container job that runs only if in container and emits
- net-device-added (LP: #1031065)
- - search only top level dns for 'instance-data' in
- DataSourceEc2 (LP: #1040200)
- - add support for config-drive-v2 (LP: #1037567)
- - support creating users, including the default user.
- [Ben Howard] (LP: #1028503)
- - add apt_reboot_if_required to reboot if an upgrade or package installation
- forced the need for one (LP: #1038108)
- - allow distro mirror selection to include availability-zone (LP: #1037727)
- - allow arch specific mirror selection (select ports.ubuntu.com on arm)
- (LP: #1028501)
- - allow specification of security mirrors (LP: #1006963)
- - add the 'None' datasource (LP: #906669), which will allow jobs
- to run even if there is no "real" datasource found.
- - write ssh authorized keys to console, ssh_authkey_fingerprints
- config module [Joshua Harlow] (LP: #1010582)
- - Added RHEVm and vSphere support as source AltCloud [Joseph VLcek]
- - add write-files module (LP: #1012854)
- - Add setuptools + cheetah to debian package build dependencies (LP: #1022101)
- - Adjust the sysvinit local script to provide 'cloud-init-local' and have
- the cloud-config script depend on that as well.
- - Add the 'bzr' name to all packages built
- - Reduce logging levels for certain non-critical cases to DEBUG instead of the
- previous level of WARNING
- - unified binary that activates the various stages
- - Now using argparse + subcommands to specify the various CLI options
- - a stage module that clearly separates the stages of the different
- components (also described how they are used and in what order in the
- new unified binary)
- - user_data is now a module that just does user data processing while the
- actual activation and 'handling' of the processed user data is done via
- a separate set of files (and modules) with the main 'init' stage being the
- controller of this
- - creation of boot_hook, cloud_config, shell_script, upstart_job version 2
- modules (with classes that perform their functionality) instead of those
- having functionality that is attached to the cloudinit object (which
- reduces reuse and limits future functionality, and makes testing harder)
- - removal of global config that defined paths, shared config, now this is
- via objects, making unit testing and global side-effects a non-issue
- - creation of a 'helpers.py'
- - this contains an abstraction for the 'lock' like objects that the various
- module/handler running stages use to avoid re-running a given
- module/handler for a given frequency. this makes it separated from
- the actual usage of that object (thus helpful for testing and clear lines
- usage and how the actual job is accomplished)
- - a common 'runner' class is the main entrypoint using these locks to
- run function objects passed in (along with their arguments) and their
- frequency
- - add in a 'paths' object that provides access to the previously global
- and/or config based paths (thus providing a single entrypoint object/type
- that provides path information)
- - this also adds in the ability to change the path when constructing
- that path 'object' and adding in additional config that can be used to
- alter the root paths of 'joins' (useful for testing or possibly useful
- in chroots?)
- - config options now available that can alter the 'write_root' and the
- 'read_root' when backing code uses the paths join() function
- - add a config parser subclass that will automatically add unknown sections
- and return default values (instead of throwing exceptions for these cases)
- - a new config merging class that will be the central object that knows
- how to do the common configuration merging from the various configuration
- sources. The order is the following:
- - cli config files override environment config files
- which override instance configs which override datasource
- configs which override base configuration which overrides
- default configuration.
- - remove the passing around of the 'cloudinit' object as a 'cloud' variable
- and instead pass around an 'interface' object that can be given to modules
- and handlers as their cloud access layer while the backing of that
- object can be varied (good for abstraction and testing)
- - use a single set of functions to do importing of modules
- - add a function in which will search for a given set of module names with
- a given set of attributes and return those which are found
- - refactor logging so that instead of using a single top level 'log' that
- instead each component/module can use its own logger (if desired), this
- should be backwards compatible with handlers and config modules that used
- the passed in logger (its still passed in)
- - ensure that all places where exceptions are caught and where applicable
- that the util logexc() is called, so that no exceptions that may occur
- are dropped without first being logged (where it makes sense for this
- to happen)
- - add a 'requires' file that lists cloud-init dependencies
- - applying it in package creation (bdeb and brpm) as well as using it
- in the modified setup.py to ensure dependencies are installed when
- using that method of packaging
- - add a 'version.py' that lists the active version (in code) so that code
- inside cloud-init can report the version in messaging and other config files
- - cleanup of subprocess usage so that all subprocess calls go through the
- subp() utility method, which now has an exception type that will provide
- detailed information on python 2.6 and 2.7
- - forced all code loading, moving, chmod, writing files and other system
- level actions to go through standard set of util functions, this greatly
- helps in debugging and determining exactly which system actions cloud-init
- is performing
- - adjust url fetching and url trying to go through a single function that
- reads urls in the new 'url helper' file, this helps in tracing, debugging
- and knowing which urls are being called and/or posted to from with-in
- cloud-init code
- - add in the sending of a 'User-Agent' header for all urls fetched that
- do not provide their own header mapping, derive this user-agent from
- the following template, 'Cloud-Init/{version}' where the version is the
- cloud-init version number
- - using prettytable for netinfo 'debug' printing since it provides a standard
- and defined output that should be easier to parse than a custom format
- - add a set of distro specific classes, that handle distro specific actions
- that modules and or handler code can use as needed, this is organized into
- a base abstract class with child classes that implement the shared
- functionality. config determines exactly which subclass to load, so it can
- be easily extended as needed.
- - current functionality
- - network interface config file writing
- - hostname setting/updating
- - locale/timezone/ setting
- - updating of /etc/hosts (with templates or generically)
- - package commands (ie installing, removing)/mirror finding
- - interface up/down activating
- - implemented a debian + ubuntu subclass
- - implemented a redhat + fedora subclass
- - adjust the root 'cloud.cfg' file to now have distribution/path specific
- configuration values in it. these special configs are merged as the normal
- config is, but the system level config is not passed into modules/handlers
- - modules/handlers must go through the path and distro object instead
- - have the cloudstack datasource test the url before calling into boto to
- avoid the long wait for boto to finish retrying and finally fail when
- the gateway meta-data address is unavailable
- - add a simple mock ec2 meta-data python based http server that can serve a
- very simple set of ec2 meta-data back to callers
- - useful for testing or for understanding what the ec2 meta-data
- service can provide in terms of data or functionality
- - for ssh key and authorized key file parsing add in classes and util
- functions that maintain the state of individual lines, allowing for a
- clearer separation of parsing and modification (useful for testing and
- tracing)
- - add a set of 'base' init.d scripts that can be used on systems that do
- not have full upstart or systemd support (or support that does not match
- the standard fedora/ubuntu implementation)
- - currently these are being tested on RHEL 6.2
- - separate the datasources into their own subdirectory (instead of being
- a top-level item), this matches how config 'modules' and user-data
- 'handlers' are also in their own subdirectory (thus helping new developers
- and others understand the code layout in a quicker manner)
- - add the building of rpms based off a new cli tool and template 'spec' file
- that will templatize and perform the necessary commands to create a source
- and binary package to be used with a cloud-init install on a 'rpm'
- supporting system
- - uses the new standard set of requires and converts those pypi requirements
- into a local set of package requirements (that are known to exist on RHEL
- systems but should also exist on fedora systems)
- - adjust the bdeb builder to be a python script (instead of a shell script)
- and make its 'control' file a template that takes in the standard set of
- pypi dependencies and uses a local mapping (known to work on ubuntu) to
- create the packages set of dependencies (that should also work on
- ubuntu-like systems)
- - pythonify a large set of various pieces of code
- - remove wrapping return statements with () when it has no effect
- - upper case all constants used
- - correctly 'case' class and method names (where applicable)
- - use os.path.join (and similar commands) instead of custom path creation
- - use 'is None' instead of the frowned upon '== None' which picks up a larger
- set of 'true' cases than is typically desired (ie for objects that have
- their own equality)
- - use context managers on locks, tempdir, chdir, file, selinux, umask,
- unmounting commands so that these actions do not have to be closed and/or
- cleaned up manually in finally blocks, which is typically not done and
- will eventually be a bug in the future
- - use the 'abc' module for abstract classes base where possible
- - applied in the datasource root class, the distro root class, and the
- user-data v2 root class
- - when loading yaml, check that the 'root' type matches a predefined set of
- valid types (typically just 'dict') and throw a type error if a mismatch
- occurs, this seems to be a good idea to do when loading user config files
- - when forking a long running task (ie resizing a filesystem) use a new util
- function that will fork and then call a callback, instead of having to
- implement all that code in a non-shared location (thus allowing it to be
- used by others in the future)
- - when writing out filenames, go through a util function that will attempt to
- ensure that the given filename is 'filesystem' safe by replacing '/' with
- '_' and removing characters which do not match a given whitelist of allowed
- filename characters
- - for the varying usages of the 'blkid' command make a function in the util
- module that can be used as the single point of entry for interaction with
- that command (and its results) instead of having X separate implementations
- - place the rfc 2822 time formatting and uptime repeated pieces of code in the
- util module as a set of functions with the names 'time_rfc2822'/'uptime'
- - separate the pylint+pep8 calling from one tool into two individual tools so
- that they can be called independently, add make file sections that can be
- used to call these independently
- - remove the support for the old style config that was previously located in
- '/etc/ec2-init/ec2-config.cfg', no longer supported!
- - instead of using an altered config parser that added its own 'dummy' section
- in the 'mcollective' module, use configobj which handles the parsing of
- config without sections better (and it also maintains comments instead of
- removing them)
- - use the new defaulting config parser (that will not raise errors on sections
- that do not exist or return errors when values are fetched that do not
- exist) in the 'puppet' module
- - for config 'modules' add in the ability for the module to provide a list of
- distro names which it is known to work with; if the name of the distro
- being used does not match one of those in this list, a warning will be
- written out saying that this module may not work correctly on this
- distribution
- - for all dynamically imported modules ensure that they are fixed up before
- they are used by ensuring that they have certain attributes, if they do not
- have those attributes they will be set to a sensible set of defaults instead
- - adjust all 'config' modules and handlers to use the adjusted util functions
- and the new distro objects where applicable so that those pieces of code can
- benefit from the unified and enhanced functionality being provided in that
- util module
- - fix a potential bug whereby when a #includeonce was encountered it would
- enable checking of urls against a cache, if later a #include was encountered
- it would continue checking against that cache, instead of refetching (which
- would likely be the expected case)
- - add an openstack/nova based pep8 extension utility ('hacking.py') that
-   allows for custom checks (along with the standard pep8 checks) to occur
-   when running 'make pep8' and its derivatives
- - support relative path in AuthorizedKeysFile (LP: #970071).
- - make apt-get update run with --quiet (suitable for logging) (LP: #1012613)
- - cc_salt_minion: use package 'salt-minion' rather than 'salt' (LP: #996166)
- - use yaml.safe_load rather than yaml.load (LP: #1015818)
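
For the context-manager item above, a minimal sketch of the idea using only
the python standard library (the 'chdir' helper name is illustrative, not
necessarily the actual util function):

    import contextlib
    import os

    @contextlib.contextmanager
    def chdir(new_dir):
        # Save the current directory, switch, and always switch back,
        # even if the body raises; no finally block needed at call sites.
        cur_dir = os.getcwd()
        os.chdir(new_dir)
        try:
            yield new_dir
        finally:
            os.chdir(cur_dir)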
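
For the 'abc' abstract-base-class item above, a sketch in python 3 syntax
(the class and method names are illustrative):

    import abc

    class DataSource(abc.ABC):
        # Subclasses must implement get_data(); instantiating a class
        # that does not raises a TypeError.
        @abc.abstractmethod
        def get_data(self):
            raise NotImplementedError()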
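
For the yaml root-type item above (together with the yaml.safe_load change),
a sketch assuming PyYAML; 'load_yaml' is an illustrative name:

    import yaml

    def load_yaml(blob, allowed=(dict,)):
        # safe_load will not construct arbitrary python objects
        converted = yaml.safe_load(blob)
        if not isinstance(converted, allowed):
            raise TypeError("yaml root type %s not in allowed types %s"
                            % (type(converted), allowed))
        return converted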
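
For the fork-and-callback item above, a rough sketch of such a shared util
function (posix only, error handling kept minimal):

    import os

    def fork_cb(child_cb, *args):
        # The parent returns immediately; the child runs the callback
        # and exits, so long running work (ie a filesystem resize)
        # does not block boot.
        if os.fork() == 0:
            try:
                child_cb(*args)
                os._exit(0)
            except Exception:
                os._exit(1)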
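
For the filesystem-safe filename item above, a sketch; the exact whitelist
here is illustrative:

    import os
    import string

    FN_ALLOWED = string.ascii_letters + string.digits + '_-.:'

    def clean_filename(fn):
        # Replace path separators with '_', then drop any character
        # not in the whitelist of allowed filename characters.
        fn = fn.replace(os.sep, '_')
        return ''.join(c for c in fn if c in FN_ALLOWED)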
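
For the time formatting and uptime item above, a standard-library sketch
(the /proc/uptime read is linux-specific):

    import time
    from email.utils import formatdate

    def time_rfc2822():
        # RFC 2822 timestamp, eg 'Thu, 01 Jan 1970 00:00:00 -0000'
        return formatdate(time.time())

    def uptime():
        # The first field of /proc/uptime is seconds since boot.
        with open('/proc/uptime') as fh:
            return fh.read().split()[0]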
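
For the dynamic-import fixup item above, a sketch; the attribute names and
defaults here are illustrative:

    FIXUP_DEFAULTS = {
        'frequency': 'once-per-instance',
        'distros': ['all'],
    }

    def fixup_module(mod):
        # Ensure a dynamically imported module has the attributes the
        # runner expects, filling in sensible defaults when missing.
        for attr, default in FIXUP_DEFAULTS.items():
            if not hasattr(mod, attr):
                setattr(mod, attr, default)
        return mod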
-0.6.3:
- - add sample systemd config files [Garrett Holmstrom]
- - add Fedora support [Garrett Holmstrom] (LP: #883286)
- - fix bug in netinfo.debug_info if no net devices available (LP: #883367)
- - use python module hashlib rather than md5 to avoid deprecation warnings.
- - support configuration of mirror based on dns name ubuntu-mirror in
- local domain.
- - support setting of Acquire::HTTP::Proxy via 'apt_proxy'
- - DataSourceEc2: more resilient to slow metadata service
- - config change: 'retries' dropped, 'max_wait' added, timeout increased
- - close stdin in all cloud-init programs that are launched at boot
- (LP: #903993)
- - revert management of /etc/hosts to 0.6.1 style (LP: #890501, LP: #871966)
- - write full ssh keys to console for easy machine consumption (LP: #893400)
- - put INSTANCE_ID environment variable in bootcmd scripts
- - add 'cloud-init-per' script for easily running things with a given frequency
- - replace cloud-init-run-module with cloud-init-per
- - support configuration of landscape-client via cloud-config (LP: #857366)
- - part-handlers now get base64 decoded content rather than 2xbase64 encoded
- in the payload parameter. (LP: #874342)
- - add test case framework [Mike Milner] (LP: #890851)
- - fix pylint warnings [Juerg Haefliger] (LP: #914739)
- - add support for adding and deleting CA Certificates [Mike Milner]
- (LP: #915232)
- - in ci-info lines, use '.' to indicate empty field for easier machine reading
- - support empty lines in "#include" files (LP: #923043)
- - support configuration of salt minions (Jeff Bauer) (LP: #927795)
- - DataSourceOVF: only search for OVF data on ISO9660 filesystems (LP: #898373)
- - DataSourceConfigDrive: support getting data from openstack config drive
- (LP: #857378)
- - DataSourceNoCloud: support seed from external disk of ISO or vfat
- (LP: #857378)
- - DataSourceNoCloud: support inserting /etc/network/interfaces
- - DataSourceMaaS: add data source for Ubuntu Metal as a Service (MAAS)
-   (LP: #942061)
- - DataSourceCloudStack: add support for CloudStack datasource [Cosmin Luta]
- - add option 'apt_pipelining' to address issue with S3 mirrors
- (LP: #948461) [Ben Howard]
- - warn on non-multipart, non-handled user-data [Martin Packman]
- - run resizefs in the background in order to not block boot (LP: #961226)
- - Fix bug in Chef support where validation_key was present in config, but
- 'validation_cert' was not (LP: #960547)
- - Provide user friendly message when an invalid locale is set
- [Ben Howard] (LP: #859814)
- - Support reading cloud-config from kernel command line parameter and
-   populating a local file with it, which can then provide data for
-   DataSources
- - improve chef examples for working configurations on 11.10 and 12.04
- [Lorin Hochstein] (LP: #960564)
-
-0.6.2:
- - fix bug where update was not done unless update was explicitly set;
-   it would not be run even if 'upgrade' or packages were set to be installed
- - fix bug in part-handler code, that prevented working part-handlers
- (LP: #739694)
- - fix bug in resizefs cloud-config that would cause trace based on
- failure of 'blkid /dev/root' (LP: #726938)
- - convert dos formatted files to unix for user-scripts, boothooks,
-   and upstart jobs (LP: #744965)
- - fix bug in seeding of grub dpkg configuration (LP: #752361) due
- to renamed devices in newer (natty) kernels (/dev/sda1 -> /dev/xvda1)
- - make metadata urls configurable, to support eucalyptus in
- STATIC or SYSTEM modes (LP: #761847)
- - support disabling byobu in cloud-config
- - run cc_ssh as a cloud-init module so it is guaranteed to run before
- ssh starts (LP: #781101)
- - make prefix for keys added to /root/.ssh/authorized_keys configurable
- and add 'no-port-forwarding,no-agent-forwarding,no-X11-forwarding'
- to the default (LP: #798505)
- - make 'cloud-config ready' command configurable (LP: #785551)
- - make fstab fields used to 'fill in' shorthand entries configurable
- This means you do not have to have 'nobootwait' in the values
- (LP: #785542)
- - read /etc/ssh/sshd_config for AuthorizedKeysFile rather than
- assuming ~/.ssh/authorized_keys (LP: #731849)
- - fix cloud-init in ubuntu lxc containers (LP: #800824)
- - sanitize hosts file for system's hostname to 127.0.1.1 (LP: #802637)
- - add chef support (cloudinit/CloudConfig/cc_chef.py) (LP: #798844)
- - do not give trace on failure to resize in lxc container (LP: #800856)
- - increase the timeout on url gets for "seedfrom" values (LP: #812646)
- - do not write entries for ephemeral0 on t1.micro (LP: #744019)
- - support 'include-once' so that expiring or one-time use urls can
- be used for '#include' to provide sensitive data.
- - support for passing public and private keys to mcollective via cloud-config
- - support multiple statically configured network devices, as long as
-   all of them come up early (LP: #810044)
- - Changes to handling user data mean that:
- * boothooks will now run more than once as they were intended (and as
- bootcmd commands do)
- * cloud-config and user-scripts will be updated from user data every boot
- - Fix issue where 'isatty' would return true for apt-add-repository.
-   apt-add-repository would get stdin which was attached to a terminal
-   (/dev/console) and would thus hang when running during boot. (LP: #831505)
-   This was done by changing all users of util.subp to have None input unless
-   specified
- - Add some debug info to the console when cloud-init runs.
-   This is useful when debugging; IP and route information is printed to
-   the console.
- - change the mechanism for handling .ssh/authorized_keys, to update entries
- rather than appending. This ensures that the authorized_keys that are
- being inserted actually do something (LP: #434076, LP: #833499)
- - log warning on failure to set hostname (LP: #832175)
- - upstart/cloud-init-nonet.conf: wait for all network interfaces to be up
-   and allow for the possibility of /var/run != /run.
- - DataSourceNoCloud, DataSourceOVF : do not provide a default hostname.
- This way the configured hostname of the system will be used if not provided
- by metadata (LP: #838280)
- - DataSourceOVF: change the default instance id to 'iid-dsovf' from 'nocloud'
- - Improve the OVF documentation, and provide a simple command line
- tool for creating a useful ISO file.
-
-0.6.1:
- - fix bug in fixing permission on /var/log/cloud-init.log (LP: #704509)
- - improve comment strings in rsyslog file tools/21-cloudinit.conf
- - add previous-instance-id and previous-datasource files to datadir
- - add 'datasource' file to instance dir
- - add setting of passwords and enabling/disabling of PasswordAuthentication
- for sshd. By default no changes are done to sshd.
- - fix for puppet configuration options (LP: #709946) [Ryan Lane]
- - fix pickling of DataSource, which broke seeding.
- - turn resize_rootfs default to True
- - avoid mounts in DataSourceOVF if 'read' on device fails
- 'mount /dev/sr0' for an empty virtual cdrom device was taking 18 seconds
- - add 'manual_cache_clean' option to select manual cleaning of
- the /var/lib/cloud/instance/ link, for a data source that might
- not be present on every boot
- - make DataSourceEc2 retries and timeout configurable
- - add helper routines for apt-get update and install
- - add 'bootcmd' like 'runcmd' to cloud-config syntax for running things early
- - move from '#opt_include' in config file format to conf_d.
-   ie, now files in /etc/cloud.cfg.d/ are read rather than reading
-   '#opt_include <filename>' or '#include <filename>' in cloud.cfg
- - allow /etc/hosts to be written from hosts.tmpl, which allows
-   getting local-hostname into /etc/hosts (LP: #720440)
- - better handle startup if there is no eth0 (LP: #714807)
- - update rather than append in puppet config [Marc Cluet]
- - add cloud-config for mcollective [Marc Cluet]
-0.6.0:
- - change permissions of /var/log/cloud-init.log to accommodate
- syslog writing to it (LP: #704509)
- - rework of /var/lib/cloud layout
- - remove updates-check (LP: #653220)
- - support resizing / on first boot (enabled by default)
- - added support for running CloudConfig modules at cloud-init time
- rather than cloud-config time, and the new 'cloud_init_modules'
- entry in cloud.cfg to indicate which should run then.
- The driving force behind this was to have the rsyslog module
- able to run before rsyslog even runs so that a restart would
- not be needed (rsyslog on ubuntu runs on 'filesystem')
- - moved setting and updating of hostname to cloud_init_modules;
-   this allows the user to easily disable these from running.
- This also means:
- - the semaphore name for 'set_hostname' and 'update_hostname'
- changes to 'config_set_hostname' and 'config_update_hostname'
- - added cloud-config option 'hostname' for setting hostname
- - moved upstart/cloud-run-user-script.conf to upstart/cloud-final.conf
-   - cloud-final.conf now runs cloud-config modules similar
- to cloud-config and cloud-init.
- - LP: #653271
- - added writing of "boot-finished" to /var/lib/cloud/instance/boot-finished;
-   this is the last thing done, indicating cloud-init is finished booting
- - writes message to console with timestamp and uptime
- - write ssh keys to console as one of the last things done;
-   this is to ensure they don't get run off the 'get-console-output' buffer
- - user_scripts run via cloud-final and thus semaphore renamed from
- user_scripts to config_user_scripts
- - add support for redirecting output of cloud-init, cloud-config, cloud-final
- via the config file, or user data config file
- - add support for posting data about the instance to a url (phone_home)
- - add minimal OVF transport (iso) support
- - make DataSources that are attempted dynamic and configurable from
-   system config. change "cloud_type: auto" as configuration for this
-   to 'datasource_list: [ "Ec2" ]'. Each of the items in that list
-   must be modules that can be loaded by "DataSource<item>" (see the
-   sketch after this list)
- - add 'timezone' option to cloud-config (LP: #645458)
- - Added an additional archive format, that can be used for multi-part
-   input to cloud-init. This may be more user friendly than mime-multipart.
-   See example in doc/examples/cloud-config-archive.txt (LP: #641504)
- - add support for reading Rightscale style user data (LP: #668400)
- and acting on it in cloud-config (cc_rightscale_userdata.py)
- - make the message on 'disable_root' more clear (LP: #672417)
- - do not require public key if private is given in ssh cloud-config
- (LP: #648905)
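
For the datasource_list item above, a rough sketch of the dynamic loading it
describes, using the standard importlib module; the package path here is
illustrative:

    import importlib

    def list_sources(cfg):
        # Import a DataSource<name> module for each configured name,
        # eg datasource_list: [ "Ec2" ] -> DataSourceEc2.
        mods = []
        for name in cfg.get('datasource_list', []):
            mods.append(importlib.import_module(
                'cloudinit.sources.DataSource%s' % name))
        return mods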
-# vi: syntax=text textwidth=79
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644
index 6bfe4b4d..00000000
--- a/HACKING.rst
+++ /dev/null
@@ -1,48 +0,0 @@
-=====================
-Hacking on cloud-init
-=====================
-
-To get changes into cloud-init, the process to follow is:
-
-* If you have not already, be sure to sign the CCA:
-
- - `Canonical Contributor Agreement`_
-
-* Get your changes into a local bzr branch.
-  Initialize a repo, and check out trunk (init-repo shares bzr info across multiple checkouts; it's different from git):
-
- - ``bzr init-repo cloud-init``
- - ``bzr branch lp:cloud-init trunk.dist``
- - ``bzr branch trunk.dist my-topic-branch``
-
-* Commit your changes (note: you can make multiple commits, with fixes and further commits as needed):
-
- - ``bzr commit``
-
-* Check pep8 and test, and address any issues:
-
- - ``make test pep8``
-
-* Push to launchpad to a personal branch:
-
- - ``bzr push lp:~<YOUR_USERNAME>/cloud-init/<BRANCH_NAME>``
-
-* Propose that for a merge into lp:cloud-init via web browser.
-
- - Open the branch in `Launchpad`_
-
-    - It will typically be at ``https://code.launchpad.net/~<YOUR_USERNAME>/<PROJECT>/<BRANCH_NAME>``
-    - e.g. https://code.launchpad.net/~smoser/cloud-init/mybranch
-
-* Click 'Propose for merging'
-* Select 'lp:cloud-init' as the target branch
-
-Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua Harlow`_) will
-review your changes and follow up in the merge request.
-
-Feel free to ping and/or join #cloud-init on freenode (irc) if you have any questions.
-
-.. _Launchpad: https://launchpad.net
-.. _Canonical Contributor Agreement: http://www.canonical.com/contributors
-.. _Scott Moser: https://launchpad.net/~smoser
-.. _Joshua Harlow: https://launchpad.net/~harlowja
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 94a9ed02..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (C) <year> <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 90f6c7d5..00000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,8 +0,0 @@
-include *.py MANIFEST.in ChangeLog
-global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh
-graft tools
-prune build
-prune dist
-prune .tox
-prune .bzr
-exclude .bzrignore
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 32c50aee..00000000
--- a/Makefile
+++ /dev/null
@@ -1,82 +0,0 @@
-CWD=$(shell pwd)
-PYVER ?= 3
-noseopts ?= -v
-
-YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f )
-YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
-
-CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)
-CODE_VERSION=$(shell python -c "from cloudinit import version; print(version.version_string())")
-
-PIP_INSTALL := pip install
-
-ifeq ($(PYVER),3)
- pyflakes = pyflakes3
- unittests = unittest3
- yaml = yaml
-else
-ifeq ($(PYVER),2)
- pyflakes = pyflakes
- unittests = unittest
-else
- pyflakes = pyflakes pyflakes3
- unittests = unittest unittest3
-endif
-endif
-
-ifeq ($(distro),)
- distro = redhat
-endif
-
-all: check
-
-check: check_version pep8 $(pyflakes) test $(yaml)
-
-pep8:
- @$(CWD)/tools/run-pep8
-
-pyflakes:
- @$(CWD)/tools/run-pyflakes
-
-pyflakes3:
- @$(CWD)/tools/run-pyflakes3
-
-unittest: clean_pyc
- nosetests $(noseopts) tests/unittests
-
-unittest3: clean_pyc
- nosetests3 $(noseopts) tests/unittests
-
-pip-requirements:
- @echo "Installing cloud-init dependencies..."
- $(PIP_INSTALL) -r "$@.txt" -q
-
-pip-test-requirements:
- @echo "Installing cloud-init test dependencies..."
- $(PIP_INSTALL) -r "$@.txt" -q
-
-test: $(unittests)
-
-check_version:
- @if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \
- echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \
- "not equal to code version $(CODE_VERSION)"; exit 2; \
- else true; fi
-
-clean_pyc:
- @find . -type f -name "*.pyc" -delete
-
-clean: clean_pyc
- rm -rf /var/log/cloud-init.log /var/lib/cloud/
-
-yaml:
- @$(CWD)/tools/validate-yaml.py $(YAML_FILES)
-
-rpm:
- ./packages/brpm --distro $(distro)
-
-deb:
- ./packages/bddeb
-
-.PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version
-.PHONY: pip-test-requirements pip-requirements clean_pyc unittest unittest3
diff --git a/README b/README
new file mode 100644
index 00000000..b12e6dff
--- /dev/null
+++ b/README
@@ -0,0 +1,11 @@
+cloud-init development has moved its revision control to git.
+It is available at
+ https://code.launchpad.net/cloud-init
+
+Clone with
+ git clone https://git.launchpad.net/cloud-init
+or
+ git clone git+ssh://git.launchpad.net/cloud-init
+
+For more information see
+ https://git.launchpad.net/cloud-init/tree/HACKING.rst
diff --git a/TODO.rst b/TODO.rst
deleted file mode 100644
index 7d126864..00000000
--- a/TODO.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-==============================================
-Things that cloud-init may do (better) someday
-==============================================
-
-- Consider making a ``failsafe`` ``DataSource``
- - sets the user password, writing it to console
-
-- Consider a ``previous`` ``DataSource``: if no other data source is
-  found, fall back to the ``previous`` one that worked.
-- Rewrite ``cloud-init-query`` (currently not implemented)
-- Possibly have a ``DataSource`` expose explicit fields:
-
- - instance-id
- - hostname
- - mirror
- - release
- - ssh public keys
-
-- Remove the conversion of the ubuntu network interface format
-  to a RH/fedora format and replace it with a top level format that uses
-  the netcf library's format instead (which itself knows how to translate
-  into the specific formats). See for example `netcf`_ which seems to be
-  an active project that has this capability.
-- Replace the ``apt*`` modules with variants that now use the distro classes
- to perform distro independent packaging commands (wherever possible).
-- Replace some of the LOG.debug calls with LOG.info where appropriate,
-  since right now there are really only 2 levels (``WARN`` and ``DEBUG``)
-- Remove the ``cc_`` prefix for config modules: either have them fully
-  specified (ie ``cloudinit.config.resizefs``) or by default only look in
-  the ``cloudinit.config`` namespace for these modules (or have a combination
-  of the above); this avoids having to understand where your modules are
-  coming from (which can be altered by the current python inclusion path)
-- Instead of just warning when a module is being run on an ``unknown``
-  distribution, perhaps we should not run that module in that case? Or we might
- want to start reworking those modules so they will run on all
- distributions? Or if that is not the case, then maybe we want to allow
- fully specified python paths for modules and start encouraging
- packages of ``ubuntu`` modules, packages of ``rhel`` specific modules that
- people can add instead of having them all under the cloud-init ``root``
- tree? This might encourage more development of other modules instead of
- having to go edit the cloud-init code to accomplish this.
-
-.. _netcf: https://fedorahosted.org/netcf/
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
deleted file mode 100644
index da124641..00000000
--- a/cloudinit/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
deleted file mode 100644
index 3e6be203..00000000
--- a/cloudinit/cloud.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import os
-
-from cloudinit import log as logging
-from cloudinit.reporting import events
-
-LOG = logging.getLogger(__name__)
-
-# This class is the high level wrapper that provides
-# access to cloud-init objects without exposing the stage objects
-# to handler and/or module manipulation. It allows cloud-init
-# to restrict what those types of user facing code may see
-# and/or adjust (which helps avoid code messing with each other)
-#
-# It also provides util functions that avoid having to know
-# how to get a certain member from its submembers as well
-# as providing a backwards compatible object that can be maintained
-# while the stages/other objects can be worked on independently...
-
-
-class Cloud(object):
- def __init__(self, datasource, paths, cfg, distro, runners, reporter=None):
- self.datasource = datasource
- self.paths = paths
- self.distro = distro
- self._cfg = cfg
- self._runners = runners
- if reporter is None:
- reporter = events.ReportEventStack(
- name="unnamed-cloud-reporter",
- description="unnamed-cloud-reporter",
- reporting_enabled=False)
- self.reporter = reporter
-
-    # If a 'user' manipulates logging or logging services
-    # it is typically useful to cause the logging to be
-    # set up again.
- def cycle_logging(self):
- logging.resetLogging()
- logging.setupLogging(self.cfg)
-
- @property
- def cfg(self):
-        # Ensure that it is not indirectly modified
- return copy.deepcopy(self._cfg)
-
- def run(self, name, functor, args, freq=None, clear_on_fail=False):
- return self._runners.run(name, functor, args, freq, clear_on_fail)
-
- def get_template_filename(self, name):
- fn = self.paths.template_tpl % (name)
- if not os.path.isfile(fn):
- LOG.warn("No template found at %s for template named %s", fn, name)
- return None
- return fn
-
-    # The rest of these are just useful proxies
- def get_userdata(self, apply_filter=True):
- return self.datasource.get_userdata(apply_filter)
-
- def get_instance_id(self):
- return self.datasource.get_instance_id()
-
- @property
- def launch_index(self):
- return self.datasource.launch_index
-
- def get_public_ssh_keys(self):
- return self.datasource.get_public_ssh_keys()
-
- def get_locale(self):
- return self.datasource.get_locale()
-
- def get_hostname(self, fqdn=False):
- return self.datasource.get_hostname(fqdn=fqdn)
-
- def device_name_to_device(self, name):
- return self.datasource.device_name_to_device(name)
-
- def get_ipath_cur(self, name=None):
- return self.paths.get_ipath_cur(name)
-
- def get_cpath(self, name=None):
- return self.paths.get_cpath(name)
-
- def get_ipath(self, name=None):
- return self.paths.get_ipath(name)
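
As a sketch of how these proxies are consumed, a config module's handle()
receives a Cloud instance and never touches the stage objects directly
(the module body and template name below are hypothetical):

    # Hypothetical module body; every lookup goes through the Cloud wrapper.
    def handle(name, cfg, cloud, log, _args):
        iid = cloud.get_instance_id()          # proxied to the datasource
        host = cloud.get_hostname(fqdn=True)   # proxied to the datasource
        tpl = cloud.get_template_filename('hosts.mydistro')  # via paths
        log.debug("instance %s (%s) would render %s", iid, host, tpl)
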
diff --git a/cloudinit/cmd/__init__.py b/cloudinit/cmd/__init__.py
deleted file mode 100644
index da124641..00000000
--- a/cloudinit/cmd/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
deleted file mode 100644
index 63621c1d..00000000
--- a/cloudinit/cmd/main.py
+++ /dev/null
@@ -1,685 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import argparse
-import json
-import os
-import sys
-import tempfile
-import time
-import traceback
-
-from cloudinit import patcher
-patcher.patch() # noqa
-
-from cloudinit import log as logging
-from cloudinit import netinfo
-from cloudinit import signal_handler
-from cloudinit import sources
-from cloudinit import stages
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
-from cloudinit import reporting
-from cloudinit.reporting import events
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CLOUD_CONFIG)
-
-
-# Pretty little cheetah formatted welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
- "${timestamp}. Up ${uptime} seconds.")
-
-# Module section template
-MOD_SECTION_TPL = "cloud_%s_modules"
-
-# Things you can query on
-QUERY_DATA_TYPES = [
- 'data',
- 'data_raw',
- 'instance_id',
-]
-
-# Frequency shortname to full name
-# (so users don't have to remember the full name...)
-FREQ_SHORT_NAMES = {
- 'instance': PER_INSTANCE,
- 'always': PER_ALWAYS,
- 'once': PER_ONCE,
-}
-
-LOG = logging.getLogger()
-
-
-# Used when a logger may not be active
-# and we still want to print exceptions...
-def print_exc(msg=''):
- if msg:
- sys.stderr.write("%s\n" % (msg))
- sys.stderr.write('-' * 60)
- sys.stderr.write("\n")
- traceback.print_exc(file=sys.stderr)
- sys.stderr.write('-' * 60)
- sys.stderr.write("\n")
-
-
-def welcome(action, msg=None):
- if not msg:
- msg = welcome_format(action)
- util.multi_log("%s\n" % (msg),
- console=False, stderr=True, log=LOG)
- return msg
-
-
-def welcome_format(action):
- tpl_params = {
- 'version': version.version_string(),
- 'uptime': util.uptime(),
- 'timestamp': util.time_rfc2822(),
- 'action': action,
- }
- return templater.render_string(WELCOME_MSG_TPL, tpl_params)
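
Rendered, the welcome template above yields a single log line such as
(values illustrative): Cloud-init v. 0.7.7 running 'init-local' at
Wed, 10 Aug 2016 09:06:15 +0000. Up 12.34 seconds.
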
-
-
-def extract_fns(args):
-    # Files are already opened, so let's just pass that along,
-    # since it would have broken already if those
-    # files could not be read...
- fn_cfgs = []
- if args.files:
- for fh in args.files:
-            # The realpath is more useful in logging,
-            # so let's resolve to that...
- fn_cfgs.append(os.path.realpath(fh.name))
- return fn_cfgs
-
-
-def run_module_section(mods, action_name, section):
- full_section_name = MOD_SECTION_TPL % (section)
- (which_ran, failures) = mods.run_section(full_section_name)
- total_attempted = len(which_ran) + len(failures)
- if total_attempted == 0:
- msg = ("No '%s' modules to run"
- " under section '%s'") % (action_name, full_section_name)
- sys.stderr.write("%s\n" % (msg))
- LOG.debug(msg)
- return []
- else:
- LOG.debug("Ran %s modules with %s failures",
- len(which_ran), len(failures))
- return failures
-
-
-def apply_reporting_cfg(cfg):
- if cfg.get('reporting'):
- reporting.update_configuration(cfg.get('reporting'))
-
-
-def main_init(name, args):
- deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- if args.local:
- deps = [sources.DEP_FILESYSTEM]
-
- if not args.local:
- # See doc/kernel-cmdline.txt
- #
-        # This is used by the MAAS datasource in an "ephemeral"
-        # (read-only root) environment where the instance netboots
-        # to an iscsi read-only root, and the entity that controls
-        # the pxe config has to configure the MAAS datasource.
-        #
-        # Could be used elsewhere; only works on network-based (not local)
-        # datasources.
- root_name = "%s.d" % (CLOUD_CONFIG)
- target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
- util.read_write_cmdline_url(target_fn)
-
- # Cloud-init 'init' stage is broken up into the following sub-stages
- # 1. Ensure that the init object fetches its config without errors
- # 2. Setup logging/output redirections with resultant config (if any)
- # 3. Initialize the cloud-init filesystem
- # 4. Check if we can stop early by looking for various files
- # 5. Fetch the datasource
- # 6. Connect to the current instance location + update the cache
- # 7. Consume the userdata (handlers get activated here)
- # 8. Construct the modules object
- # 9. Adjust any subsequent logging/output redirections using the modules
- # objects config as it may be different from init object
- # 10. Run the modules for the 'init' stage
- # 11. Done!
- if not args.local:
- w_msg = welcome_format(name)
- else:
- w_msg = welcome_format("%s-local" % (name))
- init = stages.Init(ds_deps=deps, reporter=args.reporter)
- # Stage 1
- init.read_cfg(extract_fns(args))
- # Stage 2
- outfmt = None
- errfmt = None
- try:
- LOG.debug("Closing stdin")
- util.close_stdin()
- (outfmt, errfmt) = util.fixup_output(init.cfg, name)
- except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- print_exc("Failed to setup output redirection!")
- if args.debug:
- # Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
- logging.resetLogging()
- logging.setupLogging(init.cfg)
- apply_reporting_cfg(init.cfg)
-
- # Any log usage prior to setupLogging above did not have local user log
- # config applied. We send the welcome message now, as stderr/out have
- # been redirected and log now configured.
- welcome(name, msg=w_msg)
-
- # Stage 3
- try:
- init.initialize()
- except Exception:
- util.logexc(LOG, "Failed to initialize, likely bad things to come!")
- # Stage 4
- path_helper = init.paths
- mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
-
- if mode == sources.DSMODE_NETWORK:
- existing = "trust"
- sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(("Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."))
- # no-net is written by upstart cloud-init-nonet when network failed
- # to come up
- stop_files = [
- os.path.join(path_helper.get_cpath("data"), "no-net"),
- ]
- existing_files = []
- for fn in stop_files:
- if os.path.isfile(fn):
- existing_files.append(fn)
-
- if existing_files:
- LOG.debug("[%s] Exiting. stop file %s existed",
- mode, existing_files)
- return (None, [])
- else:
- LOG.debug("Execution continuing, no previous run detected that"
- " would allow us to stop early.")
- else:
- existing = "check"
- if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
- existing = "trust"
-
- init.purge_cache()
- # Delete the non-net file as well
- util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
-
- # Stage 5
- try:
- init.fetch(existing=existing)
- # if in network mode, and the datasource is local
- # then work was done at that stage.
- if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s in local mode",
- mode, init.datasource)
- return (None, [])
- except sources.DataSourceNotFoundException:
-        # In the case of 'cloud-init init' without '--local' it is a bit
-        # more likely that the user would consider it a failure if nothing
-        # was found. When using upstart it will also mention job failure
-        # in the console log if the exit code is != 0.
- if mode == sources.DSMODE_LOCAL:
- LOG.debug("No local datasource found")
- else:
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
- if not args.force:
- init.apply_network_config(bring_up=not args.local)
- LOG.debug("[%s] Exiting without datasource in local mode", mode)
- if mode == sources.DSMODE_LOCAL:
- return (None, [])
- else:
- return (None, ["No instance datasource found."])
- else:
- LOG.debug("[%s] barreling on in force mode without datasource",
- mode)
-
- # Stage 6
- iid = init.instancify()
- LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
- mode, name, iid, init.is_new_instance())
-
- init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
-
- if mode == sources.DSMODE_LOCAL:
- if init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s not in local mode.",
- mode, init.datasource)
- return (init.datasource, [])
- else:
- LOG.debug("[%s] %s is in local mode, will apply init modules now.",
- mode, init.datasource)
-
- # update fully realizes user-data (pulling in #include if necessary)
- init.update()
- # Stage 7
- try:
- # Attempt to consume the data per instance.
- # This may run user-data handlers and/or perform
- # url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- if not ran:
- # Just consume anything that is set to run per-always
- # if nothing ran in the per-instance code
- #
-            # See: https://bugs.launchpad.net/bugs/819507 for a little
-            # of the reasoning behind this...
- init.consume_data(PER_ALWAYS)
- except Exception:
- util.logexc(LOG, "Consuming user data failed!")
- return (init.datasource, ["Consuming user data failed!"])
-
- apply_reporting_cfg(init.cfg)
-
- # Stage 8 - re-read and apply relevant cloud-config to include user-data
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
- # Stage 9
- try:
- outfmt_orig = outfmt
- errfmt_orig = errfmt
- (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
- if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
- (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
- except Exception:
- util.logexc(LOG, "Failed to re-adjust output redirection!")
- logging.setupLogging(mods.cfg)
-
- # Stage 10
- return (init.datasource, run_module_section(mods, name, name))
-
-
-def main_modules(action_name, args):
- name = args.mode
- # Cloud-init 'modules' stages are broken up into the following sub-stages
- # 1. Ensure that the init object fetches its config without errors
- # 2. Get the datasource from the init object, if it does
- # not exist then that means the main_init stage never
- # worked, and thus this stage can not run.
- # 3. Construct the modules object
- # 4. Adjust any subsequent logging/output redirections using
- # the modules objects configuration
- # 5. Run the modules for the given stage name
- # 6. Done!
- w_msg = welcome_format("%s:%s" % (action_name, name))
- init = stages.Init(ds_deps=[], reporter=args.reporter)
- # Stage 1
- init.read_cfg(extract_fns(args))
- # Stage 2
- try:
- init.fetch(existing="trust")
- except sources.DataSourceNotFoundException:
-        # There was no datasource found; there's nothing to do
- msg = ('Can not apply stage %s, no datasource found! Likely bad '
- 'things to come!' % name)
- util.logexc(LOG, msg)
- print_exc(msg)
- if not args.force:
- return [(msg)]
- # Stage 3
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
- # Stage 4
- try:
- LOG.debug("Closing stdin")
- util.close_stdin()
- util.fixup_output(mods.cfg, name)
- except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- if args.debug:
- # Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
- logging.resetLogging()
- logging.setupLogging(mods.cfg)
- apply_reporting_cfg(init.cfg)
-
- # now that logging is setup and stdout redirected, send welcome
- welcome(name, msg=w_msg)
-
- # Stage 5
- return run_module_section(mods, name, name)
-
-
-def main_query(name, _args):
- raise NotImplementedError(("Action '%s' is not"
- " currently implemented") % (name))
-
-
-def main_single(name, args):
- # Cloud-init single stage is broken up into the following sub-stages
- # 1. Ensure that the init object fetches its config without errors
- # 2. Attempt to fetch the datasource (warn if it doesn't work)
- # 3. Construct the modules object
- # 4. Adjust any subsequent logging/output redirections using
- # the modules objects configuration
- # 5. Run the single module
- # 6. Done!
- mod_name = args.name
- w_msg = welcome_format(name)
- init = stages.Init(ds_deps=[], reporter=args.reporter)
- # Stage 1
- init.read_cfg(extract_fns(args))
- # Stage 2
- try:
- init.fetch(existing="trust")
- except sources.DataSourceNotFoundException:
-        # There was no datasource found,
-        # which might be bad (or ok) depending on
-        # the module being run (so continue on)
- util.logexc(LOG, ("Failed to fetch your datasource,"
- " likely bad things to come!"))
- print_exc(("Failed to fetch your datasource,"
- " likely bad things to come!"))
- if not args.force:
- return 1
- # Stage 3
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
- mod_args = args.module_args
- if mod_args:
- LOG.debug("Using passed in arguments %s", mod_args)
- mod_freq = args.frequency
- if mod_freq:
- LOG.debug("Using passed in frequency %s", mod_freq)
- mod_freq = FREQ_SHORT_NAMES.get(mod_freq)
- # Stage 4
- try:
- LOG.debug("Closing stdin")
- util.close_stdin()
- util.fixup_output(mods.cfg, None)
- except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- if args.debug:
- # Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
- logging.resetLogging()
- logging.setupLogging(mods.cfg)
- apply_reporting_cfg(init.cfg)
-
- # now that logging is setup and stdout redirected, send welcome
- welcome(name, msg=w_msg)
-
- # Stage 5
- (which_ran, failures) = mods.run_single(mod_name,
- mod_args,
- mod_freq)
- if failures:
- LOG.warn("Ran %s but it failed!", mod_name)
- return 1
- elif not which_ran:
- LOG.warn("Did not run %s, does it exist?", mod_name)
- return 1
- else:
- # Guess it worked
- return 0
-
-
-def atomic_write_file(path, content, mode='w'):
- tf = None
- try:
- tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
- delete=False, mode=mode)
- tf.write(content)
- tf.close()
- os.rename(tf.name, path)
- except Exception as e:
- if tf is not None:
- os.unlink(tf.name)
- raise e
-
-
-def atomic_write_json(path, data):
- return atomic_write_file(path, json.dumps(data, indent=1) + "\n")
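
The two helpers above lean on os.rename() being atomic when source and
destination live on the same filesystem, which is why the temporary file
is created with dir=os.path.dirname(path): readers see either the old
complete document or the new one, never a half-written file. A minimal
usage sketch (the path is illustrative):

    # Serializes the dict, writes it to a sibling temp file, then
    # renames the temp file into place in one atomic step.
    atomic_write_json("/tmp/example-status.json", {"v1": {"stage": None}})
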
-
-
-def status_wrapper(name, args, data_d=None, link_d=None):
- if data_d is None:
- data_d = os.path.normpath("/var/lib/cloud/data")
- if link_d is None:
- link_d = os.path.normpath("/run/cloud-init")
-
- status_path = os.path.join(data_d, "status.json")
- status_link = os.path.join(link_d, "status.json")
- result_path = os.path.join(data_d, "result.json")
- result_link = os.path.join(link_d, "result.json")
-
- util.ensure_dirs((data_d, link_d,))
-
- (_name, functor) = args.action
-
- if name == "init":
- if args.local:
- mode = "init-local"
- else:
- mode = "init"
- elif name == "modules":
- mode = "modules-%s" % args.mode
- else:
- raise ValueError("unknown name: %s" % name)
-
- modes = ('init', 'init-local', 'modules-config', 'modules-final')
-
- status = None
- if mode == 'init-local':
- for f in (status_link, result_link, status_path, result_path):
- util.del_file(f)
- else:
- try:
- status = json.loads(util.load_file(status_path))
- except Exception:
- pass
-
- if status is None:
- nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
- }
- status = {'v1': {}}
- for m in modes:
- status['v1'][m] = nullstatus.copy()
- status['v1']['datasource'] = None
-
- v1 = status['v1']
- v1['stage'] = mode
- v1[mode]['start'] = time.time()
-
- atomic_write_json(status_path, status)
- util.sym_link(os.path.relpath(status_path, link_d), status_link,
- force=True)
-
- try:
- ret = functor(name, args)
- if mode in ('init', 'init-local'):
- (datasource, errors) = ret
- if datasource is not None:
- v1['datasource'] = str(datasource)
- else:
- errors = ret
-
- v1[mode]['errors'] = [str(e) for e in errors]
-
- except Exception as e:
- util.logexc(LOG, "failed stage %s", mode)
- print_exc("failed run of stage %s" % mode)
- v1[mode]['errors'] = [str(e)]
-
- v1[mode]['finished'] = time.time()
- v1['stage'] = None
-
- atomic_write_json(status_path, status)
-
- if mode == "modules-final":
- # write the 'finished' file
- errors = []
- for m in modes:
- if v1[m]['errors']:
- errors.extend(v1[m].get('errors', []))
-
- atomic_write_json(result_path,
- {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
- util.sym_link(os.path.relpath(result_path, link_d), result_link,
- force=True)
-
- return len(v1[mode]['errors'])
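
For reference, the status document assembled by status_wrapper() has
roughly the following shape (datasource and timestamps are illustrative):

    example_status = {
        'v1': {
            'stage': None,
            'datasource': 'DataSourceNoCloud',
            'init-local': {'start': 1470812775.0,
                           'finished': 1470812776.2, 'errors': []},
            'init': {'start': None, 'finished': None, 'errors': []},
            'modules-config': {'start': None, 'finished': None,
                               'errors': []},
            'modules-final': {'start': None, 'finished': None,
                              'errors': []},
        },
    }
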
-
-
-def main(sysv_args=None):
- if sysv_args is not None:
- parser = argparse.ArgumentParser(prog=sysv_args[0])
- sysv_args = sysv_args[1:]
- else:
- parser = argparse.ArgumentParser()
-
- # Top level args
- parser.add_argument('--version', '-v', action='version',
- version='%(prog)s ' + (version.version_string()))
- parser.add_argument('--file', '-f', action='append',
- dest='files',
- help=('additional yaml configuration'
- ' files to use'),
- type=argparse.FileType('rb'))
- parser.add_argument('--debug', '-d', action='store_true',
- help=('show additional pre-action'
- ' logging (default: %(default)s)'),
- default=False)
- parser.add_argument('--force', action='store_true',
- help=('force running even if no datasource is'
- ' found (use at your own risk)'),
- dest='force',
- default=False)
-
- parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers()
-
- # Each action and its sub-options (if any)
- parser_init = subparsers.add_parser('init',
- help=('initializes cloud-init and'
- ' performs initial modules'))
- parser_init.add_argument("--local", '-l', action='store_true',
- help="start in local mode (default: %(default)s)",
- default=False)
- # This is used so that we can know which action is selected +
- # the functor to use to run this subcommand
- parser_init.set_defaults(action=('init', main_init))
-
- # These settings are used for the 'config' and 'final' stages
- parser_mod = subparsers.add_parser('modules',
- help=('activates modules using '
- 'a given configuration key'))
- parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
- parser_mod.set_defaults(action=('modules', main_modules))
-
- # These settings are used when you want to query information
- # stored in the cloud-init data objects/directories/files
- parser_query = subparsers.add_parser('query',
- help=('query information stored '
- 'in cloud-init'))
- parser_query.add_argument("--name", '-n', action="store",
- help="item name to query on",
- required=True,
- choices=QUERY_DATA_TYPES)
- parser_query.set_defaults(action=('query', main_query))
-
- # This subcommand allows you to run a single module
- parser_single = subparsers.add_parser('single',
- help=('run a single module '))
- parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
- parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
- parser_single.add_argument("--report", action="store_true",
- help="enable reporting",
- required=False)
- parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
- parser_single.set_defaults(action=('single', main_single))
-
- args = parser.parse_args(args=sysv_args)
-
- try:
- (name, functor) = args.action
- except AttributeError:
- parser.error('too few arguments')
-
- # Setup basic logging to start (until reinitialized)
- # iff in debug mode...
- if args.debug:
- logging.setupBasicLogging()
-
- # Setup signal handlers before running
- signal_handler.attach_handlers()
-
- if name in ("modules", "init"):
- functor = status_wrapper
-
- report_on = True
- if name == "init":
- if args.local:
- rname, rdesc = ("init-local", "searching for local datasources")
- else:
- rname, rdesc = ("init-network",
- "searching for network datasources")
- elif name == "modules":
- rname, rdesc = ("modules-%s" % args.mode,
- "running modules for %s" % args.mode)
- elif name == "single":
- rname, rdesc = ("single/%s" % args.name,
- "running single module %s" % args.name)
- report_on = args.report
-
- args.reporter = events.ReportEventStack(
- rname, rdesc, reporting_enabled=report_on)
- with args.reporter:
- return util.log_time(
- logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
- get_uptime=True, func=functor, args=(name, args))
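
Taken together, the subcommands defined above correspond to invocations
like the following (a sketch; these calls are normally made by the init
system's service units, and the module name passed to 'single' is
hypothetical):

    cloud-init init --local
    cloud-init init
    cloud-init modules --mode=config
    cloud-init modules --mode=final
    cloud-init single --name cc_foo --frequency always
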
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
deleted file mode 100644
index d57453be..00000000
--- a/cloudinit/config/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2008-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Chuck Short <chuck.short@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
-
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-# This prefix is used to reduce the
-# chance that, when importing,
-# we will find something else with the same
-# name in the lookup path...
-MOD_PREFIX = "cc_"
-
-
-def form_module_name(name):
- canon_name = name.replace("-", "_")
- if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0:(len(canon_name) - 3)]
- canon_name = canon_name.strip()
- if not canon_name:
- return None
- if not canon_name.startswith(MOD_PREFIX):
- canon_name = '%s%s' % (MOD_PREFIX, canon_name)
- return canon_name
-
-
-def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warn("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, 'distros'):
- setattr(mod, 'distros', [])
- if not hasattr(mod, 'osfamilies'):
- setattr(mod, 'osfamilies', [])
- return mod
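
The canonicalization above can be summarized with a few examples (a
sketch; the module names are arbitrary):

    assert form_module_name("apt-configure") == "cc_apt_configure"
    assert form_module_name("cc_bootcmd.py") == "cc_bootcmd"
    assert form_module_name(".py") is None  # nothing left after stripping
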
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
deleted file mode 100644
index 05ad4b03..00000000
--- a/cloudinit/config/cc_apt_configure.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import templater
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
-APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
-
-# this will match 'XXX:YYY' (i.e. 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.is_false(cfg.get('apt_configure_enabled', True)):
- log.debug("Skipping module named %s, disabled by config.", name)
- return
-
- release = get_release()
- mirrors = find_apt_mirror_info(cloud, cfg)
- if not mirrors or "primary" not in mirrors:
- log.debug(("Skipping module named %s,"
- " no package 'mirror' located"), name)
- return
-
- # backwards compatibility
- mirror = mirrors["primary"]
- mirrors["mirror"] = mirror
-
- log.debug("Mirror info: %s" % mirrors)
-
- if not util.get_cfg_option_bool(cfg,
- 'apt_preserve_sources_list', False):
- generate_sources_list(cfg, release, mirrors, cloud, log)
- old_mirrors = cfg.get('apt_old_mirrors',
- {"primary": "archive.ubuntu.com/ubuntu",
- "security": "security.ubuntu.com/ubuntu"})
- rename_apt_lists(old_mirrors, mirrors)
-
- try:
- apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
- except Exception as e:
- log.warn("failed to proxy or apt config info: %s", e)
-
- # Process 'apt_sources'
- if 'apt_sources' in cfg:
- params = mirrors
- params['RELEASE'] = release
- params['MIRROR'] = mirror
-
- matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
- if matchcfg:
- matcher = re.compile(matchcfg).search
- else:
- def matcher(x):
- return False
-
- errors = add_apt_sources(cfg['apt_sources'], params,
- aa_repo_match=matcher)
- for e in errors:
- log.warn("Add source error: %s", ':'.join(e))
-
- dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
- if dconf_sel:
- log.debug("Setting debconf selections per cloud config")
- try:
- util.subp(('debconf-set-selections', '-'), dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections")
-
-
-def mirrorurl_to_apt_fileprefix(mirror):
- string = mirror
- # take off http:// or ftp://
- if string.endswith("/"):
- string = string[0:-1]
- pos = string.find("://")
- if pos >= 0:
- string = string[pos + 3:]
- string = string.replace("/", "_")
- return string
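
For example, the prefix computed above matches how apt itself names files
under /var/lib/apt/lists (the mirror URL is illustrative):

    assert (mirrorurl_to_apt_fileprefix("http://archive.ubuntu.com/ubuntu/")
            == "archive.ubuntu.com_ubuntu")
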
-
-
-def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
- for (name, omirror) in old_mirrors.items():
- nmirror = new_mirrors.get(name)
- if not nmirror:
- continue
- oprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(nmirror))
- if oprefix == nprefix:
- continue
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
-
-
-def get_release():
- (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
- return stdout.strip()
-
-
-def generate_sources_list(cfg, codename, mirrors, cloud, log):
- params = {'codename': codename}
- for k in mirrors:
- params[k] = mirrors[k]
-
- custtmpl = cfg.get('apt_custom_sources_list', None)
- if custtmpl is not None:
- templater.render_string_to_file(custtmpl,
- '/etc/apt/sources.list', params)
- return
-
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
- if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
- if not template_fn:
- log.warn("No template found, not rendering /etc/apt/sources.list")
- return
-
- templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
-
-
-def add_apt_key_raw(key):
- """
-    Actually add the key defined in the key argument
-    to the system.
- """
- try:
- util.subp(('apt-key', 'add', '-'), key)
- except util.ProcessExecutionError:
- raise ValueError('failed to add apt GPG Key to apt keyring')
-
-
-def add_apt_key(ent):
- """
-    Add a key to the system as defined in ent (if any);
-    supports raw keys or keyids.
-    For a keyid, the raw key is first fetched from a keyserver.
- """
- if 'keyid' in ent and 'key' not in ent:
- keyserver = "keyserver.ubuntu.com"
- if 'keyserver' in ent:
- keyserver = ent['keyserver']
- ent['key'] = gpg.get_key_by_id(ent['keyid'], keyserver)
-
- if 'key' in ent:
- add_apt_key_raw(ent['key'])
-
-
-def convert_to_new_format(srclist):
- """convert_to_new_format
- convert the old list based format to the new dict based one
- """
- srcdict = {}
- if isinstance(srclist, list):
- for srcent in srclist:
- if 'filename' not in srcent:
-                # the file would collide for multiple no-filename cases;
-                # for compatibility we need them all processed, so do not
-                # reuse the same dictionary key
- srcent['filename'] = "cloud_config_sources.list"
- key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
- else:
- # all with filename use that as key (matching new format)
- key = srcent['filename']
- srcdict[key] = srcent
- elif isinstance(srclist, dict):
- srcdict = srclist
- else:
- raise ValueError("unknown apt_sources format")
-
- return srcdict
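
A sketch of the conversion (the source values are made up): entries
carrying a 'filename' keep it as their key, while filename-less entries
are parked under a generated key so several of them can coexist:

    old = [
        {'source': 'ppa:smoser/ppa'},                    # no filename
        {'source': 'deb http://example.com xenial main',
         'filename': 'example.list'},
    ]
    new = convert_to_new_format(old)
    # new['example.list'] is the second entry; the first sits under a
    # randomized key derived from "cloud_config_sources.list".
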
-
-
-def add_apt_sources(srclist, template_params=None, aa_repo_match=None):
- """
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srclist'. When rendering template, also
- include the values in dictionary searchList
- """
- if template_params is None:
- template_params = {}
-
- if aa_repo_match is None:
- def _aa_repo_match(x):
- return False
- aa_repo_match = _aa_repo_match
-
- errorlist = []
- srcdict = convert_to_new_format(srclist)
-
- for filename in srcdict:
- ent = srcdict[filename]
- if 'filename' not in ent:
- ent['filename'] = filename
-
- # keys can be added without specifying a source
- try:
- add_apt_key(ent)
- except ValueError as detail:
- errorlist.append([ent, detail])
-
- if 'source' not in ent:
- errorlist.append(["", "missing source"])
- continue
- source = ent['source']
- source = templater.render_string(source, template_params)
-
- if not ent['filename'].startswith(os.path.sep):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
-
- if aa_repo_match(source):
- try:
- util.subp(["add-apt-repository", source])
- except util.ProcessExecutionError as e:
- errorlist.append([source,
- ("add-apt-repository failed. " + str(e))])
- continue
-
- try:
- contents = "%s\n" % (source)
- util.write_file(ent['filename'], contents, omode="ab")
- except Exception:
- errorlist.append([source,
- "failed write to file %s" % ent['filename']])
-
- return errorlist
-
-
-def find_apt_mirror_info(cloud, cfg):
- """find an apt_mirror given the cloud and cfg provided."""
-
- mirror = None
-
-    # this is the less-preferred way of specifying a mirror; preferred
-    # would be to use the distro's search or package_mirror.
- mirror = cfg.get("apt_mirror", None)
-
- search = cfg.get("apt_mirror_search", None)
- if not mirror and search:
- mirror = util.search_for_mirror(search)
-
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
- mydom = ""
- doms = []
-
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
-
- doms.extend((".localdomain", "",))
-
- mirror_list = []
- distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % (post))
-
- mirror = util.search_for_mirror(mirror_list)
-
- mirror_info = cloud.datasource.get_package_mirror_info()
-
-    # This is a bit strange: if mirror is set, then one of the legacy
-    # options above set it, but they do not cover security, so we need
-    # to get that from get_package_mirror_info.
- if mirror:
- mirror_info.update({'primary': mirror})
-
- return mirror_info
-
-
-def apply_apt_config(cfg, proxy_fname, config_fname):
- # Set up any apt proxy
- cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
- ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
-
- proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
- if len(proxies):
- util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
- elif os.path.isfile(proxy_fname):
- util.del_file(proxy_fname)
-
- if cfg.get('apt_config', None):
- util.write_file(config_fname, cfg.get('apt_config'))
- elif os.path.isfile(config_fname):
- util.del_file(config_fname)
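
A hedged example of the proxy handling above (paths and proxy URLs are
illustrative):

    cfg = {'apt_proxy': 'http://proxy:3128',
           'apt_ftp_proxy': 'ftp://proxy:2121'}
    apply_apt_config(cfg, '/tmp/95cloud-init-proxy',
                     '/tmp/94cloud-init-config')
    # /tmp/95cloud-init-proxy now contains, in cfgs order:
    #   Acquire::HTTP::Proxy "http://proxy:3128";
    #   Acquire::FTP::Proxy "ftp://proxy:2121";
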
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
deleted file mode 100644
index 40c32c84..00000000
--- a/cloudinit/config/cc_apt_pipelining.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-distros = ['ubuntu', 'debian']
-
-DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-
-APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
- 'Acquire::http::Pipeline-Depth "%s";\n')
-
-# Acquire::http::Pipeline-Depth can be a value
-# from 0 to 5 indicating how many outstanding requests APT should send.
-# A value of zero MUST be specified if the remote host does not properly linger
-# on TCP connections - otherwise data corruption will occur.
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
- apt_pipe_value_s = str(apt_pipe_value).lower().strip()
-
- if apt_pipe_value_s == "false":
- write_apt_snippet("0", log, DEFAULT_FILE)
- elif apt_pipe_value_s in ("none", "unchanged", "os"):
- return
- elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
- write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
- else:
- log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
-
-
-def write_apt_snippet(setting, log, f_name):
- """Writes f_name with apt pipeline depth 'setting'."""
-
- file_contents = APT_PIPE_TPL % (setting)
- util.write_file(f_name, file_contents)
- log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
deleted file mode 100644
index b763a3c3..00000000
--- a/cloudinit/config/cc_bootcmd.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
-
- if "bootcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'bootcmd' key in configuration"), name)
- return
-
- with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
- try:
- content = util.shellify(cfg["bootcmd"])
- tmpf.write(util.encode_text(content))
- tmpf.flush()
- except Exception:
- util.logexc(log, "Failed to shellify bootcmd")
- raise
-
- try:
- env = os.environ.copy()
- iid = cloud.get_instance_id()
- if iid:
- env['INSTANCE_ID'] = str(iid)
- cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
- except Exception:
- util.logexc(log, "Failed to run bootcmd module %s", name)
- raise
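
The handler above consumes a 'bootcmd' list from cloud-config; string
entries are emitted into the script verbatim while list entries are
shell-quoted by util.shellify(). A sketch of the parsed input (values
illustrative):

    cfg = {'bootcmd': ['echo "booting instance $INSTANCE_ID"',
                       ['cloud-init-per', 'once', 'mymkfs',
                        'mkfs', '/dev/vdb']]}
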
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
deleted file mode 100644
index ef0ce7ab..00000000
--- a/cloudinit/config/cc_byobu.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name other than 'distros',
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- value = args[0]
- else:
- value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
-
- if not value:
- log.debug("Skipping module named %s, no 'byobu' values found", name)
- return
-
- if value == "user" or value == "system":
- value = "enable-%s" % value
-
- valid = ("enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable")
- if value not in valid:
- log.warn("Unknown value %s for byobu_by_default", value)
-
- mod_user = value.endswith("-user")
- mod_sys = value.endswith("-system")
- if value.startswith("enable"):
- bl_inst = "install"
- dc_val = "byobu byobu/launch-by-default boolean true"
- mod_sys = True
- else:
- if value == "disable":
- mod_user = True
- mod_sys = True
- bl_inst = "uninstall"
- dc_val = "byobu byobu/launch-by-default boolean false"
-
- shcmd = ""
- if mod_user:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- if not user:
- log.warn(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
- else:
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
- shcmd += " || X=$(($X+1)); "
- if mod_sys:
- shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
- shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
- shcmd += " || X=$(($X+1)); "
-
- if len(shcmd):
- cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
- log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
deleted file mode 100644
index 8248b020..00000000
--- a/cloudinit/config/cc_ca_certs.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Mike Milner <mike.milner@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-CA_CERT_PATH = "/usr/share/ca-certificates/"
-CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
-CA_CERT_CONFIG = "/etc/ca-certificates.conf"
-CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
-CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-
-distros = ['ubuntu', 'debian']
-
-
-def update_ca_certs():
- """
- Updates the CA certificate cache on the current machine.
- """
- util.subp(["update-ca-certificates"], capture=False)
-
-
-def add_ca_certs(certs):
- """
- Adds certificates to the system. To actually apply the new certificates
- you must also call L{update_ca_certs}.
-
- @param certs: A list of certificate strings.
- """
- if certs:
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
-
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
-        # cause subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([l for l in orig.splitlines()
- if l != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
- util.write_file(CA_CERT_CONFIG, out, omode="wb")
-
-
-def remove_default_ca_certs():
- """
- Removes all default trusted CA certificates from the system. To actually
- apply the change you must also call L{update_ca_certs}.
- """
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0o644)
- debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
-
-
-def handle(name, cfg, _cloud, log, _args):
- """
- Call to handle ca-cert sections in cloud-config file.
-
- @param name: The module name "ca-cert" from cloud.cfg
- @param cfg: A nested dict containing the entire cloud config contents.
- @param cloud: The L{CloudInit} object in use.
- @param log: Pre-initialized Python logger object to use for logging.
- @param args: Any module arguments from cloud.cfg
- """
- # If there isn't a ca-certs section in the configuration don't do anything
- if "ca-certs" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'ca-certs' key in configuration"), name)
- return
-
- ca_cert_cfg = cfg['ca-certs']
-
- # If there is a remove-defaults option set to true, remove the system
- # default trusted CA certs first.
- if ca_cert_cfg.get("remove-defaults", False):
- log.debug("Removing default certificates")
- remove_default_ca_certs()
-
- # If we are given any new trusted CA certs to add, add them.
- if "trusted" in ca_cert_cfg:
- trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
- if trusted_certs:
- log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
-
- # Update the system with the new cert configuration.
- log.debug("Updating certificates")
- update_ca_certs()
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
deleted file mode 100644
index 4c28be6a..00000000
--- a/cloudinit/config/cc_chef.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Avishai Ish-Shalom <avishai@fewbytes.com>
-# Author: Mike Moulton <mike@meltmedia.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** module that configures, starts and installs chef.
-
-**Description:** This module enables chef to be installed (from packages or
-from gems, or from omnibus). Before this occurs chef configurations are
-written to disk (validation.pem, client.pem, firstboot.json, client.rb),
-and needed chef folders/directories are created (/etc/chef and /var/log/chef
-and so-on). Then, once installation proceeds correctly, chef is started if
-so configured (in daemon mode or in non-daemon mode). Once that has
-finished (if run in non-daemon mode this is when chef finishes converging;
-if run in daemon mode no further actions are possible, since chef will
-have forked into its own process), a post-run function can perform
-finishing activities (such as removing the validation pem file).
-
-It can be configured with the following option structure::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
-    validation_cert: (optional string to be written to file validation_key)
-                     special value 'system' means use the existing file
- validation_key: (optional the path for validation_cert. default
- /etc/chef/validation.pem)
- firstboot_path: (path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to /etc/chef/firstboot.json)
-    exec: boolean to run or not run chef (defaults to false, unless
-                                          a gem install is requested,
-                                          in which case this defaults
-                                          to true)
-
- chef.rb template keys (if falsey, then will be skipped and not
- written to /etc/chef/client.rb)
-
- chef:
- client_key:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
-
-import itertools
-import json
-import os
-
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
-import six
-
-RUBY_VERSION_DEFAULT = "1.8"
-
-CHEF_DIRS = tuple([
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
-])
-REQUIRED_CHEF_DIRS = tuple([
- '/etc/chef',
-])
-
-# Used if fetching chef from an omnibus style package
-OMNIBUS_URL = "https://www.getchef.com/chef/install.sh"
-OMNIBUS_URL_RETRIES = 5
-
-CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
-CHEF_FB_PATH = '/etc/chef/firstboot.json'
-CHEF_RB_TPL_DEFAULTS = {
- # These are ruby symbols...
- 'ssl_verify_mode': ':verify_none',
- 'log_level': ':info',
- # These are not symbols...
- 'log_location': '/var/log/chef/client.log',
- 'validation_key': CHEF_VALIDATION_PEM_PATH,
- 'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
- 'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
- 'show_time': True,
-}
-CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
-CHEF_RB_TPL_PATH_KEYS = frozenset([
- 'log_location',
- 'validation_key',
- 'client_key',
- 'file_cache_path',
- 'json_attribs',
- 'pid_file',
-])
-CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend([
- 'server_url',
- 'node_name',
- 'environment',
- 'validation_name',
-])
-CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
-CHEF_RB_PATH = '/etc/chef/client.rb'
-CHEF_EXEC_PATH = '/usr/bin/chef-client'
-CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-
-
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
-
-
-def post_run_chef(chef_cfg, log):
- delete_pem = util.get_cfg_option_bool(chef_cfg,
- 'delete_validation_post_exec',
- default=False)
- if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
- os.unlink(CHEF_VALIDATION_PEM_PATH)
-
-
-def get_template_params(iid, chef_cfg, log):
- params = CHEF_RB_TPL_DEFAULTS.copy()
-    # Allow users to overwrite any of the keys they want (if they so choose);
-    # when a value is None, the value will be set to None and no boolean
-    # or string version will be populated...
- for (k, v) in chef_cfg.items():
- if k not in CHEF_RB_TPL_KEYS:
- log.debug("Skipping unknown chef template key '%s'", k)
- continue
- if v is None:
- params[k] = None
- else:
- # This will make the value a boolean or string...
- if k in CHEF_RB_TPL_BOOL_KEYS:
- params[k] = util.get_cfg_option_bool(chef_cfg, k)
- else:
- params[k] = util.get_cfg_option_str(chef_cfg, k)
- # These ones are overwritten to be exact values...
- params.update({
- 'generated_by': util.make_header(),
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- default=iid),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- default='_default'),
- # These two are mandatory...
- 'server_url': chef_cfg['server_url'],
- 'validation_name': chef_cfg['validation_name'],
- })
- return params
-
-
-def handle(name, cfg, cloud, log, _args):
- """Handler method activated by cloud-init."""
-
- # If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
- return
- chef_cfg = cfg['chef']
-
- # Ensure the chef directories we use exist
- chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
- if not chef_dirs:
- chef_dirs = list(CHEF_DIRS)
- for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
- util.ensure_dir(d)
-
- vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
- vcert = chef_cfg.get('validation_cert')
- # special value 'system' means do not overwrite the file
- # but still render the template to contain 'validation_key'
- if vcert:
- if vcert != "system":
- util.write_file(vkey_path, vcert)
- elif not os.path.isfile(vkey_path):
- log.warn("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
-
- # Create the chef config from template
- template_fn = cloud.get_template_filename('chef_client.rb')
- if template_fn:
- iid = str(cloud.datasource.get_instance_id())
- params = get_template_params(iid, chef_cfg, log)
-        # Do a best-effort attempt to ensure that the template values that
-        # are associated with paths have their parent directory created
-        # before they are used by the chef-client itself.
- param_paths = set()
- for (k, v) in params.items():
- if k in CHEF_RB_TPL_PATH_KEYS and v:
- param_paths.add(os.path.dirname(v))
- util.ensure_dirs(param_paths)
- templater.render_to_file(template_fn, CHEF_RB_PATH, params)
- else:
- log.warn("No template found, not rendering to %s",
- CHEF_RB_PATH)
-
- # Set the firstboot json
- fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
- default=CHEF_FB_PATH)
- if not fb_filename:
- log.info("First boot path empty, not writing first boot json file")
- else:
- initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
- for k in list(initial_attributes.keys()):
- initial_json[k] = initial_attributes[k]
- util.write_file(fb_filename, json.dumps(initial_json))
-
-    # Try to install chef, if it's not already installed...
- force_install = util.get_cfg_option_bool(chef_cfg,
- 'force_install', default=False)
- if not is_installed() or force_install:
- run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
- else:
- run = False
- if run:
- run_chef(chef_cfg, log)
- post_run_chef(chef_cfg, log)
-
-
-def run_chef(chef_cfg, log):
- log.debug('Running chef-client')
- cmd = [CHEF_EXEC_PATH]
- if 'exec_arguments' in chef_cfg:
- cmd_args = chef_cfg['exec_arguments']
- if isinstance(cmd_args, (list, tuple)):
- cmd.extend(cmd_args)
- elif isinstance(cmd_args, six.string_types):
- cmd.append(cmd_args)
- else:
- log.warn("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
- cmd.extend(CHEF_EXEC_DEF_ARGS)
- else:
- cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
-
-
-def install_chef(cloud, chef_cfg, log):
- # If chef is not installed, we install chef based on 'install_type'
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
- if install_type == "gems":
- # This will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- RUBY_VERSION_DEFAULT)
- install_chef_from_gems(ruby_version, chef_version, cloud.distro)
-    # Retain backwards compat, by preferring True instead of False
-    # when not provided/overridden...
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
- elif install_type == 'packages':
- # This will install and run the chef-client from packages
- cloud.distro.install_packages(('chef',))
- elif install_type == 'omnibus':
-        # This will install as an omnibus unified package
- url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
- retries = max(0, util.get_cfg_option_int(chef_cfg,
- "omnibus_url_retries",
- default=OMNIBUS_URL_RETRIES))
- content = url_helper.readurl(url=url, retries=retries)
- with util.tempdir() as tmpd:
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, content, mode=0o700)
- util.subp([tmpf], capture=False)
- else:
- log.warn("Unknown chef install type '%s'", install_type)
- run = False
- return run
-
-
-def get_ruby_packages(version):
- # return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
- if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
- return pkgs
-
-
-def install_chef_from_gems(ruby_version, chef_version, distro):
- distro.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
- if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
- else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'], capture=False)
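
The install/run decision above is easy to misread: only the 'gems' install type defaults 'exec' to true, while 'packages' and 'omnibus' leave it false. A minimal standalone sketch of that decision table, with plain dicts standing in for the parsed cloud-config and none of the cloudinit helpers used:

    def should_run_chef(chef_cfg):
        # Mirrors install_chef()'s handling of the 'exec' default per type.
        install_type = chef_cfg.get('install_type', 'packages')
        if install_type == 'gems':
            return bool(chef_cfg.get('exec', True))
        if install_type in ('packages', 'omnibus'):
            return bool(chef_cfg.get('exec', False))
        return False  # unknown install type: never run

    assert should_run_chef({'install_type': 'gems'}) is True
    assert should_run_chef({'install_type': 'packages'}) is False
    assert should_run_chef({'install_type': 'omnibus', 'exec': True}) is True
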
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
deleted file mode 100644
index bdc32fe6..00000000
--- a/cloudinit/config/cc_debug.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Yahoo! Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** helper to debug cloud-init *internal* data structures.
-
-**Description:** This module dumps various internal information that
-cloud-init sources provide, either to a file or to the output
-console/log location that this cloud-init instance has been configured
-with.
-
-It can be configured with the following option structure::
-
- debug:
- verbose: (defaulting to true)
- output: (location to write output, defaulting to console + log)
-
-.. note::
-
- Log configurations are not output.
-"""
-
-import copy
-
-from six import StringIO
-
-from cloudinit import type_utils
-from cloudinit import util
-
-SKIP_KEYS = frozenset(['log_cfgs'])
-
-
-def _make_header(text):
- header = StringIO()
- header.write("-" * 80)
- header.write("\n")
- header.write(text.center(80, ' '))
- header.write("\n")
- header.write("-" * 80)
- header.write("\n")
- return header.getvalue()
-
-
-def _dumps(obj):
- text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False)
- return text.rstrip()
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
- if args:
- # if args are provided (from cmdline) then explicitly set verbose
- out_file = args[0]
- verbose = True
- else:
- out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
-
- if not verbose:
- log.debug(("Skipping module named %s,"
- " verbose printing disabled"), name)
- return
- # Clean out some keys that we just don't care about showing...
- dump_cfg = copy.deepcopy(cfg)
- for k in SKIP_KEYS:
- dump_cfg.pop(k, None)
- all_keys = list(dump_cfg)
- for k in all_keys:
- if k.startswith("_"):
- dump_cfg.pop(k, None)
- # Now dump it...
- to_print = StringIO()
- to_print.write(_make_header("Config"))
- to_print.write(_dumps(dump_cfg))
- to_print.write("\n")
- to_print.write(_make_header("MetaData"))
- to_print.write(_dumps(cloud.datasource.metadata))
- to_print.write("\n")
- to_print.write(_make_header("Misc"))
- to_print.write("Datasource: %s\n" %
- (type_utils.obj_name(cloud.datasource)))
- to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
- to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
- to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
- to_print.write("Locale: %s\n" % (cloud.get_locale()))
- to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
- contents = to_print.getvalue()
- content_to_file = []
- for line in contents.splitlines():
- line = "ci-info: %s\n" % (line)
- content_to_file.append(line)
- if out_file:
- util.write_file(out_file, "".join(content_to_file), 0o644, "w")
- else:
- util.multi_log("".join(content_to_file), console=True, stderr=False)
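
The output formatting above is the only real logic in this module: an 80-column banner per section, then every line prefixed with "ci-info: ". A self-contained sketch of just that formatting (the sample section content is made up):

    from io import StringIO

    def make_header(text):
        # 80-column banner, matching _make_header() above
        return "-" * 80 + "\n" + text.center(80, ' ') + "\n" + "-" * 80 + "\n"

    buf = StringIO()
    buf.write(make_header("Config"))
    buf.write("locale: en_US.UTF-8\n")
    print("".join("ci-info: %s\n" % line
                  for line in buf.getvalue().splitlines()))
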
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
deleted file mode 100644
index 3fd2c20f..00000000
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
-
-
-def handle(name, cfg, _cloud, log, _args):
- disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
- if disabled:
- util.subp(REJECT_CMD, capture=False)
- else:
- log.debug(("Skipping module named %s,"
- " disabling the ec2 route not enabled"), name)
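
The whole module reduces to one guarded command. A rough standard-library equivalent, where subprocess.check_call stands in for util.subp and the route command needs root to actually succeed:

    import subprocess

    REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']

    def disable_ec2_metadata(cfg):
        if cfg.get('disable_ec2_metadata', False):
            subprocess.check_call(REJECT_CMD)  # raises on failure
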
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
deleted file mode 100644
index b642f1f8..00000000
--- a/cloudinit/config/cc_disk_setup.py
+++ /dev/null
@@ -1,863 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-import logging
-import os
-import shlex
-
-frequency = PER_INSTANCE
-
-# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
-
-LOG = logging.getLogger(__name__)
-
-
-def handle(_name, cfg, cloud, log, _args):
- """
- See doc/examples/cloud-config_disk-setup.txt for documentation on the
- format.
- """
- disk_setup = cfg.get("disk_setup")
- if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
- log.debug("Partitioning disks: %s", str(disk_setup))
- for disk, definition in disk_setup.items():
- if not isinstance(definition, dict):
- log.warn("Invalid disk definition for %s" % disk)
- continue
-
- try:
- log.debug("Creating new partition table/disk")
- util.log_time(logfunc=LOG.debug,
- msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, definition))
- except Exception as e:
- util.logexc(LOG, "Failed partitioning operation\n%s" % e)
-
- fs_setup = cfg.get("fs_setup")
- if isinstance(fs_setup, list):
- log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
- for definition in fs_setup:
- if not isinstance(definition, dict):
- log.warn("Invalid file system definition: %s" % definition)
- continue
-
- try:
- log.debug("Creating new filesystem.")
- device = definition.get('device')
- util.log_time(logfunc=LOG.debug,
- msg="Creating fs for %s" % device,
- func=mkfs, args=(definition,))
- except Exception as e:
- util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
-
-
-def update_disk_setup_devices(disk_setup, tformer):
-    # update the 'disk_setup' dictionary anywhere a device may occur,
-    # replacing it with the response from 'tformer'
- for origname in disk_setup.keys():
- transformed = tformer(origname)
- if transformed is None or transformed == origname:
- continue
- if transformed in disk_setup:
- LOG.info("Replacing %s in disk_setup for translation of %s",
- origname, transformed)
- del disk_setup[transformed]
-
- disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
- del disk_setup[origname]
- LOG.debug("updated disk_setup device entry '%s' to '%s'",
- origname, transformed)
-
-
-def update_fs_setup_devices(disk_setup, tformer):
-    # update the 'fs_setup' dictionary anywhere a device may occur,
-    # replacing it with the response from 'tformer'
- for definition in disk_setup:
- if not isinstance(definition, dict):
- LOG.warn("entry in disk_setup not a dict: %s", definition)
- continue
-
- origname = definition.get('device')
-
- if origname is None:
- continue
-
- (dev, part) = util.expand_dotted_devname(origname)
-
- tformed = tformer(dev)
- if tformed is not None:
- dev = tformed
- LOG.debug("%s is mapped to disk=%s part=%s",
- origname, tformed, part)
- definition['_origname'] = origname
- definition['device'] = tformed
-
- if part and 'partition' in definition:
- definition['_partition'] = definition['partition']
- definition['partition'] = part
-
-
-def value_splitter(values, start=None):
- """
-    Returns the key/value pairs from a string of output
-    like: FOO='BAR' HOME='127.0.0.1'
- """
- _values = shlex.split(values)
- if start:
- _values = _values[start:]
-
- for key, value in [x.split('=') for x in _values]:
- yield key, value
-
-
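value_splitter() leans on shlex to split lsblk's --pairs output, where values are quoted. Demonstrated against a representative line (the sample output is made up); note that splitting on the first '=' only, as below, also survives values that contain '=':

    import shlex

    sample = 'NAME="sda1" TYPE="part" FSTYPE="ext4" LABEL="cloudimg-rootfs"'
    pairs = dict(item.split('=', 1) for item in shlex.split(sample))
    print(pairs)  # shlex strips the quotes: {'NAME': 'sda1', 'TYPE': 'part', ...}
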
-def enumerate_disk(device, nodeps=False):
- """
-    Enumerate a device and its child devices.
-
- Parameters:
- device: the kernel device name
- nodeps <BOOL>: don't enumerate children devices
-
- Return a dict describing the disk:
-        type: the entry type, i.e. disk or part
- fstype: the filesystem type, if it exists
- label: file system label, if it exists
- name: the device name, i.e. sda
- """
-
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
- device]
-
- if nodeps:
- lsblk_cmd.append('--nodeps')
-
- info = None
- try:
- info, _err = util.subp(lsblk_cmd)
- except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
-
- parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
-
- for part in parts:
- d = {
- 'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
- }
-
- for key, value in value_splitter(part):
- d[key.lower()] = value
-
- yield d
-
-
-def device_type(device):
- """
- Return the device type of the device by calling lsblk.
- """
-
- for d in enumerate_disk(device, nodeps=True):
- if "type" in d:
- return d["type"].lower()
- return None
-
-
-def is_device_valid(name, partition=False):
- """
- Check if the device is a valid device.
- """
- d_type = ""
- try:
- d_type = device_type(name)
- except Exception:
- LOG.warn("Query against device %s failed" % name)
- return False
-
- if partition and d_type == 'part':
- return True
- elif not partition and d_type == 'disk':
- return True
- return False
-
-
-def check_fs(device):
- """
- Check if the device has a filesystem on it
-
- Output of blkid is generally something like:
- /dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
-
-    Return values are label, fs_type, uuid
- """
- out, label, fs_type, uuid = None, None, None, None
-
- blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
- try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
- except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
-
- if out:
- if len(out.splitlines()) == 1:
- for key, value in value_splitter(out, start=1):
- if key.lower() == 'label':
- label = value
- elif key.lower() == 'type':
- fs_type = value
- elif key.lower() == 'uuid':
- uuid = value
-
- return label, fs_type, uuid
-
-
-def is_filesystem(device):
- """
- Returns true if the device has a file system.
- """
- _, fs_type, _ = check_fs(device)
- return fs_type
-
-
-def find_device_node(device, fs_type=None, label=None, valid_targets=None,
- label_match=True, replace_fs=None):
- """
-    Find a device that either matches the spec, or the first suitable one.
-
-    The return value is (<device>, <bool>) where the device is the
-    device to use and the bool is whether the device matches the
-    fs_type and label.
-
- Note: This works with GPT partition tables!
- """
- # label of None is same as no label
- if label is None:
- label = ""
-
- if not valid_targets:
- valid_targets = ['disk', 'part']
-
- raw_device_used = False
- for d in enumerate_disk(device):
-
- if d['fstype'] == replace_fs and label_match is False:
- # We found a device where we want to replace the FS
- return ('/dev/%s' % d['name'], False)
-
- if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
- # If we find a matching device, we return that
- return ('/dev/%s' % d['name'], True)
-
- if d['type'] in valid_targets:
-
- if d['type'] != 'disk' or d['fstype']:
- raw_device_used = True
-
- if d['type'] == 'disk':
-                # Skip the raw disk, it's the default
- pass
-
- elif not d['fstype']:
- return ('/dev/%s' % d['name'], False)
-
- if not raw_device_used:
- return (device, False)
-
- LOG.warn("Failed to find device during available device search.")
- return (None, False)
-
-
-def is_disk_used(device):
- """
-    Check if the device is currently used. Returns true if the device
-    has either a file system or a partition entry.
- """
-
-    # If the child count is higher than 1, then there are child nodes
- # such as partition or device mapper nodes
- if len(list(enumerate_disk(device))) > 1:
- return True
-
-    # If we see a file system, then it's used
- _, check_fstype, _ = check_fs(device)
- if check_fstype:
- return True
-
- return False
-
-
-def get_dyn_func(*args):
- """
- Call the appropriate function.
-
-    The first value is the template for the function name
-    The second value is the template replacement
-    The remaining values are passed to the function
-
- For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
- would call "foo_bar" with args of 1, 2, 3
- """
- if len(args) < 2:
-        raise Exception("Unable to determine dynamic function name")
-
- func_name = (args[0] % args[1])
- func_args = args[2:]
-
- try:
- if func_args:
- return globals()[func_name](*func_args)
- else:
- return globals()[func_name]
-
- except KeyError:
- raise Exception("No such function %s to call!" % func_name)
-
-
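get_dyn_func() resolves names like 'get_%s_hdd_size' through globals() at call time, so adding a new table type means adding a function rather than editing a dispatch table. The same pattern in isolation, with two toy targets:

    def get_mbr_size(device):
        return 'mbr:%s' % device

    def get_gpt_size(device):
        return 'gpt:%s' % device

    def get_dyn(template, kind, *args):
        try:
            func = globals()[template % kind]  # e.g. 'get_gpt_size'
        except KeyError:
            raise Exception("No such function %s to call!" % (template % kind))
        return func(*args)

    print(get_dyn('get_%s_size', 'gpt', '/dev/vdb'))  # -> gpt:/dev/vdb
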
-def get_mbr_hdd_size(device):
- size_cmd = [SFDISK_CMD, '--show-size', device]
- size = None
- try:
- size, _err = util.subp(size_cmd)
- except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
-
- return int(size.strip())
-
-
-def get_gpt_hdd_size(device):
- out, _ = util.subp([SGDISK_CMD, '-p', device])
- return out.splitlines()[0].split()[2]
-
-
-def get_hdd_size(table_type, device):
- """
- Returns the hard disk size.
- This works with any disk type, including GPT.
- """
- return get_dyn_func("get_%s_hdd_size", table_type, device)
-
-
-def check_partition_mbr_layout(device, layout):
- """
- Returns true if the partition layout matches the one on the disk
-
- Layout should be a list of values. At this time, this only
- verifies that the number of partitions and their labels is correct.
- """
-
- read_parttbl(device)
- prt_cmd = [SFDISK_CMD, "-l", device]
- try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
- except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
-
- found_layout = []
- for line in out.splitlines():
- _line = line.split()
- if len(_line) == 0:
- continue
-
- if device in _line[0]:
- # We don't understand extended partitions yet
- if _line[-1].lower() in ['extended', 'empty']:
- continue
-
- # Find the partition types
- type_label = None
- for x in sorted(range(1, len(_line)), reverse=True):
- if _line[x].isdigit() and _line[x] != '/':
- type_label = _line[x]
- break
-
- found_layout.append(type_label)
- return found_layout
-
-
-def check_partition_gpt_layout(device, layout):
- prt_cmd = [SGDISK_CMD, '-p', device]
- try:
- out, _err = util.subp(prt_cmd)
- except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
-
- out_lines = iter(out.splitlines())
- # Skip header
- for line in out_lines:
- if line.strip().startswith('Number'):
- break
-
- return [line.strip().split()[-1] for line in out_lines]
-
-
-def check_partition_layout(table_type, device, layout):
- """
-    See if the partition layout matches.
-
-    This is a future-proofing function. In order
-    to add support for other disk layout schemes, add a
-    function called check_partition_%s_layout
- """
- found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout)
-
- if isinstance(layout, bool):
-        # if we are using auto partitioning, or "True", be happy
-        # if a single partition exists
- if layout and len(found_layout) >= 1:
- return True
- return False
-
- else:
- if len(found_layout) != len(layout):
- return False
- else:
- # This just makes sure that the number of requested
- # partitions and the type labels are right
- for x in range(1, len(layout) + 1):
- if isinstance(layout[x - 1], tuple):
-                    _, part_type = layout[x - 1]
-                    if int(found_layout[x - 1]) != int(part_type):
- return False
- return True
-
- return False
-
-
-def get_partition_mbr_layout(size, layout):
- """
- Calculate the layout of the partition table. Partition sizes
- are defined as percentage values or a tuple of percentage and
- partition type.
-
- For example:
-    [ 33, [66, 82] ]
-
-    Defines the first partition to be a size of 1/3 the disk,
-    while the remaining 2/3 will be of type Linux Swap (82).
- """
-
- if not isinstance(layout, list) and isinstance(layout, bool):
- # Create a single partition
- return "0,"
-
- if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
- raise Exception("Partition layout is invalid")
-
- last_part_num = len(layout)
- if last_part_num > 4:
-        raise Exception("Only simple partitioning is allowed.")
-
- part_definition = []
- part_num = 0
- for part in layout:
- part_type = 83 # Default to Linux
- percent = part
- part_num += 1
-
- if isinstance(part, list):
- if len(part) != 2:
- raise Exception("Partition was incorrectly defined: %s" % part)
- percent, part_type = part
-
- part_size = int((float(size) * (float(percent) / 100)) / 1024)
-
- if part_num == last_part_num:
- part_definition.append(",,%s" % part_type)
- else:
- part_definition.append(",%s,%s" % (part_size, part_type))
-
- sfdisk_definition = "\n".join(part_definition)
- if len(part_definition) > 4:
- raise Exception("Calculated partition definition is too big\n%s" %
- sfdisk_definition)
-
- return sfdisk_definition
-
-
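Working the docstring example through by hand shows the sfdisk input this produces. A sketch assuming size is in 1 KiB blocks (as sfdisk --show-size reports), so that dividing by 1024 yields the MiB units the later 'sfdisk -uM' invocation expects:

    size = 1048576            # a 1 GiB disk, expressed in 1 KiB blocks
    layout = [33, [66, 82]]   # 33% Linux (type 83), 66% swap (type 82)

    parts = []
    for num, part in enumerate(layout, 1):
        percent, part_type = (part if isinstance(part, list) else (part, 83))
        mib = int((float(size) * (float(percent) / 100)) / 1024)
        if num == len(layout):
            parts.append(",,%s" % part_type)      # last one takes the rest
        else:
            parts.append(",%s,%s" % (mib, part_type))

    print("\n".join(parts))   # -> ",337,83" then ",,82"
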
-def get_partition_gpt_layout(size, layout):
- if isinstance(layout, bool):
- return [(None, [0, 0])]
-
- partition_specs = []
- for partition in layout:
- if isinstance(partition, list):
- if len(partition) != 2:
- raise Exception(
- "Partition was incorrectly defined: %s" % partition)
- percent, partition_type = partition
- else:
- percent = partition
- partition_type = None
-
- part_size = int(float(size) * (float(percent) / 100))
- partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
-
- # The last partition should use up all remaining space
- partition_specs[-1][-1][-1] = 0
- return partition_specs
-
-
-def purge_disk_ptable(device):
-    # wipe the first and last megabyte of a disk (or file);
-    # gpt stores a partition table both at the front and at the end
-    null = b'\0'
- start_len = 1024 * 1024
- end_len = 1024 * 1024
- with open(device, "rb+") as fp:
- fp.write(null * (start_len))
- fp.seek(-end_len, os.SEEK_END)
- fp.write(null * end_len)
- fp.flush()
-
- read_parttbl(device)
-
-
-def purge_disk(device):
- """
-    Remove partition table entries
- """
-
- # wipe any file systems first
- for d in enumerate_disk(device):
- if d['type'] not in ["disk", "crypt"]:
- wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
- try:
- LOG.info("Purging filesystem on /dev/%s" % d['name'])
- util.subp(wipefs_cmd)
- except Exception:
- raise Exception("Failed FS purge of /dev/%s" % d['name'])
-
- purge_disk_ptable(device)
-
-
-def get_partition_layout(table_type, size, layout):
- """
- Call the appropriate function for creating the table
- definition. Returns the table definition
-
- This is a future proofing function. To add support for
- other layouts, simply add a "get_partition_%s_layout"
- function.
- """
- return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
-
-
-def read_parttbl(device):
- """
-    Re-read the partition table, running 'udevadm settle' before and
-    after 'blockdev --rereadpt' to let udev catch up.
- """
- blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
- udev_cmd = [UDEVADM_CMD, 'settle']
- try:
- util.subp(udev_cmd)
- util.subp(blkdev_cmd)
- util.subp(udev_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed reading the partition table %s" % e)
-
-
-def exec_mkpart_mbr(device, layout):
- """
-    Break out the mbr partitioning to allow for future partition
-    types, i.e. gpt
- """
- # Create the partitions
- prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
- try:
- util.subp(prt_cmd, data="%s\n" % layout)
- except Exception as e:
- raise Exception("Failed to partition device %s\n%s" % (device, e))
-
- read_parttbl(device)
-
-
-def exec_mkpart_gpt(device, layout):
- try:
- util.subp([SGDISK_CMD, '-Z', device])
- for index, (partition_type, (start, end)) in enumerate(layout):
- index += 1
- util.subp([SGDISK_CMD,
- '-n', '{}:{}:{}'.format(index, start, end), device])
- if partition_type is not None:
- util.subp(
- [SGDISK_CMD,
- '-t', '{}:{}'.format(index, partition_type), device])
- except Exception:
- LOG.warn("Failed to partition device %s" % device)
- raise
-
-
-def exec_mkpart(table_type, device, layout):
- """
-    Fetches the function for creating the table type.
-    This allows dynamically finding which function to call.
-
-    Parameters:
- table_type: type of partition table to use
- device: the device to work on
- layout: layout definition specific to partition table
- """
- return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-
-
-def mkpart(device, definition):
- """
- Creates the partition table.
-
- Parameters:
- definition: dictionary describing how to create the partition.
-
- The following are supported values in the dict:
- overwrite: Should the partition table be created regardless
-                   of any pre-existing data?
- layout: the layout of the partition table
- table_type: Which partition table to use, defaults to MBR
- device: the device to work on.
- """
- # ensure that we get a real device rather than a symbolic link
- device = os.path.realpath(device)
-
- LOG.debug("Checking values for %s definition" % device)
- overwrite = definition.get('overwrite', False)
- layout = definition.get('layout', False)
- table_type = definition.get('table_type', 'mbr')
-
- # Check if the default device is a partition or not
- LOG.debug("Checking against default devices")
-
- if (isinstance(layout, bool) and not layout) or not layout:
- LOG.debug("Device is not to be partitioned, skipping")
- return # Device is not to be partitioned
-
- # This prevents you from overwriting the device
- LOG.debug("Checking if device %s is a valid device", device)
- if not is_device_valid(device):
-        raise Exception("Device %s is not a disk device!" % device)
-
- # Remove the partition table entries
- if isinstance(layout, str) and layout.lower() == "remove":
- LOG.debug("Instructed to remove partition table entries")
- purge_disk(device)
- return
-
- LOG.debug("Checking if device layout matches")
- if check_partition_layout(table_type, device, layout):
- LOG.debug("Device partitioning layout matches")
- return True
-
- LOG.debug("Checking if device is safe to partition")
- if not overwrite and (is_disk_used(device) or is_filesystem(device)):
- LOG.debug("Skipping partitioning on configured device %s" % device)
- return
-
- LOG.debug("Checking for device size")
- device_size = get_hdd_size(table_type, device)
-
- LOG.debug("Calculating partition layout")
- part_definition = get_partition_layout(table_type, device_size, layout)
- LOG.debug(" Layout is: %s" % part_definition)
-
- LOG.debug("Creating partition table on %s", device)
- exec_mkpart(table_type, device, part_definition)
-
- LOG.debug("Partition table created for %s", device)
-
-
-def lookup_force_flag(fs):
- """
-    A force flag might be -F or -f depending on the filesystem; look it up.
- """
- flags = {
- 'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- }
-
- if 'ext' in fs.lower():
- fs = 'ext'
-
- if fs.lower() in flags:
- return flags[fs]
-
- LOG.warn("Force flag for %s is unknown." % fs)
- return ''
-
-
-def mkfs(fs_cfg):
- """
- Create a file system on the device.
-
- label: defines the label to use on the device
- fs_cfg: defines how the filesystem is to look
- The following values are required generally:
- device: which device or cloud defined default_device
- filesystem: which file system type
- overwrite: indiscriminately create the file system
- partition: when device does not define a partition,
- setting this to a number will mean
- device + partition. When set to 'auto', the
- first free device or the first device which
- matches both label and type will be used.
-
- 'any' means the first filesystem that matches
- on the device.
-
- When 'cmd' is provided then no other parameter is required.
- """
- label = fs_cfg.get('label')
- device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition', 'any'))
- fs_type = fs_cfg.get('filesystem')
- fs_cmd = fs_cfg.get('cmd', [])
- fs_opts = fs_cfg.get('extra_opts', [])
- fs_replace = fs_cfg.get('replace_fs', False)
- overwrite = fs_cfg.get('overwrite', False)
-
- # ensure that we get a real device rather than a symbolic link
- device = os.path.realpath(device)
-
- # This allows you to define the default ephemeral or swap
- LOG.debug("Checking %s against default devices", device)
-
- if not partition or partition.isdigit():
- # Handle manual definition of partition
- if partition.isdigit():
- device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s",
- partition, device)
-
- # Check to see if the fs already exists
- LOG.debug("Checking device %s", device)
- check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
-
- if check_label == label and check_fstype == fs_type:
- LOG.debug("Existing file system found at %s", device)
-
- if not overwrite:
- LOG.debug("Device %s has required file system", device)
- return
- else:
- LOG.warn("Destroying filesystem on %s", device)
-
- else:
-            LOG.debug("Device %s is cleared for formatting", device)
-
- elif partition and str(partition).lower() in ('auto', 'any'):
- # For auto devices, we match if the filesystem does exist
- odevice = device
-        LOG.debug("Identifying device to create %s filesystem on", label)
-
-        # 'any' means pick the first match on the device with matching fs_type
- label_match = True
- if partition.lower() == 'any':
- label_match = False
-
- device, reuse = find_device_node(device, fs_type=fs_type, label=label,
- label_match=label_match,
- replace_fs=fs_replace)
- LOG.debug("Automatic device for %s identified as %s", odevice, device)
-
- if reuse:
-            LOG.debug("Found filesystem match, skipping formatting.")
- return
-
- if not reuse and fs_replace and device:
- LOG.debug("Replacing file system on %s as instructed." % device)
-
- if not device:
-        LOG.debug("No device available that matches request. "
- "Skipping fs creation for %s", fs_cfg)
- return
- elif not partition or str(partition).lower() == 'none':
- LOG.debug("Using the raw device to place filesystem %s on" % label)
-
- else:
- LOG.debug("Error in device identification handling.")
- return
-
- LOG.debug("File system %s will be created on %s", label, device)
-
- # Make sure the device is defined
- if not device:
- LOG.warn("Device is not known: %s", device)
- return
-
- # Check that we can create the FS
- if not (fs_type or fs_cmd):
-        raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
-                        "must be set." % label)
-
- # Create the commands
- if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {
- 'label': label,
- 'filesystem': fs_type,
- 'device': device,
- }
- else:
- # Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
- if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
-
- if not mkfs_cmd:
- LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
- fs_type)
- return
-
- fs_cmd = [mkfs_cmd, device]
-
- if label:
- fs_cmd.extend(["-L", label])
-
- # File systems that support the -F flag
- if overwrite or device_type(device) == "disk":
- fs_cmd.append(lookup_force_flag(fs_type))
-
-    # Add the extra FS options
- if fs_opts:
- fs_cmd.extend(fs_opts)
-
- LOG.debug("Creating file system %s on %s", label, device)
- LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
- try:
- util.subp(fs_cmd)
- except Exception as e:
-        raise Exception("Failed to exec '%s':\n%s" % (fs_cmd, e))
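
The tail of mkfs() above is essentially argv assembly. A sketch of that assembly in isolation, where shutil.which stands in for util.which and the force-flag table mirrors lookup_force_flag():

    from shutil import which

    FORCE_FLAGS = {'ext': '-F', 'btrfs': '-f', 'xfs': '-f', 'reiserfs': '-f'}

    def build_mkfs_cmd(fs_type, device, label=None, force=False, extra_opts=()):
        mkfs = which('mkfs.%s' % fs_type) or which('mk%s' % fs_type)
        if not mkfs:
            return None  # no tool available for this fs type
        cmd = [mkfs, device]
        if label:
            cmd.extend(['-L', label])
        key = 'ext' if 'ext' in fs_type else fs_type
        if force and key in FORCE_FLAGS:
            cmd.append(FORCE_FLAGS[key])
        cmd.extend(extra_opts)
        return cmd

    print(build_mkfs_cmd('ext4', '/dev/vdb1', label='data', force=True))
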
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
deleted file mode 100644
index 98828b9e..00000000
--- a/cloudinit/config/cc_emit_upstart.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-distros = ['ubuntu', 'debian']
-LOG = logging.getLogger(__name__)
-
-
-def is_upstart_system():
- if not os.path.isfile("/sbin/initctl"):
- LOG.debug("no /sbin/initctl located")
- return False
-
- myenv = os.environ.copy()
- if 'UPSTART_SESSION' in myenv:
- del myenv['UPSTART_SESSION']
- check_cmd = ['initctl', 'version']
- try:
- (out, err) = util.subp(check_cmd, env=myenv)
- return 'upstart' in out
- except util.ProcessExecutionError as e:
- LOG.debug("'%s' returned '%s', not using upstart",
- ' '.join(check_cmd), e.exit_code)
- return False
-
-
-def handle(name, _cfg, cloud, log, args):
- event_names = args
- if not event_names:
- # Default to the 'cloud-config'
- # event for backwards compat.
- event_names = ['cloud-config']
-
- if not is_upstart_system():
- log.debug("not upstart system, '%s' disabled", name)
- return
-
- cfgpath = cloud.paths.get_ipath_cur("cloud_config")
- for n in event_names:
- cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
- try:
- util.subp(cmd)
- except Exception as e:
- # TODO(harlowja), use log exception from utils??
- log.warn("Emission of upstart event %s failed due to: %s", n, e)
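
is_upstart_system() pairs a cheap file check with an 'initctl version' probe, dropping UPSTART_SESSION so a user-session upstart cannot mask the system one. The same probe using only the standard library:

    import os
    import subprocess

    def is_upstart_system():
        if not os.path.isfile('/sbin/initctl'):
            return False
        env = os.environ.copy()
        env.pop('UPSTART_SESSION', None)  # force the system-level instance
        try:
            out = subprocess.check_output(['initctl', 'version'], env=env)
        except (OSError, subprocess.CalledProcessError):
            return False
        return b'upstart' in out

    print(is_upstart_system())
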
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
deleted file mode 100644
index 545fee22..00000000
--- a/cloudinit/config/cc_fan.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-fan module allows configuration of Ubuntu Fan
- https://wiki.ubuntu.com/FanNetworking
-
-Example config:
- #cloud-config
- fan:
- config: |
- # fan 240
- 10.0.0.0/8 eth0/16 dhcp
- 10.0.0.0/8 eth1/16 dhcp off
- # fan 241
- 241.0.0.0/8 eth0/16 dhcp
- config_path: /etc/network/fan
-
-If cloud-init sees a 'fan' entry in cloud-config it will
- a.) write 'config_path' with the contents
- b.) install the package 'ubuntu-fan' if it is not installed
- c.) ensure the service is started (or restarted if it was previously running)
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-BUILTIN_CFG = {
- 'config': None,
- 'config_path': '/etc/network/fan',
-}
-
-
-def stop_update_start(service, config_file, content, systemd=False):
- if systemd:
- cmds = {'stop': ['systemctl', 'stop', service],
- 'start': ['systemctl', 'start', service],
- 'enable': ['systemctl', 'enable', service]}
- else:
-        cmds = {'stop': ['service', service, 'stop'],
-                'start': ['service', service, 'start']}
-
- def run(cmd, msg):
- try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
- LOG.warn("failed: %s (%s): %s", service, cmd, e)
- return False
-
- stop_failed = not run(cmds['stop'], msg='stop %s' % service)
- if not content.endswith('\n'):
- content += '\n'
- util.write_file(config_file, content, omode="w")
-
- ret = run(cmds['start'], msg='start %s' % service)
- if ret and stop_failed:
- LOG.warn("success: %s started", service)
-
- if 'enable' in cmds:
- ret = run(cmds['enable'], msg='enable %s' % service)
-
- return ret
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('fan')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- if not mycfg.get('config'):
- LOG.debug("%s: no 'fan' config entry. disabling", name)
- return
-
- util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
- distro = cloud.distro
- if not util.which('fanctl'):
- distro.install_packages(['ubuntu-fan'])
-
- stop_update_start(
- service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'), systemd=distro.uses_systemd())
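
stop_update_start() keys a small command table by action and picks the systemd or sysvinit spelling once, so the restart logic stays init-system agnostic. The table construction on its own:

    def service_cmds(service, systemd):
        if systemd:
            return {'stop': ['systemctl', 'stop', service],
                    'start': ['systemctl', 'start', service],
                    'enable': ['systemctl', 'enable', service]}
        return {'stop': ['service', service, 'stop'],
                'start': ['service', service, 'start']}

    print(service_cmds('ubuntu-fan', systemd=True)['start'])
    # -> ['systemctl', 'start', 'ubuntu-fan']
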
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
deleted file mode 100644
index c9021eb1..00000000
--- a/cloudinit/config/cc_final_message.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-# Jinja formatted default message
-FINAL_MESSAGE_DEF = (
- "## template: jinja\n"
- "Cloud-init v. {{version}} finished at {{timestamp}}."
- " Datasource {{datasource}}. Up {{uptime}} seconds"
-)
-
-
-def handle(_name, cfg, cloud, log, args):
-
- msg_in = ''
- if len(args) != 0:
- msg_in = str(args[0])
- else:
- msg_in = util.get_cfg_option_str(cfg, "final_message", "")
-
- msg_in = msg_in.strip()
- if not msg_in:
- msg_in = FINAL_MESSAGE_DEF
-
- uptime = util.uptime()
- ts = util.time_rfc2822()
- cver = version.version_string()
- try:
- subs = {
- 'uptime': uptime,
- 'timestamp': ts,
- 'version': cver,
- 'datasource': str(cloud.datasource),
- }
- subs.update(dict([(k.upper(), v) for k, v in subs.items()]))
- util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True, log=log)
- except Exception:
- util.logexc(log, "Failed to render final message template")
-
- boot_fin_fn = cloud.paths.boot_finished
- try:
- contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
- except Exception:
- util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
-
- if cloud.datasource.is_disconnected:
- log.warn("Used fallback datasource")
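
The default message is a small Jinja template fed four substitutions plus their upper-cased aliases. A rough stand-in that uses plain string replacement instead of cloud-init's templater, with made-up values, just to show the substitution set:

    subs = {
        'version': '0.7.7',
        'timestamp': 'Wed, 10 Aug 2016 09:06:15 +0000',
        'datasource': 'DataSourceNoCloud',
        'uptime': '12.34',
    }
    subs.update({k.upper(): v for k, v in subs.items()})

    msg = ("Cloud-init v. {{version}} finished at {{timestamp}}."
           " Datasource {{datasource}}. Up {{uptime}} seconds")
    for key, val in subs.items():
        msg = msg.replace('{{%s}}' % key, val)
    print(msg)
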
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
deleted file mode 100644
index 95aab4dd..00000000
--- a/cloudinit/config/cc_foo.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-
-# Modules are expected to have the following attributes.
-# 1. A required 'handle' method which takes the following params.
-#    a) The name will not be this file's name, but instead
-# the name specified in configuration (which is the name
-# which will be used to find this module).
-# b) A configuration object that is the result of the merging
-# of cloud configs configuration with legacy configuration
-# as well as any datasource provided configuration
-# c) A cloud object that can be used to access various
-# datasource and paths for the given distro and data provided
-# by the various datasource instance types.
-#    d) An argument list that may or may not be empty for this module.
-# Typically those are from module configuration where the module
-# is defined with some extra configuration that will eventually
-# be translated from yaml into arguments to this module.
-# 2. An optional 'frequency' that defines how often this module should be run.
-# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
-# provided PER_INSTANCE will be assumed.
-# See settings.py for these constants.
-# 3. An optional 'distros' array/set/tuple that defines the known distros
-#    this module will work with (if not all of them). This is used to write
-#    a warning out if a module is being run on an untested distribution for
-#    informational purposes. If nonexistent, all distros are assumed and
-#    no warning occurs.
-
-frequency = PER_INSTANCE
-
-
-def handle(name, _cfg, _cloud, log, _args):
- log.debug("Hi from module %s", name)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
deleted file mode 100644
index 40560f11..00000000
--- a/cloudinit/config/cc_growpart.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import re
-import stat
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-DEFAULT_CONFIG = {
- 'mode': 'auto',
- 'devices': ['/'],
- 'ignore_growroot_disabled': False,
-}
-
-
-class RESIZE(object):
- SKIPPED = "SKIPPED"
- CHANGED = "CHANGED"
- NOCHANGE = "NOCHANGE"
- FAILED = "FAILED"
-
-
-LOG = logging.getLogger(__name__)
-
-
-def resizer_factory(mode):
- resize_class = None
- if mode == "auto":
- for (_name, resizer) in RESIZERS:
- cur = resizer()
- if cur.available():
- resize_class = cur
- break
-
- if not resize_class:
- raise ValueError("No resizers available")
-
- else:
- mmap = {}
- for (k, v) in RESIZERS:
- mmap[k] = v
-
- if mode not in mmap:
- raise TypeError("unknown resize mode %s" % mode)
-
- mclass = mmap[mode]()
- if mclass.available():
- resize_class = mclass
-
- if not resize_class:
- raise ValueError("mode %s not available" % mode)
-
- return resize_class
-
-
-class ResizeFailedException(Exception):
- pass
-
-
-class ResizeGrowPart(object):
- def available(self):
- myenv = os.environ.copy()
- myenv['LANG'] = 'C'
-
- try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
- if re.search(r"--update\s+", out, re.DOTALL):
- return True
-
- except util.ProcessExecutionError:
- pass
- return False
-
- def resize(self, diskdev, partnum, partdev):
- before = get_size(partdev)
- try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
- if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
- raise ResizeFailedException(e)
- return (before, before)
-
- try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e)
-
- return (before, get_size(partdev))
-
-
-class ResizeGpart(object):
- def available(self):
- if not util.which('gpart'):
- return False
- return True
-
- def resize(self, diskdev, partnum, partdev):
- """
- GPT disks store metadata at the beginning (primary) and at the
- end (secondary) of the disk. When launching an image with a
- larger disk compared to the original image, the secondary copy
- is lost. Thus, the metadata will be marked CORRUPT, and need to
- be recovered.
- """
- try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
- if e.exit_code != 0:
- util.logexc(LOG, "Failed: gpart recover %s", diskdev)
- raise ResizeFailedException(e)
-
- before = get_size(partdev)
- try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
- raise ResizeFailedException(e)
-
- # Since growing the FS requires a reboot, make sure we reboot
- # first when this module has finished.
- open('/var/run/reboot-required', 'a').close()
-
- return (before, get_size(partdev))
-
-
-def get_size(filename):
- fd = os.open(filename, os.O_RDONLY)
- try:
- return os.lseek(fd, 0, os.SEEK_END)
- finally:
- os.close(fd)
-
-
-def device_part_info(devpath):
- # convert an entry in /dev/ to parent disk and partition number
-
- # input of /dev/vdb or /dev/disk/by-label/foo
- # rpath is hopefully a real-ish path in /dev (vda, sdb..)
- rpath = os.path.realpath(devpath)
-
- bname = os.path.basename(rpath)
- syspath = "/sys/class/block/%s" % bname
-
- # FreeBSD doesn't know of sysfs so just get everything we need from
- # the device, like /dev/vtbd0p2.
- if util.system_info()["platform"].startswith('FreeBSD'):
- m = re.search('^(/dev/.+)p([0-9])$', devpath)
- return (m.group(1), m.group(2))
-
- if not os.path.exists(syspath):
- raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
-
- ptpath = os.path.join(syspath, "partition")
- if not os.path.exists(ptpath):
- raise TypeError("%s not a partition" % devpath)
-
- ptnum = util.load_file(ptpath).rstrip()
-
- # for a partition, real syspath is something like:
- # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1
- rsyspath = os.path.realpath(syspath)
- disksyspath = os.path.dirname(rsyspath)
-
- diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip()
- diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin)
-
- # diskdevpath has something like 253:0
- # and udev has put links in /dev/block/253:0 to the device name in /dev/
- return (diskdevpath, ptnum)
-
-
-def devent2dev(devent):
- if devent.startswith("/dev/"):
- return devent
- else:
- result = util.get_mount_info(devent)
- if not result:
-            raise ValueError("Could not determine device of '%s'" % devent)
- return result[0]
-
-
-def resize_devices(resizer, devices):
- # returns a tuple of tuples containing (entry-in-devices, action, message)
- info = []
- for devent in devices:
- try:
- blockdev = devent2dev(devent)
- except ValueError as e:
- info.append((devent, RESIZE.SKIPPED,
- "unable to convert to device: %s" % e,))
- continue
-
- try:
- statret = os.stat(blockdev)
- except OSError as e:
- info.append((devent, RESIZE.SKIPPED,
- "stat of '%s' failed: %s" % (blockdev, e),))
- continue
-
- if (not stat.S_ISBLK(statret.st_mode) and
- not stat.S_ISCHR(statret.st_mode)):
- info.append((devent, RESIZE.SKIPPED,
- "device '%s' not a block device" % blockdev,))
- continue
-
- try:
- (disk, ptnum) = device_part_info(blockdev)
- except (TypeError, ValueError) as e:
- info.append((devent, RESIZE.SKIPPED,
- "device_part_info(%s) failed: %s" % (blockdev, e),))
- continue
-
- try:
- (old, new) = resizer.resize(disk, ptnum, blockdev)
- if old == new:
- info.append((devent, RESIZE.NOCHANGE,
- "no change necessary (%s, %s)" % (disk, ptnum),))
- else:
- info.append((devent, RESIZE.CHANGED,
- "changed (%s, %s) from %s to %s" %
- (disk, ptnum, old, new),))
-
- except ResizeFailedException as e:
- info.append((devent, RESIZE.FAILED,
- "failed to resize: disk=%s, ptnum=%s: %s" %
- (disk, ptnum, e),))
-
- return info
-
-
-def handle(_name, cfg, _cloud, log, _args):
- if 'growpart' not in cfg:
- log.debug("No 'growpart' entry in cfg. Using default: %s" %
- DEFAULT_CONFIG)
- cfg['growpart'] = DEFAULT_CONFIG
-
- mycfg = cfg.get('growpart')
- if not isinstance(mycfg, dict):
- log.warn("'growpart' in config was not a dict")
- return
-
- mode = mycfg.get('mode', "auto")
- if util.is_false(mode):
- log.debug("growpart disabled: mode=%s" % mode)
- return
-
- if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
- if os.path.isfile("/etc/growroot-disabled"):
- log.debug("growpart disabled: /etc/growroot-disabled exists")
- log.debug("use ignore_growroot_disabled to ignore")
- return
-
- devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
- if not len(devices):
- log.debug("growpart: empty device list")
- return
-
- try:
- resizer = resizer_factory(mode)
- except (ValueError, TypeError) as e:
- log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
- if mode != "auto":
- raise e
- return
-
- resized = util.log_time(logfunc=log.debug, msg="resize_devices",
- func=resize_devices, args=(resizer, devices))
- for (entry, action, msg) in resized:
- if action == RESIZE.CHANGED:
- log.info("'%s' resized: %s" % (entry, msg))
- else:
- log.debug("'%s' %s: %s" % (entry, action, msg))
-
-RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
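
get_size() asks the kernel for the size via lseek(SEEK_END) rather than stat(), which is what makes it work for block devices as well as regular files. Demonstrated on a temporary file:

    import os
    import tempfile

    def get_size(path):
        fd = os.open(path, os.O_RDONLY)
        try:
            return os.lseek(fd, 0, os.SEEK_END)
        finally:
            os.close(fd)

    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(b'x' * 4096)
        tmp.flush()
        print(get_size(tmp.name))  # -> 4096
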
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
deleted file mode 100644
index 156722d9..00000000
--- a/cloudinit/config/cc_grub_dpkg.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-
-def handle(name, cfg, _cloud, log, _args):
-
- mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
- if not mycfg:
- mycfg = {}
-
- enabled = mycfg.get('enabled', True)
- if util.is_false(enabled):
- log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
- return
-
- idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None)
-
- if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs is None:
- idevs = ""
- if idevs_empty is None:
- idevs_empty = "true"
- else:
- if idevs_empty is None:
- idevs_empty = "false"
- if idevs is None:
- idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
- "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
- if os.path.exists(dev):
- idevs = dev
- break
-
- # now idevs and idevs_empty are set to determined values
- # or, those set by user
-
- dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n") %
- (idevs, idevs_empty))
-
- log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
-
- try:
- util.subp(['debconf-set-selections'], dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
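
Everything above funnels into two debconf lines piped to debconf-set-selections. Building that payload for a sample device (the values are illustrative):

    idevs, idevs_empty = '/dev/sda', 'false'
    dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
                  "grub-pc grub-pc/install_devices_empty boolean %s\n") %
                 (idevs, idevs_empty))
    print(dconf_sel)  # fed as stdin to ['debconf-set-selections']
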
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
deleted file mode 100644
index 9a02f056..00000000
--- a/cloudinit/config/cc_keys_to_console.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-# This is a tool that cloud init provides
-HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
-
-
-def _get_helper_tool_path(distro):
- try:
- base_lib = distro.usr_lib_exec
- except AttributeError:
- base_lib = '/usr/lib'
- return HELPER_TOOL_TPL % base_lib
-
-
-def handle(name, cfg, cloud, log, _args):
- helper_path = _get_helper_tool_path(cloud.distro)
- if not os.path.exists(helper_path):
- log.warn(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
- return
-
- fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist",
- ["ssh-dss"])
-
- try:
- cmd = [helper_path]
- cmd.append(','.join(fp_blacklist))
- cmd.append(','.join(key_blacklist))
- (stdout, _stderr) = util.subp(cmd)
- util.multi_log("%s\n" % (stdout.strip()),
- stderr=False, console=True)
- except Exception:
- log.warn("Writing keys to the system console failed!")
- raise
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
deleted file mode 100644
index 68fcb27f..00000000
--- a/cloudinit/config/cc_landscape.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from six import StringIO
-
-from configobj import ConfigObj
-
-from cloudinit import type_utils
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
-LS_DEFAULT_FILE = "/etc/default/landscape-client"
-
-distros = ['ubuntu']
-
-# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
- }
-}
-
-
-def handle(_name, cfg, cloud, log, _args):
- """
- Basically turn a top level 'landscape' entry with a 'client' dict
- and render it to ConfigObj format under '[client]' section in
- /etc/landscape/client.conf
- """
-
- ls_cloudcfg = cfg.get("landscape", {})
-
- if not isinstance(ls_cloudcfg, (dict)):
-        raise RuntimeError("'landscape' key existed in config,"
-                           " but not a dictionary type,"
-                           " is a %s instead" %
-                           type_utils.obj_name(ls_cloudcfg))
- if not ls_cloudcfg:
- return
-
- cloud.distro.install_packages(('landscape-client',))
-
- merge_data = [
- LSC_BUILTIN_CFG,
- LSC_CLIENT_CFG_FILE,
- ls_cloudcfg,
- ]
- merged = merge_together(merge_data)
- contents = StringIO()
- merged.write(contents)
-
- util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
- util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
- log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
-
- util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
-
-
-def merge_together(objs):
- """
-    merge together ConfigObj objects or things that ConfigObj() will take in;
-    later entries override earlier ones
- """
- cfg = ConfigObj({})
- for obj in objs:
- if not obj:
- continue
- if isinstance(obj, ConfigObj):
- cfg.merge(obj)
- else:
- cfg.merge(ConfigObj(obj))
- return cfg
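
merge_together() folds plain dicts, file paths, or ConfigObj instances into a single ConfigObj, with later entries winning. A small demonstration over two dicts (requires the third-party configobj package; the URLs are placeholders):

    from configobj import ConfigObj

    base = {'client': {'log_level': 'info',
                       'ping_url': 'http://example.invalid/ping'}}
    override = {'client': {'log_level': 'debug'}}

    cfg = ConfigObj({})
    for obj in (base, override):
        cfg.merge(ConfigObj(obj))

    print(cfg['client']['log_level'])  # -> 'debug': the later entry won
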
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
deleted file mode 100644
index bbe5fcae..00000000
--- a/cloudinit/config/cc_locale.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
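-# Example cloud-config (illustrative; both keys are read by handle()
-# below, and the locale value itself is only a sample):
-#
-# locale: en_US.UTF-8
-# locale_configfile: /etc/default/locale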
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- locale = args[0]
- else:
- locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
-
- if util.is_false(locale):
- log.debug("Skipping module named %s, disabled by config: %s",
- name, locale)
- return
-
- log.debug("Setting locale to %s", locale)
- locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
- cloud.distro.apply_locale(locale, locale_cfgfile)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
deleted file mode 100644
index 70d4e7c3..00000000
--- a/cloudinit/config/cc_lxd.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module initializes lxd using 'lxd init'
-
-Example config:
- #cloud-config
- lxd:
- init:
- network_address: <ip addr>
- network_port: <port>
- storage_backend: <zfs/dir>
- storage_create_device: <dev>
- storage_create_loop: <size>
- storage_pool: <name>
- trust_password: <password>
- bridge:
- mode: <new, existing or none>
- name: <name>
- ipv4_address: <ip addr>
- ipv4_netmask: <cidr>
- ipv4_dhcp_first: <ip addr>
- ipv4_dhcp_last: <ip addr>
- ipv4_dhcp_leases: <size>
- ipv4_nat: <bool>
- ipv6_address: <ip addr>
- ipv6_netmask: <cidr>
- ipv6_nat: <bool>
- domain: <domain>
-"""
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, args):
- # Get config
- lxd_cfg = cfg.get('lxd')
- if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg",
- name)
- return
- if not isinstance(lxd_cfg, dict):
- log.warn("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
- return
-
- # Grab the configuration
- init_cfg = lxd_cfg.get('init', {})
- if not isinstance(init_cfg, dict):
- log.warn("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
- init_cfg = {}
-
- bridge_cfg = lxd_cfg.get('bridge', {})
- if not isinstance(bridge_cfg, dict):
- log.warn("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
- bridge_cfg = {}
-
- # Install the needed packages
- packages = []
- if not util.which("lxd"):
- packages.append('lxd')
-
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
- packages.append('zfs')
-
- if len(packages):
- try:
- cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
- log.warn("failed to install packages %s: %s", packages, exc)
- return
-
- # Set up lxd if init config is given
- if init_cfg:
- init_keys = (
- 'network_address', 'network_port', 'storage_backend',
- 'storage_create_device', 'storage_create_loop',
- 'storage_pool', 'trust_password')
- cmd = ['lxd', 'init', '--auto']
- for k in init_keys:
- if init_cfg.get(k):
- cmd.extend(["--%s=%s" %
- (k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
-
- # Set up lxd-bridge if bridge config is given
- dconf_comm = "debconf-communicate"
- if bridge_cfg and util.which(dconf_comm):
- debconf = bridge_to_debconf(bridge_cfg)
-
- # Update debconf database
- try:
- log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- util.subp([dconf_comm], data)
- except Exception:
- util.logexc(log, "Failed to run '%s' for lxd" % dconf_comm)
-
- # Remove the existing configuration file (forces re-generation)
- util.del_file("/etc/default/lxd-bridge")
-
- # Run reconfigure
- log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
- elif bridge_cfg:
- raise RuntimeError(
- "Unable to configure lxd bridge without %s." + dconf_comm)
-
-
-def bridge_to_debconf(bridge_cfg):
- debconf = {}
-
- if bridge_cfg.get("mode") == "none":
- debconf["lxd/setup-bridge"] = "false"
- debconf["lxd/bridge-name"] = ""
-
- elif bridge_cfg.get("mode") == "existing":
- debconf["lxd/setup-bridge"] = "false"
- debconf["lxd/use-existing-bridge"] = "true"
- debconf["lxd/bridge-name"] = bridge_cfg.get("name")
-
- elif bridge_cfg.get("mode") == "new":
- debconf["lxd/setup-bridge"] = "true"
- if bridge_cfg.get("name"):
- debconf["lxd/bridge-name"] = bridge_cfg.get("name")
-
- if bridge_cfg.get("ipv4_address"):
- debconf["lxd/bridge-ipv4"] = "true"
- debconf["lxd/bridge-ipv4-address"] = \
- bridge_cfg.get("ipv4_address")
- debconf["lxd/bridge-ipv4-netmask"] = \
- bridge_cfg.get("ipv4_netmask")
- debconf["lxd/bridge-ipv4-dhcp-first"] = \
- bridge_cfg.get("ipv4_dhcp_first")
- debconf["lxd/bridge-ipv4-dhcp-last"] = \
- bridge_cfg.get("ipv4_dhcp_last")
- debconf["lxd/bridge-ipv4-dhcp-leases"] = \
- bridge_cfg.get("ipv4_dhcp_leases")
- debconf["lxd/bridge-ipv4-nat"] = \
- bridge_cfg.get("ipv4_nat", "true")
-
- if bridge_cfg.get("ipv6_address"):
- debconf["lxd/bridge-ipv6"] = "true"
- debconf["lxd/bridge-ipv6-address"] = \
- bridge_cfg.get("ipv6_address")
- debconf["lxd/bridge-ipv6-netmask"] = \
- bridge_cfg.get("ipv6_netmask")
- debconf["lxd/bridge-ipv6-nat"] = \
- bridge_cfg.get("ipv6_nat", "false")
-
- if bridge_cfg.get("domain"):
- debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
-
- else:
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
-
- return debconf
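-
-# Illustrative (and not exhaustive): a bridge config of
-# {'mode': 'new', 'name': 'lxdbr0', 'ipv4_address': '10.0.8.1'}
-# would yield debconf selections along the lines of
-# lxd/setup-bridge=true, lxd/bridge-name=lxdbr0,
-# lxd/bridge-ipv4=true and lxd/bridge-ipv4-address=10.0.8.1.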
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
deleted file mode 100644
index ada535f8..00000000
--- a/cloudinit/config/cc_mcollective.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Marc Cluet <marc.cluet@canonical.com>
-# Based on code by Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import BytesIO
-
-# Used since this can maintain comments
-# and doesn't need a top level section
-from configobj import ConfigObj
-
-from cloudinit import log as logging
-from cloudinit import util
-
-PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
-PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = '/etc/mcollective/server.cfg'
-
-LOG = logging.getLogger(__name__)
-
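-# Example cloud-config (illustrative; 'public-cert' and 'private-cert'
-# are handled specially in configure() below, any other key is written
-# into server.cfg as-is):
-#
-# mcollective:
-#   conf:
-#     loglevel: debug
-#     plugin.stomp.host: dbhost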
-
-def configure(config, server_cfg=SERVER_CFG,
- pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
- # Read server.cfg values from the
- # original file in order to be able to mix the rest up
- try:
- mcollective_config = ConfigObj(server_cfg, file_error=True)
- existed = True
- except IOError:
- LOG.debug("Did not find file %s", server_cfg)
- mcollective_config = ConfigObj()
- existed = False
-
- for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
- util.write_file(pubcert_file, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = pubcert_file
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
- util.write_file(pricert_file, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = pricert_file
- mcollective_config['securityprovider'] = 'ssl'
- else:
- if isinstance(cfg, six.string_types):
- # Just set it in the 'main' section
- mcollective_config[cfg_name] = cfg
- elif isinstance(cfg, (dict)):
- # Iterate through the config items, create a section if
- # it is needed and then add/or create items as needed
- if cfg_name not in mcollective_config.sections:
- mcollective_config[cfg_name] = {}
- for (o, v) in cfg.items():
- mcollective_config[cfg_name][o] = v
- else:
- # Otherwise just try to convert it to a string
- mcollective_config[cfg_name] = str(cfg)
-
- if existed:
- # We got all our config as wanted we'll rename
- # the previous server.cfg and create our new one
- util.rename(server_cfg, "%s.old" % (server_cfg))
-
- # Now we got the whole file, write to disk...
- contents = BytesIO()
- mcollective_config.write(contents)
- util.write_file(server_cfg, contents.getvalue(), mode=0o644)
-
-
-def handle(name, cfg, cloud, log, _args):
-
- # If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'mcollective' key in configuration"), name)
- return
-
- mcollective_cfg = cfg['mcollective']
-
- # Start by installing the mcollective package ...
- cloud.distro.install_packages(("mcollective",))
-
- # ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- configure(config=mcollective_cfg['conf'])
-
- # restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
deleted file mode 100644
index facaa538..00000000
--- a/cloudinit/config/cc_migrator.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import shutil
-
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
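-# The module runs by default; it can be disabled with an illustrative
-# cloud-config entry of:
-#
-# migrate: false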
-
-def _migrate_canon_sems(cloud):
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
- am_adjusted = 0
- for sem_path in paths:
- if not sem_path or not os.path.exists(sem_path):
- continue
- for p in os.listdir(sem_path):
- full_path = os.path.join(sem_path, p)
- if os.path.isfile(full_path):
- (name, ext) = os.path.splitext(p)
- canon_name = helpers.canon_sem_name(name)
- if canon_name != name:
- new_path = os.path.join(sem_path, canon_name + ext)
- shutil.move(full_path, new_path)
- am_adjusted += 1
- return am_adjusted
-
-
-def _migrate_legacy_sems(cloud, log):
- legacy_adjust = {
- 'apt-update-upgrade': [
- 'apt-configure',
- 'package-update-upgrade-install',
- ],
- }
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
- for sem_path in paths:
- if not sem_path or not os.path.exists(sem_path):
- continue
- sem_helper = helpers.FileSemaphores(sem_path)
- for (mod_name, migrate_to) in legacy_adjust.items():
- possibles = [mod_name, helpers.canon_sem_name(mod_name)]
- old_exists = []
- for p in os.listdir(sem_path):
- (name, _ext) = os.path.splitext(p)
- if (name in possibles and
- os.path.isfile(os.path.join(sem_path, p))):
- old_exists.append(p)
- for p in old_exists:
- util.del_file(os.path.join(sem_path, p))
- (_name, freq) = os.path.splitext(p)
- for m in migrate_to:
- log.debug("Migrating %s => %s with the same frequency",
- p, m)
- with sem_helper.lock(m, freq):
- pass
-
-
-def handle(name, cfg, cloud, log, _args):
- do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
- if not util.translate_bool(do_migrate):
- log.debug("Skipping module named %s, migration disabled", name)
- return
- sems_moved = _migrate_canon_sems(cloud)
- log.debug("Migrated %s semaphore files to there canonicalized names",
- sems_moved)
- _migrate_legacy_sems(cloud, log)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
deleted file mode 100644
index 2b981935..00000000
--- a/cloudinit/config/cc_mounts.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from string import whitespace
-
-import logging
-import os.path
-import re
-
-from cloudinit import type_utils
-from cloudinit import util
-
-# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
-DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
-DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
-WS = re.compile("[%s]+" % (whitespace))
-FSTAB_PATH = "/etc/fstab"
-
-LOG = logging.getLogger(__name__)
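-
-# Example cloud-config (an illustrative sketch; each mounts entry
-# follows the fstab field order used below, and the swap dict mirrors
-# the keys read by handle_swapcfg):
-#
-# mounts:
-#  - [ ephemeral0, /mnt, auto, "defaults,noexec" ]
-#  - [ swap, none, swap, sw, "0", "0" ]
-# swap:
-#   filename: /swap.img
-#   size: auto
-#   maxsize: 2G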
-
-
-def is_meta_device_name(name):
- # return true if this is a metadata service name
- if name in ["ami", "root", "swap"]:
- return True
- # names 'ephemeral0' or 'ephemeral1'
- # 'ebs[0-9]' appears when '--block-device-mapping sdf=snap-d4d90bbc'
- for enumname in ("ephemeral", "ebs"):
- if name.startswith(enumname) and name.find(":") == -1:
- return True
- return False
-
-
-def _get_nth_partition_for_device(device_path, partition_number):
- potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
- '-part%s' % (partition_number,)]
- for suffix in potential_suffixes:
- potential_partition_device = '%s%s' % (device_path, suffix)
- if os.path.exists(potential_partition_device):
- return potential_partition_device
- return None
-
-
-def _is_block_device(device_path, partition_path=None):
- device_name = os.path.realpath(device_path).split('/')[-1]
- sys_path = os.path.join('/sys/block/', device_name)
- if partition_path is not None:
- sys_path = os.path.join(
- sys_path, os.path.realpath(partition_path).split('/')[-1])
- return os.path.exists(sys_path)
-
-
-def sanitize_devname(startname, transformer, log):
- log.debug("Attempting to determine the real name of %s", startname)
-
- # workaround, allow user to specify 'ephemeral'
- # rather than more ec2 correct 'ephemeral0'
- devname = startname
- if devname == "ephemeral":
- devname = "ephemeral0"
- log.debug("Adjusted mount option from ephemeral to ephemeral0")
-
- device_path, partition_number = util.expand_dotted_devname(devname)
-
- if is_meta_device_name(device_path):
- orig = device_path
- device_path = transformer(device_path)
- if not device_path:
- return None
- if not device_path.startswith("/"):
- device_path = "/dev/%s" % (device_path,)
- log.debug("Mapped metadata name %s to %s", orig, device_path)
- else:
- if DEVICE_NAME_RE.match(startname):
- device_path = "/dev/%s" % (device_path,)
-
- partition_path = None
- if partition_number is None:
- partition_path = _get_nth_partition_for_device(device_path, 1)
- else:
- partition_path = _get_nth_partition_for_device(device_path,
- partition_number)
- if partition_path is None:
- return None
-
- if _is_block_device(device_path, partition_path):
- if partition_path is not None:
- return partition_path
- return device_path
- return None
-
-
-def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
- # make a suggestion on the size of swap for this system.
- if memsize is None:
- memsize = util.read_meminfo()['total']
-
- GB = 2 ** 30
- sugg_max = 8 * GB
-
- info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
-
- if fsys is None and maxsize is None:
- # set max to 8GB default if no filesystem given
- maxsize = sugg_max
- elif fsys:
- statvfs = os.statvfs(fsys)
- avail = statvfs.f_frsize * statvfs.f_bfree
- info['avail'] = avail
-
- if maxsize is None:
- # set to 25% of filesystem space
- maxsize = min(int(avail / 4), sugg_max)
- elif maxsize > ((avail * .9)):
- # set to 90% of available disk space
- maxsize = int(avail * .9)
- elif maxsize is None:
- maxsize = sugg_max
-
- info['max'] = maxsize
-
- formulas = [
- # < 1G: swap = double memory
- (1 * GB, lambda x: x * 2),
- # < 2G: swap = 2G
- (2 * GB, lambda x: 2 * GB),
- # < 4G: swap = memory
- (4 * GB, lambda x: x),
- # < 16G: 4G
- (16 * GB, lambda x: 4 * GB),
- # < 64G: 1/2 M up to max
- (64 * GB, lambda x: x / 2),
- ]
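-
- # illustrative: 1.5GB of memory falls in the '< 2G' tier, suggesting
- # 2GB of swap (capped by maxsize); 32GB falls in '< 64G', suggesting
- # 16GB (half of memory).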
-
- size = None
- for top, func in formulas:
- if memsize <= top:
- size = min(func(memsize), maxsize)
- # if less than 1/2 memory and not much, return 0
- if size < (memsize / 2) and size < 4 * GB:
- size = 0
- break
- break
-
- if size is None:
- # no formula matched (memory above the largest tier): use maxsize
- size = maxsize
-
- info['size'] = size
-
- MB = 2 ** 20
- pinfo = {}
- for k, v in info.items():
- if isinstance(v, int):
- pinfo[k] = "%s MB" % (v / MB)
- else:
- pinfo[k] = v
-
- LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
- " disk given max=%(max_in)s [max=%(max)s]'" % pinfo)
- return size
-
-
-def setup_swapfile(fname, size=None, maxsize=None):
- """
- fname: full path string of filename to setup
- size: the size to create. set to "auto" for recommended
- maxsize: the maximum size
- """
- tdir = os.path.dirname(fname)
- if str(size).lower() == "auto":
- try:
- memsize = util.read_meminfo()['total']
- except IOError as e:
- LOG.debug("Not creating swap: failed to read meminfo: %s", e)
- return
-
- util.ensure_dir(tdir)
- size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
- memsize=memsize)
-
- if not size:
- LOG.debug("Not creating swap: suggested size was 0")
- return
-
- mbsize = str(int(size / (2 ** 20)))
- msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
- try:
- util.ensure_dir(tdir)
- util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
-
- except Exception as e:
- raise IOError("Failed %s: %s" % (msg, e))
-
- return fname
-
-
-def handle_swapcfg(swapcfg):
- """handle the swap config, calling setup_swap if necessary.
- return None or (filename, size)
- """
- if not isinstance(swapcfg, dict):
- LOG.warn("input for swap config was not a dict.")
- return None
-
- fname = swapcfg.get('filename', '/swap.img')
- size = swapcfg.get('size', 0)
- maxsize = swapcfg.get('maxsize', None)
-
- if not (size and fname):
- LOG.debug("no need to setup swap")
- return
-
- if os.path.exists(fname):
- if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
- fname)
- return fname
- try:
- for line in util.load_file("/proc/swaps").splitlines():
- if line.startswith(fname + " "):
- LOG.debug("swap file %s already in use.", fname)
- return fname
- LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
- except Exception:
- LOG.warn("swap file %s existed. Error reading /proc/swaps", fname)
- return fname
-
- try:
- if isinstance(size, str) and size != "auto":
- size = util.human2bytes(size)
- if isinstance(maxsize, str):
- maxsize = util.human2bytes(maxsize)
- return setup_swapfile(fname=fname, size=size, maxsize=maxsize)
-
- except Exception as e:
- LOG.warn("failed to setup swap: %s", e)
-
- return None
-
-
-def handle(_name, cfg, cloud, log, _args):
- # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
- def_mnt_opts = "defaults,nobootwait"
- if cloud.distro.uses_systemd():
- def_mnt_opts = "defaults,nofail"
-
- defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
- defvals = cfg.get("mount_default_fields", defvals)
-
- # these are our default set of mounts
- defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
- ["swap", "none", "swap", "sw", "0", "0"]]
-
- cfgmnt = []
- if "mounts" in cfg:
- cfgmnt = cfg["mounts"]
-
- for i in range(len(cfgmnt)):
- # skip something that wasn't a list
- if not isinstance(cfgmnt[i], list):
- log.warn("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
- continue
-
- start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
- log.debug("Ignorming nonexistant named mount %s", start)
- continue
-
- if sanitized != start:
- log.debug("changed %s => %s" % (start, sanitized))
- cfgmnt[i][0] = sanitized
-
- # in case the user did not quote a field (likely fs-freq, fs_passno)
- # but do not convert None to 'None' (LP: #898365)
- for j in range(len(cfgmnt[i])):
- if cfgmnt[i][j] is None:
- continue
- else:
- cfgmnt[i][j] = str(cfgmnt[i][j])
-
- for i in range(len(cfgmnt)):
- # fill in values with defaults from defvals above
- for j in range(len(defvals)):
- if len(cfgmnt[i]) <= j:
- cfgmnt[i].append(defvals[j])
- elif cfgmnt[i][j] is None:
- cfgmnt[i][j] = defvals[j]
-
- # if the second entry in the list is 'None' this
- # clears all previous entries of that same 'fs_spec'
- # (fs_spec is the first field in /etc/fstab, ie, that device)
- if cfgmnt[i][1] is None:
- for j in range(i):
- if cfgmnt[j][0] == cfgmnt[i][0]:
- cfgmnt[j][1] = None
-
- # for each of the "default" mounts, add them only if no other
- # entry has the same device name
- for defmnt in defmnts:
- start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
- log.debug("Ignoring nonexistant default named mount %s", start)
- continue
- if sanitized != start:
- log.debug("changed default device %s => %s" % (start, sanitized))
- defmnt[0] = sanitized
-
- cfgmnt_has = False
- for cfgm in cfgmnt:
- if cfgm[0] == defmnt[0]:
- cfgmnt_has = True
- break
-
- if cfgmnt_has:
- log.debug(("Not including %s, already"
- " previously included"), start)
- continue
- cfgmnt.append(defmnt)
-
- # now, each entry in the cfgmnt list has all fstab values
- # if the second field is None (not the string, the value) we skip it
- actlist = []
- for x in cfgmnt:
- if x[1] is None:
- log.debug("Skipping non-existent device named %s", x[0])
- else:
- actlist.append(x)
-
- swapret = handle_swapcfg(cfg.get('swap', {}))
- if swapret:
- actlist.append([swapret, "none", "swap", "sw", "0", "0"])
-
- if len(actlist) == 0:
- log.debug("No modifications to fstab needed.")
- return
-
- comment = "comment=cloudconfig"
- cc_lines = []
- needswap = False
- dirs = []
- for line in actlist:
- # append 'comment' to the fs_mntops field, marking this entry as ours
- line[3] = "%s,%s" % (line[3], comment)
- if line[2] == "swap":
- needswap = True
- if line[1].startswith("/"):
- dirs.append(line[1])
- cc_lines.append('\t'.join(line))
-
- fstab_lines = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- try:
- toks = WS.split(line)
- if toks[3].find(comment) != -1:
- continue
- except Exception:
- pass
- fstab_lines.append(line)
-
- fstab_lines.extend(cc_lines)
- contents = "%s\n" % ('\n'.join(fstab_lines))
- util.write_file(FSTAB_PATH, contents)
-
- if needswap:
- try:
- util.subp(("swapon", "-a"))
- except Exception:
- util.logexc(log, "Activating swap via 'swapon -a' failed")
-
- for d in dirs:
- try:
- util.ensure_dir(d)
- except Exception:
- util.logexc(log, "Failed to make '%s' config-mount", d)
-
- try:
- util.subp(("mount", "-a"))
- except Exception:
- util.logexc(log, "Activating mounts via 'mount -a' failed")
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
deleted file mode 100644
index 73b0e30d..00000000
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-
-from cloudinit import log as logging
-from cloudinit import util
-
-REBOOT_FILE = "/var/run/reboot-required"
-REBOOT_CMD = ["/sbin/reboot"]
-
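-# Example cloud-config (illustrative; both the old 'apt_*' and new
-# 'package_*' spellings are accepted by handle() below):
-#
-# package_update: true
-# package_upgrade: true
-# package_reboot_if_required: true
-# packages: ['pastebinit']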
-
-def _multi_cfg_bool_get(cfg, *keys):
- for k in keys:
- if util.get_cfg_option_bool(cfg, k, False):
- return True
- return False
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
-
-
-def handle(_name, cfg, cloud, log, _args):
- # Handle the old style + new config names
- update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
- upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
- reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
- 'package_reboot_if_required')
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
-
- errors = []
- if update or len(pkglist) or upgrade:
- try:
- cloud.distro.update_package_sources()
- except Exception as e:
- util.logexc(log, "Package update failed")
- errors.append(e)
-
- if upgrade:
- try:
- cloud.distro.package_command("upgrade")
- except Exception as e:
- util.logexc(log, "Package upgrade failed")
- errors.append(e)
-
- if len(pkglist):
- try:
- cloud.distro.install_packages(pkglist)
- except Exception as e:
- util.logexc(log, "Failed to install packages: %s", pkglist)
- errors.append(e)
-
- # TODO(smoser): handle this less violently
- # kernel and openssl (possibly some other packages)
- # write a file /var/run/reboot-required after upgrading.
- # if that file exists and configured, then just stop right now and reboot
- reboot_fn_exists = os.path.isfile(REBOOT_FILE)
- if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
- try:
- log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
- # Flush the above warning + anything else out...
- logging.flushLoggers(log)
- _fire_reboot(log)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- errors.append(e)
-
- if len(errors):
- log.warn("%s failed with exceptions, re-raising the last one",
- len(errors))
- raise errors[-1]
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
deleted file mode 100644
index 72176d42..00000000
--- a/cloudinit/config/cc_phone_home.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-POST_LIST_ALL = [
- 'pub_key_dsa',
- 'pub_key_rsa',
- 'pub_key_ecdsa',
- 'instance_id',
- 'hostname',
- 'fqdn'
-]
-
-
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
-#
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE_ID/
-# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id, hostname,
-# fqdn ]
-#
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- ph_cfg = util.read_conf(args[0])
- else:
- if 'phone_home' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'phone_home' configuration found"), name)
- return
- ph_cfg = cfg['phone_home']
-
- if 'url' not in ph_cfg:
- log.warn(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
- return
-
- url = ph_cfg['url']
- post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries', 10)
- try:
- tries = int(tries)
- except Exception:
- tries = 10
- util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
-
- if post_list == "all":
- post_list = POST_LIST_ALL
-
- all_keys = {}
- all_keys['instance_id'] = cloud.get_instance_id()
- all_keys['hostname'] = cloud.get_hostname()
- all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
-
- pubkeys = {
- 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
- 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
- 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
- }
-
- for (n, path) in pubkeys.items():
- try:
- all_keys[n] = util.load_file(path)
- except Exception:
- util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
-
- submit_keys = {}
- for k in post_list:
- if k in all_keys:
- submit_keys[k] = all_keys[k]
- else:
- submit_keys[k] = None
- log.warn(("Requested key %s from 'post'"
- " configuration list not available"), k)
-
- # Get them ready to be posted
- real_submit_keys = {}
- for (k, v) in submit_keys.items():
- if v is None:
- real_submit_keys[k] = 'N/A'
- else:
- real_submit_keys[k] = str(v)
-
- # In case the url is parameterized
- url_params = {
- 'INSTANCE_ID': all_keys['instance_id'],
- }
- url = templater.render_string(url, url_params)
- try:
- util.read_file_or_url(url, data=real_submit_keys,
- retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
- except Exception:
- util.logexc(log, "Failed to post phone home data to %s in %s tries",
- url, tries)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
deleted file mode 100644
index cc3f7f70..00000000
--- a/cloudinit/config/cc_power_state_change.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import errno
-import os
-import re
-import six
-import subprocess
-import time
-
-frequency = PER_INSTANCE
-
-EXIT_FAIL = 254
-
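-# Example cloud-config (illustrative; the keys mirror what
-# load_power_state() reads below):
-#
-# power_state:
-#   delay: "+30"
-#   mode: poweroff
-#   message: Bye Bye
-#   timeout: 30
-#   condition: true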
-
-def givecmdline(pid):
- # Returns the cmdline for the given process id. In Linux we can use procfs
- # for this but on BSD there is /usr/bin/procstat.
- try:
- # Example output from procstat -c 1
- # PID COMM ARGS
- # 1 init /bin/init --
- if util.system_info()["platform"].startswith('FreeBSD'):
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
- line = output.splitlines()[1]
- m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
- return m.group(2)
- else:
- return util.load_file("/proc/%s/cmdline" % pid)
- except IOError:
- return None
-
-
-def check_condition(cond, log=None):
- if isinstance(cond, bool):
- if log:
- log.debug("Static Condition: %s" % cond)
- return cond
-
- pre = "check_condition command (%s): " % cond
- try:
- proc = subprocess.Popen(cond, shell=not isinstance(cond, list))
- proc.communicate()
- ret = proc.returncode
- if ret == 0:
- if log:
- log.debug(pre + "exited 0. condition met.")
- return True
- elif ret == 1:
- if log:
- log.debug(pre + "exited 1. condition not met.")
- return False
- else:
- if log:
- log.warn(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
- return False
- except Exception as e:
- if log:
- log.warn(pre + "Unexpected error: %s" % e)
- return False
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- try:
- (args, timeout, condition) = load_power_state(cfg)
- if args is None:
- log.debug("no power_state provided. doing nothing")
- return
- except Exception as e:
- log.warn("%s Not performing power state change!" % str(e))
- return
-
- if condition is False:
- log.debug("Condition was false. Will not perform state change.")
- return
-
- mypid = os.getpid()
-
- cmdline = givecmdline(mypid)
- if not cmdline:
- log.warn("power_state: failed to get cmdline of current process")
- return
-
- devnull_fp = open(os.devnull, "w")
-
- log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
-
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
- condition, execmd, [args, devnull_fp])
-
-
-def load_power_state(cfg):
- # returns a tuple of (shutdown_command, timeout, condition)
- # shutdown_command is None if no config found
- pstate = cfg.get('power_state')
-
- if pstate is None:
- return (None, None, None)
-
- if not isinstance(pstate, dict):
- raise TypeError("power_state is not a dict.")
-
- opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
-
- mode = pstate.get("mode")
- if mode not in opt_map:
- raise TypeError(
- "power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(opt_map.keys()), mode))
-
- delay = pstate.get("delay", "now")
- # convert integer 30 or string '30' to '+30'
- try:
- delay = "+%s" % int(delay)
- except ValueError:
- pass
-
- if delay != "now" and not re.match(r"\+[0-9]+", delay):
- raise TypeError(
- "power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay)
-
- args = ["shutdown", opt_map[mode], delay]
- if pstate.get("message"):
- args.append(pstate.get("message"))
-
- try:
- timeout = float(pstate.get('timeout', 30.0))
- except ValueError:
- raise ValueError("failed to convert timeout '%s' to float." %
- pstate['timeout'])
-
- condition = pstate.get("condition", True)
- if not isinstance(condition, six.string_types + (list, bool)):
- raise TypeError("condition type %s invalid. must be list, bool, str")
- return (args, timeout, condition)
-
-
-def doexit(sysexit):
- os._exit(sysexit)
-
-
-def execmd(exe_args, output=None, data_in=None):
- try:
- proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
- stdout=output, stderr=subprocess.STDOUT)
- proc.communicate(data_in)
- ret = proc.returncode
- except Exception:
- doexit(EXIT_FAIL)
- doexit(ret)
-
-
-def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
- # wait until pid, with /proc/pid/cmdline contents of pidcmdline
- # is no longer alive. After it is gone, or timeout has passed
- # execute func(args)
- msg = None
- end_time = time.time() + timeout
-
- def fatal(msg):
- if log:
- log.warn(msg)
- doexit(EXIT_FAIL)
-
- known_errnos = (errno.ENOENT, errno.ESRCH)
-
- while True:
- if time.time() > end_time:
- msg = "timeout reached before %s ended" % pid
- break
-
- try:
- cmdline = givecmdline(pid)
- if cmdline != pidcmdline:
- msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
- break
-
- except IOError as ioerr:
- if ioerr.errno in known_errnos:
- msg = "pidfile gone [%d]" % ioerr.errno
- else:
- fatal("IOError during wait: %s" % ioerr)
- break
-
- except Exception as e:
- fatal("Unexpected Exception: %s" % e)
-
- time.sleep(.25)
-
- if not msg:
- fatal("Unexpected error in run_after_pid_gone")
-
- if log:
- log.debug(msg)
-
- try:
- if not check_condition(condition, log):
- return
- except Exception as e:
- fatal("Unexpected Exception when checking condition: %s" % e)
-
- func(*args)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
deleted file mode 100644
index 774d3322..00000000
--- a/cloudinit/config/cc_puppet.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-import os
-import socket
-
-from cloudinit import helpers
-from cloudinit import util
-
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
-
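-# Example cloud-config (an illustrative sketch; the server name and
-# version are placeholders, and 'ca_cert' is handled specially below):
-#
-# puppet:
-#   install: true
-#   version: "3.8"
-#   conf:
-#     agent:
-#       server: "puppetmaster.example.org"
-#       certname: "%i.%f"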
-
-def _autostart_puppet(log):
- # Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
- else:
- log.warn(("Sorry we do not know how to enable"
- " puppet services on this system"))
-
-
-def handle(name, cfg, cloud, log, _args):
- # If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'puppet' configuration found"), name)
- return
-
- puppet_cfg = cfg['puppet']
-
- # Start by installing the puppet package if necessary...
- install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
- version = util.get_cfg_option_str(puppet_cfg, 'version', None)
- if not install and version:
- log.warn(("Puppet install set false but version supplied,"
- " doing nothing."))
- elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
- cloud.distro.install_packages(('puppet', version))
-
- # ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
- # Add all sections from the conf object to puppet.conf
- contents = util.load_file(PUPPET_CONF_PATH)
- # Create object for reading puppet.conf values
- puppet_config = helpers.DefaultingConfigParser()
- # Read puppet.conf values from original file in order to be able to
- # mix the rest up. First clean them up
- # (TODO(harlowja) is this really needed??)
- cleaned_lines = [i.lstrip() for i in contents.splitlines()]
- cleaned_contents = '\n'.join(cleaned_lines)
- puppet_config.readfp(StringIO(cleaned_contents),
- filename=PUPPET_CONF_PATH)
- for (cfg_name, cfg) in puppet_cfg['conf'].items():
- # Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
- if cfg_name == 'ca_cert':
- # Puppet ssl sub-directory isn't created yet
- # Create it with the proper permissions and ownership
- util.ensure_dir(PUPPET_SSL_DIR, 0o771)
- util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
- util.ensure_dir(PUPPET_SSL_CERT_DIR)
- util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
- util.write_file(PUPPET_SSL_CERT_PATH, cfg)
- util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
- else:
- # Iterate through the config items; we'll use ConfigParser.set
- # to overwrite or create new items as needed
- for (o, v) in cfg.items():
- if o == 'certname':
- # Expand %f as the fqdn
- # TODO(harlowja) should this use the cloud fqdn??
- v = v.replace("%f", socket.getfqdn())
- # Expand %i as the instance id
- v = v.replace("%i", cloud.get_instance_id())
- # certname needs to be downcased
- v = v.lower()
- puppet_config.set(cfg_name, o, v)
- # We got all our config as wanted we'll rename
- # the previous puppet.conf and create our new one
- util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
- util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
-
- # Set it up so it autostarts
- _autostart_puppet(log)
-
- # Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
deleted file mode 100644
index 2a2a9f59..00000000
--- a/cloudinit/config/cc_resizefs.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import os
-import stat
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def _resize_btrfs(mount_point, devpth):
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
-
-
-def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
-
-
-def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', devpth)
-
-
-def _resize_ufs(mount_point, devpth):
- return ('growfs', devpth)
-
-# Do not use a dictionary as these commands should be able to be used
-# for multiple filesystem types if possible, e.g. one command for
-# ext2, ext3 and ext4.
-RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
-]
-
-NOBLOCK = "noblock"
-
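-# Example cloud-config (illustrative):
-#
-# resize_rootfs: true        # or 'noblock' to resize in the background
-# resize_rootfs_tmp: /run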
-
-def rootdev_from_cmdline(cmdline):
- found = None
- for tok in cmdline.split():
- if tok.startswith("root="):
- found = tok[5:]
- break
- if found is None:
- return None
-
- if found.startswith("/dev/"):
- return found
- if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL="):]
- if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):]
-
- return "/dev/" + found
-
-
-def handle(name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = args[0]
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
-
- if not util.translate_bool(resize_root, addons=[NOBLOCK]):
- log.debug("Skipping module named %s, resizing disabled", name)
- return
-
- # TODO(harlowja) is the directory ok to be used??
- resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
- util.ensure_dir(resize_root_d)
-
- # TODO(harlowja): allow what is to be resized to be configurable??
- resize_what = "/"
- result = util.get_mount_info(resize_what, log)
- if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
- return
-
- (devpth, fs_type, mount_point) = result
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
- log.debug("resize_info: %s" % info)
-
- container = util.is_container()
-
- # Ensure the path is a block device.
- if (devpth == "/dev/root" and not os.path.exists(devpth) and
- not container):
- devpth = rootdev_from_cmdline(util.get_cmdline())
- if devpth is None:
- log.warn("Unable to find device '/dev/root'")
- return
- log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
-
- try:
- statret = os.stat(devpth)
- except OSError as exc:
- if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpth, info)
- elif exc.errno == errno.ENOENT:
- log.warn("Device '%s' did not exist. cannot resize: %s",
- devpth, info)
- else:
- raise exc
- return
-
- if not os.access(devpth, os.W_OK):
- if container:
- log.debug("'%s' not writable in container. cannot resize: %s",
- devpth, info)
- else:
- log.warn("'%s' not writable. cannot resize: %s", devpth, info)
- return
-
- if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
- if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpth, info))
- else:
- log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpth, info))
- return
-
- resizer = None
- fstype_lc = fs_type.lower()
- for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
- if fstype_lc.startswith(pfix):
- resizer = root_cmd
- break
-
- if not resizer:
- log.warn("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
- return
-
- resize_cmd = resizer(resize_what, devpth)
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
- ' '.join(resize_cmd))
-
- if resize_root == NOBLOCK:
- # Fork to a child that will run
- # the resize command
- util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
- else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
-
- action = 'Resized'
- if resize_root == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
- resize_root)
-
-
-def do_resize(resize_cmd, log):
- try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
- util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
- raise
- # TODO(harlowja): Should we add a fsck check after this to make
- # sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
deleted file mode 100644
index 71d9e3a7..00000000
--- a/cloudinit/config/cc_resolv_conf.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Craig Tracey
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Craig Tracey <craigtracey@gmail.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Note:
-# This module is intended to manage resolv.conf in environments where
-# early configuration of resolv.conf is necessary for further
-# bootstrapping and/or where configuration management such as puppet or
-# chef owns dns configuration. As Debian/Ubuntu will, by default, utilize
-# resolvconf, and similarly RedHat will use sysconfig, this module is
-# likely to be of little use unless those are configured correctly.
-#
-# For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
-# enabled NICs. And, in Ubuntu/Debian it is recommended that DNS
-# be configured via the standard /etc/network/interfaces configuration
-# file.
-#
-#
-# Usage Example:
-#
-# #cloud-config
-# manage_resolv_conf: true
-#
-# resolv_conf:
-# nameservers: ['8.8.4.4', '8.8.8.8']
-# searchdomains:
-# - foo.example.com
-# - bar.example.com
-# domain: example.com
-# options:
-# rotate: true
-# timeout: 1
-#
-
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import templater
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-distros = ['fedora', 'rhel', 'sles']
-
-
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
- flags = []
- false_flags = []
-
- if 'options' in params:
- for key, val in params['options'].items():
- if isinstance(val, bool):
- if val:
- flags.append(key)
- else:
- false_flags.append(key)
-
- for flag in flags + false_flags:
- del params['options'][flag]
-
- if not params.get('options'):
- params['options'] = {}
-
- params['flags'] = flags
- LOG.debug("Writing resolv.conf from template %s" % template_fn)
- templater.render_to_file(template_fn, target_fname, params)
-
-
-def handle(name, cfg, cloud, log, _args):
- """
- Handler for resolv.conf
-
- @param name: The module name "resolv-conf" from cloud.cfg
- @param cfg: A nested dict containing the entire cloud config contents.
- @param cloud: The L{CloudInit} object in use.
- @param log: Pre-initialized Python logger object to use for logging.
- @param args: Any module arguments from cloud.cfg
- """
- if "manage_resolv_conf" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'manage_resolv_conf' key in configuration"), name)
- return
-
- if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(("Skipping module named %s,"
- " 'manage_resolv_conf' present but set to False"), name)
- return
-
- if "resolv_conf" not in cfg:
- log.warn("manage_resolv_conf True but no parameters provided!")
-
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
- log.warn("No template found, not rendering /etc/resolv.conf")
- return
-
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
- return
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
deleted file mode 100644
index 3a113aea..00000000
--- a/cloudinit/config/cc_rh_subscription.py
+++ /dev/null
@@ -1,408 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Red Hat, Inc.
-#
-# Author: Brent Baude <bbaude@redhat.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
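-# Example cloud-config (illustrative; the credentials and pool ids are
-# placeholders, and the full set of valid keys is listed in
-# SubscriptionManager.valid_rh_keys below):
-#
-# rh_subscription:
-#   username: joe@example.com
-#   password: '1234abcd'
-#   auto-attach: true
-#   service-level: self-support
-#   add-pool: ['XYZ01', 'XYZ02']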
-
-def handle(name, cfg, _cloud, log, _args):
- sm = SubscriptionManager(cfg)
- sm.log = log
- if not sm.is_configured():
- log.debug("%s: module not configured.", name)
- return None
-
- if not sm.is_registered():
- try:
- verify, verify_msg = sm._verify_keys()
- if verify is not True:
- raise SubscriptionError(verify_msg)
- cont = sm.rhn_register()
- if not cont:
- raise SubscriptionError("Registration failed or did not "
- "run completely")
-
- # Splitting up the registration, auto-attach, and servicelevel
- # commands because the error codes, messages from subman are not
- # specific enough.
-
- # Attempt to change the service level
- if sm.auto_attach and sm.servicelevel is not None:
- if not sm._set_service_level():
- raise SubscriptionError("Setting of service-level "
- "failed")
- else:
- sm.log.debug("Completed auto-attach with service level")
- elif sm.auto_attach:
- if not sm._set_auto_attach():
- raise SubscriptionError("Setting auto-attach failed")
- else:
- sm.log.debug("Completed auto-attach")
-
- if sm.pools is not None:
- if not isinstance(sm.pools, list):
- pool_fail = "Pools must in the format of a list"
- raise SubscriptionError(pool_fail)
-
- return_stat = sm.addPool(sm.pools)
- if not return_stat:
- raise SubscriptionError("Unable to attach pools {0}"
- .format(sm.pools))
- if (sm.enable_repo is not None) or (sm.disable_repo is not None):
- return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
- if not return_stat:
- raise SubscriptionError("Unable to add or remove repos")
- sm.log_success("rh_subscription plugin completed successfully")
- except SubscriptionError as e:
- sm.log_warn(str(e))
- sm.log_warn("rh_subscription plugin did not complete successfully")
- else:
- sm.log_success("System is already registered")
-
-
-class SubscriptionError(Exception):
- pass
-
-
-class SubscriptionManager(object):
- valid_rh_keys = ['org', 'activation-key', 'username', 'password',
- 'disable-repo', 'enable-repo', 'add-pool',
- 'rhsm-baseurl', 'server-hostname',
- 'auto-attach', 'service-level']
-
- def __init__(self, cfg):
- self.cfg = cfg
- self.rhel_cfg = self.cfg.get('rh_subscription', {})
- self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
- self.server_hostname = self.rhel_cfg.get('server-hostname')
- self.pools = self.rhel_cfg.get('add-pool')
- self.activation_key = self.rhel_cfg.get('activation-key')
- self.org = self.rhel_cfg.get('org')
- self.userid = self.rhel_cfg.get('username')
- self.password = self.rhel_cfg.get('password')
- self.auto_attach = self.rhel_cfg.get('auto-attach')
- self.enable_repo = self.rhel_cfg.get('enable-repo')
- self.disable_repo = self.rhel_cfg.get('disable-repo')
- self.servicelevel = self.rhel_cfg.get('service-level')
- self.subman = ['subscription-manager']
-
- def log_success(self, msg):
- '''Simple wrapper for logging info messages. Useful for unittests'''
- self.log.info(msg)
-
- def log_warn(self, msg):
- '''Simple wrapper for logging warning messages. Useful for unittests'''
- self.log.warn(msg)
-
- def _verify_keys(self):
- '''
- Checks that the keys in the rh_subscription dict from the user-data
- are what we expect.
- '''
-
- for k in self.rhel_cfg:
- if k not in self.valid_rh_keys:
- bad_key = "{0} is not a valid key for rh_subscription. "\
- "Valid keys are: "\
- "{1}".format(k, ', '.join(self.valid_rh_keys))
- return False, bad_key
-
- # Check for bad auto-attach value
- if (self.auto_attach is not None) and \
- not (util.is_true(self.auto_attach) or
- util.is_false(self.auto_attach)):
- not_bool = "The key auto-attach must be a boolean value "\
- "(True/False "
- return False, not_bool
-
- if (self.servicelevel is not None) and ((not self.auto_attach) or
- (util.is_false(str(self.auto_attach)))):
- no_auto = ("The service-level key must be used in conjunction "
- "with the auto-attach key. Please re-run with "
- "auto-attach: True")
- return False, no_auto
- return True, None
-
- def is_registered(self):
- '''
- Checks if the system is already registered and returns
- True if so, else False
- '''
- cmd = ['identity']
-
- try:
- self._sub_man_cli(cmd)
- except util.ProcessExecutionError:
- return False
-
- return True
-
- def _sub_man_cli(self, cmd, logstring_val=False):
- '''
- Uses the preferred cloud-init subprocess wrapper, util.subp,
- to run subscription-manager. Broken out into a separate
- function for later use in mocking and unittests.
- '''
- cmd = self.subman + cmd
- return util.subp(cmd, logstring=logstring_val)
-
- def rhn_register(self):
- '''
- Registers the system by userid and password or activation key
- and org. Returns True when successful False when not.
- '''
-
- if (self.activation_key is not None) and (self.org is not None):
- # register by activation key
- cmd = ['register', '--activationkey={0}'.
- format(self.activation_key), '--org={0}'.format(self.org)]
-
- # If the baseurl and/or server url are passed in, we register
- # with them.
-
- if self.rhsm_baseurl is not None:
- cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
-
- if self.server_hostname is not None:
- cmd.append("--serverurl={0}".format(self.server_hostname))
-
- try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
- except util.ProcessExecutionError as e:
- if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
- return False
-
- elif (self.userid is not None) and (self.password is not None):
- # register by username and password
- cmd = ['register', '--username={0}'.format(self.userid),
- '--password={0}'.format(self.password)]
-
- # If the baseurl and/or server url are passed in, we register
- # with them.
-
- if self.rhsm_baseurl is not None:
- cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
-
- if self.server_hostname is not None:
- cmd.append("--serverurl={0}".format(self.server_hostname))
-
- # Attempting to register the system only
- try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
- except util.ProcessExecutionError as e:
- if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
- return False
-
- else:
- self.log_warn("Unable to register system due to incomplete "
- "information.")
-            self.log_warn("Use either activation-key and org *or* username "
-                          "and password")
- return False
-
- reg_id = return_out.split("ID: ")[1].rstrip()
- self.log.debug("Registered successfully with ID {0}".format(reg_id))
- return True
-
- def _set_service_level(self):
- cmd = ['attach', '--auto', '--servicelevel={0}'
- .format(self.servicelevel)]
-
- try:
- return_out, return_err = self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- if e.stdout.rstrip() != '':
- for line in e.stdout.split("\n"):
-                    if line != '':
- self.log_warn(line)
- else:
- self.log_warn("Setting the service level failed with: "
- "{0}".format(e.stderr.strip()))
- return False
- for line in return_out.split("\n"):
-            if line != "":
- self.log.debug(line)
- return True
-
- def _set_auto_attach(self):
- cmd = ['attach', '--auto']
- try:
- return_out, return_err = self._sub_man_cli(cmd)
-        except util.ProcessExecutionError as e:
-            self.log_warn("Auto-attach failed with: "
-                          "{0}".format(e))
- return False
- for line in return_out.split("\n"):
-            if line != "":
- self.log.debug(line)
- return True
-
- def _getPools(self):
- '''
-        Gets the list of pools for the active subscription and returns them
- in list form.
- '''
- available = []
- consumed = []
-
- # Get all available pools
- cmd = ['list', '--available', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
- available = (results.rstrip()).split("\n")
-
- # Get all consumed pools
- cmd = ['list', '--consumed', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
- consumed = (results.rstrip()).split("\n")
-
- return available, consumed
-
- def _getRepos(self):
- '''
- Obtains the current list of active yum repositories and returns
- them in list form.
- '''
-
- cmd = ['repos', '--list-enabled']
- return_out, return_err = self._sub_man_cli(cmd)
- active_repos = []
- for repo in return_out.split("\n"):
- if "Repo ID:" in repo:
- active_repos.append((repo.split(':')[1]).strip())
-
- cmd = ['repos', '--list-disabled']
- return_out, return_err = self._sub_man_cli(cmd)
-
- inactive_repos = []
- for repo in return_out.split("\n"):
- if "Repo ID:" in repo:
- inactive_repos.append((repo.split(':')[1]).strip())
- return active_repos, inactive_repos
-
- def addPool(self, pools):
- '''
- Takes a list of subscription pools and "attaches" them to the
- current subscription
- '''
-
- # An empty list was passed
- if len(pools) == 0:
- self.log.debug("No pools to attach")
- return True
-
- pool_available, pool_consumed = self._getPools()
- pool_list = []
- cmd = ['attach']
- for pool in pools:
- if (pool not in pool_consumed) and (pool in pool_available):
- pool_list.append('--pool={0}'.format(pool))
- else:
- self.log_warn("Pool {0} is not available".format(pool))
- if len(pool_list) > 0:
- cmd.extend(pool_list)
- try:
- self._sub_man_cli(cmd)
- self.log.debug("Attached the following pools to your "
- "system: %s" % (", ".join(pool_list))
- .replace('--pool=', ''))
- return True
- except util.ProcessExecutionError as e:
- self.log_warn("Unable to attach pool {0} "
- "due to {1}".format(pool, e))
- return False
-
- def update_repos(self, erepos, drepos):
- '''
- Takes a list of yum repo ids that need to be disabled or enabled; then
- it verifies if they are already enabled or disabled and finally
- executes the action to disable or enable
- '''
-
-        if erepos is None:
-            erepos = []
-        elif not isinstance(erepos, list):
-            self.log_warn("Repo IDs must be in the format of a list.")
-            return False
-
-        if drepos is None:
-            drepos = []
-        elif not isinstance(drepos, list):
-            self.log_warn("Repo IDs must be in the format of a list.")
-            return False
-
-        # Bail if neither list is populated
-        if (len(erepos) == 0) and (len(drepos) == 0):
-            self.log.debug("No repo IDs to enable or disable")
-            return True
-
- active_repos, inactive_repos = self._getRepos()
- # Creating a list of repoids to be enabled
- enable_list = []
- enable_list_fail = []
- for repoid in erepos:
- if (repoid in inactive_repos):
- enable_list.append("--enable={0}".format(repoid))
- else:
- enable_list_fail.append(repoid)
-
- # Creating a list of repoids to be disabled
- disable_list = []
- disable_list_fail = []
- for repoid in drepos:
- if repoid in active_repos:
- disable_list.append("--disable={0}".format(repoid))
- else:
- disable_list_fail.append(repoid)
-
- # Logging any repos that are already enabled or disabled
- if len(enable_list_fail) > 0:
- for fail in enable_list_fail:
- # Check if the repo exists or not
- if fail in active_repos:
- self.log.debug("Repo {0} is already enabled".format(fail))
- else:
- self.log_warn("Repo {0} does not appear to "
- "exist".format(fail))
- if len(disable_list_fail) > 0:
- for fail in disable_list_fail:
- self.log.debug("Repo {0} not disabled "
- "because it is not enabled".format(fail))
-
- cmd = ['repos']
- if len(enable_list) > 0:
- cmd.extend(enable_list)
- if len(disable_list) > 0:
- cmd.extend(disable_list)
-
- try:
- self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- self.log_warn("Unable to alter repos due to {0}".format(e))
- return False
-
- if len(enable_list) > 0:
- self.log.debug("Enabled the following repos: %s" %
- (", ".join(enable_list)).replace('--enable=', ''))
- if len(disable_list) > 0:
- self.log.debug("Disabled the following repos: %s" %
- (", ".join(disable_list)).replace('--disable=', ''))
- return True
-
- def is_configured(self):
- return bool((self.userid and self.password) or self.activation_key)
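
For reference, the keys read in SubscriptionManager.__init__ above map
directly to user-data. A minimal sketch of a registering cloud-config
(credential values here are invented placeholders):

    #cloud-config
    rh_subscription:
        username: joe@rhel.example.com
        password: '1234abcd'
        auto-attach: True
        service-level: self-support
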
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
deleted file mode 100644
index 8118fac4..00000000
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# The purpose of this script is to allow cloud-init to consume
-# rightscale style userdata. rightscale user data is key-value pairs
-# in a url-query-string like format.
-#
-# for cloud-init support, there will be a key named
-# 'CLOUD_INIT_REMOTE_HOOK'.
-#
-# This cloud-config module will
-# - read the blob of data from raw user data, and parse it as key/value
-# - for each key that is found, download the content to
-# the local instance/scripts directory and set them executable.
-# - the files in that directory will be run by the user-scripts module
-# Therefore, this must run before that.
-#
-#
-
-import os
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-from six.moves.urllib_parse import parse_qs
-
-frequency = PER_INSTANCE
-
-MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
-
-
-def handle(name, _cfg, cloud, log, _args):
- try:
- ud = cloud.get_userdata_raw()
- except Exception:
- log.debug("Failed to get raw userdata in module %s", name)
- return
-
- try:
- mdict = parse_qs(ud)
- if not mdict or MY_HOOKNAME not in mdict:
- log.debug(("Skipping module %s, "
- "did not find %s in parsed"
- " raw userdata"), name, MY_HOOKNAME)
- return
- except Exception:
- util.logexc(log, "Failed to parse query string %s into a dictionary",
- ud)
- raise
-
- wrote_fns = []
- captured_excps = []
-
-    # These will eventually be run by the cc_scripts_user module
- # TODO(harlowja): maybe this should just be a new user data handler??
- # Instead of a late module that acts like a user data handler?
- scripts_d = cloud.get_ipath_cur('scripts')
- urls = mdict[MY_HOOKNAME]
- for (i, url) in enumerate(urls):
- fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
- try:
- resp = uhelp.readurl(url)
-            # Ensure it's a valid http response (and something was received)
- if resp.ok() and resp.contents:
- util.write_file(fname, resp, mode=0o700)
- wrote_fns.append(fname)
- except Exception as e:
- captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
- fname)
-
- if wrote_fns:
- log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
-
- if len(wrote_fns) != len(urls):
- skipped = len(urls) - len(wrote_fns)
- log.debug("%s urls were skipped or failed", skipped)
-
- if captured_excps:
- log.warn("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
- raise captured_excps[-1]
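
To make the hook format concrete, here is a small sketch (the URLs are
hypothetical) of how parse_qs turns rightscale-style userdata into the list
of hook URLs that the module downloads into the scripts directory:

    from six.moves.urllib_parse import parse_qs

    ud = ("CLOUD_INIT_REMOTE_HOOK=https://example.com/hook-a&"
          "CLOUD_INIT_REMOTE_HOOK=https://example.com/hook-b")
    # parse_qs collects repeated keys into one list per key
    print(parse_qs(ud)['CLOUD_INIT_REMOTE_HOOK'])
    # ['https://example.com/hook-a', 'https://example.com/hook-b']
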
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
deleted file mode 100644
index b8642d65..00000000
--- a/cloudinit/config/cc_rsyslog.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# vi: ts=4 expandtab syntax=python
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-rsyslog module allows configuration of syslog logging via rsyslog
-Configuration is done under the cloud-config top level 'rsyslog'.
-
-Under 'rsyslog' you can define:
- - configs: [default=[]]
- this is a list. entries in it are a string or a dictionary.
- each entry has 2 parts:
- * content
- * filename
- if the entry is a string, then it is assigned to 'content'.
- for each entry, content is written to the provided filename.
- if filename is not provided, its default is read from 'config_filename'
-
-    Content here can be any valid rsyslog configuration. No specific
-    format is enforced.
-
- For simply logging to an existing remote syslog server, via udp:
- configs: ["*.* @192.168.1.1"]
-
- - remotes: [default={}]
- This is a dictionary of name / value pairs.
-    In comparison to 'configs', it is more focused in that it only supports
-    remote syslog configuration. It is not rsyslog specific, and could be
-    converted to other syslog implementations.
-
- Each entry in remotes is a 'name' and a 'value'.
-    * name: a string identifying the entry. Good practice would indicate
-      using a consistent and identifiable string for the producer.
- For example, the MAAS service could use 'maas' as the key.
- * value consists of the following parts:
- * optional filter for log messages
- default if not present: *.*
-     * optional leading '@' or '@@' (indicates udp or tcp respectively).
-       default if not present: '@' (udp). This is the rsyslog syntax for
-       specifying the protocol.
- * ipv4 or ipv6 or hostname
- ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
- * optional port
- port defaults to 514
-
- - config_filename: [default=20-cloud-config.conf]
- this is the file name to use if none is provided in a config entry.
-
- - config_dir: [default=/etc/rsyslog.d]
- this directory is used for filenames that are not absolute paths.
-
- - service_reload_command: [default="auto"]
- this command is executed if files have been written and thus the syslog
- daemon needs to be told.
-
-Note: since cloud-init 0.5, a legacy version of rsyslog config has been
-present and is still supported. See below for the mappings between the
-old values and the new values:
- old value -> new value
- 'rsyslog' -> rsyslog/configs
- 'rsyslog_filename' -> rsyslog/config_filename
- 'rsyslog_dir' -> rsyslog/config_dir
-
-the legacy config does not support 'service_reload_command'.
-
-Example config:
- #cloud-config
- rsyslog:
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- remotes:
- maas: "192.168.1.1"
- juju: "10.0.4.1"
- config_dir: config_dir
- config_filename: config_filename
- service_reload_command: [your, syslog, restart, command]
-
-Example Legacy config:
- #cloud-config
- rsyslog:
- - "*.* @@192.158.1.1"
- rsyslog_dir: /etc/rsyslog-config.d/
- rsyslog_filename: 99-local.conf
-"""
-
-import os
-import re
-import six
-
-from cloudinit import log as logging
-from cloudinit import util
-
-DEF_FILENAME = "20-cloud-config.conf"
-DEF_DIR = "/etc/rsyslog.d"
-DEF_RELOAD = "auto"
-DEF_REMOTES = {}
-
-KEYNAME_CONFIGS = 'configs'
-KEYNAME_FILENAME = 'config_filename'
-KEYNAME_DIR = 'config_dir'
-KEYNAME_RELOAD = 'service_reload_command'
-KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
-KEYNAME_LEGACY_DIR = 'rsyslog_dir'
-KEYNAME_REMOTES = 'remotes'
-
-LOG = logging.getLogger(__name__)
-
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
-HOST_PORT_RE = re.compile(
- r'^(?P<proto>[@]{0,2})'
- '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- '([:](?P<port>[0-9]+))?$')
-
-
-def reload_syslog(command=DEF_RELOAD, systemd=False):
- service = 'rsyslog'
- if command == DEF_RELOAD:
- if systemd:
- cmd = ['systemctl', 'reload-or-try-restart', service]
- else:
- cmd = ['service', service, 'restart']
- else:
- cmd = command
- util.subp(cmd, capture=True)
-
-
-def load_config(cfg):
- # return an updated config with entries of the correct type
- # support converting the old top level format into new format
- mycfg = cfg.get('rsyslog', {})
-
- if isinstance(cfg.get('rsyslog'), list):
- mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
- if KEYNAME_LEGACY_FILENAME in cfg:
- mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
- if KEYNAME_LEGACY_DIR in cfg:
- mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR]
-
- fillup = (
- (KEYNAME_CONFIGS, [], list),
- (KEYNAME_DIR, DEF_DIR, six.string_types),
- (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
- (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
- (KEYNAME_REMOTES, DEF_REMOTES, dict))
-
- for key, default, vtypes in fillup:
- if key not in mycfg or not isinstance(mycfg[key], vtypes):
- mycfg[key] = default
-
- return mycfg
-
-
-def apply_rsyslog_changes(configs, def_fname, cfg_dir):
- # apply the changes in 'configs' to the paths in def_fname and cfg_dir
- # return a list of the files changed
- files = []
- for cur_pos, ent in enumerate(configs):
- if isinstance(ent, dict):
- if "content" not in ent:
- LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
- continue
- content = ent['content']
- filename = ent.get("filename", def_fname)
- else:
- content = ent
- filename = def_fname
-
- filename = filename.strip()
- if not filename:
- LOG.warn("Entry %s has an empty filename", cur_pos + 1)
- continue
-
- filename = os.path.join(cfg_dir, filename)
-
- # Truncate filename first time you see it
- omode = "ab"
- if filename not in files:
- omode = "wb"
- files.append(filename)
-
- try:
- endl = ""
- if not content.endswith("\n"):
- endl = "\n"
- util.write_file(filename, content + endl, omode=omode)
- except Exception:
- util.logexc(LOG, "Failed to write to %s", filename)
-
- return files
-
-
-def parse_remotes_line(line, name=None):
- try:
- data, comment = COMMENT_RE.split(line)
- comment = comment.strip()
- except ValueError:
- data, comment = (line, None)
-
- toks = data.strip().split()
- match = None
-    if len(toks) == 1:
-        host_port = toks[0]
- elif len(toks) == 2:
- match, host_port = toks
- else:
- raise ValueError("line had multiple spaces: %s" % data)
-
- toks = HOST_PORT_RE.match(host_port)
-
- if not toks:
- raise ValueError("Invalid host specification '%s'" % host_port)
-
- proto = toks.group('proto')
- addr = toks.group('addr') or toks.group('bracket_addr')
- port = toks.group('port')
-
- if addr.startswith("[") and not addr.endswith("]"):
- raise ValueError("host spec had invalid brackets: %s" % addr)
-
- if comment and not name:
- name = comment
-
- t = SyslogRemotesLine(name=name, match=match, proto=proto,
- addr=addr, port=port)
- t.validate()
- return t
-
-
-class SyslogRemotesLine(object):
- def __init__(self, name=None, match=None, proto=None, addr=None,
- port=None):
- if not match:
- match = "*.*"
- self.name = name
- self.match = match
- if not proto:
- proto = "udp"
- if proto == "@":
- proto = "udp"
- elif proto == "@@":
- proto = "tcp"
- self.proto = proto
-
- self.addr = addr
- if port:
- self.port = int(port)
- else:
- self.port = None
-
- def validate(self):
- if self.port:
- try:
- int(self.port)
- except ValueError:
- raise ValueError("port '%s' is not an integer" % self.port)
-
- if not self.addr:
- raise ValueError("address is required")
-
- def __repr__(self):
- return "[name=%s match=%s proto=%s address=%s port=%s]" % (
- self.name, self.match, self.proto, self.addr, self.port
- )
-
- def __str__(self):
- buf = self.match + " "
- if self.proto == "udp":
- buf += "@"
- elif self.proto == "tcp":
- buf += "@@"
-
- if ":" in self.addr:
- buf += "[" + self.addr + "]"
- else:
- buf += self.addr
-
- if self.port:
- buf += ":%s" % self.port
-
- if self.name:
- buf += " # %s" % self.name
- return buf
-
-
-def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
- if not remotes:
- return None
- lines = []
- if header is not None:
- lines.append(header)
- for name, line in remotes.items():
- if not line:
- continue
- try:
- lines.append(str(parse_remotes_line(line, name=name)))
- except ValueError as e:
- LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
- if footer is not None:
- lines.append(footer)
- return '\n'.join(lines) + "\n"
-
-
-def handle(name, cfg, cloud, log, _args):
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
- return
-
- mycfg = load_config(cfg)
- configs = mycfg[KEYNAME_CONFIGS]
-
- if mycfg[KEYNAME_REMOTES]:
- configs.append(
- remotes_to_rsyslog_cfg(
- mycfg[KEYNAME_REMOTES],
- header="# begin remotes",
- footer="# end remotes",
- ))
-
- if not mycfg['configs']:
- log.debug("Empty config rsyslog['configs'], nothing to do")
- return
-
- changes = apply_rsyslog_changes(
- configs=mycfg[KEYNAME_CONFIGS],
- def_fname=mycfg[KEYNAME_FILENAME],
- cfg_dir=mycfg[KEYNAME_DIR])
-
- if not changes:
- log.debug("restart of syslog not necessary, no changes made")
- return
-
-    try:
-        reload_syslog(
-            command=mycfg[KEYNAME_RELOAD],
-            systemd=cloud.distro.uses_systemd())
-        restarted = True
-    except util.ProcessExecutionError as e:
-        restarted = False
-        log.warn("Failed to reload syslog: %s", e)
-
- if restarted:
- # This only needs to run if we *actually* restarted
- # syslog above.
- cloud.cycle_logging()
- # This should now use rsyslog if
- # the logging was setup to use it...
- log.debug("%s configured %s files", name, changes)
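
The remotes syntax documented above round-trips through parse_remotes_line
and SyslogRemotesLine; a short sketch of the parsing behavior:

    line = parse_remotes_line("*.* @@[fd00::1]:10514", name="maas")
    print(line.proto, line.addr, line.port)
    # tcp fd00::1 10514
    print(str(line))
    # *.* @@[fd00::1]:10514 # maas
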
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
deleted file mode 100644
index bc09d38c..00000000
--- a/cloudinit/config/cc_runcmd.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, _args):
- if "runcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'runcmd' key in configuration"), name)
- return
-
- out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
- cmd = cfg["runcmd"]
- try:
- content = util.shellify(cmd)
- util.write_file(out_fn, content, 0o700)
- except Exception:
- util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
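
For reference, entries under 'runcmd' may be either exec-style lists or
plain shell strings; util.shellify renders both into one script that the
cc_scripts_user module later executes. A sketch of the user-data:

    #cloud-config
    runcmd:
     - [ls, -l, /]
     - "echo both strings and lists are accepted"
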
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
deleted file mode 100644
index f5786a31..00000000
--- a/cloudinit/config/cc_salt_minion.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Jeff Bauer <jbauer@rubic.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-# Note: see http://saltstack.org/topics/installation/
-
-
-def handle(name, cfg, cloud, log, _args):
- # If there isn't a salt key in the configuration don't do anything
- if 'salt_minion' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'salt_minion' key in configuration"), name)
- return
-
- salt_cfg = cfg['salt_minion']
-
- # Start by installing the salt package ...
- cloud.distro.install_packages(('salt-minion',))
-
- # Ensure we can configure files at the right dir
- config_dir = salt_cfg.get("config_dir", '/etc/salt')
- util.ensure_dir(config_dir)
-
- # ... and then update the salt configuration
- if 'conf' in salt_cfg:
- # Add all sections from the conf object to /etc/salt/minion
- minion_config = os.path.join(config_dir, 'minion')
- minion_data = util.yaml_dumps(salt_cfg.get('conf'))
- util.write_file(minion_config, minion_data)
-
- # ... copy the key pair if specified
- if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
- with util.umask(0o77):
- util.ensure_dir(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- util.write_file(pub_name, salt_cfg['public_key'])
- util.write_file(pem_name, salt_cfg['private_key'])
-
-    # restart salt-minion. 'service restart' will start it even if it was
-    # not running; if it was running, it must be restarted for the config
-    # change to take effect.
- util.subp(['service', 'salt-minion', 'restart'], capture=False)
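
A sketch of the matching user-data (the master address is a placeholder);
the 'conf' mapping is dumped as YAML into <config_dir>/minion:

    #cloud-config
    salt_minion:
        conf:
            master: salt.example.com
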
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
deleted file mode 100644
index ee3b6c9f..00000000
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-SCRIPT_SUBDIR = 'per-boot'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
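
The per-instance and per-once variants below differ from this module only
in run frequency and subdirectory. A sketch of the underlying call,
assuming the conventional cloud dir so that get_cpath() resolves to
/var/lib/cloud:

    from cloudinit import util

    # executes every file in the directory in sorted order, like run-parts
    util.runparts('/var/lib/cloud/scripts/per-boot')
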
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
deleted file mode 100644
index c0d62b12..00000000
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'per-instance'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
deleted file mode 100644
index ecb527f6..00000000
--- a/cloudinit/config/cc_scripts_per_once.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ONCE
-
-frequency = PER_ONCE
-
-SCRIPT_SUBDIR = 'per-once'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
deleted file mode 100644
index 699857d1..00000000
--- a/cloudinit/config/cc_scripts_user.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'scripts'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # This is written to by the user data handlers
- # Ie, any custom shell scripts that come down
- # go here...
- runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
deleted file mode 100644
index 80bf10ff..00000000
--- a/cloudinit/config/cc_scripts_vendor.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'vendor'
-
-
-def handle(name, cfg, cloud, log, _args):
- # This is written to by the vendor data handlers
- # any vendor data shell scripts get placed in runparts_path
- runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
- SCRIPT_SUBDIR)
-
- prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
-
- try:
- util.runparts(runparts_path, exe_prefix=prefix)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
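
The one user-tunable knob here is the prefix; a sketch of user-data that
would run every vendor script under an explicit interpreter (the
interpreter choice is an invented example):

    #cloud-config
    vendor_data:
        prefix: [/usr/bin/env, sh]
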
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
deleted file mode 100644
index 5085c23a..00000000
--- a/cloudinit/config/cc_seed_random.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Yahoo! Inc.
-# Copyright (C) 2014 Canonical, Ltd
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Dustin Kirkland <kirkland@ubuntu.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-
-from six import BytesIO
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-LOG = logging.getLogger(__name__)
-
-
-def _decode(data, encoding=None):
- if not data:
- return b''
- if not encoding or encoding.lower() in ['raw']:
- return util.encode_text(data)
- elif encoding.lower() in ['base64', 'b64']:
- return base64.b64decode(data)
- elif encoding.lower() in ['gzip', 'gz']:
- return util.decomp_gzip(data, quiet=False, decode=None)
- else:
- raise IOError("Unknown random_seed encoding: %s" % (encoding))
-
-
-def handle_random_seed_command(command, required, env=None):
- if not command and required:
- raise ValueError("no command found but required=true")
- elif not command:
- LOG.debug("no command provided")
- return
-
- cmd = command[0]
- if not util.which(cmd):
- if required:
-            raise ValueError(
-                "command '%s' not found but required=true" % cmd)
- else:
- LOG.debug("command '%s' not found for seed_command", cmd)
- return
- util.subp(command, env=env, capture=False)
-
-
-def handle(name, cfg, cloud, log, _args):
- mycfg = cfg.get('random_seed', {})
- seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', b'')
-
- seed_buf = BytesIO()
- if seed_data:
- seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
-
- # 'random_seed' is set up by Azure datasource, and comes already in
- # openstack meta_data.json
- metadata = cloud.datasource.metadata
- if metadata and 'random_seed' in metadata:
- seed_buf.write(util.encode_text(metadata['random_seed']))
-
- seed_data = seed_buf.getvalue()
- if len(seed_data):
- log.debug("%s: adding %s bytes of random seed entropy to %s", name,
- len(seed_data), seed_path)
- util.append_file(seed_path, seed_data)
-
- command = mycfg.get('command', None)
- req = mycfg.get('command_required', False)
- try:
- env = os.environ.copy()
- env['RANDOM_SEED_FILE'] = seed_path
- handle_random_seed_command(command=command, required=req, env=env)
- except ValueError as e:
- log.warn("handling random command [%s] failed: %s", command, e)
- raise e
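
A sketch of the random_seed user-data handled above (the command is an
invented example; note that the module exports the target path to the
command as RANDOM_SEED_FILE):

    #cloud-config
    random_seed:
        file: /dev/urandom
        data: bXkgcmFuZG9tIHNlZWQ=
        encoding: base64
        command: [sh, -c, 'dd if=/dev/hwrng of=$RANDOM_SEED_FILE count=1']
        command_required: false
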
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
deleted file mode 100644
index f43d8d5a..00000000
--- a/cloudinit/config/cc_set_hostname.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
- return
-
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
- cloud.distro.set_hostname(hostname, fqdn)
- except Exception:
- util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
- hostname)
- raise
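
The corresponding user-data is small; a sketch:

    #cloud-config
    preserve_hostname: false
    hostname: myhost
    fqdn: myhost.example.com
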
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
deleted file mode 100644
index 5c8c23b8..00000000
--- a/cloudinit/config/cc_set_passwords.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-from string import ascii_letters, digits
-
-# We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits
- if x not in 'loLOI01']))
-
-
-def handle(_name, cfg, cloud, log, args):
-        # if run from the command line and given args, wipe chpasswd['list']
- # if run from command line, and give args, wipe the chpasswd['list']
- password = args[0]
- if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
- del cfg['chpasswd']['list']
- else:
- password = util.get_cfg_option_str(cfg, "password", None)
-
- expire = True
- plist = None
-
- if 'chpasswd' in cfg:
- chfg = cfg['chpasswd']
- plist = util.get_cfg_option_str(chfg, 'list', plist)
- expire = util.get_cfg_option_bool(chfg, 'expire', expire)
-
- if not plist and password:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- if user:
- plist = "%s:%s" % (user, password)
- else:
- log.warn("No default or defined user to change password for.")
-
- errors = []
- if plist:
- plist_in = []
- randlist = []
- users = []
- for line in plist.splitlines():
- u, p = line.split(':', 1)
- if p == "R" or p == "RANDOM":
- p = rand_user_password()
- randlist.append("%s:%s" % (u, p))
- plist_in.append("%s:%s" % (u, p))
- users.append(u)
-
- ch_in = '\n'.join(plist_in) + '\n'
- try:
- log.debug("Changing password for %s:", users)
- util.subp(['chpasswd'], ch_in)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set passwords with chpasswd for %s",
- users)
-
- if len(randlist):
- blurb = ("Set the following 'random' passwords\n",
- '\n'.join(randlist))
- sys.stderr.write("%s\n%s\n" % blurb)
-
- if expire:
- expired_users = []
- for u in users:
- try:
- util.subp(['passwd', '--expire', u])
- expired_users.append(u)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set 'expire' for %s", u)
- if expired_users:
- log.debug("Expired passwords for: %s users", expired_users)
-
- change_pwauth = False
- pw_auth = None
- if 'ssh_pwauth' in cfg:
- if util.is_true(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'yes'
- elif util.is_false(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'no'
- elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not str(cfg['ssh_pwauth']).strip():
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not cfg['ssh_pwauth']:
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- else:
- msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
- util.logexc(log, msg)
-
- if change_pwauth:
- replaced_auth = False
-
- # See: man sshd_config
- old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
- new_lines = []
- i = 0
- for (i, line) in enumerate(old_lines):
- # Keywords are case-insensitive and arguments are case-sensitive
- if line.key == 'passwordauthentication':
- log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
- replaced_auth = True
- line.value = pw_auth
- new_lines.append(line)
-
- if not replaced_auth:
- log.debug("Adding new auth line %s", i + 1)
- replaced_auth = True
- new_lines.append(ssh_util.SshdConfigLine('',
- 'PasswordAuthentication',
- pw_auth))
-
- lines = [str(l) for l in new_lines]
- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
-
- try:
- cmd = cloud.distro.init_cmd # Default service
- cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
- cmd.append('restart')
- if 'systemctl' in cmd: # Switch action ordering
- cmd[1], cmd[2] = cmd[2], cmd[1]
- cmd = filter(None, cmd) # Remove empty arguments
- util.subp(cmd)
- log.debug("Restarted the ssh daemon")
- except Exception:
- util.logexc(log, "Restarting of the ssh daemon failed")
-
- if len(errors):
-        log.debug("%s errors occurred, re-raising the last one",
-                  len(errors))
- raise errors[-1]
-
-
-def rand_user_password(pwlen=9):
- return util.rand_str(pwlen, select_from=PW_SET)
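
A sketch of the chpasswd form handled above; 'RANDOM' (or 'R') triggers
rand_user_password and the generated value is written to stderr:

    #cloud-config
    ssh_pwauth: true
    chpasswd:
        expire: true
        list: |
            ubuntu:RANDOM
            sam:mypassword123
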
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
deleted file mode 100644
index 1a485ee6..00000000
--- a/cloudinit/config/cc_snappy.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# vi: ts=4 expandtab
-#
-"""
-snappy module allows configuration of snappy.
-Example config:
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-
- - ssh_enabled:
- This controls the system's ssh service. The default value is 'auto'.
- True: enable ssh service
- False: disable ssh service
- auto: enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
- - snap installation and config
- The above would install 'etcd', and then install 'pkg2.smoser' with a
- '<config-file>' argument where 'config-file' has 'config-blob' inside it.
- If 'pkgname' is installed already, then 'snappy config pkgname <file>'
- will be called where 'file' has 'pkgname-config-blob' as its content.
-
- Entries in 'config' can be namespaced or non-namespaced for a package.
-   In either case, the config provided to the snappy command is
-   non-namespaced. The package name is provided as it appears.
-
- If 'packages_dir' has files in it that end in '.snap', then they are
- installed. Given 3 files:
- <packages_dir>/foo.snap
- <packages_dir>/foo.config
- <packages_dir>/bar.snap
- cloud-init will invoke:
- snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- snappy install <packages_dir>/bar.snap
-
-   Note that if a 'config' entry is provided for 'ubuntu-core', then
-   cloud-init will invoke: snappy config ubuntu-core <config>
-   allowing you to configure ubuntu-core in this way.
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import glob
-import os
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snappy"
-NAMESPACE_DELIM = '.'
-
-BUILTIN_CFG = {
- 'packages': [],
- 'packages_dir': '/writable/user-data/cloud-init/snaps',
- 'ssh_enabled': "auto",
- 'system_snappy': "auto",
- 'config': {},
-}
-
-
-def parse_filename(fname):
- fname = os.path.basename(fname)
- fname_noext = fname.rpartition(".")[0]
- name = fname_noext.partition("_")[0]
- shortname = name.partition(".")[0]
- return(name, shortname, fname_noext)
-
-
-def get_fs_package_ops(fspath):
- if not fspath:
- return []
- ops = []
- for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
- (name, shortname, fname_noext) = parse_filename(snapfile)
- cfg = None
- for cand in (fname_noext, name, shortname):
- fpcand = os.path.sep.join([fspath, cand]) + ".config"
- if os.path.isfile(fpcand):
- cfg = fpcand
- break
- ops.append(makeop('install', name, config=None,
- path=snapfile, cfgfile=cfg))
- return ops
-
-
-def makeop(op, name, config=None, path=None, cfgfile=None):
- return({'op': op, 'name': name, 'config': config, 'path': path,
- 'cfgfile': cfgfile})
-
-
-def get_package_config(configs, name):
- # load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
- # over short name entry (config-example)
- if name in configs:
- return configs[name]
- return configs.get(name.partition(NAMESPACE_DELIM)[0])
-
-
-def get_package_ops(packages, configs, installed=None, fspath=None):
- # get the install an config operations that should be done
- if installed is None:
- installed = read_installed_packages()
- short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
-
- if not packages:
- packages = []
- if not configs:
- configs = {}
-
- ops = []
- ops += get_fs_package_ops(fspath)
-
- for name in packages:
- ops.append(makeop('install', name, get_package_config(configs, name)))
-
- to_install = [f['name'] for f in ops]
- short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
-
- for name in configs:
- if name in to_install:
- continue
- shortname = name.partition(NAMESPACE_DELIM)[0]
- if shortname in short_to_install:
- continue
- if name in installed or shortname in short_installed:
- ops.append(makeop('config', name,
- config=get_package_config(configs, name)))
-
- # prefer config entries to filepath entries
- for op in ops:
- if op['op'] != 'install' or not op['cfgfile']:
- continue
- name = op['name']
- fromcfg = get_package_config(configs, op['name'])
- if fromcfg:
- LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
- op['cfgfile'] = None
- op['config'] = fromcfg
-
- return ops
-
-
-def render_snap_op(op, name, path=None, cfgfile=None, config=None):
- if op not in ('install', 'config'):
- raise ValueError("cannot render op '%s'" % op)
-
- shortname = name.partition(NAMESPACE_DELIM)[0]
- try:
- cfg_tmpf = None
- if config is not None:
- # input to 'snappy config packagename' must have nested data. odd.
- # config:
- # packagename:
- # config
- # Note, however, we do not touch config files on disk.
- nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = tempfile.mkstemp()
- os.write(fd, util.yaml_dumps(nested_cfg).encode())
- os.close(fd)
- cfgfile = cfg_tmpf
-
- cmd = [SNAPPY_CMD, op]
- if op == 'install':
- if path:
- cmd.append("--allow-unauthenticated")
- cmd.append(path)
- else:
- cmd.append(name)
- if cfgfile:
- cmd.append(cfgfile)
- elif op == 'config':
- cmd += [name, cfgfile]
-
- util.subp(cmd)
-
- finally:
- if cfg_tmpf:
- os.unlink(cfg_tmpf)
-
-
-def read_installed_packages():
- ret = []
- for (name, date, version, dev) in read_pkg_data():
- if dev:
- ret.append(NAMESPACE_DELIM.join([name, dev]))
- else:
- ret.append(name)
- return ret
-
-
-def read_pkg_data():
- out, err = util.subp([SNAPPY_CMD, "list"])
- pkg_data = []
- for line in out.splitlines()[1:]:
- toks = line.split(sep=None, maxsplit=3)
- if len(toks) == 3:
- (name, date, version) = toks
- dev = None
- else:
- (name, date, version, dev) = toks
- pkg_data.append((name, date, version, dev,))
- return pkg_data
-
-
-def disable_enable_ssh(enabled):
- LOG.debug("setting enablement of ssh to: %s", enabled)
- # do something here that would enable or disable
- not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
- if enabled:
- util.del_file(not_to_be_run)
-        # this is an idempotent operation
- util.subp(["systemctl", "start", "ssh"])
- else:
-        # this is an idempotent operation
- util.subp(["systemctl", "stop", "ssh"])
- util.write_file(not_to_be_run, "cloud-init\n")
-
-
-def system_is_snappy():
- # channel.ini is configparser loadable.
- # snappy will move to using /etc/system-image/config.d/*.ini
- # this is certainly not a perfect test, but good enough for now.
- content = util.load_file("/etc/system-image/channel.ini", quiet=True)
- if 'ubuntu-core' in content.lower():
- return True
- if os.path.isdir("/etc/system-image/config.d/"):
- return True
- return False
-
-
-def set_snappy_command():
- global SNAPPY_CMD
- if util.which("snappy-go"):
- SNAPPY_CMD = "snappy-go"
- else:
- SNAPPY_CMD = "snappy"
- LOG.debug("snappy command is '%s'", SNAPPY_CMD)
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- sys_snappy = str(mycfg.get("system_snappy", "auto"))
- if util.is_false(sys_snappy):
- LOG.debug("%s: System is not snappy. disabling", name)
- return
-
- if sys_snappy.lower() == "auto" and not(system_is_snappy()):
- LOG.debug("%s: 'auto' mode, and system not snappy", name)
- return
-
- set_snappy_command()
-
- pkg_ops = get_package_ops(packages=mycfg['packages'],
- configs=mycfg['config'],
- fspath=mycfg['packages_dir'])
-
- fails = []
- for pkg_op in pkg_ops:
- try:
- render_snap_op(**pkg_op)
- except Exception as e:
- fails.append((pkg_op, e,))
- LOG.warn("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
-
- # Default to disabling SSH
- ssh_enabled = mycfg.get('ssh_enabled', "auto")
-
- # If the user has not explicitly enabled or disabled SSH, then enable it
- # when password SSH authentication is requested or there are SSH keys
- if ssh_enabled == "auto":
- user_ssh_keys = cloud.get_public_ssh_keys() or None
- password_auth_enabled = cfg.get('ssh_pwauth', False)
- if user_ssh_keys:
- LOG.debug("Enabling SSH, ssh keys found in datasource")
- ssh_enabled = True
-        elif cfg.get('ssh_authorized_keys'):
-            LOG.debug("Enabling SSH, ssh keys found in config")
-            ssh_enabled = True
- elif password_auth_enabled:
- LOG.debug("Enabling SSH, password authentication requested")
- ssh_enabled = True
- elif ssh_enabled not in (True, False):
- LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
-
- disable_enable_ssh(ssh_enabled)
-
- if fails:
- raise Exception("failed to install/configure snaps")
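
To make the op-generation rules concrete, a small sketch of get_package_ops
(inputs are invented; 'installed' is passed explicitly so no snappy binary
is required):

    ops = get_package_ops(packages=['etcd'],
                          configs={'pkg2': {'key1': 'value1'}},
                          installed=['pkg2'], fspath=None)
    # -> an 'install' op for etcd (no config) plus a 'config' op
    #    for the already-installed pkg2 with {'key1': 'value1'}
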
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
deleted file mode 100644
index cb9b70aa..00000000
--- a/cloudinit/config/cc_ssh.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-import sys
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-DISABLE_ROOT_OPTS = (
- "no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"root\\\".\';echo;sleep 10\"")
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
-
-CONFIG_KEY_TO_FILE = {}
-PRIV_TO_PUB = {}
-for k in GENERATE_KEY_NAMES:
- CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
- CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
- PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
-
-KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-
-
-def handle(_name, cfg, cloud, log, _args):
-
- # remove the static keys from the pristine image
- if cfg.get("ssh_deletekeys", True):
- key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
- for f in glob.glob(key_pth):
- try:
- util.del_file(f)
- except Exception:
- util.logexc(log, "Failed deleting key file %s", f)
-
- if "ssh_keys" in cfg:
- # if there are keys in cloud-config, use them
- for (key, val) in cfg["ssh_keys"].items():
- if key in CONFIG_KEY_TO_FILE:
- tgt_fn = CONFIG_KEY_TO_FILE[key][0]
- tgt_perms = CONFIG_KEY_TO_FILE[key][1]
- util.write_file(tgt_fn, val, tgt_perms)
-
- for (priv, pub) in PRIV_TO_PUB.items():
- if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
- continue
- pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
- cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
- try:
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
- log.debug("Generated a key for %s from %s", pair[0], pair[1])
- except Exception:
-                util.logexc(log, "Failed generating a key for %s from %s",
- pair[0], pair[1])
- else:
- # if not, generate them
- genkeys = util.get_cfg_option_list(cfg,
- 'ssh_genkeytypes',
- GENERATE_KEY_NAMES)
- lang_c = os.environ.copy()
- lang_c['LANG'] = 'C'
- for keytype in genkeys:
- keyfile = KEY_FILE_TPL % (keytype)
- if os.path.exists(keyfile):
- continue
- util.ensure_dir(os.path.dirname(keyfile))
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
-
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
- sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
- err = util.decode_binary(e.stderr).lower()
- if (e.exit_code == 1 and
-                            err.startswith("unknown key")):
- log.debug("ssh-keygen: unknown key type '%s'", keytype)
- else:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
-
- try:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- DISABLE_ROOT_OPTS)
-
- keys = cloud.get_public_ssh_keys() or []
- if "ssh_authorized_keys" in cfg:
- cfgkeys = cfg["ssh_authorized_keys"]
- keys.extend(cfgkeys)
-
- apply_credentials(keys, user, disable_root, disable_root_opts)
- except Exception:
- util.logexc(log, "Applying ssh credentials failed!")
-
-
-def apply_credentials(keys, user, disable_root, disable_root_opts):
-
- keys = set(keys)
- if user:
- ssh_util.setup_user_keys(keys, user)
-
- if disable_root:
- if not user:
- user = "NONE"
- key_prefix = disable_root_opts.replace('$USER', user)
- else:
- key_prefix = ''
-
- ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
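
The accepted ssh_keys names are derived from GENERATE_KEY_NAMES, i.e.
rsa_private/rsa_public, dsa_private/dsa_public and so on. A sketch with the
key material elided:

    #cloud-config
    ssh_deletekeys: true
    ssh_genkeytypes: [rsa, ed25519]
    ssh_keys:
        rsa_private: |
            -----BEGIN RSA PRIVATE KEY-----
            ...
            -----END RSA PRIVATE KEY-----
        rsa_public: ssh-rsa AAAAB3... root@localhost
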
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
deleted file mode 100644
index 6ce831bc..00000000
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import hashlib
-
-from prettytable import PrettyTable
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-
-def _split_hash(bin_hash):
- split_up = []
- for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i:i + 2])
- return split_up
-
-
-def _gen_fingerprint(b64_text, hash_meth='md5'):
- if not b64_text:
- return ''
-    # TBD(harlowja): Maybe we should feed this into 'ssh-keygen -lf'?
- try:
- hasher = hashlib.new(hash_meth)
- hasher.update(base64.b64decode(b64_text))
- return ":".join(_split_hash(hasher.hexdigest()))
- except (TypeError, ValueError):
- # Raised when b64 not really b64...
- # or when the hash type is not really
- # a known/supported hash type...
- return '?'
-
-
-def _is_printable_key(entry):
- if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
- return True
- return False
-
-
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
- prefix='ci-info: '):
- if not key_entries:
-        message = ("%sno authorized ssh key fingerprints found for user %s.\n"
-                   % (prefix, user))
- util.multi_log(message)
- return
- tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
- 'Comment']
- tbl = PrettyTable(tbl_fields)
- for entry in key_entries:
- if _is_printable_key(entry):
- row = []
- row.append(entry.keytype or '-')
- row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
- row.append(entry.options or '-')
- row.append(entry.comment or '-')
- tbl.add_row(row)
- authtbl_s = tbl.get_string()
- authtbl_lines = authtbl_s.splitlines()
- max_len = len(max(authtbl_lines, key=len))
- lines = [
- util.center("Authorized keys from %s for user %s" %
- (key_fn, user), "+", max_len),
- ]
- lines.extend(authtbl_lines)
- for line in lines:
- util.multi_log(text="%s%s\n" % (prefix, line),
- stderr=False, console=True)
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.is_true(cfg.get('no_ssh_fingerprints', False)):
- log.debug(("Skipping module named %s, "
- "logging of ssh fingerprints disabled"), name)
- return
-
- hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- for (user_name, _cfg) in users.items():
- (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
- _pprint_key_entries(user_name, key_fn,
- key_entries, hash_meth)
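
The fingerprint rendering above reduces to: base64-decode the key body, hash it, and join the hex digest in byte pairs with colons. A stdlib-only sketch (the sample payload is illustrative, not a real key)::

    import base64
    import hashlib

    def fingerprint(b64_text, hash_meth='md5'):
        # Hash the decoded key material and format the hex digest as
        # colon-separated byte pairs, the classic ssh fingerprint form.
        digest = hashlib.new(hash_meth, base64.b64decode(b64_text)).hexdigest()
        return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

    sample = base64.b64encode(b"not-a-real-key").decode('ascii')
    print(fingerprint(sample))
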
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
deleted file mode 100644
index 28c4585b..00000000
--- a/cloudinit/config/cc_ssh_import_id.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import util
-import pwd
-
-# https://launchpad.net/ssh-import-id
-distros = ['ubuntu', 'debian']
-
-
-def handle(_name, cfg, cloud, log, args):
-
- # import for "user: XXXXX"
- if len(args) != 0:
- user = args[0]
- ids = []
- if len(args) > 1:
- ids = args[1:]
-
- import_ssh_ids(ids, user, log)
- return
-
- # import for cloudinit created users
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- elist = []
- for (user, user_cfg) in users.items():
- import_ids = []
- if user_cfg['default']:
- import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
- else:
- try:
- import_ids = user_cfg['ssh_import_id']
- except Exception:
- log.debug("User %s is not configured for ssh_import_id", user)
- continue
-
- try:
- import_ids = util.uniq_merge(import_ids)
- import_ids = [str(i) for i in import_ids]
- except Exception:
- log.debug("User %s is not correctly configured for ssh_import_id",
- user)
- continue
-
- if not len(import_ids):
- continue
-
- try:
- import_ssh_ids(import_ids, user, log)
- except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s", user,
- import_ids)
- elist.append(exc)
-
- if len(elist):
- raise elist[0]
-
-
-def import_ssh_ids(ids, user, log):
-
- if not (user and ids):
- log.debug("empty user(%s) or ids(%s). not importing", user, ids)
- return
-
-    # Fail early (with the natural KeyError) if the user does not exist.
-    pwd.getpwnam(user)
-
- cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
- log.debug("Importing ssh ids for user %s.", user)
-
- try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s ssh ids", user)
- raise exc
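
The actual import work is delegated to the external ssh-import-id tool, executed as the target user via sudo; the module only assembles the command line. A sketch of that assembly (the ids shown are illustrative Launchpad/GitHub ids)::

    def build_import_cmd(user, ids):
        # Mirrors the command import_ssh_ids() runs via util.subp.
        return ["sudo", "-Hu", user, "ssh-import-id"] + [str(i) for i in ids]

    print(build_import_cmd("ubuntu", ["lp:smoser", "gh:example"]))
    # ['sudo', '-Hu', 'ubuntu', 'ssh-import-id', 'lp:smoser', 'gh:example']
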
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
deleted file mode 100644
index b9eb85b2..00000000
--- a/cloudinit/config/cc_timezone.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- timezone = args[0]
- else:
- timezone = util.get_cfg_option_str(cfg, "timezone", False)
-
- if not timezone:
- log.debug("Skipping module named %s, no 'timezone' specified", name)
- return
-
- # Let the distro handle settings its timezone
- cloud.distro.set_timezone(timezone)
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
deleted file mode 100644
index 884d79f1..00000000
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** reboot system into another init.
-
-**Description:** This module provides a way for the user to boot with systemd
-even if the image is set to boot with upstart. It should be run as one of the
-first ``cloud_init_modules``, and will switch the init system and then issue a
-reboot. The next boot will come up in the target init system and no action will
-be taken.
-
-This should be inert on non-ubuntu systems, and also exit quickly.
-
-It can be configured with the following option structure::
-
- init_switch:
- target: systemd (can be 'systemd' or 'upstart')
- reboot: true (reboot if a change was made, or false to not reboot)
-
-.. note::
-
-   A best effort is made, but it is possible that this switch will break
-   the system, and it probably will not interact well with any other
-   mechanism you have used to switch the init system.
-"""
-
-from cloudinit.distros import ubuntu
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import os
-import time
-
-frequency = PER_INSTANCE
-REBOOT_CMD = ["/sbin/reboot", "--force"]
-
-DEFAULT_CONFIG = {
- 'init_switch': {'target': None, 'reboot': True}
-}
-
-SWITCH_INIT = """
-#!/bin/sh
-# switch_init: [upstart | systemd]
-
-is_systemd() {
- [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ]
-}
-debug() { echo "$@" 1>&2; }
-fail() { echo "$@" 1>&2; exit 1; }
-
-if [ "$1" = "systemd" ]; then
- if is_systemd; then
- debug "already systemd, nothing to do"
- else
- [ -f /lib/systemd/systemd ] || fail "no systemd available";
- dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\
- --rename /sbin/init
- fi
- [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init
-elif [ "$1" = "upstart" ]; then
- if is_systemd; then
- rm -f /sbin/init
- dpkg-divert --package systemd-sysv --rename --remove /sbin/init
- else
- debug "already upstart, nothing to do."
- fi
-else
-    fail "Error: expected 'upstart' or 'systemd'"
-fi
-"""
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- if not isinstance(cloud.distro, ubuntu.Distro):
- log.debug("%s: distro is '%s', not ubuntu. returning",
- name, cloud.distro.__class__)
- return
-
- cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
- target = cfg['init_switch']['target']
- reboot = cfg['init_switch']['reboot']
-
- if len(args) != 0:
- target = args[0]
- if len(args) > 1:
- reboot = util.is_true(args[1])
-
- if not target:
- log.debug("%s: target=%s. nothing to do", name, target)
- return
-
- if not util.which('dpkg'):
- log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
- return
-
- supported = ('upstart', 'systemd')
- if target not in supported:
- log.warn("%s: target set to %s, expected one of: %s",
- name, target, str(supported))
-
- if os.path.exists("/run/systemd/system"):
- current = "systemd"
- else:
- current = "upstart"
-
- if current == target:
- log.debug("%s: current = target = %s. nothing to do", name, target)
- return
-
- try:
- util.subp(['sh', '-s', target], data=SWITCH_INIT)
- except util.ProcessExecutionError as e:
- log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
- return
-
- if util.is_false(reboot):
- log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
- name, current, target)
- return
-
- try:
- log.warn("%s: switched '%s' to '%s'. rebooting.",
- name, current, target)
- logging.flushLoggers(log)
- _fire_reboot(log, wait_attempts=4, initial_sleep=4)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- raise
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
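
_fire_reboot() above is a plain exponential-backoff watchdog: it issues the reboot, sleeps in growing increments, and raises if the process is somehow still alive afterwards. The timing logic in isolation (sleep is injectable here so the sketch can run instantly)::

    import time

    def fire_reboot(reboot_fn, wait_attempts=6, initial_sleep=1, backoff=2,
                    sleep=time.sleep):
        reboot_fn()
        start = time.time()
        wait_time = initial_sleep
        for _ in range(wait_attempts):
            sleep(wait_time)       # 1, 2, 4, 8, 16, 32 seconds by default
            wait_time *= backoff
        raise RuntimeError("Reboot did not happen after %s seconds!"
                           % int(time.time() - start))

With the defaults this waits 1 + 2 + 4 + 8 + 16 + 32 = 63 seconds in total before concluding the reboot failed.
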
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
deleted file mode 100644
index 15703efe..00000000
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
- manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if util.translate_bool(manage_hosts, addons=['template']):
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
- return
-
- # Render from a template file
- tpl_fn_name = cloud.get_template_filename("hosts.%s" %
- (cloud.distro.osfamily))
- if not tpl_fn_name:
- raise RuntimeError(("No hosts template could be"
- " found for distro %s") %
- (cloud.distro.osfamily))
-
- templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
-
- elif manage_hosts == "localhost":
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
- return
-
- log.debug("Managing localhost in /etc/hosts")
- cloud.distro.update_etc_hosts(hostname, fqdn)
- else:
- log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
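
In 'template' mode the module simply re-renders /etc/hosts on every boot from a per-osfamily template, substituting the hostname/fqdn pair. A minimal stand-in for that rendering step (the template text here is hypothetical; real templates ship with cloud-init per osfamily)::

    HOSTS_TMPL = (
        "127.0.0.1 localhost\n"
        "127.0.1.1 {fqdn} {hostname}\n"
    )

    def render_hosts(hostname, fqdn):
        # str.format in place of cloud-init's templater.render_to_file.
        return HOSTS_TMPL.format(hostname=hostname, fqdn=fqdn)

    print(render_hosts("node1", "node1.example.com"))
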
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
deleted file mode 100644
index 5b78afe1..00000000
--- a/cloudinit/config/cc_update_hostname.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
- return
-
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
- log.debug("Updating hostname to %s (%s)", fqdn, hostname)
- cloud.distro.update_hostname(hostname, fqdn, prev_fn)
- except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
- hostname)
- raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
deleted file mode 100644
index bf5b4581..00000000
--- a/cloudinit/config/cc_users_groups.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-
-def handle(name, cfg, cloud, _log, _args):
- (users, groups) = ds.normalize_users_groups(cfg, cloud.distro)
- for (name, members) in groups.items():
- cloud.distro.create_group(name, members)
- for (user, config) in users.items():
- cloud.distro.create_user(user, **config)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
deleted file mode 100644
index b1096b9b..00000000
--- a/cloudinit/config/cc_write_files.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-import six
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-DEFAULT_OWNER = "root:root"
-DEFAULT_PERMS = 0o644
-UNKNOWN_ENC = 'text/plain'
-
-
-def handle(name, cfg, _cloud, log, _args):
- files = cfg.get('write_files')
- if not files:
- log.debug(("Skipping module named %s,"
- " no/empty 'write_files' key in configuration"), name)
- return
- write_files(name, files, log)
-
-
-def canonicalize_extraction(encoding_type, log):
- if not encoding_type:
- encoding_type = ''
- encoding_type = encoding_type.lower().strip()
- if encoding_type in ['gz', 'gzip']:
- return ['application/x-gzip']
- if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
- return ['application/base64', 'application/x-gzip']
-    # Yaml already encodes binary data as base64 if it is given to the
-    # yaml file as binary, so such content is automatically decoded for you.
-    # But the above b64 is just for people that are more 'comfortable'
-    # specifying it manually (which might be a possibility)
- if encoding_type in ['b64', 'base64']:
- return ['application/base64']
- if encoding_type:
- log.warn("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
- return [UNKNOWN_ENC]
-
-
-def write_files(name, files, log):
- if not files:
- return
-
- for (i, f_info) in enumerate(files):
- path = f_info.get('path')
- if not path:
- log.warn("No path provided to write for entry %s in module %s",
- i + 1, name)
- continue
- path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'), log)
- contents = extract_contents(f_info.get('content', ''), extractions)
- (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS, log)
- util.write_file(path, contents, mode=perms)
- util.chownbyname(path, u, g)
-
-
-def decode_perms(perm, default, log):
- if perm is None:
- return default
- try:
- if isinstance(perm, six.integer_types + (float,)):
- # Just 'downcast' it (if a float)
- return int(perm)
- else:
- # Force to string and try octal conversion
- return int(str(perm), 8)
- except (TypeError, ValueError):
- log.warn("Undecodable permissions %s, assuming %s", perm, default)
- return default
-
-
-def extract_contents(contents, extraction_types):
- result = contents
- for t in extraction_types:
- if t == 'application/x-gzip':
- result = util.decomp_gzip(result, quiet=False, decode=False)
- elif t == 'application/base64':
- result = base64.b64decode(result)
- elif t == UNKNOWN_ENC:
- pass
- return result
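
canonicalize_extraction() maps one user-facing encoding value to an ordered list of decode steps, so 'gz+b64' content is base64-decoded first and gunzipped second. A stdlib round-trip demonstrating that order::

    import base64
    import gzip

    payload = b"hello from write_files\n"

    # Encode the way a user would for 'gz+b64' content...
    encoded = base64.b64encode(gzip.compress(payload))

    # ...then decode in the order canonicalize_extraction returns:
    # ['application/base64', 'application/x-gzip'].
    decoded = gzip.decompress(base64.b64decode(encoded))
    assert decoded == payload
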
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
deleted file mode 100644
index 64fba869..00000000
--- a/cloudinit/config/cc_yum_add_repo.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import configobj
-import six
-
-from cloudinit import util
-
-
-def _canonicalize_id(repo_id):
- repo_id = repo_id.lower().replace("-", "_")
- repo_id = repo_id.replace(" ", "_")
- return repo_id
-
-
-def _format_repo_value(val):
- if isinstance(val, (bool)):
- # Seems like yum prefers 1/0
- return str(int(val))
- if isinstance(val, (list, tuple)):
- # Can handle 'lists' in certain cases
- # See: http://bit.ly/Qqrf1t
- return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, six.string_types):
- return str(val)
- return val
-
-
-# TODO(harlowja): move to distro?
-# See man yum.conf
-def _format_repository_config(repo_id, repo_config):
- to_be = configobj.ConfigObj()
- to_be[repo_id] = {}
- # Do basic translation of the items -> values
- for (k, v) in repo_config.items():
- # For now assume that people using this know
- # the format of yum and don't verify keys/values further
- to_be[repo_id][k] = _format_repo_value(v)
- lines = to_be.write()
- lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
- return "\n".join(lines)
-
-
-def handle(name, cfg, _cloud, log, _args):
- repos = cfg.get('yum_repos')
- if not repos:
- log.debug(("Skipping module named %s,"
- " no 'yum_repos' configuration found"), name)
- return
- repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
- '/etc/yum.repos.d/')
- repo_locations = {}
- repo_configs = {}
- for (repo_id, repo_config) in repos.items():
- canon_repo_id = _canonicalize_id(repo_id)
- repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
- if os.path.exists(repo_fn_pth):
- log.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
- continue
- elif canon_repo_id in repo_locations:
- log.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
- continue
- if not repo_config:
- repo_config = {}
- # Do some basic sanity checks/cleaning
- n_repo_config = {}
- for (k, v) in repo_config.items():
- k = k.lower().strip().replace("-", "_")
- if k:
- n_repo_config[k] = v
- repo_config = n_repo_config
- missing_required = 0
- for req_field in ['baseurl']:
- if req_field not in repo_config:
-            log.warn(("Repository %s does not contain the required"
-                      " configuration entry '%s'"),
- repo_id, req_field)
- missing_required += 1
- if not missing_required:
- repo_configs[canon_repo_id] = repo_config
- repo_locations[canon_repo_id] = repo_fn_pth
- else:
- log.warn("Repository %s is missing %s required fields, skipping!",
- repo_id, missing_required)
- for (c_repo_id, path) in repo_locations.items():
- repo_blob = _format_repository_config(c_repo_id,
- repo_configs.get(c_repo_id))
- util.write_file(path, repo_blob)
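
The two helpers at the top are self-contained string transforms: repo ids are lower-cased with separators normalized, booleans become yum's preferred 1/0, and lists become indented continuation lines. Sketched without the configobj dependency::

    def canonicalize_id(repo_id):
        # yum section names: lowercase, underscores for dashes/spaces.
        return repo_id.lower().replace("-", "_").replace(" ", "_")

    def format_repo_value(val):
        if isinstance(val, bool):
            return str(int(val))              # yum prefers 1/0
        if isinstance(val, (list, tuple)):    # continuation lines
            return "\n    ".join(format_repo_value(v) for v in val)
        return str(val)

    print(canonicalize_id("EPEL Testing-Repo"))  # epel_testing_repo
    print(format_repo_value(True))               # 1
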
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
deleted file mode 100644
index 412431f2..00000000
--- a/cloudinit/cs_utils.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 CloudSigma
-#
-# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-cepko implements easy-to-use communication with CloudSigma's VMs through
-a virtual serial port, without the need to format the messages properly
-or to parse the output with the specialized and sometimes confusing
-shell tools intended for that purpose.
-
-Having the server definition accessible from within the VM can be useful
-in various ways. For example, it makes it easy to determine which network
-interfaces are connected to the public network and which to the private one.
-Another use is passing data to initial VM setup scripts, such as setting
-the hostname to the VM name or passing ssh public keys through server meta.
-
-For more information take a look at the Server Context section of CloudSigma
-API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
-"""
-import json
-import platform
-
-from cloudinit import serial
-
-
-# These high timeouts are necessary, as a read may return a lot of data.
-READ_TIMEOUT = 60
-WRITE_TIMEOUT = 10
-
-SERIAL_PORT = '/dev/ttyS1'
-if platform.system() == 'Windows':
- SERIAL_PORT = 'COM2'
-
-
-class Cepko(object):
- """
-    A single instance of this object can be used for one or more
-    queries to the serial port.
- """
- request_pattern = "<\n{}\n>"
-
- def get(self, key="", request_pattern=None):
- if request_pattern is None:
- request_pattern = self.request_pattern
- return CepkoResult(request_pattern.format(key))
-
- def all(self):
- return self.get()
-
- def meta(self, key=""):
- request_pattern = self.request_pattern.format("/meta/{}")
- return self.get(key, request_pattern)
-
- def global_context(self, key=""):
- request_pattern = self.request_pattern.format("/global_context/{}")
- return self.get(key, request_pattern)
-
-
-class CepkoResult(object):
- """
- CepkoResult executes the request to the virtual serial port as soon
- as the instance is initialized and stores the result in both raw and
- marshalled format.
- """
- def __init__(self, request):
- self.request = request
- self.raw_result = self._execute()
- self.result = self._marshal(self.raw_result)
-
- def _execute(self):
- connection = serial.Serial(port=SERIAL_PORT,
- timeout=READ_TIMEOUT,
- writeTimeout=WRITE_TIMEOUT)
- connection.write(self.request.encode('ascii'))
- return connection.readline().strip(b'\x04\n').decode('ascii')
-
- def _marshal(self, raw_result):
- try:
- return json.loads(raw_result)
- except ValueError:
- return raw_result
-
- def __len__(self):
- return self.result.__len__()
-
- def __getitem__(self, key):
- return self.result.__getitem__(key)
-
- def __contains__(self, item):
- return self.result.__contains__(item)
-
- def __iter__(self):
- return self.result.__iter__()
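
The wire format is minimal: the request (a key, or a pre-baked path such as /meta/<key>) is framed between '<' and '>' lines and written to the serial port. The framing in isolation::

    request_pattern = "<\n{}\n>"

    def meta_request(key=""):
        # Bake the /meta/ prefix into the pattern, then substitute the key,
        # exactly as Cepko.meta() does before handing off to CepkoResult.
        return request_pattern.format("/meta/{}").format(key)

    print(repr(meta_request("ssh_public_key")))
    # '<\n/meta/ssh_public_key\n>'
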
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
deleted file mode 100644
index 40af8802..00000000
--- a/cloudinit/distros/__init__.py
+++ /dev/null
@@ -1,980 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import StringIO
-
-import abc
-import os
-import re
-import stat
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit import ssh_util
-from cloudinit import type_utils
-from cloudinit import util
-
-from cloudinit.distros.parsers import hosts
-
-
-OSFAMILIES = {
- 'debian': ['debian', 'ubuntu'],
- 'redhat': ['fedora', 'rhel'],
- 'gentoo': ['gentoo'],
- 'freebsd': ['freebsd'],
- 'suse': ['sles'],
- 'arch': ['arch'],
-}
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Distro(object):
-
- usr_lib_exec = "/usr/lib"
- hosts_fn = "/etc/hosts"
- ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
- hostname_conf_fn = "/etc/hostname"
- tz_zone_dir = "/usr/share/zoneinfo"
- init_cmd = ['service'] # systemctl, service etc
-
- def __init__(self, name, cfg, paths):
- self._paths = paths
- self._cfg = cfg
- self.name = name
-
- @abc.abstractmethod
- def install_packages(self, pkglist):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _write_network(self, settings):
- # In the future use the http://fedorahosted.org/netcf/
- # to write this blob out in a distro format
- raise NotImplementedError()
-
- def _write_network_config(self, settings):
- raise NotImplementedError()
-
- def _find_tz_file(self, tz):
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise IOError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
- return tz_file
-
- def get_option(self, opt_name, default=None):
- return self._cfg.get(opt_name, default)
-
- def set_hostname(self, hostname, fqdn=None):
- writeable_hostname = self._select_hostname(hostname, fqdn)
- self._write_hostname(writeable_hostname, self.hostname_conf_fn)
- self._apply_hostname(writeable_hostname)
-
- def uses_systemd(self):
- try:
- res = os.lstat('/run/systemd/system')
- return stat.S_ISDIR(res.st_mode)
- except Exception:
- return False
-
- @abc.abstractmethod
- def package_command(self, cmd, args=None, pkgs=None):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def update_package_sources(self):
- raise NotImplementedError()
-
- def get_primary_arch(self):
-        arch = os.uname()[4]
- if arch in ("i386", "i486", "i586", "i686"):
- return "i386"
- return arch
-
- def _get_arch_package_mirror_info(self, arch=None):
- mirror_info = self.get_option("package_mirrors", [])
- if not arch:
- arch = self.get_primary_arch()
- return _get_arch_package_mirror_info(mirror_info, arch)
-
- def get_package_mirror_info(self, arch=None, data_source=None):
- # This resolves the package_mirrors config option
- # down to a single dict of {mirror_name: mirror_url}
- arch_info = self._get_arch_package_mirror_info(arch)
- return _get_package_mirror_info(data_source=data_source,
- mirror_info=arch_info)
-
- def apply_network(self, settings, bring_up=True):
- # this applies network where 'settings' is interfaces(5) style
- # it is obsolete compared to apply_network_config
- # Write it out
- dev_names = self._write_network(settings)
- # Now try to bring them up
- if bring_up:
- return self._bring_up_interfaces(dev_names)
- return False
-
- def _apply_network_from_network_config(self, netconfig, bring_up=True):
- distro = self.__class__
- LOG.warn("apply_network_config is not currently implemented "
- "for distribution '%s'. Attempting to use apply_network",
- distro)
- header = '\n'.join([
- "# Converted from network_config for distro %s" % distro,
-            "# Implementation of _write_network_config is needed."
- ])
- ns = network_state.parse_net_config_data(netconfig)
- contents = eni.network_state_to_eni(
- ns, header=header, render_hwaddress=True)
- return self.apply_network(contents, bring_up=bring_up)
-
- def apply_network_config(self, netconfig, bring_up=False):
- # apply network config netconfig
- # This method is preferred to apply_network which only takes
- # a much less complete network config format (interfaces(5)).
- try:
- dev_names = self._write_network_config(netconfig)
- except NotImplementedError:
- # backwards compat until all distros have apply_network_config
- return self._apply_network_from_network_config(
- netconfig, bring_up=bring_up)
-
- # Now try to bring them up
- if bring_up:
- return self._bring_up_interfaces(dev_names)
- return False
-
- def apply_network_config_names(self, netconfig):
- net.apply_network_config_names(netconfig)
-
- @abc.abstractmethod
- def apply_locale(self, locale, out_fn=None):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def set_timezone(self, tz):
- raise NotImplementedError()
-
- def _get_localhost_ip(self):
- return "127.0.0.1"
-
- @abc.abstractmethod
- def _read_hostname(self, filename, default=None):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _write_hostname(self, hostname, filename):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _read_system_hostname(self):
- raise NotImplementedError()
-
- def _apply_hostname(self, hostname):
-        # This really only sets the hostname temporarily
-        # (until reboot), so it should not be depended on.
-        # Use the write hostname functions for 'permanent'
-        # adjustments.
- LOG.debug("Non-persistently setting the system hostname to %s",
- hostname)
- try:
- util.subp(['hostname', hostname])
- except util.ProcessExecutionError:
- util.logexc(LOG, "Failed to non-persistently adjust the system "
- "hostname to %s", hostname)
-
- def _select_hostname(self, hostname, fqdn):
- # Prefer the short hostname over the long
- # fully qualified domain name
- if not hostname:
- return fqdn
- return hostname
-
- @staticmethod
- def expand_osfamily(family_list):
- distros = []
- for family in family_list:
- if family not in OSFAMILIES:
-                raise ValueError("No distributions found for osfamily %s"
- % (family))
- distros.extend(OSFAMILIES[family])
- return distros
-
- def update_hostname(self, hostname, fqdn, prev_hostname_fn):
- applying_hostname = hostname
-
- # Determine what the actual written hostname should be
- hostname = self._select_hostname(hostname, fqdn)
-
-        # If the previous hostname file exists, let's see if we
-        # can get a hostname from it
- if prev_hostname_fn and os.path.exists(prev_hostname_fn):
- prev_hostname = self._read_hostname(prev_hostname_fn)
- else:
- prev_hostname = None
-
-        # Let's get where we should write the system hostname
-        # and what the system hostname is
- (sys_fn, sys_hostname) = self._read_system_hostname()
- update_files = []
-
-        # If there is no previous hostname or it differs
-        # from what we want, let's update it or create the
-        # file in the first place
- if not prev_hostname or prev_hostname != hostname:
- update_files.append(prev_hostname_fn)
-
-        # If the system hostname is different from the previous
-        # one or the desired one, let's update it as well
- if ((not sys_hostname) or (sys_hostname == prev_hostname and
- sys_hostname != hostname)):
- update_files.append(sys_fn)
-
- # If something else has changed the hostname after we set it
- # initially, we should not overwrite those changes (we should
- # only be setting the hostname once per instance)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.info("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
- return
-
-        # Remove duplicates: in case the previous config filename
-        # is the same as the system config filename, don't bother
-        # updating it twice
- update_files = set([f for f in update_files if f])
- LOG.debug("Attempting to update hostname to %s in %s files",
- hostname, len(update_files))
-
- for fn in update_files:
- try:
- self._write_hostname(hostname, fn)
- except IOError:
- util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
- fn)
-
- # If the system hostname file name was provided set the
- # non-fqdn as the transient hostname.
- if sys_fn in update_files:
- self._apply_hostname(applying_hostname)
-
- def update_etc_hosts(self, hostname, fqdn):
- header = ''
- if os.path.exists(self.hosts_fn):
- eh = hosts.HostsConf(util.load_file(self.hosts_fn))
- else:
- eh = hosts.HostsConf('')
- header = util.make_header(base="added")
- local_ip = self._get_localhost_ip()
- prev_info = eh.get_entry(local_ip)
- need_change = False
- if not prev_info:
- eh.add_entry(local_ip, fqdn, hostname)
- need_change = True
- else:
- need_change = True
- for entry in prev_info:
- entry_fqdn = None
- entry_aliases = []
- if len(entry) >= 1:
- entry_fqdn = entry[0]
- if len(entry) >= 2:
- entry_aliases = entry[1:]
- if entry_fqdn is not None and entry_fqdn == fqdn:
- if hostname in entry_aliases:
- # Exists already, leave it be
- need_change = False
- if need_change:
- # Doesn't exist, add that entry in...
- new_entries = list(prev_info)
- new_entries.append([fqdn, hostname])
- eh.del_entries(local_ip)
- for entry in new_entries:
- if len(entry) == 1:
- eh.add_entry(local_ip, entry[0])
- elif len(entry) >= 2:
- eh.add_entry(local_ip, *entry)
- if need_change:
- contents = StringIO()
- if header:
- contents.write("%s\n" % (header))
- contents.write("%s\n" % (eh))
- util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
-
- def _bring_up_interface(self, device_name):
- cmd = ['ifup', device_name]
-        LOG.debug("Attempting to bring up interface %s using command %s",
-                  device_name, cmd)
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- return True
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
-
- def _bring_up_interfaces(self, device_names):
- am_failed = 0
- for d in device_names:
- if not self._bring_up_interface(d):
- am_failed += 1
- if am_failed == 0:
- return True
- return False
-
- def get_default_user(self):
- return self.get_option('default_user')
-
- def add_user(self, name, **kwargs):
- """
- Add a user to the system using standard GNU tools
- """
- if util.is_user(name):
- LOG.info("User %s already exists, skipping." % name)
- return
-
- if 'create_groups' in kwargs:
- create_groups = kwargs.pop('create_groups')
- else:
- create_groups = True
-
- adduser_cmd = ['useradd', name]
- log_adduser_cmd = ['useradd', name]
-
- # Since we are creating users, we want to carefully validate the
- # inputs. If something goes wrong, we can end up with a system
-        # that nobody can log in to.
- adduser_opts = {
- "gecos": '--comment',
- "homedir": '--home',
- "primary_group": '--gid',
- "uid": '--uid',
- "groups": '--groups',
- "passwd": '--password',
- "shell": '--shell',
- "expiredate": '--expiredate',
- "inactive": '--inactive',
- "selinux_user": '--selinux-user',
- }
-
- adduser_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
- }
-
- redact_opts = ['passwd']
-
- # support kwargs having groups=[list] or groups="g1,g2"
- groups = kwargs.get('groups')
- if groups:
- if isinstance(groups, (list, tuple)):
-                # kwargs.items loop below wants a comma delimited string
- # that can go right through to the command.
- kwargs['groups'] = ",".join(groups)
- else:
- groups = groups.split(",")
-
- primary_group = kwargs.get('primary_group')
- if primary_group:
- groups.append(primary_group)
-
- if create_groups and groups:
- for group in groups:
- if not util.is_group(group):
- self.create_group(group)
-                    LOG.debug("created group %s for user %s", group, name)
-
- # Check the values and create the command
- for key, val in kwargs.items():
-
- if key in adduser_opts and val and isinstance(val, str):
- adduser_cmd.extend([adduser_opts[key], val])
-
- # Redact certain fields from the logs
- if key in redact_opts:
- log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
- else:
- log_adduser_cmd.extend([adduser_opts[key], val])
-
- elif key in adduser_flags and val:
- adduser_cmd.append(adduser_flags[key])
- log_adduser_cmd.append(adduser_flags[key])
-
- # Don't create the home directory if directed so or if the user is a
- # system user
- if 'no_create_home' in kwargs or 'system' in kwargs:
- adduser_cmd.append('-M')
- log_adduser_cmd.append('-M')
- else:
- adduser_cmd.append('-m')
- log_adduser_cmd.append('-m')
-
- # Run the command
- LOG.debug("Adding user %s", name)
- try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to create user %s", name)
- raise e
-
- def create_user(self, name, **kwargs):
- """
- Creates users for the system using the GNU passwd tools. This
-        will work on a GNU system. This should be overridden on
- distros where useradd is not desirable or not available.
- """
-
- # Add the user
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Set password if hashed password is provided and non-empty
- if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
- self.set_passwd(name, kwargs['hashed_passwd'], hashed=True)
-
-        # Default to locking down the account: 'lock_passwd' defaults to
-        # True; lock the account unless 'lock_passwd' is False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- # Try to handle this in a smart manner.
- keys = kwargs['ssh_authorized_keys']
- if isinstance(keys, six.string_types):
- keys = [keys]
- elif isinstance(keys, dict):
- keys = list(keys.values())
- if keys is not None:
- if not isinstance(keys, (tuple, list, set)):
- LOG.warn("Invalid type '%s' detected for"
- " 'ssh_authorized_keys', expected list,"
- " string, dict, or set.", type(keys))
- else:
- keys = set(keys) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
- return True
-
- def lock_passwd(self, name):
- """
- Lock the password of a user, i.e., disable password logins
- """
- try:
- # Need to use the short option name '-l' instead of '--lock'
- # (which would be more descriptive) since SLES 11 doesn't know
- # about long names.
- util.subp(['passwd', '-l', name])
- except Exception as e:
- util.logexc(LOG, 'Failed to disable password for user %s', name)
- raise e
-
- def set_passwd(self, user, passwd, hashed=False):
- pass_string = '%s:%s' % (user, passwd)
- cmd = ['chpasswd']
-
- if hashed:
- # Need to use the short option name '-e' instead of '--encrypted'
- # (which would be more descriptive) since SLES 11 doesn't know
- # about long names.
- cmd.append('-e')
-
- try:
- util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
- except Exception as e:
- util.logexc(LOG, "Failed to set password for %s", user)
- raise e
-
- return True
-
- def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
- # Ensure the dir is included and that
- # it actually exists as a directory
- sudoers_contents = ''
- base_exists = False
- if os.path.exists(sudo_base):
- sudoers_contents = util.load_file(sudo_base)
- base_exists = True
- found_include = False
- for line in sudoers_contents.splitlines():
- line = line.strip()
- include_match = re.search(r"^#includedir\s+(.*)$", line)
- if not include_match:
- continue
- included_dir = include_match.group(1).strip()
- if not included_dir:
- continue
- included_dir = os.path.abspath(included_dir)
- if included_dir == path:
- found_include = True
- break
- if not found_include:
- try:
- if not base_exists:
- lines = [('# See sudoers(5) for more information'
- ' on "#include" directives:'), '',
- util.make_header(base="added"),
- "#includedir %s" % (path), '']
- sudoers_contents = "\n".join(lines)
- util.write_file(sudo_base, sudoers_contents, 0o440)
- else:
- lines = ['', util.make_header(base="added"),
- "#includedir %s" % (path), '']
- sudoers_contents = "\n".join(lines)
- util.append_file(sudo_base, sudoers_contents)
- LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
- except IOError as e:
- util.logexc(LOG, "Failed to write %s", sudo_base)
- raise e
- util.ensure_dir(path, 0o750)
-
- def write_sudo_rules(self, user, rules, sudo_file=None):
- if not sudo_file:
- sudo_file = self.ci_sudoers_fn
-
- lines = [
- '',
- "# User rules for %s" % user,
- ]
- if isinstance(rules, (list, tuple)):
- for rule in rules:
- lines.append("%s %s" % (user, rule))
- elif isinstance(rules, six.string_types):
- lines.append("%s %s" % (user, rules))
- else:
- msg = "Can not create sudoers rule addition with type %r"
- raise TypeError(msg % (type_utils.obj_name(rules)))
- content = "\n".join(lines)
- content += "\n" # trailing newline
-
- self.ensure_sudo_dir(os.path.dirname(sudo_file))
- if not os.path.exists(sudo_file):
- contents = [
- util.make_header(),
- content,
- ]
- try:
- util.write_file(sudo_file, "\n".join(contents), 0o440)
- except IOError as e:
- util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
- raise e
- else:
- try:
- util.append_file(sudo_file, content)
- except IOError as e:
- util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
- raise e
-
- def create_group(self, name, members=None):
- group_add_cmd = ['groupadd', name]
- if not members:
- members = []
-
-        # Check if the group exists, and create it if it doesn't
- if util.is_group(name):
- LOG.warn("Skipping creation of existing group '%s'" % name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s" % name)
- except Exception:
- util.logexc(LOG, "Failed to create group %s", name)
-
- # Add members to the group, if so defined
- if len(members) > 0:
- for member in members:
- if not util.is_user(member):
- LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
-
- util.subp(['usermod', '-a', '-G', name, member])
- LOG.info("Added user '%s' to group '%s'" % (member, name))
-
-
-def _get_package_mirror_info(mirror_info, data_source=None,
- mirror_filter=util.search_for_mirror):
-    # Given an arch-specific 'mirror_info' entry (from package_mirrors),
-    # search through the 'search' entries and fall back appropriately.
-    # Returns a dict with only {name: mirror} entries.
- if not mirror_info:
- mirror_info = {}
-
- # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
- # the region is us-east-1. so region = az[0:-1]
- directions_re = '|'.join([
- 'central', 'east', 'north', 'northeast', 'northwest',
- 'south', 'southeast', 'southwest', 'west'])
- ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
-
- subst = {}
- if data_source and data_source.availability_zone:
- subst['availability_zone'] = data_source.availability_zone
-
- if re.match(ec2_az_re, data_source.availability_zone):
- subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
-
- if data_source and data_source.region:
- subst['region'] = data_source.region
-
- results = {}
- for (name, mirror) in mirror_info.get('failsafe', {}).items():
- results[name] = mirror
-
- for (name, searchlist) in mirror_info.get('search', {}).items():
- mirrors = []
- for tmpl in searchlist:
- try:
- mirrors.append(tmpl % subst)
- except KeyError:
- pass
-
- found = mirror_filter(mirrors)
- if found:
- results[name] = found
-
- LOG.debug("filtered distro mirror info: %s" % results)
-
- return results
-
-
-def _get_arch_package_mirror_info(package_mirrors, arch):
- # pull out the specific arch from a 'package_mirrors' config option
- default = None
- for item in package_mirrors:
- arches = item.get("arches")
- if arch in arches:
- return item
- if "default" in arches:
- default = item
- return default
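
Mirror resolution is two-phased: first pick the package_mirrors entry whose 'arches' list matches (falling back to the entry listing "default"), then resolve each mirror name from its search templates or failsafe URL. The arch-selection phase, with illustrative data shaped like Ubuntu's defaults::

    def get_arch_mirror_info(package_mirrors, arch):
        default = None
        for item in package_mirrors:
            arches = item.get("arches", [])
            if arch in arches:
                return item
            if "default" in arches:
                default = item
        return default

    mirrors = [
        {"arches": ["i386", "amd64"],
         "failsafe": {"primary": "http://archive.ubuntu.com/ubuntu"}},
        {"arches": ["default"],
         "failsafe": {"primary": "http://ports.ubuntu.com/ubuntu-ports"}},
    ]
    print(get_arch_mirror_info(mirrors, "arm64")["failsafe"]["primary"])
    # http://ports.ubuntu.com/ubuntu-ports
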
-
-
-# Normalizes an input group configuration,
-# which can be a comma separated list of
-# group names, a list of group names, or a
-# python dictionary mapping group names to
-# lists of members of that group.
-#
-# The output is a dictionary of group
-# names => members of that group, which
-# is the standard form used in the rest
-# of cloud-init.
-def _normalize_groups(grp_cfg):
- if isinstance(grp_cfg, six.string_types):
- grp_cfg = grp_cfg.strip().split(",")
- if isinstance(grp_cfg, list):
- c_grp_cfg = {}
- for i in grp_cfg:
- if isinstance(i, dict):
- for k, v in i.items():
- if k not in c_grp_cfg:
- if isinstance(v, list):
- c_grp_cfg[k] = list(v)
- elif isinstance(v, six.string_types):
- c_grp_cfg[k] = [v]
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
- else:
- if isinstance(v, list):
- c_grp_cfg[k].extend(v)
- elif isinstance(v, six.string_types):
- c_grp_cfg[k].append(v)
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
- elif isinstance(i, six.string_types):
- if i not in c_grp_cfg:
- c_grp_cfg[i] = []
- else:
- raise TypeError("Unknown group name type %s" %
- type_utils.obj_name(i))
- grp_cfg = c_grp_cfg
- groups = {}
- if isinstance(grp_cfg, dict):
- for (grp_name, grp_members) in grp_cfg.items():
- groups[grp_name] = util.uniq_merge_sorted(grp_members)
- else:
- raise TypeError(("Group config must be list, dict "
- " or string types only and not %s") %
- type_utils.obj_name(grp_cfg))
- return groups
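
All three accepted shapes collapse to one {group: [members]} dict. A condensed sketch of the same normalization, using sorted(set(...)) in place of cloud-init's uniq_merge_sorted::

    def normalize_groups(grp_cfg):
        # Accepts "adm,sudo", ["adm", {"devs": ["alice"]}], or a dict.
        if isinstance(grp_cfg, str):
            grp_cfg = [g.strip() for g in grp_cfg.split(",")]
        if isinstance(grp_cfg, list):
            merged = {}
            for item in grp_cfg:
                if isinstance(item, dict):
                    for name, members in item.items():
                        entry = merged.setdefault(name, [])
                        entry.extend([members] if isinstance(members, str)
                                     else list(members))
                else:
                    merged.setdefault(item, [])
            grp_cfg = merged
        return {n: sorted(set(m)) for n, m in grp_cfg.items()}

    print(normalize_groups("adm, sudo"))
    print(normalize_groups(["adm", {"devs": ["alice", "bob"]}]))
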
-
-
-# Normalizes an input user configuration,
-# which can be a comma separated list of
-# user names, a list of string user names,
-# or a list of dictionaries with components
-# that define the user config plus 'name'
-# (if a 'name' field does not exist then the
-# default user is assumed to 'own' that
-# configuration).
-#
-# The output is a dictionary of user
-# names => user config, which is the standard
-# form used in the rest of cloud-init. Note
-# the default user will have a special config
-# entry 'default' marked as true; all other
-# users will be marked as false.
-def _normalize_users(u_cfg, def_user_cfg=None):
- if isinstance(u_cfg, dict):
- ad_ucfg = []
- for (k, v) in u_cfg.items():
- if isinstance(v, (bool, int, float) + six.string_types):
- if util.is_true(v):
- ad_ucfg.append(str(k))
- elif isinstance(v, dict):
- v['name'] = k
- ad_ucfg.append(v)
- else:
- raise TypeError(("Unmappable user value type %s"
- " for key %s") % (type_utils.obj_name(v), k))
- u_cfg = ad_ucfg
- elif isinstance(u_cfg, six.string_types):
- u_cfg = util.uniq_merge_sorted(u_cfg)
-
- users = {}
- for user_config in u_cfg:
- if isinstance(user_config, (list,) + six.string_types):
- for u in util.uniq_merge(user_config):
- if u and u not in users:
- users[u] = {}
- elif isinstance(user_config, dict):
- if 'name' in user_config:
- n = user_config.pop('name')
- prev_config = users.get(n) or {}
- users[n] = util.mergemanydict([prev_config,
- user_config])
- else:
- # Assume the default user then
- prev_config = users.get('default') or {}
- users['default'] = util.mergemanydict([prev_config,
- user_config])
- else:
- raise TypeError(("User config must be dictionary/list "
- " or string types only and not %s") %
- type_utils.obj_name(user_config))
-
- # Ensure user options are in the right python friendly format
- if users:
- c_users = {}
- for (uname, uconfig) in users.items():
- c_uconfig = {}
- for (k, v) in uconfig.items():
- k = k.replace('-', '_').strip()
- if k:
- c_uconfig[k] = v
- c_users[uname] = c_uconfig
- users = c_users
-
- # Fixup the default user into the real
- # default user name and replace it...
- def_user = None
- if users and 'default' in users:
- def_config = users.pop('default')
- if def_user_cfg:
-            # Pick up what the default 'real name' is
- # and any groups that are provided by the
- # default config
- def_user_cfg = def_user_cfg.copy()
- def_user = def_user_cfg.pop('name')
- def_groups = def_user_cfg.pop('groups', [])
-            # Pick up any config + groups for that user name
- # that we may have previously extracted
- parsed_config = users.pop(def_user, {})
- parsed_groups = parsed_config.get('groups', [])
- # Now merge our extracted groups with
- # anything the default config provided
- users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
- parsed_config['groups'] = ",".join(users_groups)
- # The real config for the default user is the
- # combination of the default user config provided
- # by the distro, the default user config provided
- # by the above merging for the user 'default' and
- # then the parsed config from the user's 'real name'
- # which does not have to be 'default' (but could be)
- users[def_user] = util.mergemanydict([def_user_cfg,
- def_config,
- parsed_config])
-
- # Ensure that only the default user that we
- # found (if any) is actually marked as being
- # the default user
- if users:
- for (uname, uconfig) in users.items():
- if def_user and uname == def_user:
- uconfig['default'] = True
- else:
- uconfig['default'] = False
-
- return users
-
-
-# Normalizes a set of user/users and group
-# dictionary configurations into a usable
-# format that the rest of cloud-init can
-# understand, using the default user
-# provided by the input distribution (if any)
-# to allow for mapping of the 'default' user.
-#
-# Output is a dictionary of group names -> [member] (list)
-# and a dictionary of user names -> user configuration (dict)
-#
-# If 'user' exists it will override
-# the 'users'[0] entry (if a list); otherwise it will
-# just become an entry in the returned dictionary (no override)
-def normalize_users_groups(cfg, distro):
- if not cfg:
- cfg = {}
-
- users = {}
- groups = {}
- if 'groups' in cfg:
- groups = _normalize_groups(cfg['groups'])
-
- # Handle the previous style of doing this where the first user
- # overrides the concept of the default user if provided in the user: XYZ
- # format.
- old_user = {}
- if 'user' in cfg and cfg['user']:
- old_user = cfg['user']
- # Translate it into the format that is more useful
- # going forward
- if isinstance(old_user, six.string_types):
- old_user = {
- 'name': old_user,
- }
- if not isinstance(old_user, dict):
- LOG.warn(("Format for 'user' key must be a string or "
- "dictionary and not %s"), type_utils.obj_name(old_user))
- old_user = {}
-
- # If no old user format, then assume the distro
- # provides what the 'default' user maps to, but notice
- # that if this is provided, we won't automatically inject
-    # a 'default' user into the users list, while if an old user
- # format is provided we will.
- distro_user_config = {}
- try:
- distro_user_config = distro.get_default_user()
- except NotImplementedError:
-        LOG.warn(("Distro has not implemented default user "
-                  "access. No distribution-provided default user"
-                  " will be normalized."))
-
-    # Merge the old user (which may just be an empty dict when not
-    # present) with the distro-provided default user configuration, so
-    # that the old user style picks up all the distribution-specific
-    # attributes (if any)
- default_user_config = util.mergemanydict([old_user, distro_user_config])
-
- base_users = cfg.get('users', [])
- if not isinstance(base_users, (list, dict) + six.string_types):
- LOG.warn(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list and not %s"),
- type_utils.obj_name(base_users))
- base_users = []
-
- if old_user:
- # Ensure that when user: is provided that this user
- # always gets added (as the default user)
- if isinstance(base_users, list):
- # Just add it on at the end...
- base_users.append({'name': 'default'})
- elif isinstance(base_users, dict):
- base_users['default'] = dict(base_users).get('default', True)
- elif isinstance(base_users, six.string_types):
- # Just append it on to be re-parsed later
- base_users += ",default"
-
- users = _normalize_users(base_users, default_user_config)
- return (users, groups)
-
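The net effect of the merging in normalize_users_groups is easiest to see with concrete data. A purely illustrative sketch (the values are hypothetical, and keys merged in from the distro's default-user config are omitted), assuming the input cloud-config below:

```python
# Hypothetical cloud-config fragment, for illustration only:
cfg = {
    'user': 'admin',  # old style: 'admin' becomes the default user
    'users': ['default', {'name': 'bob', 'groups': 'sudo'}],
    'groups': ['staff: bob'],
}
# normalize_users_groups(cfg, distro) then returns roughly this shape
# (distro-provided default-user keys omitted for brevity):
expected_users = {
    'admin': {'default': True},
    'bob': {'name': 'bob', 'groups': 'sudo', 'default': False},
}
expected_groups = {'staff': ['bob']}
```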
-
-# Given a dictionary of user configurations, extract
-# the default user's name and config and return them
-# as a tuple, or return the provided fallbacks
-# (default_name, default_config) if no default user
-# is found in the given input.
-def extract_default(users, default_name=None, default_config=None):
- if not users:
- users = {}
-
- def safe_find(entry):
- config = entry[1]
- if not config or 'default' not in config:
- return False
- else:
- return config['default']
-
- tmp_users = users.items()
- tmp_users = dict(filter(safe_find, tmp_users))
- if not tmp_users:
- return (default_name, default_config)
- else:
- name = list(tmp_users)[0]
- config = tmp_users[name]
- config.pop('default', None)
- return (name, config)
-
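Since extract_default only touches its arguments, it can be lifted out and exercised standalone; a copy of the function from the hunk above, plus a small check:

```python
def extract_default(users, default_name=None, default_config=None):
    # Copied from the hunk above so it can run on its own.
    if not users:
        users = {}

    def safe_find(entry):
        config = entry[1]
        if not config or 'default' not in config:
            return False
        else:
            return config['default']

    tmp_users = users.items()
    tmp_users = dict(filter(safe_find, tmp_users))
    if not tmp_users:
        return (default_name, default_config)
    else:
        name = list(tmp_users)[0]
        config = tmp_users[name]
        config.pop('default', None)
        return (name, config)

users = {'bob': {'default': False},
         'admin': {'default': True, 'shell': '/bin/bash'}}
print(extract_default(users))         # ('admin', {'shell': '/bin/bash'})
print(extract_default({}, 'ubuntu'))  # ('ubuntu', None)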
-
-def fetch(name):
- locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
- if not locs:
- raise ImportError("No distribution found for distro %s (searched %s)"
- % (name, looked_locs))
- mod = importer.import_module(locs[0])
- cls = getattr(mod, 'Distro')
- return cls
-
-
-def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
- tz_local="/etc/localtime"):
- util.write_file(tz_conf, str(tz).rstrip() + "\n")
- # This ensures that the correct tz will be used for the system
- if tz_local and tz_file:
- # use a symlink if there exists a symlink or tz_local is not present
- islink = os.path.islink(tz_local)
- if islink or not os.path.exists(tz_local):
- if islink:
- util.del_file(tz_local)
- os.symlink(tz_file, tz_local)
- else:
- util.copy(tz_file, tz_local)
- return
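The symlink-versus-copy decision at the end of set_etc_timezone can be checked in isolation; a minimal sketch of the same logic using temporary paths instead of /etc, so it runs unprivileged:

```python
import os
import shutil
import tempfile

def link_or_copy_tz(tz_file, tz_local):
    # Mirrors the logic above: prefer a symlink when tz_local is
    # already a symlink or absent; otherwise copy over the file.
    islink = os.path.islink(tz_local)
    if islink or not os.path.exists(tz_local):
        if islink:
            os.unlink(tz_local)
        os.symlink(tz_file, tz_local)
    else:
        shutil.copy(tz_file, tz_local)

tmp = tempfile.mkdtemp()
zone = os.path.join(tmp, "Europe_London")
with open(zone, "w") as f:
    f.write("TZif2...\n")  # stand-in for real zoneinfo data
local = os.path.join(tmp, "localtime")
link_or_copy_tz(zone, local)
print(os.path.islink(local))  # True: nothing existed, so a symlink is made
```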
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
deleted file mode 100644
index 66209f22..00000000
--- a/cloudinit/distros/arch.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Rackspace, US Inc.
-#
-# Author: Nate House <nathan.house@rackspace.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
- network_conf_dir = "/etc/netctl"
- resolve_conf_fn = "/etc/resolv.conf"
- init_cmd = ['systemctl'] # init scripts
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'arch'
- cfg['ssh_svcname'] = 'sshd'
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('', pkgs=pkglist)
-
- def _write_network(self, settings):
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- dev_names = entries.keys()
- # Format for netctl
- for (dev, info) in entries.items():
- nameservers = []
- net_fn = self.network_conf_dir + dev
- net_cfg = {
- 'Connection': 'ethernet',
- 'Interface': dev,
- 'IP': info.get('bootproto'),
- 'Address': "('%s/%s')" % (info.get('address'),
- info.get('netmask')),
- 'Gateway': info.get('gateway'),
- 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
- }
- util.write_file(net_fn, convert_netctl(net_cfg))
- if info.get('auto'):
- self._enable_interface(dev)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
-
- if nameservers:
- util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
-
- return dev_names
-
- def _enable_interface(self, device_name):
- cmd = ['netctl', 'reenable', device_name]
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
-
- def _bring_up_interface(self, device_name):
- cmd = ['netctl', 'restart', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- return True
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
-
- def _bring_up_interfaces(self, device_names):
- for d in device_names:
- if not self._bring_up_interface(d):
- return False
- return True
-
- def _write_hostname(self, your_hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['pacman']
- # Redirect output
- cmd.append("-Sy")
- cmd.append("--quiet")
- cmd.append("--noconfirm")
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-y"], freq=PER_INSTANCE)
-
-
-def convert_netctl(settings):
- """Returns a settings string formatted for netctl."""
- result = ''
- if isinstance(settings, dict):
- for k, v in settings.items():
- result = result + '%s=%s\n' % (k, v)
- return result
-
-
-def convert_resolv_conf(settings):
- """Returns a settings string formatted for resolv.conf."""
- result = ''
- if isinstance(settings, list):
- for ns in settings:
- result = result + 'nameserver %s\n' % ns
- return result
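Both helpers above are pure functions, so their output is easy to verify standalone; copies are included here for self-containment:

```python
def convert_netctl(settings):
    """Returns a settings string formatted for netctl."""
    result = ''
    if isinstance(settings, dict):
        for k, v in settings.items():
            result += '%s=%s\n' % (k, v)
    return result

def convert_resolv_conf(settings):
    """Returns a settings string formatted for resolv.conf."""
    result = ''
    if isinstance(settings, list):
        for ns in settings:
            result += 'nameserver %s\n' % ns
    return result

print(convert_netctl({'Connection': 'ethernet', 'Interface': 'eth0'}))
# Connection=ethernet
# Interface=eth0
print(convert_resolv_conf(['10.0.0.1', '10.0.0.2']))
# nameserver 10.0.0.1
# nameserver 10.0.0.2
```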
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
deleted file mode 100644
index f9b3b92e..00000000
--- a/cloudinit/distros/debian.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit.net import eni
-from cloudinit.net.network_state import parse_net_config_data
-from cloudinit import util
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
- '--option=Dpkg::options::=--force-unsafe-io',
- '--assume-yes', '--quiet')
-APT_GET_WRAPPER = {
- 'command': 'eatmydata',
- 'enabled': 'auto',
-}
-
-ENI_HEADER = """# This file is generated from information provided by
-# the datasource. Changes to it will not persist across an instance reboot.
-# To disable cloud-init's network configuration capabilities, write a file
-# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
-# network: {config: disabled}
-"""
-
-
-class Distro(distros.Distro):
- hostname_conf_fn = "/etc/hostname"
- locale_conf_fn = "/etc/default/locale"
- network_conf_fn = "/etc/network/interfaces.d/50-cloud-init.cfg"
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'debian'
- self._net_renderer = eni.Renderer({
- 'eni_path': self.network_conf_fn,
- 'eni_header': ENI_HEADER,
- 'links_path_prefix': None,
- 'netrules_path': None,
- })
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', locale], capture=False)
- util.subp(['update-locale', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
- return ['all']
-
- def _write_network_config(self, netconfig):
- ns = parse_net_config_data(netconfig)
- self._net_renderer.render_network_state("/", ns)
- _maybe_remove_legacy_eth0()
- return []
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '--all')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def _write_hostname(self, your_hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _get_localhost_ip(self):
- # Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/
- return "127.0.1.1"
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- e = os.environ.copy()
- # See: http://tiny.cc/kg91fw
- # Or: http://tiny.cc/mh91fw
- e['DEBIAN_FRONTEND'] = 'noninteractive'
-
- wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
- cmd = _get_wrapper_prefix(
- wcfg.get('command', APT_GET_WRAPPER['command']),
- wcfg.get('enabled', APT_GET_WRAPPER['enabled']))
-
- cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- subcmd = command
- if command == "upgrade":
- subcmd = self.get_option("apt_get_upgrade_subcommand",
- "dist-upgrade")
-
- cmd.append(subcmd)
-
- pkglist = util.expand_package_list('%s=%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.log_time(logfunc=LOG.debug,
- msg="apt-%s [%s]" % (command, ' '.join(cmd)),
- func=util.subp,
- args=(cmd,), kwargs={'env': e, 'capture': False})
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
-
- def get_primary_arch(self):
- (arch, _err) = util.subp(['dpkg', '--print-architecture'])
- return str(arch).strip()
-
-
-def _get_wrapper_prefix(cmd, mode):
- if isinstance(cmd, str):
- cmd = [str(cmd)]
-
- if (util.is_true(mode) or
- (str(mode).lower() == "auto" and cmd[0] and
- util.which(cmd[0]))):
- return cmd
- else:
- return []
-
-
-def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
- """Ubuntu cloud images previously included an 'eth0.cfg' that had
- hard-coded content. That file would interfere with the rendered
- configuration if it was present.
-
- If the file does not exist, do nothing.
- If the file exists:
- - with known content, remove it and warn
- - with unknown content, leave it and warn
- """
-
- if not os.path.exists(path):
- return
-
- bmsg = "Dynamic networking config may not apply."
- try:
- contents = util.load_file(path)
- known_contents = ["auto eth0", "iface eth0 inet dhcp"]
- lines = [f.strip() for f in contents.splitlines()
- if not f.startswith("#")]
- if lines == known_contents:
- util.del_file(path)
- msg = "removed %s with known contents" % path
- else:
- msg = (bmsg + " '%s' exists with user configured content." % path)
- except Exception:
- msg = bmsg + " %s exists, but could not be read." % path
-
- LOG.warn(msg)
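The content check in _maybe_remove_legacy_eth0 reduces to a small pure function; a standalone re-creation of the same comparison:

```python
KNOWN_CONTENTS = ["auto eth0", "iface eth0 inet dhcp"]

def is_known_legacy_eth0(contents):
    # Same comparison as above: drop comment lines, strip the rest,
    # and require an exact match with the shipped legacy content.
    lines = [line.strip() for line in contents.splitlines()
             if not line.startswith("#")]
    return lines == KNOWN_CONTENTS

print(is_known_legacy_eth0(
    "# injected by cloud images\nauto eth0\niface eth0 inet dhcp\n"))  # True
print(is_known_legacy_eth0(
    "auto eth0\niface eth0 inet static\n"))                            # False
```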
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
deleted file mode 100644
index c777845d..00000000
--- a/cloudinit/distros/fedora.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.distros import rhel
-
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(rhel.Distro):
- pass
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
deleted file mode 100644
index 91bf4a4e..00000000
--- a/cloudinit/distros/freebsd.py
+++ /dev/null
@@ -1,417 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Harm Weites
-#
-# Author: Harm Weites <harm@weites.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import six
-from six import StringIO
-
-import re
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import ssh_util
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- rc_conf_fn = "/etc/rc.conf"
- login_conf_fn = '/etc/login.conf'
- login_conf_fn_bak = '/etc/login.conf.orig'
- resolv_conf_fn = '/etc/resolv.conf'
- ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'freebsd'
-
- # Updates a key in /etc/rc.conf.
- def updatercconf(self, key, value):
- LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value)
- conf = self.loadrcconf()
- config_changed = False
- if key not in conf:
- LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key,
- value)
- conf[key] = value
- config_changed = True
- else:
- for item in conf.keys():
- if item == key and conf[item] != value:
- conf[item] = value
- LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn,
- key, value)
- config_changed = True
-
- if config_changed:
- LOG.info("Writing %s", self.rc_conf_fn)
- buf = StringIO()
- for keyval in conf.items():
- buf.write('%s="%s"\n' % keyval)
- util.write_file(self.rc_conf_fn, buf.getvalue())
-
- # Load the contents of /etc/rc.conf and store all keys in a dict,
- # making sure surrounding quotes are stripped:
- # hostname="bla"
- def loadrcconf(self):
- RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*')
- conf = {}
- lines = util.load_file(self.rc_conf_fn).splitlines()
- for line in lines:
- m = RE_MATCH.match(line)
- if not m:
- LOG.debug("Skipping line from /etc/rc.conf: %s", line)
- continue
- key = m.group(1).rstrip()
- val = m.group(2).rstrip()
- # Strip surrounding quotes (not completely correct, since embedded
- # quoted values aren't handled, but should be ok ...)
- if val[0] in ('"', "'"):
- val = val[1:]
- if val[-1] in ('"', "'"):
- val = val[0:-1]
- if len(val) == 0:
- LOG.debug("Skipping empty value from /etc/rc.conf: %s", line)
- continue
- conf[key] = val
- return conf
-
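The key=value handling in loadrcconf can be checked in isolation; a condensed standalone version using the same regex and quote stripping:

```python
import re

RC_LINE = re.compile(r'^(\w+)\s*=\s*(.*)\s*')

def parse_rcconf(text):
    conf = {}
    for line in text.splitlines():
        m = RC_LINE.match(line)
        if not m:
            continue  # comments and malformed lines are skipped
        val = m.group(2).rstrip()
        if val and val[0] in ('"', "'"):
            val = val[1:]
        if val and val[-1] in ('"', "'"):
            val = val[:-1]
        if val:
            conf[m.group(1).rstrip()] = val
    return conf

print(parse_rcconf('hostname="bla"\nifconfig_vtnet0=DHCP\n# a comment\n'))
# {'hostname': 'bla', 'ifconfig_vtnet0': 'DHCP'}
```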
- def readrcconf(self, key):
- conf = self.loadrcconf()
- try:
- val = conf[key]
- except KeyError:
- val = None
- return val
-
- # Nova will inject something like eth0; rewrite that to use the FreeBSD
- # adapter name. Since the adapter name depends on the driver in use, we
- # need to figure out which interfaces are available. On KVM platforms
- # this is vtnet0, whereas Xen would use xn0.
- def getnetifname(self, dev):
- LOG.debug("Translating network interface %s", dev)
- if dev.startswith('lo'):
- return dev
-
- n = re.search(r'\d+$', dev)
- index = n.group(0)
-
- (out, err) = util.subp(['ifconfig', '-a'])
- ifconfigoutput = [x for x in (out.strip()).splitlines()
- if len(x.split()) > 0]
- for line in ifconfigoutput:
- m = re.match(r'^\w+', line)
- if m:
- if m.group(0).startswith('lo'):
- continue
- # Just settle for the first non-lo adapter we find, since it's
- # rather unlikely there will be multiple NIC drivers involved.
- bsddev = m.group(0)
- break
-
- # Replace the index with the one we're after.
- bsddev = re.sub(r'\d+$', index, bsddev)
- LOG.debug("Using network interface %s", bsddev)
- return bsddev
-
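The renaming above boils down to grafting the numeric suffix of the injected name onto the first non-loopback adapter reported by ifconfig; a condensed standalone version, with the adapter list standing in for pre-parsed 'ifconfig -a' output:

```python
import re

def rename_nic(injected, adapters):
    # adapters is a hypothetical, already-parsed 'ifconfig -a' listing.
    index = re.search(r'\d+$', injected).group(0)
    base = next(a for a in adapters if not a.startswith('lo'))
    return re.sub(r'\d+$', index, base)

print(rename_nic('eth0', ['lo0', 'vtnet0']))  # vtnet0 (KVM)
print(rename_nic('eth1', ['lo0', 'xn0']))     # xn1 (Xen)
```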
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(filename=None)
- return ('rc.conf', sys_hostname)
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- hostname = self.readrcconf('hostname')
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _write_hostname(self, hostname, filename):
- self.updatercconf('hostname', hostname)
-
- def create_group(self, name, members):
- group_add_cmd = ['pw', '-n', name]
- if util.is_group(name):
- LOG.warn("Skipping creation of existing group '%s'", name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s", name)
- except Exception as e:
- util.logexc(LOG, "Failed to create group %s", name)
- raise e
-
- if len(members) > 0:
- for member in members:
- if not util.is_user(member):
- LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', name, '-G', member])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
-
- def add_user(self, name, **kwargs):
- if util.is_user(name):
- LOG.info("User %s already exists, skipping.", name)
- return False
-
- adduser_cmd = ['pw', 'useradd', '-n', name]
- log_adduser_cmd = ['pw', 'useradd', '-n', name]
-
- adduser_opts = {
- "homedir": '-d',
- "gecos": '-c',
- "primary_group": '-g',
- "groups": '-G',
- "passwd": '-h',
- "shell": '-s',
- "inactive": '-E',
- }
- adduser_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
- }
-
- redact_opts = ['passwd']
-
- for key, val in kwargs.items():
- if (key in adduser_opts and val and
- isinstance(val, six.string_types)):
- adduser_cmd.extend([adduser_opts[key], val])
-
- # Redact certain fields from the logs
- if key in redact_opts:
- log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
- else:
- log_adduser_cmd.extend([adduser_opts[key], val])
-
- elif key in adduser_flags and val:
- adduser_cmd.append(adduser_flags[key])
- log_adduser_cmd.append(adduser_flags[key])
-
- if 'no_create_home' in kwargs or 'system' in kwargs:
- adduser_cmd.append('-d/nonexistent')
- log_adduser_cmd.append('-d/nonexistent')
- else:
- adduser_cmd.append('-d/usr/home/%s' % name)
- adduser_cmd.append('-m')
- log_adduser_cmd.append('-d/usr/home/%s' % name)
- log_adduser_cmd.append('-m')
-
- # Run the command
- LOG.info("Adding user %s", name)
- try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to create user %s", name)
- raise e
-
- def set_passwd(self, user, passwd, hashed=False):
- cmd = ['pw', 'usermod', user]
-
- if hashed:
- cmd.append('-H')
- else:
- cmd.append('-h')
-
- cmd.append('0')
-
- try:
- util.subp(cmd, passwd, logstring="chpasswd for %s" % user)
- except Exception as e:
- util.logexc(LOG, "Failed to set password for %s", user)
- raise e
-
- def lock_passwd(self, name):
- try:
- util.subp(['pw', 'usermod', name, '-h', '-'])
- except Exception as e:
- util.logexc(LOG, "Failed to lock user %s", name)
- raise e
-
- def create_user(self, name, **kwargs):
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Default to locking down the account: 'lock_passwd' defaults to
- # True, so lock the account unless it is explicitly False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- keys = set(kwargs['ssh_authorized_keys'])
- ssh_util.setup_user_keys(keys, name, options=None)
-
- def _write_network(self, settings):
- entries = net_util.translate_network(settings)
- nameservers = []
- searchdomains = []
- dev_names = entries.keys()
- for (device, info) in entries.items():
- # Skip the loopback interface.
- if device.startswith('lo'):
- continue
-
- dev = self.getnetifname(device)
-
- LOG.info('Configuring interface %s', dev)
-
- if info.get('bootproto') == 'static':
- LOG.debug('Configuring dev %s with %s / %s', dev,
- info.get('address'), info.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (info.get('address') + ' netmask ' +
- info.get('netmask'))
-
- # Configure the gateway.
- self.updatercconf('defaultrouter', info.get('gateway'))
-
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchdomains.extend(info['dns-search'])
- else:
- ifconfig = 'DHCP'
-
- self.updatercconf('ifconfig_' + dev, ifconfig)
-
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- self.resolv_conf_fn)
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
-
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- # Adjust the locale value to the new value
- newconf = StringIO()
- for line in util.load_file(self.login_conf_fn).splitlines():
- newconf.write(re.sub(r'^default:',
- r'default:lang=%s:' % locale, line))
- newconf.write("\n")
-
- # Make a backup of login.conf.
- util.copy(self.login_conf_fn, self.login_conf_fn_bak)
-
- # And write the new login.conf.
- util.write_file(self.login_conf_fn, newconf.getvalue())
-
- try:
- LOG.debug("Running cap_mkdb for %s", locale)
- util.subp(['cap_mkdb', self.login_conf_fn])
- except util.ProcessExecutionError:
- # cap_mkdb failed, so restore the backup.
- util.logexc(LOG, "Failed to apply locale %s", locale)
- try:
- util.copy(self.login_conf_fn_bak, self.login_conf_fn)
- except IOError:
- util.logexc(LOG, "Failed to restore %s backup",
- self.login_conf_fn)
-
- def _bring_up_interface(self, device_name):
- if device_name.startswith('lo'):
- return
- dev = self.getnetifname(device_name)
- cmd = ['/etc/rc.d/netif', 'start', dev]
- LOG.debug("Attempting to bring up interface %s using command %s",
- dev, cmd)
- # This could return 1 when the interface has already been put UP by the
- # OS. This is just fine.
- (_out, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warn("Error running %s: %s", cmd, err)
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- e = os.environ.copy()
- e['ASSUME_ALWAYS_YES'] = 'YES'
-
- cmd = ['pkg']
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, env=e, capture=False)
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
deleted file mode 100644
index 6267dd6e..00000000
--- a/cloudinit/distros/gentoo.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Rackspace, US Inc.
-#
-# Author: Nate House <nathan.house@rackspace.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
- network_conf_fn = "/etc/conf.d/net"
- init_cmd = [''] # init scripts
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'gentoo'
- # Fix sshd restarts
- cfg['ssh_svcname'] = '/etc/init.d/sshd'
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('', pkgs=pkglist)
-
- def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
- return ['all']
-
- def _bring_up_interface(self, device_name):
- cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- return True
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- # Grab device names from the network init scripts; expand the
- # glob here, since util.subp() does not go through a shell.
- script_glob = '/etc/init.d/net.*'
- scripts = glob.glob(script_glob)
- if not scripts:
- LOG.warn("No init scripts matched %s", script_glob)
- return False
- devices = [os.path.basename(x).split('.', 1)[1] for x in scripts]
- return distros.Distro._bring_up_interfaces(self, devices)
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def _write_hostname(self, your_hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['emerge']
- # Redirect output
- cmd.append("--quiet")
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-u", "world"], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
deleted file mode 100644
index cadfa6b6..00000000
--- a/cloudinit/distros/net_util.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# This is a util function to translate debian-based distro interface blobs,
-# as given in /etc/network/interfaces, to a *somewhat* agnostic format for
-# distributions that use other formats.
-#
-# TODO(harlowja) remove when we have python-netcf active...
-#
-# The format is the following:
-# {
-# <device-name>: {
-# # All optional (if not existent in original format)
-# "netmask": <ip>,
-# "broadcast": <ip>,
-# "gateway": <ip>,
-# "address": <ip>,
-# "bootproto": "static"|"dhcp",
-# "dns-search": <hostname>,
-# "hwaddress": <mac-address>,
-# "auto": True (or non-existent),
-# "dns-nameservers": [<ip/hostname>, ...],
-# }
-# }
-#
-# Things to note: comments are removed, and if a ubuntu/debian interface is
-# marked as auto then only the first segment is retained, i.e.
-# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
-#
-# Example input:
-#
-# auto lo
-# iface lo inet loopback
-#
-# auto eth0
-# iface eth0 inet static
-# address 10.0.0.1
-# netmask 255.255.252.0
-# broadcast 10.0.0.255
-# gateway 10.0.0.2
-# dns-nameservers 98.0.0.1 98.0.0.2
-#
-# Example output:
-# {
-# "lo": {
-# "auto": true
-# },
-# "eth0": {
-# "auto": true,
-# "dns-nameservers": [
-# "98.0.0.1",
-# "98.0.0.2"
-# ],
-# "broadcast": "10.0.0.255",
-# "netmask": "255.255.252.0",
-# "bootproto": "static",
-# "address": "10.0.0.1",
-# "gateway": "10.0.0.2"
-# }
-# }
-
-def translate_network(settings):
- # Get the standard cmd, args from the ubuntu format
- entries = []
- for line in settings.splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- split_up = line.split(None, 1)
- if len(split_up) <= 1:
- continue
- entries.append(split_up)
- # Figure out where each iface section is
- ifaces = []
- consume = {}
- for (cmd, args) in entries:
- if cmd == 'iface':
- if consume:
- ifaces.append(consume)
- consume = {}
- consume[cmd] = args
- else:
- consume[cmd] = args
- # Check if anything left over to consume
- absorb = False
- for (cmd, args) in consume.items():
- if cmd == 'iface':
- absorb = True
- if absorb:
- ifaces.append(consume)
- # Now translate
- real_ifaces = {}
- for info in ifaces:
- if 'iface' not in info:
- continue
- iface_details = info['iface'].split(None)
- # Check if current device *may* have an ipv6 IP
- use_ipv6 = False
- if 'inet6' in iface_details:
- use_ipv6 = True
- dev_name = None
- if len(iface_details) >= 1:
- dev = iface_details[0].strip().lower()
- if dev:
- dev_name = dev
- if not dev_name:
- continue
- iface_info = {}
- iface_info['ipv6'] = {}
- if len(iface_details) >= 3:
- proto_type = iface_details[2].strip().lower()
- # Seems like this can be 'loopback' which we don't
- # really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
- # These can just be copied over
- if use_ipv6:
- for k in ['address', 'gateway']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info['ipv6'][k] = val
- else:
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info[k] = val
- # Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
- # Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
- # Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
- hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
- hw_addr = hw_split[1]
- if hw_addr:
- iface_info['hwaddress'] = hw_addr
- # If ipv6 is enabled, device will have multiple IPs, so we need to
- # update the dictionary instead of overwriting it...
- if dev_name in real_ifaces:
- real_ifaces[dev_name].update(iface_info)
- else:
- real_ifaces[dev_name] = iface_info
- # Check for those that should be started on boot via 'auto'
- for (cmd, args) in entries:
- args = args.split(None)
- if not args:
- continue
- dev_name = args[0].strip().lower()
- if cmd == 'auto':
- # Seems like auto can be like 'auto eth0 eth0:1' so just get the
- # first part out as the device name
- if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- if cmd == 'iface' and 'inet6' in args:
- real_ifaces[dev_name]['inet6'] = True
- return real_ifaces
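A condensed, standalone rendition of the two passes above (stanza grouping, then 'auto' flagging); it skips the ipv6, hwaddress, and DNS handling, but shows the core flow:

```python
def mini_translate(settings):
    entries = [line.split(None, 1) for line in settings.splitlines()
               if line.strip() and not line.strip().startswith('#')]
    entries = [e for e in entries if len(e) == 2]
    ifaces, cur = {}, None
    # Pass 1: group per-device settings under their 'iface' stanza.
    for cmd, args in entries:
        if cmd == 'iface':
            parts = args.split()
            cur = ifaces.setdefault(parts[0], {})
            if len(parts) >= 3 and parts[2] in ('dhcp', 'static'):
                cur['bootproto'] = parts[2]
        elif cur is not None and cmd in ('address', 'netmask', 'gateway'):
            cur[cmd] = args.strip()
    # Pass 2: flag devices started at boot via 'auto'.
    for cmd, args in entries:
        if cmd == 'auto' and args.split()[0] in ifaces:
            ifaces[args.split()[0]]['auto'] = True
    return ifaces

ENI = """auto eth0
iface eth0 inet static
    address 10.0.0.1
    netmask 255.255.252.0
    gateway 10.0.0.2
"""
print(mini_translate(ENI))
# {'eth0': {'bootproto': 'static', 'address': '10.0.0.1',
#           'netmask': '255.255.252.0', 'gateway': '10.0.0.2', 'auto': True}}
```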
diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py
deleted file mode 100644
index 1c413eaa..00000000
--- a/cloudinit/distros/parsers/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-def chop_comment(text, comment_chars):
- comment_locations = [text.find(c) for c in comment_chars]
- comment_locations = [c for c in comment_locations if c != -1]
- if not comment_locations:
- return (text, '')
- min_comment = min(comment_locations)
- before_comment = text[0:min_comment]
- comment = text[min_comment:]
- return (before_comment, comment)
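chop_comment is shared by the hostname, hosts, and resolv.conf parsers below; exercising a verbatim copy shows the (text, comment) split it produces:

```python
def chop_comment(text, comment_chars):
    # Verbatim copy of the helper above.
    comment_locations = [text.find(c) for c in comment_chars]
    comment_locations = [c for c in comment_locations if c != -1]
    if not comment_locations:
        return (text, '')
    min_comment = min(comment_locations)
    before_comment = text[0:min_comment]
    comment = text[min_comment:]
    return (before_comment, comment)

print(chop_comment("nameserver 10.0.0.1 ; primary", ";#"))
# ('nameserver 10.0.0.1 ', '; primary')
print(chop_comment("no comment here", ";#"))
# ('no comment here', '')
```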
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
deleted file mode 100644
index efb185d4..00000000
--- a/cloudinit/distros/parsers/hostname.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-from cloudinit.distros.parsers import chop_comment
-
-
-# Parser that knows how to work with /etc/hostname format
-class HostnameConf(object):
- def __init__(self, text):
- self._text = text
- self._contents = None
-
- def parse(self):
- if self._contents is None:
- self._contents = self._parse(self._text)
-
- def __str__(self):
- self.parse()
- contents = StringIO()
- for (line_type, components) in self._contents:
- if line_type == 'blank':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'hostname':
- (hostname, tail) = components
- contents.write("%s%s\n" % (hostname, tail))
- # Ensure trailing newline
- contents = contents.getvalue()
- if not contents.endswith("\n"):
- contents += "\n"
- return contents
-
- @property
- def hostname(self):
- self.parse()
- for (line_type, components) in self._contents:
- if line_type == 'hostname':
- return components[0]
- return None
-
- def set_hostname(self, your_hostname):
- your_hostname = your_hostname.strip()
- if not your_hostname:
- return
- self.parse()
- replaced = False
- for (line_type, components) in self._contents:
- if line_type == 'hostname':
- components[0] = str(your_hostname)
- replaced = True
- if not replaced:
- self._contents.append(('hostname', [str(your_hostname), '']))
-
- def _parse(self, contents):
- entries = []
- hostnames_found = set()
- for line in contents.splitlines():
- if not len(line.strip()):
- entries.append(('blank', [line]))
- continue
- (head, tail) = chop_comment(line.strip(), '#')
- if not len(head):
- entries.append(('all_comment', [line]))
- continue
- entries.append(('hostname', [head, tail]))
- hostnames_found.add(head)
- if len(hostnames_found) > 1:
- raise IOError("Multiple hostnames (%s) found!"
- % (hostnames_found))
- return entries
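Assuming the pre-move cloud-init tree (and its six dependency) is on sys.path -- these modules are what the patch deletes -- the parser's round-trip behaviour looks like this:

```python
from cloudinit.distros.parsers.hostname import HostnameConf

conf = HostnameConf("# managed by cloud-init\nold-name\n")
print(conf.hostname)       # 'old-name'
conf.set_hostname("new-name")
print(str(conf), end="")   # comment preserved, hostname replaced:
# # managed by cloud-init
# new-name
```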
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
deleted file mode 100644
index 3c5498ee..00000000
--- a/cloudinit/distros/parsers/hosts.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-from cloudinit.distros.parsers import chop_comment
-
-
-# See: man hosts
-# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
-# or http://tinyurl.com/6lmox3
-class HostsConf(object):
- def __init__(self, text):
- self._text = text
- self._contents = None
-
- def parse(self):
- if self._contents is None:
- self._contents = self._parse(self._text)
-
- def get_entry(self, ip):
- self.parse()
- options = []
- for (line_type, components) in self._contents:
- if line_type == 'option':
- (pieces, _tail) = components
- if len(pieces) and pieces[0] == ip:
- options.append(pieces[1:])
- return options
-
- def del_entries(self, ip):
- self.parse()
- n_entries = []
- for (line_type, components) in self._contents:
- if line_type != 'option':
- n_entries.append((line_type, components))
- continue
- else:
- (pieces, _tail) = components
- if len(pieces) and pieces[0] == ip:
- pass
- elif len(pieces):
- n_entries.append((line_type, list(components)))
- self._contents = n_entries
-
- def add_entry(self, ip, canonical_hostname, *aliases):
- self.parse()
- self._contents.append(('option',
- ([ip, canonical_hostname] + list(aliases), '')))
-
- def _parse(self, contents):
- entries = []
- for line in contents.splitlines():
- if not len(line.strip()):
- entries.append(('blank', [line]))
- continue
- (head, tail) = chop_comment(line.strip(), '#')
- if not len(head):
- entries.append(('all_comment', [line]))
- continue
- entries.append(('option', [head.split(None), tail]))
- return entries
-
- def __str__(self):
- self.parse()
- contents = StringIO()
- for (line_type, components) in self._contents:
- if line_type == 'blank':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
- (pieces, tail) = components
- pieces = [str(p) for p in pieces]
- pieces = "\t".join(pieces)
- contents.write("%s%s\n" % (pieces, tail))
- return contents.getvalue()
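Under the same assumption (old tree importable), HostsConf keeps the file line-oriented, so entries can be queried, added, and removed by IP:

```python
from cloudinit.distros.parsers.hosts import HostsConf

hosts = HostsConf("127.0.0.1 localhost\n")
hosts.add_entry("10.0.0.5", "db01", "db01.internal")
print(hosts.get_entry("10.0.0.5"))  # [['db01', 'db01.internal']]
hosts.del_entries("127.0.0.1")
print(str(hosts), end="")
# 10.0.0.5    db01    db01.internal   (tab-separated on output)
```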
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
deleted file mode 100644
index 2ed13d9c..00000000
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-from cloudinit import util
-
-from cloudinit.distros.parsers import chop_comment
-
-
-# See: man resolv.conf
-class ResolvConf(object):
- def __init__(self, text):
- self._text = text
- self._contents = None
-
- def parse(self):
- if self._contents is None:
- self._contents = self._parse(self._text)
-
- @property
- def nameservers(self):
- self.parse()
- return self._retr_option('nameserver')
-
- @property
- def local_domain(self):
- self.parse()
- dm = self._retr_option('domain')
- if dm:
- return dm[0]
- return None
-
- @property
- def search_domains(self):
- self.parse()
- current_sds = self._retr_option('search')
- flat_sds = []
- for sdlist in current_sds:
- for sd in sdlist.split(None):
- if sd:
- flat_sds.append(sd)
- return flat_sds
-
- def __str__(self):
- self.parse()
- contents = StringIO()
- for (line_type, components) in self._contents:
- if line_type == 'blank':
- contents.write("\n")
- elif line_type == 'all_comment':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
- (cfg_opt, cfg_value, comment_tail) = components
- line = "%s %s" % (cfg_opt, cfg_value)
- if len(comment_tail):
- line += comment_tail
- contents.write("%s\n" % (line))
- return contents.getvalue()
-
- def _retr_option(self, opt_name):
- found = []
- for (line_type, components) in self._contents:
- if line_type == 'option':
- (cfg_opt, cfg_value, _comment_tail) = components
- if cfg_opt == opt_name:
- found.append(cfg_value)
- return found
-
- def add_nameserver(self, ns):
- self.parse()
- current_ns = self._retr_option('nameserver')
- new_ns = list(current_ns)
- new_ns.append(str(ns))
- new_ns = util.uniq_list(new_ns)
- if len(new_ns) == len(current_ns):
- return current_ns
- if len(current_ns) >= 3:
- # Hard restriction on only 3 name servers
- raise ValueError(("Adding %r would go beyond the "
- "'3' maximum name servers") % (ns))
- self._remove_option('nameserver')
- for n in new_ns:
- self._contents.append(('option', ['nameserver', n, '']))
- return new_ns
-
- def _remove_option(self, opt_name):
-
- def remove_opt(item):
- line_type, components = item
- if line_type != 'option':
- return False
- (cfg_opt, _cfg_value, _comment_tail) = components
- if cfg_opt != opt_name:
- return False
- return True
-
- new_contents = []
- for c in self._contents:
- if not remove_opt(c):
- new_contents.append(c)
- self._contents = new_contents
-
- def add_search_domain(self, search_domain):
- flat_sds = self.search_domains
- new_sds = list(flat_sds)
- new_sds.append(str(search_domain))
- new_sds = util.uniq_list(new_sds)
- if len(flat_sds) == len(new_sds):
- return new_sds
- if len(flat_sds) >= 6:
- # Hard restriction on only 6 search domains
- raise ValueError(("Adding %r would go beyond the "
- "'6' maximum search domains") % (search_domain))
- s_list = " ".join(new_sds)
- if len(s_list) > 256:
- # Some hard limit on 256 chars total
- raise ValueError(("Adding %r would go beyond the "
- "256 maximum search list character limit")
- % (search_domain))
- self._remove_option('search')
- self._contents.append(('option', ['search', s_list, '']))
- return flat_sds
-
- @local_domain.setter
- def local_domain(self, domain):
- self.parse()
- self._remove_option('domain')
- self._contents.append(('option', ['domain', str(domain), '']))
- return domain
-
- def _parse(self, contents):
- entries = []
- for (i, line) in enumerate(contents.splitlines()):
- sline = line.strip()
- if not sline:
- entries.append(('blank', [line]))
- continue
- (head, tail) = chop_comment(line, ';#')
- if not len(head.strip()):
- entries.append(('all_comment', [line]))
- continue
- if not tail:
- tail = ''
- try:
- (cfg_opt, cfg_values) = head.split(None, 1)
- except (IndexError, ValueError):
- raise IOError("Incorrectly formatted resolv.conf line %s"
- % (i + 1))
- if cfg_opt not in ['nameserver', 'domain',
- 'search', 'sortlist', 'options']:
- raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
- entries.append(("option", [cfg_opt, cfg_values, tail]))
- return entries
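ResolvConf enforces the resolver limits (3 nameservers, 6 search domains, 256-character search list) at edit time rather than on write; again assuming the old tree and its dependencies are importable:

```python
from cloudinit.distros.parsers.resolv_conf import ResolvConf

rc = ResolvConf("nameserver 10.0.0.1 ; primary\n")
rc.add_nameserver("10.0.0.2")
rc.add_search_domain("example.internal")
print(rc.nameservers)      # ['10.0.0.1', '10.0.0.2']
print(rc.search_domains)   # ['example.internal']
```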
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
deleted file mode 100644
index 6157cf32..00000000
--- a/cloudinit/distros/parsers/sys_conf.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import StringIO
-
-import pipes
-import re
-
-# This library is used to parse/write
-# out the various sysconfig files edited (on a best-effort basis).
-#
-# It has to be slightly modified, though,
-# to ensure that all values are quoted/unquoted correctly
-# since these configs are usually sourced into
-# bash scripts...
-import configobj
-
-# See: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
-# or look at the 'param_expand()' function in the subst.c file in the bash
-# source tarball...
-SHELL_VAR_RULE = r'[a-zA-Z_]+[a-zA-Z0-9_]*'
-SHELL_VAR_REGEXES = [
- # Basic variables
- re.compile(r"\$" + SHELL_VAR_RULE),
- # Things like $?, $0, $-, $@
- re.compile(r"\$[0-9#\?\-@\*]"),
- # Things like ${blah:1} - but this one
- # gets very complex so just try the
- # simple path
- re.compile(r"\$\{.+\}"),
-]
-
-
-def _contains_shell_variable(text):
- for r in SHELL_VAR_REGEXES:
- if r.search(text):
- return True
- return False
-
-
-class SysConf(configobj.ConfigObj):
- def __init__(self, contents):
- configobj.ConfigObj.__init__(self, contents,
- interpolation=False,
- write_empty_values=True)
-
- def __str__(self):
- contents = self.write()
- out_contents = StringIO()
- if isinstance(contents, (list, tuple)):
- out_contents.write("\n".join(contents))
- else:
- out_contents.write(str(contents))
- return out_contents.getvalue()
-
- def _quote(self, value, multiline=False):
- if not isinstance(value, six.string_types):
- raise ValueError('Value "%s" is not a string' % (value))
- if len(value) == 0:
- return ''
- quot_func = None
- if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
- if len(value) == 1:
- quot_func = (lambda x: self._get_single_quote(x) % x)
- else:
- # Quote whitespace if it isn't the start + end of a shell command
- if value.strip().startswith("$(") and value.strip().endswith(")"):
- pass
- else:
- if re.search(r"[\t\r\n ]", value):
- if _contains_shell_variable(value):
- # If it contains shell variables then we likely want to
- # leave it alone since the pipes.quote function likes
- # to use single quotes which won't get expanded...
- if re.search(r"[\n\"']", value):
- quot_func = (lambda x:
- self._get_triple_quote(x) % x)
- else:
- quot_func = (lambda x:
- self._get_single_quote(x) % x)
- else:
- quot_func = pipes.quote
- if not quot_func:
- return value
- return quot_func(value)
-
- def _write_line(self, indent_string, entry, this_entry, comment):
- # Ensure it is formatted fine for
- # how these sysconfig scripts are used
- val = self._decode_element(self._quote(this_entry))
- key = self._decode_element(self._quote(entry))
- cmnt = self._decode_element(comment)
- return '%s%s%s%s%s' % (indent_string,
- key,
- self._a_to_u('='),
- val,
- cmnt)
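The quoting rules above are the interesting part of SysConf. Assuming configobj and the old tree are importable (on a Python old enough to still ship the pipes module), values containing whitespace get quoted on write, while shell command substitutions pass through untouched:

```python
from cloudinit.distros.parsers.sys_conf import SysConf

sc = SysConf(['NETWORKING=yes'])
sc['GREETING'] = 'hello world'  # contains whitespace: quoted on write
sc['CMD'] = '$(uptime)'         # $( ... ): passed through unquoted
print(str(sc))
# NETWORKING=yes
# GREETING='hello world'
# CMD=$(uptime)
```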
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
deleted file mode 100644
index 1aa42d75..00000000
--- a/cloudinit/distros/rhel.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit.net.network_state import parse_net_config_data
-from cloudinit.net import sysconfig
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros import rhel_util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-def _make_sysconfig_bool(val):
- if val:
- return 'yes'
- else:
- return 'no'
-
-
-class Distro(distros.Distro):
- # See: http://tiny.cc/6r99fw
- clock_conf_fn = "/etc/sysconfig/clock"
- locale_conf_fn = '/etc/sysconfig/i18n'
- systemd_locale_conf_fn = '/etc/locale.conf'
- network_conf_fn = "/etc/sysconfig/network"
- hostname_conf_fn = "/etc/sysconfig/network"
- systemd_hostname_conf_fn = "/etc/hostname"
- network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
- resolve_conf_fn = "/etc/resolv.conf"
- tz_local_fn = "/etc/localtime"
- usr_lib_exec = "/usr/libexec"
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'redhat'
- self._net_renderer = sysconfig.Renderer()
-
- def install_packages(self, pkglist):
- self.package_command('install', pkgs=pkglist)
-
- def _write_network_config(self, netconfig):
- ns = parse_net_config_data(netconfig)
- self._net_renderer.render_network_state("/", ns)
- return []
-
- def _write_network(self, settings):
- # TODO(harlowja) fix this... since this is the ubuntu format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Convert the intermediate format to the rhel format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- use_ipv6 = False
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- net_cfg = {
- 'DEVICE': dev,
- 'NETMASK': info.get('netmask'),
- 'IPADDR': info.get('address'),
- 'BOOTPROTO': info.get('bootproto'),
- 'GATEWAY': info.get('gateway'),
- 'BROADCAST': info.get('broadcast'),
- 'MACADDR': info.get('hwaddress'),
- 'ONBOOT': _make_sysconfig_bool(info.get('auto')),
- }
- if info.get('inet6'):
- use_ipv6 = True
- net_cfg.update({
- 'IPV6INIT': _make_sysconfig_bool(True),
- 'IPV6ADDR': info.get('ipv6').get('address'),
- 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
- })
- rhel_util.update_sysconfig_file(net_fn, net_cfg)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- if dev_names:
- net_cfg = {
- 'NETWORKING': _make_sysconfig_bool(True),
- }
- # If an IPv6 interface is present, enable IPv6 networking
- if use_ipv6:
- net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
- net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
- rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- if self.uses_systemd():
- if not out_fn:
- out_fn = self.systemd_locale_conf_fn
- else:
- if not out_fn:
- out_fn = self.locale_conf_fn
- locale_cfg = {
- 'LANG': locale,
- }
- rhel_util.update_sysconfig_file(out_fn, locale_cfg)
-
- def _write_hostname(self, hostname, out_fn):
- # systemd will never update previous-hostname for us, so
- # we need to do it ourselves
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
- elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
- else:
- host_cfg = {
- 'HOSTNAME': hostname,
- }
- rhel_util.update_sysconfig_file(out_fn, host_cfg)
-
- def _select_hostname(self, hostname, fqdn):
- # See: http://bit.ly/TwitgL
- # Should be fqdn if we can use it
- if fqdn:
- return fqdn
- return hostname
-
- def _read_system_hostname(self):
- if self.uses_systemd():
- host_fn = self.systemd_hostname_conf_fn
- else:
- host_fn = self.hostname_conf_fn
- return (host_fn, self._read_hostname(host_fn))
-
- def _read_hostname(self, filename, default=None):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
- return util.load_file(filename).strip()
- elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
- if len(out):
- return out
- else:
- return default
- else:
- (_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if 'HOSTNAME' in contents:
- return contents['HOSTNAME']
- else:
- return default
-
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def set_timezone(self, tz):
- tz_file = self._find_tz_file(tz)
- if self.uses_systemd():
- # Currently, timedatectl complains if invoked during startup
- # so for compatibility, create the link manually.
- util.del_file(self.tz_local_fn)
- util.sym_link(tz_file, self.tz_local_fn)
- else:
- # Adjust the sysconfig clock zone setting
- clock_cfg = {
- 'ZONE': str(tz),
- }
- rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
- # This ensures that the correct tz will be used for the system
- util.copy(tz_file, self.tz_local_fn)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['yum']
- # If enabled, then yum will be tolerant of errors on the command line
- # with regard to packages.
- # For example: if you request to install foo, bar and baz and baz is
- # installed; yum won't error out complaining that baz is already
- # installed.
- cmd.append("-t")
- # Determines whether or not yum prompts for confirmation
- # of critical actions. We don't want to prompt...
- cmd.append("-y")
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["makecache"], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
deleted file mode 100644
index 903d7793..00000000
--- a/cloudinit/distros/rhel_util.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-from cloudinit.distros.parsers.sys_conf import SysConf
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
-def update_sysconfig_file(fn, adjustments, allow_empty=False):
- if not adjustments:
- return
- (exists, contents) = read_sysconfig_file(fn)
- updated_am = 0
- for (k, v) in adjustments.items():
- if v is None:
- continue
- v = str(v)
- if len(v) == 0 and not allow_empty:
- continue
- contents[k] = v
- updated_am += 1
- if updated_am:
- lines = [
- str(contents),
- ]
- if not exists:
- lines.insert(0, util.make_header())
- util.write_file(fn, "\n".join(lines) + "\n", 0o644)
-
-
-# Helper function to read a RHEL/SUSE /etc/sysconfig/* file
-def read_sysconfig_file(fn):
- exists = False
- try:
- contents = util.load_file(fn).splitlines()
- exists = True
- except IOError:
- contents = []
- return (exists, SysConf(contents))
-
-
-# Helper function to update RHEL/SUSE /etc/resolv.conf
-def update_resolve_conf_file(fn, dns_servers, search_servers):
- try:
- r_conf = ResolvConf(util.load_file(fn))
- r_conf.parse()
- except IOError:
- util.logexc(LOG, "Failed at parsing %s reverting to an empty "
- "instance", fn)
- r_conf = ResolvConf('')
- r_conf.parse()
- if dns_servers:
- for s in dns_servers:
- try:
- r_conf.add_nameserver(s)
- except ValueError:
- util.logexc(LOG, "Failed at adding nameserver %s", s)
- if search_servers:
- for s in search_servers:
- try:
- r_conf.add_search_domain(s)
- except ValueError:
- util.logexc(LOG, "Failed at adding search domain %s", s)
- util.write_file(fn, str(r_conf), 0o644)
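
A minimal usage sketch of the two helpers above, assuming only the names
defined in this file (the target path is the same sysconfig file the rhel
distro writes):

    from cloudinit.distros import rhel_util

    # Merges NETWORKING=yes into the file, prepending a cloud-init
    # header if the file does not exist yet; None values and (by
    # default) empty strings are skipped.
    rhel_util.update_sysconfig_file('/etc/sysconfig/network',
                                    {'NETWORKING': 'yes'})

    # Returns (exists, SysConf) so callers can tell a missing file
    # apart from an empty one.
    exists, contents = rhel_util.read_sysconfig_file('/etc/sysconfig/network')
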
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
deleted file mode 100644
index 620c974c..00000000
--- a/cloudinit/distros/sles.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Leaning very heavily on the RHEL and Debian implementation
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import distros
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros import rhel_util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- clock_conf_fn = '/etc/sysconfig/clock'
- locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network'
- hostname_conf_fn = '/etc/HOSTNAME'
- network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- resolve_conf_fn = '/etc/resolv.conf'
- tz_local_fn = '/etc/localtime'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'suse'
-
- def install_packages(self, pkglist):
- self.package_command('install', args='-l', pkgs=pkglist)
-
- def _write_network(self, settings):
- # Convert debian settings to ifcfg format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Convert the intermediate format to the suse format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- mode = info.get('auto')
- if mode and mode.lower() == 'true':
- mode = 'auto'
- else:
- mode = 'manual'
- net_cfg = {
- 'BOOTPROTO': info.get('bootproto'),
- 'BROADCAST': info.get('broadcast'),
- 'GATEWAY': info.get('gateway'),
- 'IPADDR': info.get('address'),
- 'LLADDR': info.get('hwaddress'),
- 'NETMASK': info.get('netmask'),
- 'STARTMODE': mode,
- 'USERCONTROL': 'no'
- }
- if dev != 'lo':
- net_cfg['ETHERDEVICE'] = dev
- net_cfg['ETHTOOL_OPTIONS'] = ''
- else:
- net_cfg['FIREWALL'] = 'no'
- rhel_util.update_sysconfig_file(net_fn, net_cfg, True)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- locale_cfg = {
- 'RC_LANG': locale,
- }
- rhel_util.update_sysconfig_file(out_fn, locale_cfg)
-
- def _write_hostname(self, hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _read_system_hostname(self):
- host_fn = self.hostname_conf_fn
- return (host_fn, self._read_hostname(host_fn))
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def set_timezone(self, tz):
- tz_file = self._find_tz_file(tz)
- # Adjust the sysconfig clock zone setting
- clock_cfg = {
- 'TIMEZONE': str(tz),
- }
- rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
- # This ensures that the correct tz will be used for the system
- util.copy(tz_file, self.tz_local_fn)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['zypper']
- # No user interaction possible, enable non-interactive mode
- cmd.append('--non-interactive')
-
- # Command is the operation, such as install
- cmd.append(command)
-
- # args are the arguments to the command, not global options
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ['refresh'], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
deleted file mode 100644
index c527f248..00000000
--- a/cloudinit/distros/ubuntu.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.distros import debian
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(debian.Distro):
- pass
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
deleted file mode 100644
index 76dda042..00000000
--- a/cloudinit/ec2_utils.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import functools
-import json
-
-from cloudinit import log as logging
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
-
-
-class MetadataLeafDecoder(object):
- """Decodes a leaf blob into something meaningful."""
-
- def _maybe_json_object(self, text):
- if not text:
- return False
- text = text.strip()
- if text.startswith("{") and text.endswith("}"):
- return True
- return False
-
- def __call__(self, field, blob):
- if not blob:
- return blob
- try:
- blob = util.decode_binary(blob)
- except UnicodeDecodeError:
- return blob
- if self._maybe_json_object(blob):
- try:
- # Assume it's json, unless it fails parsing...
- return json.loads(blob)
- except (ValueError, TypeError) as e:
- LOG.warn("Field %s looked like a json object, but it was"
- " not: %s", field, e)
- if blob.find("\n") != -1:
- return blob.splitlines()
- return blob
-
-
-# See: http://bit.ly/TyoUQs
-#
-class MetadataMaterializer(object):
- def __init__(self, blob, base_url, caller, leaf_decoder=None):
- self._blob = blob
- self._md = None
- self._base_url = base_url
- self._caller = caller
- if leaf_decoder is None:
- self._leaf_decoder = MetadataLeafDecoder()
- else:
- self._leaf_decoder = leaf_decoder
-
- def _parse(self, blob):
- leaves = {}
- children = []
- blob = util.decode_binary(blob)
-
- if not blob:
- return (leaves, children)
-
- def has_children(item):
- if item.endswith("/"):
- return True
- else:
- return False
-
- def get_name(item):
- if item.endswith("/"):
- return item.rstrip("/")
- return item
-
- for field in blob.splitlines():
- field = field.strip()
- field_name = get_name(field)
- if not field or not field_name:
- continue
- if has_children(field):
- if field_name not in children:
- children.append(field_name)
- else:
- contents = field.split("=", 1)
- resource = field_name
- if len(contents) > 1:
- # What a PITA...
- (ident, sub_contents) = contents
- ident = util.safe_int(ident)
- if ident is not None:
- resource = "%s/openssh-key" % (ident)
- field_name = sub_contents
- leaves[field_name] = resource
- return (leaves, children)
-
- def materialize(self):
- if self._md is not None:
- return self._md
- self._md = self._materialize(self._blob, self._base_url)
- return self._md
-
- def _materialize(self, blob, base_url):
- (leaves, children) = self._parse(blob)
- child_contents = {}
- for c in children:
- child_url = url_helper.combine_url(base_url, c)
- if not child_url.endswith("/"):
- child_url += "/"
- child_blob = self._caller(child_url)
- child_contents[c] = self._materialize(child_blob, child_url)
- leaf_contents = {}
- for (field, resource) in leaves.items():
- leaf_url = url_helper.combine_url(base_url, resource)
- leaf_blob = self._caller(leaf_url)
- leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
- joined = {}
- joined.update(child_contents)
- for field in leaf_contents.keys():
- if field in joined:
- LOG.warn("Duplicate key found in results from %s", base_url)
- else:
- joined[field] = leaf_contents[field]
- return joined
-
-
-def _skip_retry_on_codes(status_codes, _request_args, cause):
- """Returns if a request should retry based on a given set of codes that
- case retrying to be stopped/skipped.
- """
- return cause.code in status_codes
-
-
-def get_instance_userdata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5):
- ud_url = url_helper.combine_url(metadata_address, api_version)
- ud_url = url_helper.combine_url(ud_url, 'user-data')
- user_data = ''
- try:
- # It is ok for userdata to not exist (that's why we stop retrying if
- # NOT_FOUND occurs) and in that case we just return an empty string.
- exception_cb = functools.partial(_skip_retry_on_codes,
- SKIP_USERDATA_CODES)
- response = util.read_file_or_url(ud_url,
- ssl_details=ssl_details,
- timeout=timeout,
- retries=retries,
- exception_cb=exception_cb)
- user_data = response.contents
- except url_helper.UrlError as e:
- if e.code not in SKIP_USERDATA_CODES:
- util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
- except Exception:
- util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
- return user_data
-
-
-def get_instance_metadata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None):
- md_url = url_helper.combine_url(metadata_address, api_version)
- # Note, 'meta-data' explicitly has trailing /.
- # This is required for CloudStack (LP: #1356855)
- md_url = url_helper.combine_url(md_url, 'meta-data/')
- caller = functools.partial(util.read_file_or_url,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries)
-
- def mcaller(url):
- return caller(url).contents
-
- try:
- response = caller(md_url)
- materializer = MetadataMaterializer(response.contents,
- md_url, mcaller,
- leaf_decoder=leaf_decoder)
- md = materializer.materialize()
- if not isinstance(md, (dict)):
- md = {}
- return md
- except Exception:
- util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
- return {}
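
A minimal usage sketch of the two fetchers above, as run from inside an
EC2-compatible instance; both default to the link-local metadata endpoint
hard-coded above:

    from cloudinit import ec2_utils

    md = ec2_utils.get_instance_metadata(api_version='latest')
    ud = ec2_utils.get_instance_userdata(api_version='latest')
    # md is a nested dict materialized by recursively walking the
    # listing URLs; ud is the raw user-data ('' when none exists).
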
diff --git a/cloudinit/filters/__init__.py b/cloudinit/filters/__init__.py
deleted file mode 100644
index da124641..00000000
--- a/cloudinit/filters/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
deleted file mode 100644
index baecdac9..00000000
--- a/cloudinit/filters/launch_index.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-
-from cloudinit import log as logging
-from cloudinit import user_data as ud
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class Filter(object):
- def __init__(self, wanted_idx, allow_none=True):
- self.wanted_idx = wanted_idx
- self.allow_none = allow_none
-
- def _select(self, message):
- msg_idx = message.get('Launch-Index', None)
- if self.allow_none and msg_idx is None:
- return True
- msg_idx = util.safe_int(msg_idx)
- if msg_idx != self.wanted_idx:
- return False
- return True
-
- def _do_filter(self, message):
- # Don't use walk() here since we want to do the reforming of the
- # messages ourselves and not flatten the message listings...
- if not self._select(message):
- return None
- if message.is_multipart():
- # Recreate it and its child messages
- prev_msgs = message.get_payload(decode=False)
- new_msgs = []
- discarded = 0
- for m in prev_msgs:
- m = self._do_filter(m)
- if m is not None:
- new_msgs.append(m)
- else:
- discarded += 1
- LOG.debug(("Discarding %s multipart messages "
- "which do not match launch index %s"),
- discarded, self.wanted_idx)
- new_message = copy.copy(message)
- new_message.set_payload(new_msgs)
- new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
- return new_message
- else:
- return copy.copy(message)
-
- def apply(self, root_message):
- if self.wanted_idx is None:
- return root_message
- return self._do_filter(root_message)
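
A minimal usage sketch of the filter above; root_message is assumed to be
an email.message.Message already parsed from multipart user-data:

    from cloudinit.filters import launch_index

    # Keep only parts whose Launch-Index header matches 0; parts
    # without the header pass through since allow_none defaults True.
    filtered = launch_index.Filter(0).apply(root_message)
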
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
deleted file mode 100644
index 6a76d785..00000000
--- a/cloudinit/gpg.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""gpg.py - Collection of gpg key related functions"""
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-def export_armour(key):
- """Export gpg key, armoured key gets returned"""
- try:
- (armour, _) = util.subp(["gpg", "--export", "--armour", key],
- capture=True)
- except util.ProcessExecutionError as error:
- # debug, since it happens for any key not on the system initially
- LOG.debug('Failed to export armoured key "%s": %s', key, error)
- armour = None
- return armour
-
-
-def receive_key(key, keyserver):
- """Receive gpg key from the specified keyserver"""
- LOG.debug('Receive gpg key "%s"', key)
- try:
- util.subp(["gpg", "--keyserver", keyserver, "--recv-keys", key],
- capture=True)
- except util.ProcessExecutionError as error:
- raise ValueError(('Failed to import key "%s" '
- 'from server "%s" - error %s') %
- (key, keyserver, error))
-
-
-def delete_key(key):
- """Delete the specified key from the local gpg ring"""
- try:
- util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
- capture=True)
- except util.ProcessExecutionError as error:
- LOG.warn('Failed to delete key "%s": %s', key, error)
-
-
-def get_key_by_id(keyid, keyserver="keyserver.ubuntu.com"):
- """get gpg keyid from keyserver"""
- armour = export_armour(keyid)
- if not armour:
- try:
- receive_key(keyid, keyserver=keyserver)
- armour = export_armour(keyid)
- except ValueError:
- LOG.exception('Failed to obtain gpg key %s', keyid)
- raise
- finally:
- # delete just imported key to leave environment as it was before
- delete_key(keyid)
-
- return armour
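
A minimal usage sketch of the retrieval flow above; the key id is purely
illustrative:

    from cloudinit import gpg

    # Fetches the key from the default keyserver if it is not already
    # on the system, exports its armoured form, then deletes the
    # temporary import from the local ring before returning.
    armour = gpg.get_key_by_id('473041FA')
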
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
deleted file mode 100644
index b6c43ce8..00000000
--- a/cloudinit/handlers/__init__.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import os
-import six
-
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-# Used as the content type when a message is not multipart
-# and it doesn't contain its own content-type
-NOT_MULTIPART_TYPE = "text/x-not-multipart"
-
-# Used when no content type is assigned
-OCTET_TYPE = 'application/octet-stream'
-
-# Special content types that signal the start and end of processing
-CONTENT_END = "__end__"
-CONTENT_START = "__begin__"
-CONTENT_SIGNALS = [CONTENT_START, CONTENT_END]
-
-# Used when a part-handler type is encountered
-# to allow for registration of new types.
-PART_CONTENT_TYPES = ["text/part-handler"]
-PART_HANDLER_FN_TMPL = 'part-handler-%03d'
-
-# For parts without filenames
-PART_FN_TPL = 'part-%03d'
-
-# Mapping of file beginnings to their content types
-INCLUSION_TYPES_MAP = {
- '#include': 'text/x-include-url',
- '#include-once': 'text/x-include-once-url',
- '#!': 'text/x-shellscript',
- '#cloud-config': 'text/cloud-config',
- '#upstart-job': 'text/upstart-job',
- '#part-handler': 'text/part-handler',
- '#cloud-boothook': 'text/cloud-boothook',
- '#cloud-config-archive': 'text/cloud-config-archive',
- '#cloud-config-jsonp': 'text/cloud-config-jsonp',
-}
-
-# Sorted longest first
-INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
- key=(lambda e: 0 - len(e)))
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Handler(object):
-
- def __init__(self, frequency, version=2):
- self.handler_version = version
- self.frequency = frequency
-
- def __repr__(self):
- return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())
-
- @abc.abstractmethod
- def list_types(self):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def handle_part(self, *args, **kwargs):
- raise NotImplementedError()
-
-
-def run_part(mod, data, filename, payload, frequency, headers):
- mod_freq = mod.frequency
- if not (mod_freq == PER_ALWAYS or
- (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
- return
- # Sanity check on version (should be convertible to an int)
- try:
- mod_ver = mod.handler_version
- mod_ver = int(mod_ver)
- except (TypeError, ValueError, AttributeError):
- mod_ver = 1
- content_type = headers['Content-Type']
- try:
- LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
- mod, content_type, filename, mod_ver, frequency)
- if mod_ver == 3:
- # Treat as v. 3 which does get a frequency + headers
- mod.handle_part(data, content_type, filename,
- payload, frequency, headers)
- elif mod_ver == 2:
- # Treat as v. 2 which does get a frequency
- mod.handle_part(data, content_type, filename,
- payload, frequency)
- elif mod_ver == 1:
- # Treat as v. 1 which gets no frequency
- mod.handle_part(data, content_type, filename, payload)
- else:
- raise ValueError("Unknown module version %s" % (mod_ver))
- except Exception:
- util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with "
- "frequency %s", mod, content_type, filename, mod_ver,
- frequency)
-
-
-def call_begin(mod, data, frequency):
- # Create a fake header set
- headers = {
- 'Content-Type': CONTENT_START,
- }
- run_part(mod, data, None, None, frequency, headers)
-
-
-def call_end(mod, data, frequency):
- # Create a fake header set
- headers = {
- 'Content-Type': CONTENT_END,
- }
- run_part(mod, data, None, None, frequency, headers)
-
-
-def walker_handle_handler(pdata, _ctype, _filename, payload):
- curcount = pdata['handlercount']
- modname = PART_HANDLER_FN_TMPL % (curcount)
- frequency = pdata['frequency']
- modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
- if not modfname.endswith(".py"):
- modfname = "%s.py" % (modfname)
- # TODO(harlowja): Check if path exists??
- util.write_file(modfname, payload, 0o600)
- handlers = pdata['handlers']
- try:
- mod = fixup_handler(importer.import_module(modname))
- call_begin(mod, pdata['data'], frequency)
- # Only register and increment after the above have worked, so we don't
- # register if it fails starting.
- handlers.register(mod, initialized=True)
- pdata['handlercount'] = curcount + 1
- except Exception:
- util.logexc(LOG, "Failed at registering python file: %s (part "
- "handler %s)", modfname, curcount)
-
-
-def _extract_first_or_bytes(blob, size):
- # Extract the first line or up to X characters for text objects
- # Extract first X bytes for binary objects
- try:
- if isinstance(blob, six.string_types):
- start = blob.split("\n", 1)[0]
- else:
- # We want to avoid decoding the whole blob (it might be huge)
- # By taking 4*size bytes we are guaranteed to decode size utf8 chars
- start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
- if len(start) >= size:
- start = start[:size]
- except UnicodeDecodeError:
- # Bytes array doesn't contain text so return chunk of raw bytes
- start = blob[0:size]
- return start
-
-
-def _escape_string(text):
- try:
- return text.encode("string_escape")
- except (LookupError, TypeError):
- try:
- # Unicode (and Python 3's str) doesn't support string_escape...
- return text.encode('unicode_escape')
- except TypeError:
- # Give up...
- pass
- except AttributeError:
- # We're in Python3 and received blob as text
- # No escaping is needed because bytes are printed
- # as 'b\xAA\xBB' automatically in Python3
- pass
- return text
-
-
-def walker_callback(data, filename, payload, headers):
- content_type = headers['Content-Type']
- if content_type in data.get('excluded'):
- LOG.debug('content_type "%s" is excluded', content_type)
- return
-
- if content_type in PART_CONTENT_TYPES:
- walker_handle_handler(data, content_type, filename, payload)
- return
- handlers = data['handlers']
- if content_type in handlers:
- run_part(handlers[content_type], data['data'], filename,
- payload, data['frequency'], headers)
- elif payload:
- # Extract the first line or 24 bytes for displaying in the log
- start = _extract_first_or_bytes(payload, 24)
- details = "'%s...'" % (_escape_string(start))
- if content_type == NOT_MULTIPART_TYPE:
- LOG.warning("Unhandled non-multipart (%s) userdata: %s",
- content_type, details)
- else:
- LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
- content_type, details)
- else:
- LOG.debug("Empty payload of type %s", content_type)
-
-
-# Callback is a function that will be called with
-# (data, filename, payload, headers)
-def walk(msg, callback, data):
- partnum = 0
- for part in msg.walk():
- # multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
- continue
-
- ctype = part.get_content_type()
- if ctype is None:
- ctype = OCTET_TYPE
-
- filename = part.get_filename()
- if not filename:
- filename = PART_FN_TPL % (partnum)
-
- headers = dict(part)
- LOG.debug(headers)
- headers['Content-Type'] = ctype
- payload = util.fully_decoded_payload(part)
- callback(data, filename, payload, headers)
- partnum = partnum + 1
-
-
-def fixup_handler(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, "handler_version"):
- setattr(mod, "handler_version", 1)
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warn("Handler %s has an unknown frequency %s", mod, freq)
- return mod
-
-
-def type_from_starts_with(payload, default=None):
- try:
- payload_lc = util.decode_binary(payload).lower()
- except UnicodeDecodeError:
- return default
- payload_lc = payload_lc.lstrip()
- for text in INCLUSION_SRCH:
- if payload_lc.startswith(text):
- return INCLUSION_TYPES_MAP[text]
- return default
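
A minimal sketch of how type_from_starts_with above maps the first bytes
of a user-data part onto a content type via INCLUSION_TYPES_MAP:

    from cloudinit import handlers

    handlers.type_from_starts_with('#cloud-config\nruncmd: []')
    # -> 'text/cloud-config'
    handlers.type_from_starts_with('#!/bin/sh\necho hi')
    # -> 'text/x-shellscript'
    handlers.type_from_starts_with('no known prefix')
    # -> None (the default)
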
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
deleted file mode 100644
index a4ea47ac..00000000
--- a/cloudinit/handlers/boot_hook.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
-
-LOG = logging.getLogger(__name__)
-BOOTHOOK_PREFIX = "#cloud-boothook"
-
-
-class BootHookPartHandler(handlers.Handler):
- def __init__(self, paths, datasource, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS)
- self.boothook_dir = paths.get_ipath("boothooks")
- self.instance_id = None
- if datasource:
- self.instance_id = datasource.get_instance_id()
-
- def list_types(self):
- return [
- handlers.type_from_starts_with(BOOTHOOK_PREFIX),
- ]
-
- def _write_part(self, payload, filename):
- filename = util.clean_filename(filename)
- filepath = os.path.join(self.boothook_dir, filename)
- contents = util.strip_prefix_suffix(util.dos2unix(payload),
- prefix=BOOTHOOK_PREFIX)
- util.write_file(filepath, contents.lstrip(), 0o700)
- return filepath
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- return
-
- filepath = self._write_part(payload, filename)
- try:
- env = os.environ.copy()
- if self.instance_id is not None:
- env['INSTANCE_ID'] = str(self.instance_id)
- util.subp([filepath], env=env)
- except util.ProcessExecutionError:
- util.logexc(LOG, "Boothooks script %s execution error", filepath)
- except Exception:
- util.logexc(LOG, "Boothooks unknown error when running %s",
- filepath)
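
For context, a sketch of the user-data part this handler consumes (the
contents are invented): the #cloud-boothook prefix is stripped, the rest
is written executable under the boothooks dir, and it runs on every boot
with INSTANCE_ID exported into its environment:

    #cloud-boothook
    #!/bin/sh
    echo "booted instance $INSTANCE_ID" >> /var/log/boothook.log
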
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
deleted file mode 100644
index cad4dc0f..00000000
--- a/cloudinit/handlers/cloud_config.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import jsonpatch
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
-
-LOG = logging.getLogger(__name__)
-
-MERGE_HEADER = 'Merge-Type'
-
-# Due to the way yaml configuration was loaded previously, where each
-# cloud config part was appended to a larger yaml file and that file was
-# then loaded as one big yaml file, we need to mimic that behavior by
-# altering the default strategy to replace keys from prior merges.
-#
-#
-# For example
-# #file 1
-# a: 3
-# #file 2
-# a: 22
-# #combined file (comments not included)
-# a: 3
-# a: 22
-#
-# This gets loaded into yaml with final result {'a': 22}
-DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
-CLOUD_PREFIX = "#cloud-config"
-JSONP_PREFIX = "#cloud-config-jsonp"
-
-# The file header -> content types this module will handle.
-CC_TYPES = {
- JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
- CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
-}
-
-
-class CloudConfigPartHandler(handlers.Handler):
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS, version=3)
- self.cloud_buf = None
- self.cloud_fn = paths.get_ipath("cloud_config")
- if 'cloud_config_path' in _kwargs:
- self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
- self.file_names = []
-
- def list_types(self):
- return list(CC_TYPES.values())
-
- def _write_cloud_config(self):
- if not self.cloud_fn:
- return
- # Capture which files we merged from...
- file_lines = []
- if self.file_names:
- file_lines.append("# from %s files" % (len(self.file_names)))
- for fn in self.file_names:
- if not fn:
- fn = '?'
- file_lines.append("# %s" % (fn))
- file_lines.append("")
- if self.cloud_buf is not None:
- # Something was actually gathered....
- lines = [
- CLOUD_PREFIX,
- '',
- ]
- lines.extend(file_lines)
- lines.append(util.yaml_dumps(self.cloud_buf))
- else:
- lines = []
- util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
-
- def _extract_mergers(self, payload, headers):
- merge_header_headers = ''
- for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]:
- tmp_h = headers.get(h, '')
- if tmp_h:
- merge_header_headers = tmp_h
- break
- # Select either the merge-type from the content,
- # or the merge-type from the headers, or default to
- # our own set if neither exists (or is empty).
- payload_yaml = util.load_yaml(payload)
- mergers_yaml = mergers.dict_extract_mergers(payload_yaml)
- mergers_header = mergers.string_extract_mergers(merge_header_headers)
- all_mergers = []
- all_mergers.extend(mergers_yaml)
- all_mergers.extend(mergers_header)
- if not all_mergers:
- all_mergers = DEF_MERGERS
- return (payload_yaml, all_mergers)
-
- def _merge_patch(self, payload):
- # JSON doesn't handle comments in this manner, so ensure that
- # if we started with this 'type' we remove it before
- # attempting to load it as json (which the jsonpatch library will
- # attempt to do).
- payload = payload.lstrip()
- payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX)
- patch = jsonpatch.JsonPatch.from_string(payload)
- LOG.debug("Merging by applying json patch %s", patch)
- self.cloud_buf = patch.apply(self.cloud_buf, in_place=False)
-
- def _merge_part(self, payload, headers):
- (payload_yaml, my_mergers) = self._extract_mergers(payload, headers)
- LOG.debug("Merging by applying %s", my_mergers)
- merger = mergers.construct(my_mergers)
- self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)
-
- def _reset(self):
- self.file_names = []
- self.cloud_buf = None
-
- def handle_part(self, data, ctype, filename, payload, frequency, headers):
- if ctype == handlers.CONTENT_START:
- self._reset()
- return
- if ctype == handlers.CONTENT_END:
- self._write_cloud_config()
- self._reset()
- return
- try:
- # First time through, merge with an empty dict...
- if self.cloud_buf is None or not self.file_names:
- self.cloud_buf = {}
- if ctype == CC_TYPES[JSONP_PREFIX]:
- self._merge_patch(payload)
- else:
- self._merge_part(payload, headers)
- # Ensure filename is ok to store
- for i in ("\n", "\r", "\t"):
- filename = filename.replace(i, " ")
- self.file_names.append(filename.strip())
- except Exception:
- util.logexc(LOG, "Failed at merging in cloud config part from %s",
- filename)
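
A minimal sketch of the default merge semantics described in the comment
above: with dict(replace), keys from later cloud-config parts replace
keys from earlier ones:

    from cloudinit import mergers

    parsed = mergers.string_extract_mergers('dict(replace)+list()+str()')
    merger = mergers.construct(parsed)
    merger.merge({'a': 3}, {'a': 22})  # -> {'a': 22}
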
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
deleted file mode 100644
index b5087693..00000000
--- a/cloudinit/handlers/shell_script.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
-
-LOG = logging.getLogger(__name__)
-SHELL_PREFIX = "#!"
-
-
-class ShellScriptPartHandler(handlers.Handler):
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS)
- self.script_dir = paths.get_ipath_cur('scripts')
- if 'script_path' in _kwargs:
- self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
-
- def list_types(self):
- return [
- handlers.type_from_starts_with(SHELL_PREFIX),
- ]
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- # TODO(harlowja): maybe delete existing things here
- return
-
- filename = util.clean_filename(filename)
- payload = util.dos2unix(payload)
- path = os.path.join(self.script_dir, filename)
- util.write_file(path, payload, 0o700)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
deleted file mode 100644
index ab381e00..00000000
--- a/cloudinit/handlers/upstart_job.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-import os
-import re
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.settings import (PER_INSTANCE)
-
-LOG = logging.getLogger(__name__)
-UPSTART_PREFIX = "#upstart-job"
-
-
-class UpstartJobPartHandler(handlers.Handler):
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_INSTANCE)
- self.upstart_dir = paths.upstart_conf_d
-
- def list_types(self):
- return [
- handlers.type_from_starts_with(UPSTART_PREFIX),
- ]
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- return
-
- # See: https://bugs.launchpad.net/bugs/819507
- if frequency != PER_INSTANCE:
- return
-
- if not self.upstart_dir:
- return
-
- filename = util.clean_filename(filename)
- (_name, ext) = os.path.splitext(filename)
- if not ext:
- ext = ''
- ext = ext.lower()
- if ext != ".conf":
- filename = filename + ".conf"
-
- payload = util.dos2unix(payload)
- path = os.path.join(self.upstart_dir, filename)
- util.write_file(path, payload, 0o644)
-
- if SUITABLE_UPSTART:
- util.subp(["initctl", "reload-configuration"], capture=False)
-
-
-def _has_suitable_upstart():
- # (LP: #1124384)
- # a bug in upstart means that invoking reload-configuration
- # at this stage in boot causes havoc. So, try to determine if upstart
- # is installed, and reloading configuration is OK.
- if not os.path.exists("/sbin/initctl"):
- return False
- try:
- (version_out, _err) = util.subp(["initctl", "version"])
- except Exception:
- util.logexc(LOG, "initctl version failed")
- return False
-
- # expecting 'initctl version' to output something like: init (upstart X.Y)
- if re.match("upstart 1.[0-7][)]", version_out):
- return False
- if "upstart 0." in version_out:
- return False
- elif "upstart 1.8" in version_out:
- if not os.path.exists("/usr/bin/dpkg-query"):
- return False
- try:
- (dpkg_ver, _err) = util.subp(["dpkg-query",
- "--showformat=${Version}",
- "--show", "upstart"], rcs=[0, 1])
- except Exception:
- util.logexc(LOG, "dpkg-query failed")
- return False
-
- try:
- good = "1.8-0ubuntu1.2"
- util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
- return True
- except util.ProcessExecutionError as e:
- if e.exit_code == 1:
- pass
- else:
- util.logexc(LOG, "dpkg --compare-versions failed [%s]",
- e.exit_code)
- except Exception as e:
- util.logexc(LOG, "dpkg --compare-versions failed")
- return False
- else:
- return True
-
-SUITABLE_UPSTART = _has_suitable_upstart()
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
deleted file mode 100644
index fb95babc..00000000
--- a/cloudinit/helpers.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from time import time
-
-import contextlib
-import os
-
-import six
-from six.moves.configparser import (
- NoSectionError, NoOptionError, RawConfigParser)
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CFG_ENV_NAME)
-
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class LockFailure(Exception):
- pass
-
-
-class DummyLock(object):
- pass
-
-
-class DummySemaphores(object):
- def __init__(self):
- pass
-
- @contextlib.contextmanager
- def lock(self, _name, _freq, _clear_on_fail=False):
- yield DummyLock()
-
- def has_run(self, _name, _freq):
- return False
-
- def clear(self, _name, _freq):
- return True
-
- def clear_all(self):
- pass
-
-
-class FileLock(object):
- def __init__(self, fn):
- self.fn = fn
-
- def __str__(self):
- return "<%s using file %r>" % (type_utils.obj_name(self), self.fn)
-
-
-def canon_sem_name(name):
- return name.replace("-", "_")
-
-
-class FileSemaphores(object):
- def __init__(self, sem_path):
- self.sem_path = sem_path
-
- @contextlib.contextmanager
- def lock(self, name, freq, clear_on_fail=False):
- name = canon_sem_name(name)
- try:
- yield self._acquire(name, freq)
- except Exception:
- if clear_on_fail:
- self.clear(name, freq)
- raise
-
- def clear(self, name, freq):
- name = canon_sem_name(name)
- sem_file = self._get_path(name, freq)
- try:
- util.del_file(sem_file)
- except (IOError, OSError):
- util.logexc(LOG, "Failed deleting semaphore %s", sem_file)
- return False
- return True
-
- def clear_all(self):
- try:
- util.del_dir(self.sem_path)
- except (IOError, OSError):
- util.logexc(LOG, "Failed deleting semaphore directory %s",
- self.sem_path)
-
- def _acquire(self, name, freq):
- # Check again if it has already been acquired
- if self.has_run(name, freq):
- return None
- # This is a race condition since nothing atomic is happening
- # here, but this should be ok due to the nature of when
- # and where cloud-init runs... (file writing is not a lock...)
- sem_file = self._get_path(name, freq)
- contents = "%s: %s\n" % (os.getpid(), time())
- try:
- util.write_file(sem_file, contents)
- except (IOError, OSError):
- util.logexc(LOG, "Failed writing semaphore file %s", sem_file)
- return None
- return FileLock(sem_file)
-
- def has_run(self, name, freq):
- if not freq or freq == PER_ALWAYS:
- return False
-
- cname = canon_sem_name(name)
- sem_file = self._get_path(cname, freq)
- # This isn't really a good atomic check
- # but it suffices for where and when cloudinit runs
- if os.path.exists(sem_file):
- return True
-
- # this case could happen if the migrator module hadn't run yet
- # but the item had run before we did canon_sem_name.
- if cname != name and os.path.exists(self._get_path(name, freq)):
- LOG.warn("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. "
- "It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator"
- % (name, cname))
- return True
-
- return False
-
- def _get_path(self, name, freq):
- sem_path = self.sem_path
- if not freq or freq == PER_INSTANCE:
- return os.path.join(sem_path, name)
- else:
- return os.path.join(sem_path, "%s.%s" % (name, freq))
-
-
-class Runners(object):
- def __init__(self, paths):
- self.paths = paths
- self.sems = {}
-
- def _get_sem(self, freq):
- if freq == PER_ALWAYS or not freq:
- return None
- sem_path = None
- if freq == PER_INSTANCE:
- # This may not exist, which is why
- # we still check for None below, in
- # case the paths object doesn't have
- # a datasource that can provide this
- # instance path...
- sem_path = self.paths.get_ipath("sem")
- elif freq == PER_ONCE:
- sem_path = self.paths.get_cpath("sem")
- if not sem_path:
- return None
- if sem_path not in self.sems:
- self.sems[sem_path] = FileSemaphores(sem_path)
- return self.sems[sem_path]
-
- def run(self, name, functor, args, freq=None, clear_on_fail=False):
- sem = self._get_sem(freq)
- if not sem:
- sem = DummySemaphores()
- if not args:
- args = []
- if sem.has_run(name, freq):
- LOG.debug("%s already ran (freq=%s)", name, freq)
- return (False, None)
- with sem.lock(name, freq, clear_on_fail) as lk:
- if not lk:
- raise LockFailure("Failed to acquire lock for %s" % name)
- else:
- LOG.debug("Running %s using lock (%s)", name, lk)
- if isinstance(args, (dict)):
- results = functor(**args)
- else:
- results = functor(*args)
- return (True, results)
-
-
-class ConfigMerger(object):
- def __init__(self, paths=None, datasource=None,
- additional_fns=None, base_cfg=None,
- include_vendor=True):
- self._paths = paths
- self._ds = datasource
- self._fns = additional_fns
- self._base_cfg = base_cfg
- self._include_vendor = include_vendor
- # Created on first use
- self._cfg = None
-
- def _get_datasource_configs(self):
- d_cfgs = []
- if self._ds:
- try:
- ds_cfg = self._ds.get_config_obj()
- if ds_cfg and isinstance(ds_cfg, (dict)):
- d_cfgs.append(ds_cfg)
- except Exception:
- util.logexc(LOG, "Failed loading of datasource config object "
- "from %s", self._ds)
- return d_cfgs
-
- def _get_env_configs(self):
- e_cfgs = []
- if CFG_ENV_NAME in os.environ:
- e_fn = os.environ[CFG_ENV_NAME]
- try:
- e_cfgs.append(util.read_conf(e_fn))
- except Exception:
- util.logexc(LOG, 'Failed loading of env. config from %s',
- e_fn)
- return e_cfgs
-
- def _get_instance_configs(self):
- i_cfgs = []
- # If cloud-config was written, pick it up as
- # a configuration file to use when running...
- if not self._paths:
- return i_cfgs
-
- cc_paths = ['cloud_config']
- if self._include_vendor:
- cc_paths.append('vendor_cloud_config')
-
- for cc_p in cc_paths:
- cc_fn = self._paths.get_ipath_cur(cc_p)
- if cc_fn and os.path.isfile(cc_fn):
- try:
- i_cfgs.append(util.read_conf(cc_fn))
- except Exception:
- util.logexc(LOG, 'Failed loading of cloud-config from %s',
- cc_fn)
- return i_cfgs
-
- def _read_cfg(self):
- # Input config files override
- # env config files which
- # override instance configs
- # which override datasource
- # configs which override
- # base configuration
- cfgs = []
- if self._fns:
- for c_fn in self._fns:
- try:
- cfgs.append(util.read_conf(c_fn))
- except Exception:
- util.logexc(LOG, "Failed loading of configuration from %s",
- c_fn)
-
- cfgs.extend(self._get_env_configs())
- cfgs.extend(self._get_instance_configs())
- cfgs.extend(self._get_datasource_configs())
- if self._base_cfg:
- cfgs.append(self._base_cfg)
- return util.mergemanydict(cfgs)
-
- @property
- def cfg(self):
- # None check to avoid empty case causing re-reading
- if self._cfg is None:
- self._cfg = self._read_cfg()
- return self._cfg
-
-
-class ContentHandlers(object):
-
- def __init__(self):
- self.registered = {}
- self.initialized = []
-
- def __contains__(self, item):
- return self.is_registered(item)
-
- def __getitem__(self, key):
- return self._get_handler(key)
-
- def is_registered(self, content_type):
- return content_type in self.registered
-
- def register(self, mod, initialized=False, overwrite=True):
- types = set()
- for t in mod.list_types():
- if overwrite:
- types.add(t)
- else:
- if not self.is_registered(t):
- types.add(t)
- for t in types:
- self.registered[t] = mod
- if initialized and mod not in self.initialized:
- self.initialized.append(mod)
- return types
-
- def _get_handler(self, content_type):
- return self.registered[content_type]
-
- def items(self):
- return list(self.registered.items())
-
-
-class Paths(object):
- def __init__(self, path_cfgs, ds=None):
- self.cfgs = path_cfgs
- # Populate all the initial paths
- self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
- self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init')
- self.instance_link = os.path.join(self.cloud_dir, 'instance')
- self.boot_finished = os.path.join(self.instance_link, "boot-finished")
- self.upstart_conf_d = path_cfgs.get('upstart_dir')
- self.seed_dir = os.path.join(self.cloud_dir, 'seed')
- # This one isn't joined, since it should just be read-only
- template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
- self.template_tpl = os.path.join(template_dir, '%s.tmpl')
- self.lookups = {
- "handlers": "handlers",
- "scripts": "scripts",
- "vendor_scripts": "scripts/vendor",
- "sem": "sem",
- "boothooks": "boothooks",
- "userdata_raw": "user-data.txt",
- "userdata": "user-data.txt.i",
- "obj_pkl": "obj.pkl",
- "cloud_config": "cloud-config.txt",
- "vendor_cloud_config": "vendor-cloud-config.txt",
- "data": "data",
- "vendordata_raw": "vendor-data.txt",
- "vendordata": "vendor-data.txt.i",
- "instance_id": ".instance-id",
- }
- # Set when a datasource becomes active
- self.datasource = ds
-
- # get_ipath_cur: get the current instance path for an item
- def get_ipath_cur(self, name=None):
- return self._get_path(self.instance_link, name)
-
- # get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
- # for a name in dirmap
- def get_cpath(self, name=None):
- return self._get_path(self.cloud_dir, name)
-
- # _get_ipath : get the instance path for a name in pathmap
- # (/var/lib/cloud/instances/<instance>/<name>)
- def _get_ipath(self, name=None):
- if not self.datasource:
- return None
- iid = self.datasource.get_instance_id()
- if iid is None:
- return None
- path_safe_iid = str(iid).replace(os.sep, '_')
- ipath = os.path.join(self.cloud_dir, 'instances', path_safe_iid)
- add_on = self.lookups.get(name)
- if add_on:
- ipath = os.path.join(ipath, add_on)
- return ipath
-
- # get_ipath : get the instance path for a name in pathmap
- # (/var/lib/cloud/instances/<instance>/<name>)
- # returns None + warns if no active datasource....
- def get_ipath(self, name=None):
- ipath = self._get_ipath(name)
- if not ipath:
-            LOG.warn(("No per instance data available, "
-                      "is there a datasource/iid set?"))
- return None
- else:
- return ipath
-
- def _get_path(self, base, name=None):
- if name is None:
- return base
- return os.path.join(base, self.lookups[name])
-
- def get_runpath(self, name=None):
- return self._get_path(self.run_dir, name)
-
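A short sketch of how the lookups table above maps names onto paths, assuming the default directories, an importable cloudinit.helpers, and no active datasource:

    from cloudinit import helpers

    p = helpers.Paths({})
    p.get_cpath('sem')               # '/var/lib/cloud/sem'
    p.get_ipath_cur('cloud_config')  # '/var/lib/cloud/instance/cloud-config.txt'
    p.get_runpath('instance_id')     # '/run/cloud-init/.instance-id'
    p.get_ipath('sem')               # None (plus a warning): no datasource set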
-
-# This config parser will not throw when you set values on
-# sections that don't exist, which is useful when writing new
-# options that may not have corresponding sections yet. It can
-# also default values on gets, so that missing sections/options
-# yield a default instead of an error. This avoids having to
-# catch exceptions that you typically don't care about...
-
-class DefaultingConfigParser(RawConfigParser):
- DEF_INT = 0
- DEF_FLOAT = 0.0
- DEF_BOOLEAN = False
- DEF_BASE = None
-
- def get(self, section, option):
- value = self.DEF_BASE
- try:
- value = RawConfigParser.get(self, section, option)
- except NoSectionError:
- pass
- except NoOptionError:
- pass
- return value
-
- def set(self, section, option, value=None):
- if not self.has_section(section) and section.lower() != 'default':
- self.add_section(section)
- RawConfigParser.set(self, section, option, value)
-
- def remove_option(self, section, option):
- if self.has_option(section, option):
- RawConfigParser.remove_option(self, section, option)
-
- def getboolean(self, section, option):
- if not self.has_option(section, option):
- return self.DEF_BOOLEAN
- return RawConfigParser.getboolean(self, section, option)
-
- def getfloat(self, section, option):
- if not self.has_option(section, option):
- return self.DEF_FLOAT
- return RawConfigParser.getfloat(self, section, option)
-
- def getint(self, section, option):
- if not self.has_option(section, option):
- return self.DEF_INT
- return RawConfigParser.getint(self, section, option)
-
- def stringify(self, header=None):
- contents = ''
- with six.StringIO() as outputstream:
- self.write(outputstream)
- outputstream.flush()
- contents = outputstream.getvalue()
- if header:
- contents = "\n".join([header, contents])
- return contents
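A hedged usage sketch of DefaultingConfigParser, assuming it stays importable from cloudinit.helpers as in this tree:

    from cloudinit.helpers import DefaultingConfigParser

    cp = DefaultingConfigParser()
    cp.get('missing', 'option')        # None instead of NoSectionError
    cp.getint('missing', 'option')     # 0
    cp.set('new_section', 'opt', '1')  # the section is created on demand
    print(cp.stringify(header='# generated'))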
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
deleted file mode 100644
index fb57253c..00000000
--- a/cloudinit/importer.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-
-def import_module(module_name):
- __import__(module_name)
- return sys.modules[module_name]
-
-
-def find_module(base_name, search_paths, required_attrs=None):
- if not required_attrs:
- required_attrs = []
- # NOTE(harlowja): translate the search paths to include the base name.
- lookup_paths = []
- for path in search_paths:
- real_path = []
- if path:
- real_path.extend(path.split("."))
- real_path.append(base_name)
- full_path = '.'.join(real_path)
- lookup_paths.append(full_path)
- found_paths = []
- for full_path in lookup_paths:
- mod = None
- try:
- mod = import_module(full_path)
- except ImportError:
- pass
- if not mod:
- continue
- found_attrs = 0
- for attr in required_attrs:
- if hasattr(mod, attr):
- found_attrs += 1
- if found_attrs == len(required_attrs):
- found_paths.append(full_path)
- return (found_paths, lookup_paths)
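For illustration, this is how the mergers code below uses find_module (a sketch; what lands in found depends on what is actually importable):

    from cloudinit import importer

    found, searched = importer.find_module(
        'm_dict', ['cloudinit.mergers'], ['Merger'])
    # searched -> ['cloudinit.mergers.m_dict']
    # found    -> ['cloudinit.mergers.m_dict'] when that module imports
    #             cleanly and has a 'Merger' attribute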
diff --git a/cloudinit/log.py b/cloudinit/log.py
deleted file mode 100644
index 3c79b9c9..00000000
--- a/cloudinit/log.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import logging.config
-import logging.handlers
-
-import collections
-import os
-import sys
-
-import six
-from six import StringIO
-
-# Logging levels for easy access
-CRITICAL = logging.CRITICAL
-FATAL = logging.FATAL
-ERROR = logging.ERROR
-WARNING = logging.WARNING
-WARN = logging.WARN
-INFO = logging.INFO
-DEBUG = logging.DEBUG
-NOTSET = logging.NOTSET
-
-# Default basic format
-DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
-
-
-def setupBasicLogging(level=DEBUG):
- root = logging.getLogger()
- console = logging.StreamHandler(sys.stderr)
- console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
- console.setLevel(level)
- root.addHandler(console)
- root.setLevel(level)
-
-
-def flushLoggers(root):
- if not root:
- return
- for h in root.handlers:
- if isinstance(h, (logging.StreamHandler)):
- try:
- h.flush()
- except IOError:
- pass
- flushLoggers(root.parent)
-
-
-def setupLogging(cfg=None):
- # See if the config provides any logging conf...
- if not cfg:
- cfg = {}
-
- log_cfgs = []
- log_cfg = cfg.get('logcfg')
- if log_cfg and isinstance(log_cfg, six.string_types):
- # If there is a 'logcfg' entry in the config,
-        # respect it; it is the old key name
- log_cfgs.append(str(log_cfg))
- elif "log_cfgs" in cfg:
- for a_cfg in cfg['log_cfgs']:
- if isinstance(a_cfg, six.string_types):
- log_cfgs.append(a_cfg)
- elif isinstance(a_cfg, (collections.Iterable)):
- cfg_str = [str(c) for c in a_cfg]
- log_cfgs.append('\n'.join(cfg_str))
- else:
- log_cfgs.append(str(a_cfg))
-
- # See if any of them actually load...
- am_tried = 0
- for log_cfg in log_cfgs:
- try:
- am_tried += 1
-            # Assume it's just a string if not a filename
- if log_cfg.startswith("/") and os.path.isfile(log_cfg):
- # Leave it as a file and do not make it look like
- # something that is a file (but is really a buffer that
- # is acting as a file)
- pass
- else:
- log_cfg = StringIO(log_cfg)
- # Attempt to load its config
- logging.config.fileConfig(log_cfg)
- # The first one to work wins!
- return
- except Exception:
- # We do not write any logs of this here, because the default
- # configuration includes an attempt at using /dev/log, followed
- # up by writing to a file. /dev/log will not exist in very early
- # boot, so an exception on that is expected.
- pass
-
- # If it didn't work, at least setup a basic logger (if desired)
- basic_enabled = cfg.get('log_basic', True)
-
- sys.stderr.write(("WARN: no logging configured!"
- " (tried %s configs)\n") % (am_tried))
- if basic_enabled:
- sys.stderr.write("Setting up basic logging...\n")
- setupBasicLogging()
-
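A sketch of feeding setupLogging() a fileConfig-style configuration through the 'log_cfgs' key; the INI content is a standard logging.config example, not anything cloud-init specific:

    from cloudinit import log

    LOG_CFG = """
    [loggers]
    keys=root

    [handlers]
    keys=console

    [formatters]
    keys=simple

    [logger_root]
    level=DEBUG
    handlers=console

    [handler_console]
    class=StreamHandler
    level=DEBUG
    formatter=simple
    args=(sys.stderr,)

    [formatter_simple]
    format=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s
    """

    log.setupLogging({'log_cfgs': [LOG_CFG]})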
-
-def getLogger(name='cloudinit'):
- return logging.getLogger(name)
-
-
-# Fixes this annoyance...
-# No handlers could be found for logger XXX annoying output...
-try:
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-
-def _resetLogger(log):
- if not log:
- return
- handlers = list(log.handlers)
- for h in handlers:
- h.flush()
- h.close()
- log.removeHandler(h)
- log.setLevel(NOTSET)
- log.addHandler(NullHandler())
-
-
-def resetLogging():
- _resetLogger(logging.getLogger())
- _resetLogger(getLogger())
-
-
-resetLogging()
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
deleted file mode 100644
index e13f55ac..00000000
--- a/cloudinit/mergers/__init__.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-import six
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-
-NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")
-
-LOG = logging.getLogger(__name__)
-DEF_MERGE_TYPE = "list()+dict()+str()"
-MERGER_PREFIX = 'm_'
-MERGER_ATTR = 'Merger'
-
-
-class UnknownMerger(object):
- # Named differently so auto-method finding
- # doesn't pick this up if there is ever a type
- # named "unknown"
- def _handle_unknown(self, _meth_wanted, value, _merge_with):
- return value
-
-    # This merging will attempt to look for a '_on_X' method
-    # in our own object for a given object Y with type X;
-    # if found, it will be called to perform the merge of a
-    # source object and an object to merge_with.
-    #
-    # If not found, the merge will be given to a '_handle_unknown'
-    # function which can decide what to do with the 2 values.
- def merge(self, source, merge_with):
- type_name = type_utils.obj_name(source)
- type_name = type_name.lower()
- method_name = "_on_%s" % (type_name)
- meth = None
- args = [source, merge_with]
- if hasattr(self, method_name):
- meth = getattr(self, method_name)
- if not meth:
- meth = self._handle_unknown
- args.insert(0, method_name)
- return meth(*args)
-
-
-class LookupMerger(UnknownMerger):
- def __init__(self, lookups=None):
- UnknownMerger.__init__(self)
- if lookups is None:
- self._lookups = []
- else:
- self._lookups = lookups
-
- def __str__(self):
- return 'LookupMerger: (%s)' % (len(self._lookups))
-
-    # For items which can not be merged by the parent, this object
-    # will look through an internally maintained set of objects and
-    # find which one of those objects can perform the merge. If
-    # any of the contained objects have the needed method, they
-    # will be called to perform the merge.
- def _handle_unknown(self, meth_wanted, value, merge_with):
- meth = None
- for merger in self._lookups:
- if hasattr(merger, meth_wanted):
- # First one that has that method/attr gets to be
- # the one that will be called
- meth = getattr(merger, meth_wanted)
- break
- if not meth:
- return UnknownMerger._handle_unknown(self, meth_wanted,
- value, merge_with)
- return meth(value, merge_with)
-
-
-def dict_extract_mergers(config):
- parsed_mergers = []
- raw_mergers = config.pop('merge_how', None)
- if raw_mergers is None:
- raw_mergers = config.pop('merge_type', None)
- if raw_mergers is None:
- return parsed_mergers
- if isinstance(raw_mergers, six.string_types):
- return string_extract_mergers(raw_mergers)
- for m in raw_mergers:
- if isinstance(m, (dict)):
- name = m['name']
- name = name.replace("-", "_").strip()
- opts = m['settings']
- else:
- name = m[0]
- if len(m) >= 2:
- opts = m[1:]
- else:
- opts = []
- if name:
- parsed_mergers.append((name, opts))
- return parsed_mergers
-
-
-def string_extract_mergers(merge_how):
- parsed_mergers = []
- for m_name in merge_how.split("+"):
- # Canonicalize the name (so that it can be found
- # even when users alter it in various ways)
- m_name = m_name.lower().strip()
- m_name = m_name.replace("-", "_")
- if not m_name:
- continue
- match = NAME_MTCH.match(m_name)
- if not match:
- msg = ("Matcher identifer '%s' is not in the right format" %
- (m_name))
- raise ValueError(msg)
- (m_name, m_ops) = match.groups()
- m_ops = m_ops.strip().split(",")
- m_ops = [m.strip().lower() for m in m_ops if m.strip()]
- parsed_mergers.append((m_name, m_ops))
- return parsed_mergers
-
-
-def default_mergers():
- return tuple(string_extract_mergers(DEF_MERGE_TYPE))
-
-
-def construct(parsed_mergers):
- mergers_to_be = []
- for (m_name, m_ops) in parsed_mergers:
- if not m_name.startswith(MERGER_PREFIX):
- m_name = MERGER_PREFIX + str(m_name)
- merger_locs, looked_locs = importer.find_module(m_name,
- [__name__],
- [MERGER_ATTR])
- if not merger_locs:
- msg = ("Could not find merger module named '%s' "
- "with attribute '%s' (searched %s)") % (m_name,
- MERGER_ATTR,
- looked_locs)
- raise ImportError(msg)
- else:
- mod = importer.import_module(merger_locs[0])
- mod_attr = getattr(mod, MERGER_ATTR)
- mergers_to_be.append((mod_attr, m_ops))
- # Now form them...
- mergers = []
- root = LookupMerger(mergers)
- for (attr, opts) in mergers_to_be:
- mergers.append(attr(root, opts))
- return root
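To make the merge string syntax concrete, here is a hedged sketch of parsing and applying the default merger chain; the results follow from the m_* classes deleted below:

    from cloudinit import mergers

    mergers.string_extract_mergers('dict(recurse_array,no_replace)+list(append)')
    # -> [('dict', ['recurse_array', 'no_replace']), ('list', ['append'])]

    merger = mergers.construct(mergers.default_mergers())
    merger.merge({'a': [1], 'b': 1}, {'a': [2], 'b': 2, 'c': 3})
    # -> {'a': [1], 'b': 1, 'c': 3}: the default dict merger keeps
    #    existing keys and does not recurse into lists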
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
deleted file mode 100644
index 87cf1a72..00000000
--- a/cloudinit/mergers/m_dict.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-DEF_MERGE_TYPE = 'no_replace'
-MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
-
-
-def _has_any(what, *keys):
- for k in keys:
- if k in what:
- return True
- return False
-
-
-class Merger(object):
- def __init__(self, merger, opts):
- self._merger = merger
- # Affects merging behavior...
- self._method = DEF_MERGE_TYPE
- for m in MERGE_TYPES:
- if m in opts:
- self._method = m
- break
- # Affect how recursive merging is done on other primitives.
- self._recurse_str = 'recurse_str' in opts
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
- self._allow_delete = 'allow_delete' in opts
- # Backwards compat require this to be on.
- self._recurse_dict = True
-
- def __str__(self):
- s = ('DictMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)')
- s = s % (self._method, self._recurse_str,
- self._recurse_dict, self._recurse_array, self._allow_delete)
- return s
-
- def _do_dict_replace(self, value, merge_with, do_replace):
-
- def merge_same_key(old_v, new_v):
- if do_replace:
- return new_v
- if isinstance(new_v, (list, tuple)) and self._recurse_array:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, (dict)) and self._recurse_dict:
- return self._merger.merge(old_v, new_v)
- # Otherwise leave it be...
- return old_v
-
- for (k, v) in merge_with.items():
- if k in value:
- if v is None and self._allow_delete:
- value.pop(k)
- else:
- value[k] = merge_same_key(value[k], v)
- else:
- value[k] = v
- return value
-
- def _on_dict(self, value, merge_with):
- if not isinstance(merge_with, (dict)):
- return value
- if self._method == 'replace':
- merged = self._do_dict_replace(dict(value), merge_with, True)
- elif self._method == 'no_replace':
- merged = self._do_dict_replace(dict(value), merge_with, False)
- else:
- raise NotImplementedError("Unknown merge type %s" % (self._method))
- return merged
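A small sketch of the two dict merge methods, constructed via the parent package shown above:

    from cloudinit import mergers

    keep = mergers.construct([('dict', [])])    # default: no_replace
    keep.merge({'a': 1}, {'a': 2, 'b': 2})      # -> {'a': 1, 'b': 2}

    clobber = mergers.construct([('dict', ['replace'])])
    clobber.merge({'a': 1}, {'a': 2, 'b': 2})   # -> {'a': 2, 'b': 2}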
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
deleted file mode 100644
index 81e5c580..00000000
--- a/cloudinit/mergers/m_list.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-DEF_MERGE_TYPE = 'replace'
-MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
-
-
-def _has_any(what, *keys):
- for k in keys:
- if k in what:
- return True
- return False
-
-
-class Merger(object):
- def __init__(self, merger, opts):
- self._merger = merger
- # Affects merging behavior...
- self._method = DEF_MERGE_TYPE
- for m in MERGE_TYPES:
- if m in opts:
- self._method = m
- break
- # Affect how recursive merging is done on other primitives
- self._recurse_str = _has_any(opts, 'recurse_str')
- self._recurse_dict = _has_any(opts, 'recurse_dict')
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
-
- def __str__(self):
- return ('ListMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s)') % (self._method,
- self._recurse_str,
- self._recurse_dict,
- self._recurse_array)
-
- def _on_tuple(self, value, merge_with):
- return tuple(self._on_list(list(value), merge_with))
-
- def _on_list(self, value, merge_with):
- if (self._method == 'replace' and
- not isinstance(merge_with, (tuple, list))):
- return merge_with
-
- # Ok we now know that what we are merging with is a list or tuple.
- merged_list = []
- if self._method == 'prepend':
- merged_list.extend(merge_with)
- merged_list.extend(value)
- return merged_list
- elif self._method == 'append':
- merged_list.extend(value)
- merged_list.extend(merge_with)
- return merged_list
-
- def merge_same_index(old_v, new_v):
- if self._method == 'no_replace':
- # Leave it be...
- return old_v
- if isinstance(new_v, (list, tuple)) and self._recurse_array:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, (dict)) and self._recurse_dict:
- return self._merger.merge(old_v, new_v)
- return new_v
-
-        # Ok, now we are replacing items at the same indexes
- merged_list.extend(value)
- common_len = min(len(merged_list), len(merge_with))
- for i in range(0, common_len):
- merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
- return merged_list
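Likewise for lists; note that the default 'replace' method replaces item-by-item at common indexes rather than swapping the whole list (a sketch):

    from cloudinit import mergers

    appender = mergers.construct([('list', ['append'])])
    appender.merge([1, 2], [3, 4])    # -> [1, 2, 3, 4]

    replacer = mergers.construct([('list', [])])  # default: replace
    replacer.merge([1, 2], [9])       # -> [9, 2]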
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
deleted file mode 100644
index b00c4bf3..00000000
--- a/cloudinit/mergers/m_str.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-
-class Merger(object):
- def __init__(self, _merger, opts):
- self._append = 'append' in opts
-
- def __str__(self):
- return 'StringMerger: (append=%s)' % (self._append)
-
-    # On encountering a unicode object to merge a value with,
-    # we will for now just proxy into the string method and let it handle it.
- def _on_unicode(self, value, merge_with):
- return self._on_str(value, merge_with)
-
-    # On encountering a string object to merge with, we will
-    # perform the following action: if appending, we will
-    # merge them together; otherwise we will just return value.
- def _on_str(self, value, merge_with):
- if not isinstance(value, six.string_types):
- return merge_with
- if not self._append:
- return merge_with
- if isinstance(value, six.text_type):
- return value + six.text_type(merge_with)
- else:
- return value + six.binary_type(merge_with)
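And for strings (a sketch; without 'append' the incoming value simply wins):

    from cloudinit import mergers

    joiner = mergers.construct([('str', ['append'])])
    joiner.merge('foo', 'bar')   # -> 'foobar'

    taker = mergers.construct([('str', [])])
    taker.merge('foo', 'bar')    # -> 'bar'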
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
deleted file mode 100644
index 21cc602b..00000000
--- a/cloudinit/net/__init__.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Blake Rouse <blake.rouse@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import logging
-import os
-import re
-
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-SYS_CLASS_NET = "/sys/class/net/"
-DEFAULT_PRIMARY_INTERFACE = 'eth0'
-
-
-def sys_dev_path(devname, path=""):
- return SYS_CLASS_NET + devname + "/" + path
-
-
-def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
- try:
- contents = util.load_file(sys_dev_path(devname, path))
- except (OSError, IOError) as e:
- if getattr(e, 'errno', None) == errno.ENOENT:
- if enoent is not None:
- return enoent
- raise
- contents = contents.strip()
- if translate is None:
- return contents
- try:
- return translate.get(contents)
- except KeyError:
- LOG.debug("found unexpected value '%s' in '%s/%s'", contents,
- devname, path)
- if keyerror is not None:
- return keyerror
- raise
-
-
-def is_up(devname):
- # The linux kernel says to consider devices in 'unknown'
- # operstate as up for the purposes of network configuration. See
- # Documentation/networking/operstates.txt in the kernel source.
- translate = {'up': True, 'unknown': True, 'down': False}
- return read_sys_net(devname, "operstate", enoent=False, keyerror=False,
- translate=translate)
-
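For example (a sketch; assumes a Linux host with the usual loopback device, whose operstate is typically 'unknown'):

    from cloudinit import net

    net.is_up('lo')   # True: 'unknown' operstate counts as up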
-
-def is_wireless(devname):
- return os.path.exists(sys_dev_path(devname, "wireless"))
-
-
-def is_connected(devname):
-    # is_connected isn't really that simple: an iflink value of 2
-    # means 'physically connected' and 3 means 'not connected', but a
-    # wlan interface will always show 3.
- try:
- iflink = read_sys_net(devname, "iflink", enoent=False)
- if iflink == "2":
- return True
- if not is_wireless(devname):
- return False
- LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
-
- return read_sys_net(devname, "carrier", enoent=False, keyerror=False,
- translate={'0': False, '1': True})
-
- except IOError as e:
- if e.errno == errno.EINVAL:
- return False
- raise
-
-
-def is_physical(devname):
- return os.path.exists(sys_dev_path(devname, "device"))
-
-
-def is_present(devname):
- return os.path.exists(sys_dev_path(devname))
-
-
-def get_devicelist():
- return os.listdir(SYS_CLASS_NET)
-
-
-class ParserError(Exception):
- """Raised when a parser has issue parsing a file/content."""
-
-
-def is_disabled_cfg(cfg):
- if not cfg or not isinstance(cfg, dict):
- return False
- return cfg.get('config') == "disabled"
-
-
-def sys_netdev_info(name, field):
- if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
- raise OSError("%s: interface does not exist in %s" %
- (name, SYS_CLASS_NET))
- fname = os.path.join(SYS_CLASS_NET, name, field)
- if not os.path.exists(fname):
- raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
- data = util.load_file(fname)
- if data[-1] == '\n':
- data = data[:-1]
- return data
-
-
-def generate_fallback_config():
- """Determine which attached net dev is most likely to have a connection and
- generate network state to run dhcp on that interface"""
- # by default use eth0 as primary interface
- nconf = {'config': [], 'version': 1}
-
- # get list of interfaces that could have connections
- invalid_interfaces = set(['lo'])
- potential_interfaces = set(get_devicelist())
- potential_interfaces = potential_interfaces.difference(invalid_interfaces)
- # sort into interfaces with carrier, interfaces which could have carrier,
- # and ignore interfaces that are definitely disconnected
- connected = []
- possibly_connected = []
- for interface in potential_interfaces:
- if interface.startswith("veth"):
- continue
- if os.path.exists(sys_dev_path(interface, "bridge")):
- # skip any bridges
- continue
- try:
- carrier = int(sys_netdev_info(interface, 'carrier'))
- if carrier:
- connected.append(interface)
- continue
- except OSError:
- pass
-        # check if nic is dormant or down, as this may make a nic appear to
- # not have a carrier even though it could acquire one when brought
- # online by dhclient
- try:
- dormant = int(sys_netdev_info(interface, 'dormant'))
- if dormant:
- possibly_connected.append(interface)
- continue
- except OSError:
- pass
- try:
- operstate = sys_netdev_info(interface, 'operstate')
- if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
- possibly_connected.append(interface)
- continue
- except OSError:
- pass
-
- # don't bother with interfaces that might not be connected if there are
- # some that definitely are
- if connected:
- potential_interfaces = connected
- else:
- potential_interfaces = possibly_connected
- # if there are no interfaces, give up
- if not potential_interfaces:
- return
- # if eth0 exists use it above anything else, otherwise get the interface
- # that looks 'first'
- if DEFAULT_PRIMARY_INTERFACE in potential_interfaces:
- name = DEFAULT_PRIMARY_INTERFACE
- else:
- name = sorted(potential_interfaces)[0]
-
- mac = sys_netdev_info(name, 'address')
- target_name = name
-
- nconf['config'].append(
- {'type': 'physical', 'name': target_name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
-
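The shape of the fallback config it returns looks like this (a sketch with a made-up MAC address; the real values come from sysfs):

    from cloudinit import net

    nconf = net.generate_fallback_config()
    # e.g. {'version': 1,
    #       'config': [{'type': 'physical', 'name': 'eth0',
    #                   'mac_address': '52:54:00:12:34:56',
    #                   'subnets': [{'type': 'dhcp'}]}]}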
-
-def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
- """read the network config and rename devices accordingly.
- if strict_present is false, then do not raise exception if no devices
- match. if strict_busy is false, then do not raise exception if the
- device cannot be renamed because it is currently configured."""
- renames = []
- for ent in netcfg.get('config', {}):
- if ent.get('type') != 'physical':
- continue
- mac = ent.get('mac_address')
- name = ent.get('name')
- if not mac:
- continue
- renames.append([mac, name])
-
- return _rename_interfaces(renames)
-
-
-def _get_current_rename_info(check_downable=True):
- """Collect information necessary for rename_interfaces."""
- names = get_devicelist()
- bymac = {}
- for n in names:
- bymac[get_interface_mac(n)] = {
- 'name': n, 'up': is_up(n), 'downable': None}
-
- if check_downable:
- nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
- 'scope', 'global'], capture=True)
- ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
-
- nics_with_addresses = set()
- for bytes_out in (ipv6, ipv4):
- nics_with_addresses.update(nmatch.findall(bytes_out))
-
- for d in bymac.values():
- d['downable'] = (d['up'] is False or
- d['name'] not in nics_with_addresses)
-
- return bymac
-
-
-def _rename_interfaces(renames, strict_present=True, strict_busy=True,
- current_info=None):
-
- if not len(renames):
- LOG.debug("no interfaces to rename")
- return
-
- if current_info is None:
- current_info = _get_current_rename_info()
-
- cur_bymac = {}
- for mac, data in current_info.items():
- cur = data.copy()
- cur['mac'] = mac
- cur_bymac[mac] = cur
-
- def update_byname(bymac):
- return dict((data['name'], data)
- for data in bymac.values())
-
- def rename(cur, new):
- util.subp(["ip", "link", "set", cur, "name", new], capture=True)
-
- def down(name):
- util.subp(["ip", "link", "set", name, "down"], capture=True)
-
- def up(name):
- util.subp(["ip", "link", "set", name, "up"], capture=True)
-
- ops = []
- errors = []
- ups = []
- cur_byname = update_byname(cur_bymac)
- tmpname_fmt = "cirename%d"
- tmpi = -1
-
- for mac, new_name in renames:
- cur = cur_bymac.get(mac, {})
- cur_name = cur.get('name')
- cur_ops = []
- if cur_name == new_name:
- # nothing to do
- continue
-
- if not cur_name:
- if strict_present:
- errors.append(
- "[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
- continue
-
- if cur['up']:
- msg = "[busy] Error renaming mac=%s from %s to %s"
- if not cur['downable']:
- if strict_busy:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- cur['up'] = False
- cur_ops.append(("down", mac, new_name, (cur_name,)))
- ups.append(("up", mac, new_name, (new_name,)))
-
- if new_name in cur_byname:
- target = cur_byname[new_name]
- if target['up']:
- msg = "[busy-target] Error renaming mac=%s from %s to %s."
- if not target['downable']:
- if strict_busy:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- else:
- cur_ops.append(("down", mac, new_name, (new_name,)))
-
- tmp_name = None
- while tmp_name is None or tmp_name in cur_byname:
- tmpi += 1
- tmp_name = tmpname_fmt % tmpi
-
- cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
- target['name'] = tmp_name
- cur_byname = update_byname(cur_bymac)
- if target['up']:
- ups.append(("up", mac, new_name, (tmp_name,)))
-
- cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
- cur['name'] = new_name
- cur_byname = update_byname(cur_bymac)
- ops += cur_ops
-
- opmap = {'rename': rename, 'down': down, 'up': up}
-
- if len(ops) + len(ups) == 0:
- if len(errors):
- LOG.debug("unable to do any work for renaming of %s", renames)
- else:
- LOG.debug("no work necessary for renaming of %s", renames)
- else:
- LOG.debug("achieving renaming of %s with ops %s", renames, ops + ups)
-
- for op, mac, new_name, params in ops + ups:
- try:
- opmap.get(op)(*params)
- except Exception as e:
- errors.append(
- "[unknown] Error performing %s%s for %s, %s: %s" %
- (op, params, mac, new_name, e))
-
- if len(errors):
- raise Exception('\n'.join(errors))
-
-
-def get_interface_mac(ifname):
- """Returns the string value of an interface's MAC Address"""
- return read_sys_net(ifname, "address", enoent=False)
-
-
-def get_interfaces_by_mac(devs=None):
- """Build a dictionary of tuples {mac: name}"""
- if devs is None:
- try:
- devs = get_devicelist()
- except OSError as e:
- if e.errno == errno.ENOENT:
- devs = []
- else:
- raise
- ret = {}
- for name in devs:
- mac = get_interface_mac(name)
- # some devices may not have a mac (tun0)
- if mac:
- ret[mac] = name
- return ret
-
-# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
deleted file mode 100644
index 822a020b..00000000
--- a/cloudinit/net/cmdline.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Blake Rouse <blake.rouse@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import glob
-import gzip
-import io
-import shlex
-import sys
-
-import six
-
-from . import get_devicelist
-from . import sys_netdev_info
-
-from cloudinit import util
-
-PY26 = sys.version_info[0:2] == (2, 6)
-
-
-def _shlex_split(blob):
- if PY26 and isinstance(blob, six.text_type):
- # Older versions don't support unicode input
- blob = blob.encode("utf8")
- return shlex.split(blob)
-
-
-def _load_shell_content(content, add_empty=False, empty_val=None):
- """Given shell like syntax (key=value\nkey2=value2\n) in content
- return the data in dictionary form. If 'add_empty' is True
- then add entries in to the returned dictionary for 'VAR='
- variables. Set their value to empty_val."""
- data = {}
- for line in _shlex_split(content):
- key, value = line.split("=", 1)
- if not value:
- value = empty_val
- if add_empty or value:
- data[key] = value
-
- return data
-
-
-def _klibc_to_config_entry(content, mac_addrs=None):
- """Convert a klibc writtent shell content file to a 'config' entry
- When ip= is seen on the kernel command line in debian initramfs
- and networking is brought up, ipconfig will populate
- /run/net-<name>.cfg.
-
-    The files use shell-style syntax, and examples are in the tests
-    provided here.  There is no good documentation on this, unfortunately.
-
- DEVICE=<name> is expected/required and PROTO should indicate if
- this is 'static' or 'dhcp'.
- """
-
- if mac_addrs is None:
- mac_addrs = {}
-
- data = _load_shell_content(content)
- try:
- name = data['DEVICE']
- except KeyError:
- raise ValueError("no 'DEVICE' entry in data")
-
- # ipconfig on precise does not write PROTO
- proto = data.get('PROTO')
- if not proto:
- if data.get('filename'):
- proto = 'dhcp'
- else:
- proto = 'static'
-
- if proto not in ('static', 'dhcp'):
- raise ValueError("Unexpected value for PROTO: %s" % proto)
-
- iface = {
- 'type': 'physical',
- 'name': name,
- 'subnets': [],
- }
-
- if name in mac_addrs:
- iface['mac_address'] = mac_addrs[name]
-
- # originally believed there might be IPV6* values
- for v, pre in (('ipv4', 'IPV4'),):
- # if no IPV4ADDR or IPV6ADDR, then go on.
- if pre + "ADDR" not in data:
- continue
- subnet = {'type': proto, 'control': 'manual'}
-
- # these fields go right on the subnet
- for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
- if pre + key in data:
- subnet[key.lower()] = data[pre + key]
-
- dns = []
- # handle IPV4DNS0 or IPV6DNS0
- for nskey in ('DNS0', 'DNS1'):
- ns = data.get(pre + nskey)
- # verify it has something other than 0.0.0.0 (or ipv6)
- if ns and len(ns.strip(":.0")):
- dns.append(data[pre + nskey])
- if dns:
- subnet['dns_nameservers'] = dns
- # add search to both ipv4 and ipv6, as it has no namespace
- search = data.get('DOMAINSEARCH')
- if search:
- if ',' in search:
- subnet['dns_search'] = search.split(",")
- else:
- subnet['dns_search'] = search.split()
-
- iface['subnets'].append(subnet)
-
- return name, iface
-
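A sketch of the shell content this parses, with made-up values; _klibc_to_config_entry is a private helper, so this is illustration only:

    from cloudinit.net.cmdline import _klibc_to_config_entry

    content = '\n'.join([
        'DEVICE=eth0',
        'PROTO=static',
        'IPV4ADDR=192.168.1.5',
        'IPV4NETMASK=255.255.255.0',
        'IPV4GATEWAY=192.168.1.1',
        'IPV4DNS0=192.168.1.1',
        'DOMAINSEARCH=example.com',
    ])
    name, iface = _klibc_to_config_entry(content)
    # name -> 'eth0'; iface['subnets'][0] carries the netmask, gateway,
    # dns_nameservers and dns_search values above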
-
-def config_from_klibc_net_cfg(files=None, mac_addrs=None):
- if files is None:
- files = glob.glob('/run/net*.conf')
-
- entries = []
- names = {}
- for cfg_file in files:
- name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
- mac_addrs=mac_addrs)
- if name in names:
- raise ValueError(
- "device '%s' defined multiple times: %s and %s" % (
- name, names[name], cfg_file))
-
- names[name] = cfg_file
- entries.append(entry)
- return {'config': entries, 'version': 1}
-
-
-def _decomp_gzip(blob, strict=True):
- # decompress blob. raise exception if not compressed unless strict=False.
- with io.BytesIO(blob) as iobuf:
- gzfp = None
- try:
- gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
- return gzfp.read()
- except IOError:
- if strict:
- raise
- return blob
- finally:
- if gzfp:
- gzfp.close()
-
-
-def _b64dgz(b64str, gzipped="try"):
-    # decode a base64 string. If gzipped is true, transparently uncompress.
-    # If gzipped is 'try', then try gunzip, returning the original on fail.
- try:
- blob = base64.b64decode(b64str)
- except TypeError:
- raise ValueError("Invalid base64 text: %s" % b64str)
-
- if not gzipped:
- return blob
-
- return _decomp_gzip(blob, strict=gzipped != "try")
-
-
-def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
- if cmdline is None:
- cmdline = util.get_cmdline()
-
- if 'network-config=' in cmdline:
- data64 = None
- for tok in cmdline.split():
- if tok.startswith("network-config="):
- data64 = tok.split("=", 1)[1]
- if data64:
- return util.load_yaml(_b64dgz(data64))
-
- if 'ip=' not in cmdline:
- return None
-
- if mac_addrs is None:
- mac_addrs = dict((k, sys_netdev_info(k, 'address'))
- for k in get_devicelist())
-
- return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
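A sketch of the base64 path through read_kernel_cmdline_config; the cmdline string is synthetic:

    import base64

    from cloudinit.net.cmdline import read_kernel_cmdline_config

    payload = base64.b64encode(b'version: 1\nconfig: []\n').decode()
    cmdline = 'root=/dev/sda1 network-config=%s' % payload
    read_kernel_cmdline_config(cmdline=cmdline)
    # -> {'version': 1, 'config': []}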
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
deleted file mode 100644
index eff5b924..00000000
--- a/cloudinit/net/eni.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import glob
-import os
-import re
-
-from . import ParserError
-
-from . import renderer
-
-from cloudinit import util
-
-
-NET_CONFIG_COMMANDS = [
- "pre-up", "up", "post-up", "down", "pre-down", "post-down",
-]
-
-NET_CONFIG_BRIDGE_OPTIONS = [
- "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
- "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
-]
-
-NET_CONFIG_OPTIONS = [
- "address", "netmask", "broadcast", "network", "metric", "gateway",
- "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
- "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
- "netnum", "endpoint", "local", "ttl",
-]
-
-
-# TODO: switch valid_map based on mode inet/inet6
-def _iface_add_subnet(iface, subnet):
- content = []
- valid_map = [
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'mtu',
- 'scope',
- 'dns_search',
- 'dns_nameservers',
- ]
- for key, value in subnet.items():
- if value and key in valid_map:
- if type(value) == list:
- value = " ".join(value)
- if '_' in key:
- key = key.replace('_', '-')
- content.append(" {0} {1}".format(key, value))
-
- return sorted(content)
-
-
-# TODO: switch to valid_map for attrs
-def _iface_add_attrs(iface, index):
- # If the index is non-zero, this is an alias interface. Alias interfaces
- # represent additional interface addresses, and should not have additional
- # attributes. (extra attributes here are almost always either incorrect,
- # or are applied to the parent interface.) So if this is an alias, stop
- # right here.
- if index != 0:
- return []
- content = []
- ignore_map = [
- 'control',
- 'index',
- 'inet',
- 'mode',
- 'name',
- 'subnets',
- 'type',
- ]
- renames = {'mac_address': 'hwaddress'}
- if iface['type'] not in ['bond', 'bridge', 'vlan']:
- ignore_map.append('mac_address')
-
- for key, value in iface.items():
- if not value or key in ignore_map:
- continue
- if type(value) == list:
- value = " ".join(value)
- content.append(" {0} {1}".format(renames.get(key, key), value))
-
- return sorted(content)
-
-
-def _iface_start_entry(iface, index, render_hwaddress=False):
- fullname = iface['name']
- if index != 0:
- fullname += ":%s" % index
-
- control = iface['control']
- if control == "auto":
- cverb = "auto"
- elif control in ("hotplug",):
- cverb = "allow-" + control
- else:
- cverb = "# control-" + control
-
- subst = iface.copy()
- subst.update({'fullname': fullname, 'cverb': cverb})
-
- lines = [
- "{cverb} {fullname}".format(**subst),
- "iface {fullname} {inet} {mode}".format(**subst)]
- if render_hwaddress and iface.get('mac_address'):
- lines.append(" hwaddress {mac_address}".format(**subst))
-
- return lines
-
-
-def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
- """Parses the file contents, placing result into ifaces.
-
- '_source_path' is added to every dictionary entry to define which file
-    the configuration information came from.
-
- :param ifaces: interface dictionary
- :param contents: contents of interfaces file
- :param src_dir: directory interfaces file was located
-    :param src_path: file path the `contents` was read from
- """
- currif = None
- for line in contents.splitlines():
- line = line.strip()
- if line.startswith('#'):
- continue
- split = line.split(' ')
- option = split[0]
- if option == "source-directory":
- parsed_src_dir = split[1]
- if not parsed_src_dir.startswith("/"):
- parsed_src_dir = os.path.join(src_dir, parsed_src_dir)
- for expanded_path in glob.glob(parsed_src_dir):
- dir_contents = os.listdir(expanded_path)
- dir_contents = [
- os.path.join(expanded_path, path)
- for path in dir_contents
- if (os.path.isfile(os.path.join(expanded_path, path)) and
- re.match("^[a-zA-Z0-9_-]+$", path) is not None)
- ]
- for entry in dir_contents:
- with open(entry, "r") as fp:
- src_data = fp.read().strip()
- abs_entry = os.path.abspath(entry)
- _parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_entry), abs_entry)
- elif option == "source":
- new_src_path = split[1]
- if not new_src_path.startswith("/"):
- new_src_path = os.path.join(src_dir, new_src_path)
- for expanded_path in glob.glob(new_src_path):
- with open(expanded_path, "r") as fp:
- src_data = fp.read().strip()
- abs_path = os.path.abspath(expanded_path)
- _parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_path), abs_path)
- elif option == "auto":
- for iface in split[1:]:
- if iface not in ifaces:
- ifaces[iface] = {
- # Include the source path this interface was found in.
- "_source_path": src_path
- }
- ifaces[iface]['auto'] = True
- elif option == "iface":
- iface, family, method = split[1:4]
- if iface not in ifaces:
- ifaces[iface] = {
- # Include the source path this interface was found in.
- "_source_path": src_path
- }
- elif 'family' in ifaces[iface]:
- raise ParserError(
- "Interface %s can only be defined once. "
- "Re-defined in '%s'." % (iface, src_path))
- ifaces[iface]['family'] = family
- ifaces[iface]['method'] = method
- currif = iface
- elif option == "hwaddress":
- if split[1] == "ether":
- val = split[2]
- else:
- val = split[1]
- ifaces[currif]['hwaddress'] = val
- elif option in NET_CONFIG_OPTIONS:
- ifaces[currif][option] = split[1]
- elif option in NET_CONFIG_COMMANDS:
- if option not in ifaces[currif]:
- ifaces[currif][option] = []
- ifaces[currif][option].append(' '.join(split[1:]))
- elif option.startswith('dns-'):
- if 'dns' not in ifaces[currif]:
- ifaces[currif]['dns'] = {}
- if option == 'dns-search':
- ifaces[currif]['dns']['search'] = []
- for domain in split[1:]:
- ifaces[currif]['dns']['search'].append(domain)
- elif option == 'dns-nameservers':
- ifaces[currif]['dns']['nameservers'] = []
- for server in split[1:]:
- ifaces[currif]['dns']['nameservers'].append(server)
- elif option.startswith('bridge_'):
- if 'bridge' not in ifaces[currif]:
- ifaces[currif]['bridge'] = {}
- if option in NET_CONFIG_BRIDGE_OPTIONS:
- bridge_option = option.replace('bridge_', '', 1)
- ifaces[currif]['bridge'][bridge_option] = split[1]
- elif option == "bridge_ports":
- ifaces[currif]['bridge']['ports'] = []
- for iface in split[1:]:
- ifaces[currif]['bridge']['ports'].append(iface)
- elif option == "bridge_hw" and split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
- elif option == "bridge_pathcost":
- if 'pathcost' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['pathcost'] = {}
- ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
- elif option == "bridge_portprio":
- if 'portprio' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['portprio'] = {}
- ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
- elif option.startswith('bond-'):
- if 'bond' not in ifaces[currif]:
- ifaces[currif]['bond'] = {}
- bond_option = option.replace('bond-', '', 1)
- ifaces[currif]['bond'][bond_option] = split[1]
- for iface in ifaces.keys():
- if 'auto' not in ifaces[iface]:
- ifaces[iface]['auto'] = False
-
-
-def parse_deb_config(path):
- """Parses a debian network configuration file."""
- ifaces = {}
- with open(path, "r") as fp:
- contents = fp.read().strip()
- abs_path = os.path.abspath(path)
- _parse_deb_config_data(
- ifaces, contents,
- os.path.dirname(abs_path), abs_path)
- return ifaces
-
-
-def convert_eni_data(eni_data):
- # return a network config representation of what is in eni_data
- ifaces = {}
- _parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None)
- return _ifaces_to_net_config_data(ifaces)
-
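A sketch of convert_eni_data on a tiny interfaces blob:

    from cloudinit.net.eni import convert_eni_data

    eni = '\n'.join([
        'auto eth0',
        'iface eth0 inet dhcp',
    ])
    convert_eni_data(eni)
    # -> {'version': 1,
    #     'config': [{'type': 'physical', 'name': 'eth0',
    #                 'subnets': [{'_orig_eni_name': 'eth0',
    #                              'type': 'dhcp', 'control': 'auto'}]}]}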
-
-def _ifaces_to_net_config_data(ifaces):
- """Return network config that represents the ifaces data provided.
- ifaces = parse_deb_config("/etc/network/interfaces")
- config = ifaces_to_net_config_data(ifaces)
- state = parse_net_config_data(config)."""
- devs = {}
- for name, data in ifaces.items():
- # devname is 'eth0' for name='eth0:1'
- devname = name.partition(":")[0]
- if devname not in devs:
- devs[devname] = {'type': 'physical', 'name': devname,
- 'subnets': []}
-        # this isn't strictly correct, but some might specify
- # hwaddress on a nic for matching / declaring name.
- if 'hwaddress' in data:
- devs[devname]['mac_address'] = data['hwaddress']
- subnet = {'_orig_eni_name': name, 'type': data['method']}
- if data.get('auto'):
- subnet['control'] = 'auto'
- else:
- subnet['control'] = 'manual'
-
- if data.get('method') == 'static':
- subnet['address'] = data['address']
-
- for copy_key in ('netmask', 'gateway', 'broadcast'):
- if copy_key in data:
- subnet[copy_key] = data[copy_key]
-
- if 'dns' in data:
- for n in ('nameservers', 'search'):
- if n in data['dns'] and data['dns'][n]:
- subnet['dns_' + n] = data['dns'][n]
- devs[devname]['subnets'].append(subnet)
-
- return {'version': 1,
- 'config': [devs[d] for d in sorted(devs)]}
-
-
-class Renderer(renderer.Renderer):
- """Renders network information in a /etc/network/interfaces format."""
-
- def __init__(self, config=None):
- if not config:
- config = {}
- self.eni_path = config.get('eni_path', 'etc/network/interfaces')
- self.eni_header = config.get('eni_header', None)
- self.links_path_prefix = config.get(
- 'links_path_prefix', 'etc/systemd/network/50-cloud-init-')
- self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
-
- def _render_route(self, route, indent=""):
- """When rendering routes for an iface, in some cases applying a route
- may result in the route command returning non-zero which produces
- some confusing output for users manually using ifup/ifdown[1]. To
- that end, we will optionally include an '|| true' postfix to each
- route line allowing users to work with ifup/ifdown without using
- --force option.
-
-        We may at some point not want to emit this additional postfix, and
- add a 'strict' flag to this function. When called with strict=True,
- then we will not append the postfix.
-
- 1. http://askubuntu.com/questions/168033/
- how-to-set-static-routes-in-ubuntu-server
- """
- content = []
- up = indent + "post-up route add"
- down = indent + "pre-down route del"
- or_true = " || true"
- mapping = {
- 'network': '-net',
- 'netmask': 'netmask',
- 'gateway': 'gw',
- 'metric': 'metric',
- }
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- default_gw = " default gw %s" % route['gateway']
- content.append(up + default_gw + or_true)
- content.append(down + default_gw + or_true)
- elif route['network'] == '::' and route['netmask'] == 0:
- # ipv6!
- default_gw = " -A inet6 default gw %s" % route['gateway']
- content.append(up + default_gw + or_true)
- content.append(down + default_gw + or_true)
- else:
- route_line = ""
- for k in ['network', 'netmask', 'gateway', 'metric']:
- if k in route:
- route_line += " %s %s" % (mapping[k], route[k])
- content.append(up + route_line + or_true)
- content.append(down + route_line + or_true)
- return content
-
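Concretely, for a default route the docstring above is describing output like this (a sketch with a made-up gateway):

    from cloudinit.net.eni import Renderer

    route = {'network': '0.0.0.0', 'netmask': '0.0.0.0',
             'gateway': '192.168.1.1'}
    Renderer()._render_route(route, indent="    ")
    # -> ['    post-up route add default gw 192.168.1.1 || true',
    #     '    pre-down route del default gw 192.168.1.1 || true']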
- def _render_iface(self, iface, render_hwaddress=False):
- sections = []
- subnets = iface.get('subnets', {})
- if subnets:
- for index, subnet in zip(range(0, len(subnets)), subnets):
- iface['index'] = index
- iface['mode'] = subnet['type']
- iface['control'] = subnet.get('control', 'auto')
- subnet_inet = 'inet'
- if iface['mode'].endswith('6'):
- # This is a request for DHCPv6.
- subnet_inet += '6'
- elif iface['mode'] == 'static' and ":" in subnet['address']:
- # This is a static IPv6 address.
- subnet_inet += '6'
- iface['inet'] = subnet_inet
- if iface['mode'].startswith('dhcp'):
- iface['mode'] = 'dhcp'
-
- lines = list(
- _iface_start_entry(
- iface, index, render_hwaddress=render_hwaddress) +
- _iface_add_subnet(iface, subnet) +
- _iface_add_attrs(iface, index)
- )
- for route in subnet.get('routes', []):
- lines.extend(self._render_route(route, indent=" "))
-
- if len(subnets) > 1 and index == 0:
- tmpl = " post-up ifup %s:%s\n"
- for i in range(1, len(subnets)):
- lines.append(tmpl % (iface['name'], i))
-
- sections.append(lines)
- else:
- # ifenslave docs say to auto the slave devices
- lines = []
- if 'bond-master' in iface:
- lines.append("auto {name}".format(**iface))
- lines.append("iface {name} {inet} {mode}".format(**iface))
- lines.extend(_iface_add_attrs(iface, index=0))
- sections.append(lines)
- return sections
-
- def _render_interfaces(self, network_state, render_hwaddress=False):
- '''Given state, emit etc/network/interfaces content.'''
-
- # handle 'lo' specifically as we need to insert the global dns entries
- # there (as that is the only interface that will be always up).
- lo = {'name': 'lo', 'type': 'physical', 'inet': 'inet',
- 'subnets': [{'type': 'loopback', 'control': 'auto'}]}
- for iface in network_state.iter_interfaces():
- if iface.get('name') == "lo":
- lo = copy.deepcopy(iface)
-
- nameservers = network_state.dns_nameservers
- if nameservers:
- lo['subnets'][0]["dns_nameservers"] = (" ".join(nameservers))
-
- searchdomains = network_state.dns_searchdomains
- if searchdomains:
- lo['subnets'][0]["dns_search"] = (" ".join(searchdomains))
-
- ''' Apply a sort order to ensure that we write out
- the physical interfaces first; this is critical for
- bonding
- '''
- order = {
- 'physical': 0,
- 'bond': 1,
- 'bridge': 2,
- 'vlan': 3,
- }
-
- sections = []
- sections.extend(self._render_iface(lo))
- for iface in sorted(network_state.iter_interfaces(),
- key=lambda k: (order[k['type']], k['name'])):
-
- if iface.get('name') == "lo":
- continue
- sections.extend(
- self._render_iface(iface, render_hwaddress=render_hwaddress))
-
- for route in network_state.iter_routes():
- sections.append(self._render_route(route))
-
- return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
-
- def render_network_state(self, target, network_state):
- fpeni = os.path.join(target, self.eni_path)
- util.ensure_dir(os.path.dirname(fpeni))
- header = self.eni_header if self.eni_header else ""
- util.write_file(fpeni, header + self._render_interfaces(network_state))
-
- if self.netrules_path:
- netrules = os.path.join(target, self.netrules_path)
- util.ensure_dir(os.path.dirname(netrules))
- util.write_file(netrules,
- self._render_persistent_net(network_state))
-
- if self.links_path_prefix:
- self._render_systemd_links(target, network_state,
- links_prefix=self.links_path_prefix)
-
- def _render_systemd_links(self, target, network_state, links_prefix):
- fp_prefix = os.path.join(target, links_prefix)
- for f in glob.glob(fp_prefix + "*"):
- os.unlink(f)
- for iface in network_state.iter_interfaces():
- if (iface['type'] == 'physical' and 'name' in iface and
- iface.get('mac_address')):
- fname = fp_prefix + iface['name'] + ".link"
- content = "\n".join([
- "[Match]",
- "MACAddress=" + iface['mac_address'],
- "",
- "[Link]",
- "Name=" + iface['name'],
- ""
- ])
- util.write_file(fname, content)
-
-
-def network_state_to_eni(network_state, header=None, render_hwaddress=False):
- # render the provided network state, return a string of equivalent eni
- eni_path = 'etc/network/interfaces'
- renderer = Renderer({
- 'eni_path': eni_path,
- 'eni_header': header,
- 'links_path_prefix': None,
- 'netrules_path': None,
- })
- if not header:
- header = ""
- if not header.endswith("\n"):
- header += "\n"
- contents = renderer._render_interfaces(
- network_state, render_hwaddress=render_hwaddress)
- return header + contents
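A hedged round-trip sketch; it assumes parse_net_config_data (from cloudinit/net/network_state.py, deleted just below) returns a NetworkState exposing the iter_interfaces/dns properties that _render_interfaces above consumes:

    from cloudinit.net import eni, network_state

    ns = network_state.parse_net_config_data({
        'version': 1,
        'config': [{'type': 'physical', 'name': 'eth0',
                    'subnets': [{'type': 'dhcp'}]}]})
    print(eni.network_state_to_eni(ns, header='# auto-generated\n'))
    # emits 'auto lo' / 'iface lo inet loopback' plus
    # 'auto eth0' / 'iface eth0 inet dhcp' stanzas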
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
deleted file mode 100644
index 8ca5106f..00000000
--- a/cloudinit/net/network_state.py
+++ /dev/null
@@ -1,454 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import functools
-import logging
-
-import six
-
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-NETWORK_STATE_VERSION = 1
-NETWORK_STATE_REQUIRED_KEYS = {
- 1: ['version', 'config', 'network_state'],
-}
-
-
-def parse_net_config_data(net_config, skip_broken=True):
- """Parses the config, returns NetworkState object
-
- :param net_config: curtin network config dict
- """
- state = None
- if 'version' in net_config and 'config' in net_config:
- nsi = NetworkStateInterpreter(version=net_config.get('version'),
- config=net_config.get('config'))
- nsi.parse_config(skip_broken=skip_broken)
- state = nsi.network_state
- return state
-
-
-def parse_net_config(path, skip_broken=True):
- """Parses a curtin network configuration file and
- return network state"""
- ns = None
- net_config = util.read_conf(path)
- if 'network' in net_config:
- ns = parse_net_config_data(net_config.get('network'),
- skip_broken=skip_broken)
- return ns
-
-
-def from_state_file(state_file):
- state = util.read_conf(state_file)
- nsi = NetworkStateInterpreter()
- nsi.load(state)
- return nsi
-
-
-def diff_keys(expected, actual):
- missing = set(expected)
- for key in actual:
- missing.discard(key)
- return missing
-
-
-class InvalidCommand(Exception):
- pass
-
-
-def ensure_command_keys(required_keys):
-
- def wrapper(func):
-
- @functools.wraps(func)
- def decorator(self, command, *args, **kwargs):
- if required_keys:
- missing_keys = diff_keys(required_keys, command)
- if missing_keys:
- raise InvalidCommand("Command missing %s of required"
- " keys %s" % (missing_keys,
- required_keys))
- return func(self, command, *args, **kwargs)
-
- return decorator
-
- return wrapper
-
-
-class CommandHandlerMeta(type):
- """Metaclass that dynamically creates a 'command_handlers' attribute.
-
- This will scan the to-be-created class for methods that start with
- 'handle_' and on finding those will populate a class attribute mapping
- so that those methods can be quickly located and called.
- """
- def __new__(cls, name, parents, dct):
- command_handlers = {}
- for attr_name, attr in dct.items():
- if callable(attr) and attr_name.startswith('handle_'):
- handles_what = attr_name[len('handle_'):]
- if handles_what:
- command_handlers[handles_what] = attr
- dct['command_handlers'] = command_handlers
- return super(CommandHandlerMeta, cls).__new__(cls, name,
- parents, dct)
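
To make the metaclass behaviour concrete, a small hedged example (the Demo class is hypothetical, not from the tree):

    @six.add_metaclass(CommandHandlerMeta)
    class Demo(object):
        def handle_physical(self, command):
            return 'saw %s' % command['name']

    # the metaclass collected handle_* methods at class creation time
    assert 'physical' in Demo.command_handlers
    assert Demo().handle_physical({'name': 'eth0'}) == 'saw eth0'
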
-
-
-class NetworkState(object):
-
- def __init__(self, network_state, version=NETWORK_STATE_VERSION):
- self._network_state = copy.deepcopy(network_state)
- self._version = version
-
- @property
- def version(self):
- return self._version
-
- def iter_routes(self, filter_func=None):
- for route in self._network_state.get('routes', []):
- if filter_func is not None:
- if filter_func(route):
- yield route
- else:
- yield route
-
- @property
- def dns_nameservers(self):
- try:
- return self._network_state['dns']['nameservers']
- except KeyError:
- return []
-
- @property
- def dns_searchdomains(self):
- try:
- return self._network_state['dns']['search']
- except KeyError:
- return []
-
- def iter_interfaces(self, filter_func=None):
- ifaces = self._network_state.get('interfaces', {})
- for iface in six.itervalues(ifaces):
- if filter_func is None:
- yield iface
- else:
- if filter_func(iface):
- yield iface
-
-
-@six.add_metaclass(CommandHandlerMeta)
-class NetworkStateInterpreter(object):
-
- initial_network_state = {
- 'interfaces': {},
- 'routes': [],
- 'dns': {
- 'nameservers': [],
- 'search': [],
- }
- }
-
- def __init__(self, version=NETWORK_STATE_VERSION, config=None):
- self._version = version
- self._config = config
- self._network_state = copy.deepcopy(self.initial_network_state)
- self._parsed = False
-
- @property
- def network_state(self):
- return NetworkState(self._network_state, version=self._version)
-
- def dump(self):
- state = {
- 'version': self._version,
- 'config': self._config,
- 'network_state': self._network_state,
- }
- return util.yaml_dumps(state)
-
- def load(self, state):
- if 'version' not in state:
- LOG.error('Invalid state, missing version field')
- raise ValueError('Invalid state, missing version field')
-
- required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
- missing_keys = diff_keys(required_keys, state)
- if missing_keys:
- msg = 'Invalid state, missing keys: %s' % (missing_keys)
- LOG.error(msg)
- raise ValueError(msg)
-
- # v1 - direct attr mapping, except version
- for key in [k for k in required_keys if k not in ['version']]:
- setattr(self, key, state[key])
-
- def dump_network_state(self):
- return util.yaml_dumps(self._network_state)
-
- def parse_config(self, skip_broken=True):
- # rebuild network state
- for command in self._config:
- command_type = command['type']
- try:
- handler = self.command_handlers[command_type]
- except KeyError:
- raise RuntimeError("No handler found for"
- " command '%s'" % command_type)
- try:
- handler(self, command)
- except InvalidCommand:
- if not skip_broken:
- raise
- else:
- LOG.warn("Skipping invalid command: %s", command,
- exc_info=True)
- LOG.debug(self.dump_network_state())
-
- @ensure_command_keys(['name'])
- def handle_physical(self, command):
- '''
- command = {
- 'type': 'physical',
- 'mac_address': 'c0:d6:9f:2c:e8:80',
- 'name': 'eth0',
- 'subnets': [
- {'type': 'dhcp4'}
- ]
- }
- '''
-
- interfaces = self._network_state.get('interfaces', {})
- iface = interfaces.get(command['name'], {})
- for param, val in command.get('params', {}).items():
- iface.update({param: val})
-
- # convert subnet ipv6 netmask to cidr as needed
- subnets = command.get('subnets')
- if subnets:
- for subnet in subnets:
- if subnet['type'] == 'static':
- if 'netmask' in subnet and ':' in subnet['address']:
- subnet['netmask'] = mask2cidr(subnet['netmask'])
- for route in subnet.get('routes', []):
- if 'netmask' in route:
- route['netmask'] = mask2cidr(route['netmask'])
- iface.update({
- 'name': command.get('name'),
- 'type': command.get('type'),
- 'mac_address': command.get('mac_address'),
- 'inet': 'inet',
- 'mode': 'manual',
- 'mtu': command.get('mtu'),
- 'address': None,
- 'gateway': None,
- 'subnets': subnets,
- })
- self._network_state['interfaces'].update({command.get('name'): iface})
- self.dump_network_state()
-
- @ensure_command_keys(['name', 'vlan_id', 'vlan_link'])
- def handle_vlan(self, command):
- '''
- auto eth0.222
- iface eth0.222 inet static
- address 10.10.10.1
- netmask 255.255.255.0
- hwaddress ether BC:76:4E:06:96:B3
- vlan-raw-device eth0
- '''
- interfaces = self._network_state.get('interfaces', {})
- self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['vlan-raw-device'] = command.get('vlan_link')
- iface['vlan_id'] = command.get('vlan_id')
- interfaces.update({iface['name']: iface})
-
- @ensure_command_keys(['name', 'bond_interfaces', 'params'])
- def handle_bond(self, command):
- '''
- #/etc/network/interfaces
- auto eth0
- iface eth0 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto eth1
- iface eth1 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto bond0
- iface bond0 inet static
- address 192.168.0.10
- gateway 192.168.0.1
- netmask 255.255.255.0
- bond-slaves none
- bond-mode 802.3ad
- bond-miimon 100
- bond-downdelay 200
- bond-updelay 200
- bond-lacp-rate fast
- '''
-
- self.handle_physical(command)
- interfaces = self._network_state.get('interfaces')
- iface = interfaces.get(command.get('name'), {})
- for param, val in command.get('params').items():
- iface.update({param: val})
- iface.update({'bond-slaves': 'none'})
- self._network_state['interfaces'].update({iface['name']: iface})
-
- # handle bond slaves
- for ifname in command.get('bond_interfaces'):
- if ifname not in interfaces:
- cmd = {
- 'name': ifname,
- 'type': 'bond',
- }
- # inject placeholder
- self.handle_physical(cmd)
-
- interfaces = self._network_state.get('interfaces', {})
- bond_if = interfaces.get(ifname)
- bond_if['bond-master'] = command.get('name')
- # copy in bond config into slave
- for param, val in command.get('params').items():
- bond_if.update({param: val})
- self._network_state['interfaces'].update({ifname: bond_if})
-
- @ensure_command_keys(['name', 'bridge_interfaces', 'params'])
- def handle_bridge(self, command):
- '''
- auto br0
- iface br0 inet static
- address 10.10.10.1
- netmask 255.255.255.0
- bridge_ports eth0 eth1
- bridge_stp off
- bridge_fd 0
- bridge_maxwait 0
-
- bridge_params = [
- "bridge_ports",
- "bridge_ageing",
- "bridge_bridgeprio",
- "bridge_fd",
- "bridge_gcint",
- "bridge_hello",
- "bridge_hw",
- "bridge_maxage",
- "bridge_maxwait",
- "bridge_pathcost",
- "bridge_portprio",
- "bridge_stp",
- "bridge_waitport",
- ]
- '''
-
- # find one of the bridge port ifaces to get mac_addr
- # handle bridge_slaves
- interfaces = self._network_state.get('interfaces', {})
- for ifname in command.get('bridge_interfaces'):
- if ifname in interfaces:
- continue
-
- cmd = {
- 'name': ifname,
- }
- # inject placeholder
- self.handle_physical(cmd)
-
- interfaces = self._network_state.get('interfaces', {})
- self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['bridge_ports'] = command['bridge_interfaces']
- for param, val in command.get('params').items():
- iface.update({param: val})
-
- interfaces.update({iface['name']: iface})
-
- @ensure_command_keys(['address'])
- def handle_nameserver(self, command):
- dns = self._network_state.get('dns')
- if 'address' in command:
- addrs = command['address']
- if not isinstance(addrs, list):
- addrs = [addrs]
- for addr in addrs:
- dns['nameservers'].append(addr)
- if 'search' in command:
- paths = command['search']
- if not isinstance(paths, list):
- paths = [paths]
- for path in paths:
- dns['search'].append(path)
-
- @ensure_command_keys(['destination'])
- def handle_route(self, command):
- routes = self._network_state.get('routes', [])
- network, cidr = command['destination'].split("/")
- netmask = cidr2mask(int(cidr))
- route = {
- 'network': network,
- 'netmask': netmask,
- 'gateway': command.get('gateway'),
- 'metric': command.get('metric'),
- }
- routes.append(route)
-
-
-def cidr2mask(cidr):
- mask = [0, 0, 0, 0]
- for i in list(range(0, cidr)):
- idx = int(i / 8)
- mask[idx] = mask[idx] + (1 << (7 - i % 8))
- return ".".join([str(x) for x in mask])
-
-
-def ipv4mask2cidr(mask):
- if '.' not in mask:
- return mask
- return sum([bin(int(x)).count('1') for x in mask.split('.')])
-
-
-def ipv6mask2cidr(mask):
- if ':' not in mask:
- return mask
-
- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
- 0xfffe, 0xffff]
- cidr = 0
- for word in mask.split(':'):
- if not word or int(word, 16) == 0:
- break
- cidr += bitCount.index(int(word, 16))
-
- return cidr
-
-
-def mask2cidr(mask):
- if ':' in mask:
- return ipv6mask2cidr(mask)
- elif '.' in mask:
- return ipv4mask2cidr(mask)
- else:
- return mask
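
Worked examples for the conversion helpers above (values verified by hand):

    assert cidr2mask(24) == '255.255.255.0'
    assert cidr2mask(23) == '255.255.254.0'
    assert ipv4mask2cidr('255.255.255.0') == 24
    assert mask2cidr('ffff:ffff:ffff:ffff::') == 64
    assert mask2cidr('24') == '24'   # non-dotted input passes through unchanged
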
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
deleted file mode 100644
index 310cbe0d..00000000
--- a/cloudinit/net/renderer.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Blake Rouse <blake.rouse@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-from .udev import generate_udev_rule
-
-
-def filter_by_type(match_type):
- return lambda iface: match_type == iface['type']
-
-
-def filter_by_name(match_name):
- return lambda iface: match_name == iface['name']
-
-
-filter_by_physical = filter_by_type('physical')
-
-
-class Renderer(object):
-
- @staticmethod
- def _render_persistent_net(network_state):
- """Given state, emit udev rules to map mac to ifname."""
- # TODO(harlowja): this seems shared between eni renderer and
- # this, so move it to a shared location.
- content = six.StringIO()
- for iface in network_state.iter_interfaces(filter_by_physical):
- # for physical interfaces write out a persist net udev rule
- if 'name' in iface and iface.get('mac_address'):
- content.write(generate_udev_rule(iface['name'],
- iface['mac_address']))
- return content.getvalue()
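
A quick illustration of the filter helpers (the iface dicts are hypothetical):

    ifaces = [{'type': 'physical', 'name': 'eth0'},
              {'type': 'bond', 'name': 'bond0'}]
    assert [i['name'] for i in ifaces if filter_by_physical(i)] == ['eth0']
    assert [i['name'] for i in ifaces if filter_by_name('bond0')(i)] == ['bond0']

In practice these were passed as filter_func to NetworkState.iter_interfaces(), as _render_persistent_net does above.
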
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
deleted file mode 100644
index c53acf71..00000000
--- a/cloudinit/net/sysconfig.py
+++ /dev/null
@@ -1,400 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-
-import six
-
-from cloudinit.distros.parsers import resolv_conf
-from cloudinit import util
-
-from . import renderer
-
-
-def _make_header(sep='#'):
- lines = [
- "Created by cloud-init on instance boot automatically, do not edit.",
- "",
- ]
- for i in range(0, len(lines)):
- if lines[i]:
- lines[i] = sep + " " + lines[i]
- else:
- lines[i] = sep
- return "\n".join(lines)
-
-
-def _is_default_route(route):
- if route['network'] == '::' and route['netmask'] == 0:
- return True
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- return True
- return False
-
-
-def _quote_value(value):
- if re.search(r"\s", value):
- # This doesn't handle complex cases...
- if value.startswith('"') and value.endswith('"'):
- return value
- else:
- return '"%s"' % value
- else:
- return value
-
-
-class ConfigMap(object):
- """Sysconfig like dictionary object."""
-
- # Why does redhat prefer yes/no to true/false??
- _bool_map = {
- True: 'yes',
- False: 'no',
- }
-
- def __init__(self):
- self._conf = {}
-
- def __setitem__(self, key, value):
- self._conf[key] = value
-
- def drop(self, key):
- self._conf.pop(key, None)
-
- def __len__(self):
- return len(self._conf)
-
- def to_string(self):
- buf = six.StringIO()
- buf.write(_make_header())
- if self._conf:
- buf.write("\n")
- for key in sorted(self._conf.keys()):
- value = self._conf[key]
- if isinstance(value, bool):
- value = self._bool_map[value]
- if not isinstance(value, six.string_types):
- value = str(value)
- buf.write("%s=%s\n" % (key, _quote_value(value)))
- return buf.getvalue()
-
-
-class Route(ConfigMap):
- """Represents a route configuration."""
-
- route_fn_tpl = '%(base)s/network-scripts/route-%(name)s'
-
- def __init__(self, route_name, base_sysconf_dir):
- super(Route, self).__init__()
- self.last_idx = 1
- self.has_set_default = False
- self._route_name = route_name
- self._base_sysconf_dir = base_sysconf_dir
-
- def copy(self):
- r = Route(self._route_name, self._base_sysconf_dir)
- r._conf = self._conf.copy()
- r.last_idx = self.last_idx
- r.has_set_default = self.has_set_default
- return r
-
- @property
- def path(self):
- return self.route_fn_tpl % ({'base': self._base_sysconf_dir,
- 'name': self._route_name})
-
-
-class NetInterface(ConfigMap):
- """Represents a sysconfig/networking-script (and its config + children)."""
-
- iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s'
-
- iface_types = {
- 'ethernet': 'Ethernet',
- 'bond': 'Bond',
- 'bridge': 'Bridge',
- }
-
- def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'):
- super(NetInterface, self).__init__()
- self.children = []
- self.routes = Route(iface_name, base_sysconf_dir)
- self._kind = kind
- self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
- self._conf['TYPE'] = self.iface_types[kind]
- self._base_sysconf_dir = base_sysconf_dir
-
- @property
- def name(self):
- return self._iface_name
-
- @name.setter
- def name(self, iface_name):
- self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
-
- @property
- def kind(self):
- return self._kind
-
- @kind.setter
- def kind(self, kind):
- self._kind = kind
- self._conf['TYPE'] = self.iface_types[kind]
-
- @property
- def path(self):
- return self.iface_fn_tpl % ({'base': self._base_sysconf_dir,
- 'name': self.name})
-
- def copy(self, copy_children=False, copy_routes=False):
- c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind)
- c._conf = self._conf.copy()
- if copy_children:
- c.children = list(self.children)
- if copy_routes:
- c.routes = self.routes.copy()
- return c
-
-
-class Renderer(renderer.Renderer):
- """Renders network information in a /etc/sysconfig format."""
-
- # See: https://access.redhat.com/documentation/en-US/\
- # Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/\
- # s1-networkscripts-interfaces.html (or other docs for
- # details about this)
-
- iface_defaults = tuple([
- ('ONBOOT', True),
- ('USERCTL', False),
- ('NM_CONTROLLED', False),
- ('BOOTPROTO', 'none'),
- ])
-
- # If these keys exist, then their values will be used to form
- # a BONDING_OPTS grouping; otherwise no grouping will be set.
- bond_tpl_opts = tuple([
- ('bond_mode', "mode=%s"),
- ('bond_xmit_hash_policy', "xmit_hash_policy=%s"),
- ('bond_miimon', "miimon=%s"),
- ])
-
- bridge_opts_keys = tuple([
- ('bridge_stp', 'STP'),
- ('bridge_ageing', 'AGEING'),
- ('bridge_bridgeprio', 'PRIO'),
- ])
-
- def __init__(self, config=None):
- if not config:
- config = {}
- self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig/')
- self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
- self.dns_path = config.get('dns_path', 'etc/resolv.conf')
-
- @classmethod
- def _render_iface_shared(cls, iface, iface_cfg):
- for k, v in cls.iface_defaults:
- iface_cfg[k] = v
- for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
- old_value = iface.get(old_key)
- if old_value is not None:
- iface_cfg[new_key] = old_value
-
- @classmethod
- def _render_subnet(cls, iface_cfg, route_cfg, subnet):
- subnet_type = subnet.get('type')
- if subnet_type == 'dhcp6':
- iface_cfg['DHCPV6C'] = True
- iface_cfg['IPV6INIT'] = True
- iface_cfg['BOOTPROTO'] = 'dhcp'
- elif subnet_type in ['dhcp4', 'dhcp']:
- iface_cfg['BOOTPROTO'] = 'dhcp'
- elif subnet_type == 'static':
- iface_cfg['BOOTPROTO'] = 'static'
- if subnet.get('ipv6'):
- iface_cfg['IPV6ADDR'] = subnet['address']
- iface_cfg['IPV6INIT'] = True
- else:
- iface_cfg['IPADDR'] = subnet['address']
- else:
- raise ValueError("Unknown subnet type '%s' found"
- " for interface '%s'" % (subnet_type,
- iface_cfg.name))
- if 'netmask' in subnet:
- iface_cfg['NETMASK'] = subnet['netmask']
- for route in subnet.get('routes', []):
- if _is_default_route(route):
- if route_cfg.has_set_default:
- raise ValueError("Duplicate declaration of default"
- " route found for interface '%s'"
- % (iface_cfg.name))
- # NOTE(harlowja): ipv6 and ipv4 default gateways
- gw_key = 'GATEWAY0'
- nm_key = 'NETMASK0'
- addr_key = 'ADDRESS0'
- # The owning interface provides the default route.
- #
- # TODO(harlowja): add validation that no other iface has
- # also provided the default route?
- iface_cfg['DEFROUTE'] = True
- if 'gateway' in route:
- iface_cfg['GATEWAY'] = route['gateway']
- route_cfg.has_set_default = True
- else:
- gw_key = 'GATEWAY%s' % route_cfg.last_idx
- nm_key = 'NETMASK%s' % route_cfg.last_idx
- addr_key = 'ADDRESS%s' % route_cfg.last_idx
- route_cfg.last_idx += 1
- for (old_key, new_key) in [('gateway', gw_key),
- ('netmask', nm_key),
- ('network', addr_key)]:
- if old_key in route:
- route_cfg[new_key] = route[old_key]
-
- @classmethod
- def _render_bonding_opts(cls, iface_cfg, iface):
- bond_opts = []
- for (bond_key, value_tpl) in cls.bond_tpl_opts:
- # Seems like either dash or underscore is possible?
- bond_keys = [bond_key, bond_key.replace("_", "-")]
- for bond_key in bond_keys:
- if bond_key in iface:
- bond_value = iface[bond_key]
- if isinstance(bond_value, (tuple, list)):
- bond_value = " ".join(bond_value)
- bond_opts.append(value_tpl % (bond_value))
- break
- if bond_opts:
- iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
-
- @classmethod
- def _render_physical_interfaces(cls, network_state, iface_contents):
- physical_filter = renderer.filter_by_physical
- for iface in network_state.iter_interfaces(physical_filter):
- iface_name = iface['name']
- iface_subnets = iface.get("subnets", [])
- iface_cfg = iface_contents[iface_name]
- route_cfg = iface_cfg.routes
- if len(iface_subnets) == 1:
- cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0])
- elif len(iface_subnets) > 1:
- for i, iface_subnet in enumerate(iface_subnets,
- start=len(iface.children)):
- iface_sub_cfg = iface_cfg.copy()
- iface_sub_cfg.name = "%s:%s" % (iface_name, i)
- iface.children.append(iface_sub_cfg)
- cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet)
-
- @classmethod
- def _render_bond_interfaces(cls, network_state, iface_contents):
- bond_filter = renderer.filter_by_type('bond')
- for iface in network_state.iter_interfaces(bond_filter):
- iface_name = iface['name']
- iface_cfg = iface_contents[iface_name]
- cls._render_bonding_opts(iface_cfg, iface)
- iface_master_name = iface['bond-master']
- iface_cfg['MASTER'] = iface_master_name
- iface_cfg['SLAVE'] = True
- # Ensure that the master interface (and any of its children)
- # are actually marked as being bond types...
- master_cfg = iface_contents[iface_master_name]
- master_cfgs = [master_cfg]
- master_cfgs.extend(master_cfg.children)
- for master_cfg in master_cfgs:
- master_cfg['BONDING_MASTER'] = True
- master_cfg.kind = 'bond'
-
- @staticmethod
- def _render_vlan_interfaces(network_state, iface_contents):
- vlan_filter = renderer.filter_by_type('vlan')
- for iface in network_state.iter_interfaces(vlan_filter):
- iface_name = iface['name']
- iface_cfg = iface_contents[iface_name]
- iface_cfg['VLAN'] = True
- iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
-
- @staticmethod
- def _render_dns(network_state, existing_dns_path=None):
- content = resolv_conf.ResolvConf("")
- if existing_dns_path and os.path.isfile(existing_dns_path):
- content = resolv_conf.ResolvConf(util.load_file(existing_dns_path))
- for nameserver in network_state.dns_nameservers:
- content.add_nameserver(nameserver)
- for searchdomain in network_state.dns_searchdomains:
- content.add_search_domain(searchdomain)
- return "\n".join([_make_header(';'), str(content)])
-
- @classmethod
- def _render_bridge_interfaces(cls, network_state, iface_contents):
- bridge_filter = renderer.filter_by_type('bridge')
- for iface in network_state.iter_interfaces(bridge_filter):
- iface_name = iface['name']
- iface_cfg = iface_contents[iface_name]
- iface_cfg.kind = 'bridge'
- for old_key, new_key in cls.bridge_opts_keys:
- if old_key in iface:
- iface_cfg[new_key] = iface[old_key]
- # Is this the right key to get all the connected interfaces?
- for bridged_iface_name in iface.get('bridge_ports', []):
- # Ensure all bridged interfaces are correctly tagged
- # as being bridged to this interface.
- bridged_cfg = iface_contents[bridged_iface_name]
- bridged_cfgs = [bridged_cfg]
- bridged_cfgs.extend(bridged_cfg.children)
- for bridge_cfg in bridged_cfgs:
- bridge_cfg['BRIDGE'] = iface_name
-
- @classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state):
- '''Given state, return /etc/sysconfig files + contents'''
- iface_contents = {}
- for iface in network_state.iter_interfaces():
- iface_name = iface['name']
- iface_cfg = NetInterface(iface_name, base_sysconf_dir)
- cls._render_iface_shared(iface, iface_cfg)
- iface_contents[iface_name] = iface_cfg
- cls._render_physical_interfaces(network_state, iface_contents)
- cls._render_bond_interfaces(network_state, iface_contents)
- cls._render_vlan_interfaces(network_state, iface_contents)
- cls._render_bridge_interfaces(network_state, iface_contents)
- contents = {}
- for iface_name, iface_cfg in iface_contents.items():
- if iface_cfg or iface_cfg.children:
- contents[iface_cfg.path] = iface_cfg.to_string()
- # iterate children under a distinct name: shadowing iface_cfg
- # here would make the routes check below look at the last
- # child instead of the parent interface
- for child_cfg in iface_cfg.children:
- if child_cfg:
- contents[child_cfg.path] = child_cfg.to_string()
- if iface_cfg.routes:
- contents[iface_cfg.routes.path] = iface_cfg.routes.to_string()
- return contents
-
- def render_network_state(self, target, network_state):
- base_sysconf_dir = os.path.join(target, self.sysconf_dir)
- for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state).items():
- util.write_file(path, data)
- if self.dns_path:
- dns_path = os.path.join(target, self.dns_path)
- resolv_content = self._render_dns(network_state,
- existing_dns_path=dns_path)
- util.write_file(dns_path, resolv_content)
- if self.netrules_path:
- netrules_content = self._render_persistent_net(network_state)
- netrules_path = os.path.join(target, self.netrules_path)
- util.write_file(netrules_path, netrules_content)
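
A hedged usage sketch for this renderer (ns stands in for a NetworkState parsed elsewhere; the target directory is illustrative):

    import tempfile

    renderer = Renderer()        # defaults: etc/sysconfig/, etc/resolv.conf
    target = tempfile.mkdtemp()
    renderer.render_network_state(target, ns)
    # under target this writes etc/sysconfig/network-scripts/ifcfg-* and
    # route-* files, etc/resolv.conf, and the persistent-net udev rules
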
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
deleted file mode 100644
index 09188295..00000000
--- a/cloudinit/net/udev.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-
-def compose_udev_equality(key, value):
- """Return a udev comparison clause, like `ACTION=="add"`."""
- assert key == key.upper()
- return '%s=="%s"' % (key, value)
-
-
-def compose_udev_attr_equality(attribute, value):
- """Return a udev attribute comparison clause, like `ATTR{type}=="1"`."""
- assert attribute == attribute.lower()
- return 'ATTR{%s}=="%s"' % (attribute, value)
-
-
-def compose_udev_setting(key, value):
- """Return a udev assignment clause, like `NAME="eth0"`."""
- assert key == key.upper()
- return '%s="%s"' % (key, value)
-
-
-def generate_udev_rule(interface, mac):
- """Return a udev rule to set the name of network interface with `mac`.
-
- The rule ends up as a single line looking something like:
-
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
- ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0"
- """
- rule = ', '.join([
- compose_udev_equality('SUBSYSTEM', 'net'),
- compose_udev_equality('ACTION', 'add'),
- compose_udev_equality('DRIVERS', '?*'),
- compose_udev_attr_equality('address', mac),
- compose_udev_setting('NAME', interface),
- ])
- return '%s\n' % rule
-
-# vi: ts=4 expandtab syntax=python
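
Given the helpers above, a concrete rule comes out as a single line:

    rule = generate_udev_rule('eth0', 'ff:ee:dd:cc:bb:aa')
    assert rule == ('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
                    'ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"\n')
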
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
deleted file mode 100644
index d8698a5d..00000000
--- a/cloudinit/netinfo.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-from cloudinit import log as logging
-from cloudinit import util
-
-from prettytable import PrettyTable
-
-LOG = logging.getLogger()
-
-
-def netdev_info(empty=""):
- fields = ("hwaddr", "addr", "bcast", "mask")
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"])
- devs = {}
- for line in str(ifcfg_out).splitlines():
- if len(line) == 0:
- continue
- if line[0] not in ("\t", " "):
- curdev = line.split()[0]
- devs[curdev] = {"up": False}
- for field in fields:
- devs[curdev][field] = ""
- toks = line.lower().strip().split()
- if toks[0] == "up":
- devs[curdev]['up'] = True
- # If the output of ifconfig doesn't contain the required info in the
- # obvious place, use a regex filter to be sure.
- elif len(toks) > 1:
- if re.search(r"flags=\d+<up,", toks[1]):
- devs[curdev]['up'] = True
-
- fieldpost = ""
- if toks[0] == "inet6":
- fieldpost = "6"
-
- for i in range(len(toks)):
- # older net-tools (ubuntu) show 'inet addr:xx.yy',
- # newer (freebsd and fedora) show 'inet xx.yy'
- # just skip this 'inet' entry. (LP: #1285185)
- try:
- if ((toks[i] in ("inet", "inet6") and
- toks[i + 1].startswith("addr:"))):
- continue
- except IndexError:
- pass
-
- # Couple the different items we're interested in with the correct
- # field since FreeBSD/CentOS/Fedora differ in the output.
- ifconfigfields = {
- "addr:": "addr", "inet": "addr",
- "bcast:": "bcast", "broadcast": "bcast",
- "mask:": "mask", "netmask": "mask",
- "hwaddr": "hwaddr", "ether": "hwaddr",
- "scope": "scope",
- }
- for origfield, field in ifconfigfields.items():
- target = "%s%s" % (field, fieldpost)
- if devs[curdev].get(target, ""):
- continue
- if toks[i] == "%s" % origfield:
- try:
- devs[curdev][target] = toks[i + 1]
- except IndexError:
- pass
- elif toks[i].startswith("%s" % origfield):
- devs[curdev][target] = toks[i][len(field) + 1:]
-
- if empty != "":
- for (_devname, dev) in devs.items():
- for field in dev:
- if dev[field] == "":
- dev[field] = empty
-
- return devs
-
-
-def route_info():
- (route_out, _err) = util.subp(["netstat", "-rn"])
-
- routes = {}
- routes['ipv4'] = []
- routes['ipv6'] = []
-
- entries = route_out.splitlines()[1:]
- for line in entries:
- if not line:
- continue
- toks = line.split()
- # FreeBSD shows 6 items in the routing table:
- # Destination Gateway Flags Refs Use Netif Expire
- # default 10.65.0.1 UGS 0 34920 vtnet0
- #
- # Linux netstat shows 2 more:
- # Destination Gateway Genmask Flags MSS Window irtt Iface
- # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
- if (len(toks) < 6 or toks[0] == "Kernel" or
- toks[0] == "Destination" or toks[0] == "Internet" or
- toks[0] == "Internet6" or toks[0] == "Routing"):
- continue
- if len(toks) < 8:
- toks.append("-")
- toks.append("-")
- toks[7] = toks[5]
- toks[5] = "-"
- entry = {
- 'destination': toks[0],
- 'gateway': toks[1],
- 'genmask': toks[2],
- 'flags': toks[3],
- 'metric': toks[4],
- 'ref': toks[5],
- 'use': toks[6],
- 'iface': toks[7],
- }
- routes['ipv4'].append(entry)
-
- try:
- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"])
- except util.ProcessExecutionError:
- pass
- else:
- entries6 = route_out6.splitlines()[1:]
- for line in entries6:
- if not line:
- continue
- toks = line.split()
- if (len(toks) < 6 or toks[0] == "Kernel" or
- toks[0] == "Proto" or toks[0] == "Active"):
- continue
- entry = {
- 'proto': toks[0],
- 'recv-q': toks[1],
- 'send-q': toks[2],
- 'local address': toks[3],
- 'foreign address': toks[4],
- 'state': toks[5],
- }
- routes['ipv6'].append(entry)
- return routes
-
-
-def getgateway():
- try:
- routes = route_info()
- except Exception:
- pass
- else:
- for r in routes.get('ipv4', []):
- if r['flags'].find("G") >= 0:
- return "%s[%s]" % (r['gateway'], r['iface'])
- return None
-
-
-def netdev_pformat():
- lines = []
- try:
- netdev = netdev_info(empty=".")
- except Exception:
- lines.append(util.center("Net device info failed", '!', 80))
- else:
- fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
- tbl = PrettyTable(fields)
- for (dev, d) in netdev.items():
- tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
- if d.get('addr6'):
- tbl.add_row([dev, d["up"],
- d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
- netdev_s = tbl.get_string()
- max_len = len(max(netdev_s.splitlines(), key=len))
- header = util.center("Net device info", "+", max_len)
- lines.extend([header, netdev_s])
- return "\n".join(lines)
-
-
-def route_pformat():
- lines = []
- try:
- routes = route_info()
- except Exception as e:
- lines.append(util.center('Route info failed', '!', 80))
- util.logexc(LOG, "Route info failed: %s" % e)
- else:
- if routes.get('ipv4'):
- fields_v4 = ['Route', 'Destination', 'Gateway',
- 'Genmask', 'Interface', 'Flags']
- tbl_v4 = PrettyTable(fields_v4)
- for (n, r) in enumerate(routes.get('ipv4')):
- route_id = str(n)
- tbl_v4.add_row([route_id, r['destination'],
- r['gateway'], r['genmask'],
- r['iface'], r['flags']])
- route_s = tbl_v4.get_string()
- max_len = len(max(route_s.splitlines(), key=len))
- header = util.center("Route IPv4 info", "+", max_len)
- lines.extend([header, route_s])
- if routes.get('ipv6'):
- fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
- 'Local Address', 'Foreign Address', 'State']
- tbl_v6 = PrettyTable(fields_v6)
- for (n, r) in enumerate(routes.get('ipv6')):
- route_id = str(n)
- tbl_v6.add_row([route_id, r['proto'],
- r['recv-q'], r['send-q'],
- r['local address'], r['foreign address'],
- r['state']])
- route_s = tbl_v6.get_string()
- max_len = len(max(route_s.splitlines(), key=len))
- header = util.center("Route IPv6 info", "+", max_len)
- lines.extend([header, route_s])
- return "\n".join(lines)
-
-
-def debug_info(prefix='ci-info: '):
- lines = []
- netdev_lines = netdev_pformat().splitlines()
- if prefix:
- for line in netdev_lines:
- lines.append("%s%s" % (prefix, line))
- else:
- lines.extend(netdev_lines)
- route_lines = route_pformat().splitlines()
- if prefix:
- for line in route_lines:
- lines.append("%s%s" % (prefix, line))
- else:
- lines.extend(route_lines)
- return "\n".join(lines)
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
deleted file mode 100644
index f6609d6f..00000000
--- a/cloudinit/patcher.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import imp
-import logging
-import sys
-
-# Default fallback format
-FALL_FORMAT = ('FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: ' +
- '%(message)s')
-
-
-class QuietStreamHandler(logging.StreamHandler):
- def handleError(self, record):
- pass
-
-
-def _patch_logging():
- # Replace 'handleError' with one that will be more
- # tolerant of errors in that it can avoid
- # re-notifying on exceptions and when errors
- # do occur, it can at least try to write to
- # sys.stderr using a fallback logger
- fallback_handler = QuietStreamHandler(sys.stderr)
- fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT))
-
- def handleError(self, record):
- try:
- fallback_handler.handle(record)
- fallback_handler.flush()
- except IOError:
- pass
- setattr(logging.Handler, 'handleError', handleError)
-
-
-def patch():
- imp.acquire_lock()
- try:
- _patch_logging()
- finally:
- imp.release_lock()
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
deleted file mode 100644
index 04368ddf..00000000
--- a/cloudinit/registry.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-# vi: ts=4 expandtab
-import copy
-
-
-class DictRegistry(object):
- """A simple registry for a mapping of objects."""
-
- def __init__(self):
- self.reset()
-
- def reset(self):
- self._items = {}
-
- def register_item(self, key, item):
- """Add item to the registry."""
- if key in self._items:
- raise ValueError(
- 'Item already registered with key {0}'.format(key))
- self._items[key] = item
-
- def unregister_item(self, key, force=True):
- """Remove item from the registry."""
- if key in self._items:
- del self._items[key]
- elif not force:
- raise KeyError("%s: key not present to unregister" % key)
-
- @property
- def registered_items(self):
- """All the items that have been registered.
-
- This cannot be used to modify the contents of the registry.
- """
- return copy.copy(self._items)
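
A minimal sketch of the registry semantics (the key and item are arbitrary):

    registry = DictRegistry()
    sentinel = object()
    registry.register_item('example', sentinel)
    assert registry.registered_items == {'example': sentinel}
    registry.unregister_item('example')   # removes the item
    registry.unregister_item('example')   # no-op: force defaults to True
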
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
deleted file mode 100644
index 6b41ae61..00000000
--- a/cloudinit/reporting/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-"""
-cloud-init reporting framework
-
-The reporting framework is intended to allow all parts of cloud-init to
-report events in a structured manner.
-"""
-
-from ..registry import DictRegistry
-from .handlers import available_handlers
-
-DEFAULT_CONFIG = {
- 'logging': {'type': 'log'},
-}
-
-
-def update_configuration(config):
- """Update the instanciated_handler_registry.
-
- :param config:
- The dictionary containing changes to apply. If a key is given
- with a False-ish value, the registered handler matching that name
- will be unregistered.
- """
- for handler_name, handler_config in config.items():
- if not handler_config:
- instantiated_handler_registry.unregister_item(
- handler_name, force=True)
- continue
- handler_config = handler_config.copy()
- cls = available_handlers.registered_items[handler_config.pop('type')]
- instantiated_handler_registry.unregister_item(handler_name)
- instance = cls(**handler_config)
- instantiated_handler_registry.register_item(handler_name, instance)
-
-
-instantiated_handler_registry = DictRegistry()
-update_configuration(DEFAULT_CONFIG)
-
-# vi: ts=4 expandtab
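
For example, a config that drops the default log handler and adds a webhook (the endpoint URL is made up):

    update_configuration({
        'logging': None,   # False-ish value: unregister this handler
        'status': {'type': 'webhook',
                   'endpoint': 'http://example.invalid/events'},
    })
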
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
deleted file mode 100644
index df2b9b4a..00000000
--- a/cloudinit/reporting/events.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-"""
-Events for reporting.
-
-The events here are designed to be used with reporting.
-They can be published to registered handlers with report_event.
-"""
-import base64
-import os.path
-import time
-
-from . import instantiated_handler_registry
-
-FINISH_EVENT_TYPE = 'finish'
-START_EVENT_TYPE = 'start'
-
-DEFAULT_EVENT_ORIGIN = 'cloudinit'
-
-
-class _nameset(set):
- def __getattr__(self, name):
- if name in self:
- return name
- raise AttributeError("%s not a valid value" % name)
-
-
-status = _nameset(("SUCCESS", "WARN", "FAIL"))
-
-
-class ReportingEvent(object):
- """Encapsulation of event formatting."""
-
- def __init__(self, event_type, name, description,
- origin=DEFAULT_EVENT_ORIGIN, timestamp=None):
- self.event_type = event_type
- self.name = name
- self.description = description
- self.origin = origin
- if timestamp is None:
- timestamp = time.time()
- self.timestamp = timestamp
-
- def as_string(self):
- """The event represented as a string."""
- return '{0}: {1}: {2}'.format(
- self.event_type, self.name, self.description)
-
- def as_dict(self):
- """The event represented as a dictionary."""
- return {'name': self.name, 'description': self.description,
- 'event_type': self.event_type, 'origin': self.origin,
- 'timestamp': self.timestamp}
-
-
-class FinishReportingEvent(ReportingEvent):
-
- def __init__(self, name, description, result=status.SUCCESS,
- post_files=None):
- super(FinishReportingEvent, self).__init__(
- FINISH_EVENT_TYPE, name, description)
- self.result = result
- if post_files is None:
- post_files = []
- self.post_files = post_files
- if result not in status:
- raise ValueError("Invalid result: %s" % result)
-
- def as_string(self):
- return '{0}: {1}: {2}: {3}'.format(
- self.event_type, self.name, self.result, self.description)
-
- def as_dict(self):
- """The event represented as json friendly."""
- data = super(FinishReportingEvent, self).as_dict()
- data['result'] = self.result
- if self.post_files:
- data['files'] = _collect_file_info(self.post_files)
- return data
-
-
-def report_event(event):
- """Report an event to all registered event handlers.
-
- This should generally be called via one of the other functions in
- the reporting module.
-
- :param event:
- The event to publish; an instance of :py:class:`ReportingEvent`
- (or a subclass) from this module.
- """
- for _, handler in instantiated_handler_registry.registered_items.items():
- handler.publish_event(event)
-
-
-def report_finish_event(event_name, event_description,
- result=status.SUCCESS, post_files=None):
- """Report a "finish" event.
-
- See :py:func:`.report_event` for parameter details.
- """
- event = FinishReportingEvent(event_name, event_description, result,
- post_files=post_files)
- return report_event(event)
-
-
-def report_start_event(event_name, event_description):
- """Report a "start" event.
-
- :param event_name:
- The name of the event; this should be a topic which events would
- share (e.g. it will be the same for start and finish events).
-
- :param event_description:
- A human-readable description of the event that has occurred.
- """
- event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
- return report_event(event)
-
-
-class ReportEventStack(object):
- """Context Manager for using :py:func:`report_event`
-
- This enables calling :py:func:`report_start_event` and
- :py:func:`report_finish_event` through a context manager.
-
- :param name:
- the name of the event
-
- :param description:
- the event's description, passed on to :py:func:`report_start_event`
-
- :param message:
- the description to use for the finish event; defaults to
- the value of ``description``.
-
- :param parent:
- :type parent: :py:class:ReportEventStack or None
- The parent of this event. The parent is populated with
- results of all its children. The name used in reporting
- is <parent.name>/<name>
-
- :param reporting_enabled:
- Indicates if reporting events should be generated.
- If not provided, defaults to the parent's value, or True if no parent
- is provided.
-
- :param result_on_exception:
- The result value to set if an exception is caught; the
- default is FAIL.
- """
- def __init__(self, name, description, message=None, parent=None,
- reporting_enabled=None, result_on_exception=status.FAIL,
- post_files=None):
- self.parent = parent
- self.name = name
- self.description = description
- self.message = message
- self.result_on_exception = result_on_exception
- self.result = status.SUCCESS
- if post_files is None:
- post_files = []
- self.post_files = post_files
-
- # use the parent's reporting value if not provided
- if reporting_enabled is None:
- if parent:
- reporting_enabled = parent.reporting_enabled
- else:
- reporting_enabled = True
- self.reporting_enabled = reporting_enabled
-
- if parent:
- self.fullname = '/'.join((parent.fullname, name,))
- else:
- self.fullname = self.name
- self.children = {}
-
- def __repr__(self):
- return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
- (self.name, self.description, self.reporting_enabled))
-
- def __enter__(self):
- self.result = status.SUCCESS
- if self.reporting_enabled:
- report_start_event(self.fullname, self.description)
- if self.parent:
- self.parent.children[self.name] = (None, None)
- return self
-
- def _childrens_finish_info(self):
- for cand_result in (status.FAIL, status.WARN):
- for name, (value, msg) in self.children.items():
- if value == cand_result:
- return (value, self.message)
- return (self.result, self.message)
-
- @property
- def result(self):
- return self._result
-
- @result.setter
- def result(self, value):
- if value not in status:
- raise ValueError("'%s' not a valid result" % value)
- self._result = value
-
- @property
- def message(self):
- if self._message is not None:
- return self._message
- return self.description
-
- @message.setter
- def message(self, value):
- self._message = value
-
- def _finish_info(self, exc):
- # return tuple of description, and value
- if exc:
- return (self.result_on_exception, self.message)
- return self._childrens_finish_info()
-
- def __exit__(self, exc_type, exc_value, traceback):
- (result, msg) = self._finish_info(exc_value)
- if self.parent:
- self.parent.children[self.name] = (result, msg)
- if self.reporting_enabled:
- report_finish_event(self.fullname, msg, result,
- post_files=self.post_files)
-
-
-def _collect_file_info(files):
- if not files:
- return None
- ret = []
- for fname in files:
- if not os.path.isfile(fname):
- content = None
- else:
- with open(fname, "rb") as fp:
- content = base64.b64encode(fp.read()).decode()
- ret.append({'path': fname, 'content': content,
- 'encoding': 'base64'})
- return ret
-
-# vi: ts=4 expandtab syntax=python
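
A short usage sketch of the context manager (the event names are illustrative):

    from cloudinit.reporting import events

    with events.ReportEventStack('modules', 'running config modules') as stack:
        # on entry this published: start: modules: running config modules
        stack.result = events.status.WARN   # downgrade the finish result
    # on exit this published: finish: modules: WARN: running config modules
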
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
deleted file mode 100644
index dff20ecb..00000000
--- a/cloudinit/reporting/handlers.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# vi: ts=4 expandtab
-
-import abc
-import json
-import six
-
-from cloudinit import log as logging
-from cloudinit.registry import DictRegistry
-from cloudinit import (url_helper, util)
-
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ReportingHandler(object):
- """Base class for report handlers.
-
- Implement :meth:`~publish_event` for controlling what
- the handler does with an event.
- """
-
- @abc.abstractmethod
- def publish_event(self, event):
- """Publish an event."""
-
-
-class LogHandler(ReportingHandler):
- """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
-
- def __init__(self, level="DEBUG"):
- super(LogHandler, self).__init__()
- if isinstance(level, int):
- pass
- else:
- input_level = level
- try:
- level = getattr(logging, level.upper())
- except Exception:
- LOG.warn("invalid level '%s', using WARN", input_level)
- level = logging.WARN
- self.level = level
-
- def publish_event(self, event):
- logger = logging.getLogger(
- '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
- logger.log(self.level, event.as_string())
-
-
-class PrintHandler(ReportingHandler):
- """Print the event as a string."""
-
- def publish_event(self, event):
- print(event.as_string())
-
-
-class WebHookHandler(ReportingHandler):
- def __init__(self, endpoint, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None, timeout=None,
- retries=None):
- super(WebHookHandler, self).__init__()
-
- if any([consumer_key, token_key, token_secret, consumer_secret]):
- self.oauth_helper = url_helper.OauthUrlHelper(
- consumer_key=consumer_key, token_key=token_key,
- token_secret=token_secret, consumer_secret=consumer_secret)
- else:
- self.oauth_helper = None
- self.endpoint = endpoint
- self.timeout = timeout
- self.retries = retries
- self.ssl_details = util.fetch_ssl_details()
-
- def publish_event(self, event):
- if self.oauth_helper:
- readurl = self.oauth_helper.readurl
- else:
- readurl = url_helper.readurl
- try:
- return readurl(
- self.endpoint, data=json.dumps(event.as_dict()),
- timeout=self.timeout,
- retries=self.retries, ssl_details=self.ssl_details)
- except Exception:
- LOG.warn("failed posting event: %s" % event.as_string())
-
-
-available_handlers = DictRegistry()
-available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', PrintHandler)
-available_handlers.register_item('webhook', WebHookHandler)
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
deleted file mode 100644
index eba5d056..00000000
--- a/cloudinit/safeyaml.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-
-class _CustomSafeLoader(yaml.SafeLoader):
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
-_CustomSafeLoader.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- _CustomSafeLoader.construct_python_unicode)
-
-
-def load(blob):
- return yaml.load(blob, Loader=_CustomSafeLoader)
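
A minimal sketch of what the custom constructor buys (assumes PyYAML is installed):

    assert load("name: eth0") == {'name': 'eth0'}
    # plain yaml.SafeLoader rejects the python/unicode tag outright;
    # this loader treats it as an ordinary scalar instead
    assert load("!!python/unicode 'eth0'") == 'eth0'
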
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
deleted file mode 100644
index af45c13e..00000000
--- a/cloudinit/serial.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-from __future__ import absolute_import
-
-try:
- from serial import Serial
-except ImportError:
- # On older versions of python (i.e. 2.6) pyserial may not exist,
- # work, or be installed, so provide a dummy/fake Serial class
- # that raises when actually used...
- class Serial(object):
- def __init__(self, *args, **kwargs):
- pass
-
- @staticmethod
- def isOpen():
- return False
-
- @staticmethod
- def write(data):
- raise IOError("Unable to perform serial `write` operation,"
- " pyserial not installed.")
-
- @staticmethod
- def readline():
- raise IOError("Unable to perform serial `readline` operation,"
- " pyserial not installed.")
-
- @staticmethod
- def flush():
- raise IOError("Unable to perform serial `flush` operation,"
- " pyserial not installed.")
-
- @staticmethod
- def read(size=1):
- raise IOError("Unable to perform serial `read` operation,"
- " pyserial not installed.")
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
deleted file mode 100644
index 8c258ea1..00000000
--- a/cloudinit/settings.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Set and read for determining the cloud config file location
-CFG_ENV_NAME = "CLOUD_CFG"
-
-# This is expected to be a yaml formatted file
-CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
-
-# What you get if no config is provided
-CFG_BUILTIN = {
- 'datasource_list': [
- 'NoCloud',
- 'ConfigDrive',
- 'OpenNebula',
- 'Azure',
- 'AltCloud',
- 'OVF',
- 'MAAS',
- 'GCE',
- 'OpenStack',
- 'Ec2',
- 'CloudSigma',
- 'CloudStack',
- 'SmartOS',
- 'Bigstep',
- # At the end to act as a 'catch' when none of the above work...
- 'None',
- ],
- 'def_log_file': '/var/log/cloud-init.log',
- 'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm'],
- 'system_info': {
- 'paths': {
- 'cloud_dir': '/var/lib/cloud',
- 'templates_dir': '/etc/cloud/templates/',
- },
- 'distro': 'ubuntu',
- },
- 'vendor_data': {'enabled': True, 'prefix': []},
-}
-
-# Valid frequencies of handlers/modules
-PER_INSTANCE = "once-per-instance"
-PER_ALWAYS = "always"
-PER_ONCE = "once"
-
-# Used to sanity check incoming handlers/modules frequencies
-FREQUENCIES = [PER_INSTANCE, PER_ALWAYS, PER_ONCE]
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
deleted file mode 100644
index 0d95f506..00000000
--- a/cloudinit/signal_handler.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import inspect
-import signal
-import sys
-
-from six import StringIO
-
-from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import version as vr
-
-LOG = logging.getLogger(__name__)
-
-
-BACK_FRAME_TRACE_DEPTH = 3
-EXIT_FOR = {
- signal.SIGINT: ('Cloud-init %(version)s received SIGINT, exiting...', 1),
- signal.SIGTERM: ('Cloud-init %(version)s received SIGTERM, exiting...', 1),
- # Can't be caught...
- # signal.SIGKILL: ('Cloud-init killed, exiting...', 1),
- signal.SIGABRT: ('Cloud-init %(version)s received SIGABRT, exiting...', 1),
-}
-
-
-def _pprint_frame(frame, depth, max_depth, contents):
- if depth > max_depth or not frame:
- return
- frame_info = inspect.getframeinfo(frame)
- prefix = " " * (depth * 2)
- contents.write("%sFilename: %s\n" % (prefix, frame_info.filename))
- contents.write("%sFunction: %s\n" % (prefix, frame_info.function))
- contents.write("%sLine number: %s\n" % (prefix, frame_info.lineno))
- _pprint_frame(frame.f_back, depth + 1, max_depth, contents)
-
-
-def _handle_exit(signum, frame):
- (msg, rc) = EXIT_FOR[signum]
- msg = msg % ({'version': vr.version()})
- contents = StringIO()
- contents.write("%s\n" % (msg))
- _pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
- util.multi_log(contents.getvalue(),
- console=True, stderr=False, log=LOG)
- sys.exit(rc)
-
-
-def attach_handlers():
- for signum in EXIT_FOR.keys():
- signal.signal(signum, _handle_exit)
- return len(EXIT_FOR)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
deleted file mode 100644
index a3529609..00000000
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joe VLcek <JVLcek@RedHat.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-This file contains code used to gather the user data passed to an
-instance on RHEV-M and vSphere.
-'''
-
-import errno
-import os
-import os.path
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.util import ProcessExecutionError
-
-LOG = logging.getLogger(__name__)
-
-# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
-
-# Shell command lists
-CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
-
-META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
-}
-
-
-def read_user_data_callback(mount_dir):
- '''
- Description:
- This callback will be applied by util.mount_cb() on the mounted
- file.
-
- The Deltacloud file name contains 'deltacloud'. Those not using
- Deltacloud but instead instrumenting the injection themselves
- may drop 'deltacloud' from the file name.
-
- Input:
- mount_dir - Mount directory
-
- Returns:
- User Data
-
- '''
-
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
-
- # First try deltacloud_user_data_file. On failure try user_data_file.
- try:
- user_data = util.load_file(deltacloud_user_data_file).strip()
- except IOError:
- try:
- user_data = util.load_file(user_data_file).strip()
- except IOError:
- util.logexc(LOG, 'Failed accessing user data file.')
- return None
-
- return user_data
-
-
-class DataSourceAltCloud(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_cloud_type(self):
- '''
- Description:
- Get the type for the cloud back end this instance is running on
- by examining the string returned by reading the dmi data.
-
- Input:
- None
-
- Returns:
- One of the following strings:
- 'RHEV', 'VSPHERE' or 'UNKNOWN'
-
- '''
-
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data is not available on ARM processors
- LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
- return 'UNKNOWN'
-
- system_name = util.read_dmi_data("system-product-name")
- if not system_name:
- return 'UNKNOWN'
-
- sys_name = system_name.upper()
-
- if sys_name.startswith('RHEV'):
- return 'RHEV'
-
- if sys_name.startswith('VMWARE'):
- return 'VSPHERE'
-
- return 'UNKNOWN'
-
- def get_data(self):
- '''
- Description:
- User Data is passed to the launching instance which
- is used to perform instance configuration.
-
- Cloud providers expose the user data differently.
- It is necessary to determine which cloud provider
- the current instance is running on to determine
- how to access the user data. Images built with
- image factory will contain a CLOUD_INFO_FILE which
- contains a string identifying the cloud provider.
-
- Images not built with Imagefactory will try to
- determine what the cloud provider is based on system
- information.
- '''
-
- LOG.debug('Invoked get_data()')
-
- if os.path.exists(CLOUD_INFO_FILE):
- try:
- cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
- except IOError:
- util.logexc(LOG, 'Unable to access cloud info file at %s.',
- CLOUD_INFO_FILE)
- return False
- else:
- cloud_type = self.get_cloud_type()
-
- LOG.debug('cloud_type: %s', cloud_type)
-
- if 'RHEV' in cloud_type:
- if self.user_data_rhevm():
- return True
- elif 'VSPHERE' in cloud_type:
- if self.user_data_vsphere():
- return True
- else:
- # there was no recognized alternate cloud type
- # indicating this handler should not be used.
- return False
-
- # No user data found
- util.logexc(LOG, 'Failed accessing user data.')
- return False
-
- def user_data_rhevm(self):
- '''
- RHEVM specific userdata read
-
- If on RHEV-M the user data will be contained on the
- floppy device in file <user_data_file>
- To access it:
- modprobe floppy
-
- Leverage util.mount_cb to:
- mkdir <tmp mount dir>
- mount /dev/fd0 <tmp mount dir>
- The call back passed to util.mount_cb will do:
- read <tmp mount dir>/<user_data_file>
- '''
-
- return_str = None
-
- # modprobe floppy
- try:
- cmd = CMD_PROBE_FLOPPY
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- str(_err))
- return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
- return False
-
- floppy_dev = '/dev/fd0'
-
- # udevadm settle for floppy device
- try:
- cmd = CMD_UDEVADM_SETTLE
- cmd.append('--exit-if-exists=' + floppy_dev)
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- str(_err))
- return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- str(_err))
- return False
-
- try:
- return_str = util.mount_cb(floppy_dev, read_user_data_callback)
- except OSError as err:
- if err.errno != errno.ENOENT:
- raise
- except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user data",
- floppy_dev)
-
- self.userdata_raw = return_str
- self.metadata = META_DATA_NOT_SUPPORTED
-
- if return_str:
- return True
- else:
- return False
-
- def user_data_vsphere(self):
- '''
- vSphere specific userdata read
-
- If on vSphere the user data will be contained on the
- cdrom device in file <user_data_file>
- To access it:
- Leverage util.mount_cb to:
- mkdir <tmp mount dir>
- mount <cdrom device> <tmp mount dir>
- The call back passed to util.mount_cb will do:
- read <tmp mount dir>/<user_data_file>
- '''
-
- return_str = None
- cdrom_list = util.find_devs_with('LABEL=CDROM')
- for cdrom_dev in cdrom_list:
- try:
- return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
- if return_str:
- break
- except OSError as err:
- if err.errno != errno.ENOENT:
- raise
- except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", cdrom_dev)
-
- self.userdata_raw = return_str
- self.metadata = META_DATA_NOT_SUPPORTED
-
- if return_str:
- return True
- else:
- return False
-
-# Used to match classes to dependencies
-# Source DataSourceAltCloud does not really depend on networking.
-# In the future 'dsmode' like behavior can be added to offer user
-# the ability to run before networking.
-datasources = [
- (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
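Both user_data_rhevm() and user_data_vsphere() hinge on the same
mount-and-read callback pattern. A standard-library-only sketch of that
pattern; util.mount_cb itself handles more filesystem types and error
cases, and the device path here is a placeholder:

    import os
    import subprocess
    import tempfile

    def mount_cb(device, callback):
        mnt = tempfile.mkdtemp()
        try:
            subprocess.check_call(['mount', device, mnt])
            try:
                return callback(mnt)
            finally:
                subprocess.call(['umount', mnt])
        finally:
            os.rmdir(mnt)

    def read_user_data(mount_dir):
        with open(os.path.join(mount_dir, 'user-data.txt')) as fp:
            return fp.read().strip()

    # user_data = mount_cb('/dev/fd0', read_user_data)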
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
deleted file mode 100644
index 8c7e8673..00000000
--- a/cloudinit/sources/DataSourceAzure.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import contextlib
-import crypt
-import fnmatch
-import os
-import os.path
-import time
-import xml.etree.ElementTree as ET
-
-from xml.dom import minidom
-
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-DS_NAME = 'Azure'
-DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
-AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = [
- 'sh', '-xc',
- "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
-]
-
-BUILTIN_DS_CONFIG = {
- 'agent_command': AGENT_START,
- 'data_dir': "/var/lib/waagent",
- 'set_hostname': True,
- 'hostname_bounce': {
- 'interface': 'eth0',
- 'policy': True,
- 'command': BOUNCE_COMMAND,
- 'hostname_command': 'hostname',
- },
- 'disk_aliases': {'ephemeral0': '/dev/sdb'},
-}
-
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
- },
- 'fs_setup': [{'filesystem': 'ext4',
- 'device': 'ephemeral0.1',
- 'replace_fs': 'ntfs'}],
-}
-
-DS_CFG_PATH = ['datasource', DS_NAME]
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
-
-# The redacted password fails to meet password complexity requirements
-# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
-
-
-def get_hostname(hostname_command='hostname'):
- return util.subp(hostname_command, capture=True)[0].strip()
-
-
-def set_hostname(hostname, hostname_command='hostname'):
- util.subp([hostname_command, hostname])
-
-
-@contextlib.contextmanager
-def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
- """
- Set a temporary hostname, restoring the previous hostname on exit.
-
- Will have the value of the previous hostname when used as a context
- manager, or None if the hostname was not changed.
- """
- policy = cfg['hostname_bounce']['policy']
- previous_hostname = get_hostname(hostname_command)
- if (not util.is_true(cfg.get('set_hostname')) or
- util.is_false(policy) or
- (previous_hostname == temp_hostname and policy != 'force')):
- yield None
- return
- set_hostname(temp_hostname, hostname_command)
- try:
- yield previous_hostname
- finally:
- set_hostname(previous_hostname, hostname_command)
-
-
-class DataSourceAzureNet(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'azure')
- self.cfg = {}
- self.seed = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_metadata_from_agent(self):
- temp_hostname = self.metadata.get('local-hostname')
- hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
- with temporary_hostname(temp_hostname, self.ds_cfg,
- hostname_command=hostname_command) \
- as previous_hostname:
- if (previous_hostname is not None and
- util.is_true(self.ds_cfg.get('set_hostname'))):
- cfg = self.ds_cfg['hostname_bounce']
- try:
- perform_hostname_bounce(hostname=temp_hostname,
- cfg=cfg,
- prev_hostname=previous_hostname)
- except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
- util.logexc(LOG, "handling set_hostname failed")
-
- try:
- invoke_agent(self.ds_cfg['agent_command'])
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- self.ds_cfg['agent_command'])
-
- ddir = self.ds_cfg['data_dir']
-
- fp_files = []
- key_value = None
- for pk in self.cfg.get('_pubkeys', []):
- if pk.get('value', None):
- key_value = pk['value']
- LOG.debug("ssh authentication: using value from fabric")
- else:
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: "
- "using fingerprint from fabirc")
-
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(fp_files,))
- if len(missing):
- LOG.warn("Did not find files, but going on: %s", missing)
-
- metadata = {}
- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
- return metadata
-
- def get_data(self):
- # azure removes/ejects the cdrom containing the ovf-env.xml
- # file on reboot. So, in order to successfully reboot we
- # need to look in the datadir and consider that valid
- ddir = self.ds_cfg['data_dir']
-
- candidates = [self.seed_dir]
- candidates.extend(list_possible_azure_ds_devs())
- if ddir:
- candidates.append(ddir)
-
- found = None
-
- for cdev in candidates:
- try:
- if cdev.startswith("/dev/"):
- ret = util.mount_cb(cdev, load_azure_ds_dir)
- else:
- ret = load_azure_ds_dir(cdev)
-
- except NonAzureDataSource:
- continue
- except BrokenAzureDataSource as exc:
- raise exc
- except util.MountFailedError:
- LOG.warn("%s was not mountable", cdev)
- continue
-
- (md, self.userdata_raw, cfg, files) = ret
- self.seed = cdev
- self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
- found = cdev
-
- LOG.debug("found datasource in %s", cdev)
- break
-
- if not found:
- return False
-
- if found == ddir:
- LOG.debug("using files cached in %s", ddir)
-
- # azure / hyper-v provides random data here
- seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
- quiet=True, decode=False)
- if seed:
- self.metadata['random_seed'] = seed
-
- # now update ds_cfg to reflect contents passed in via config
- user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
- self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
-
- # walinux agent writes files world readable, but expects
- # the directory to be protected.
- write_files(ddir, files, dirmode=0o700)
-
- if self.ds_cfg['agent_command'] == '__builtin__':
- metadata_func = get_metadata_from_fabric
- else:
- metadata_func = self.get_metadata_from_agent
- try:
- fabric_data = metadata_func()
- except Exception as exc:
- LOG.info("Error communicating with Azure fabric; assume we aren't"
- " on Azure.", exc_info=True)
- return False
-
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
- self.metadata.update(fabric_data)
-
- found_ephemeral = find_fabric_formatted_ephemeral_disk()
- if found_ephemeral:
- self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
- LOG.debug("using detected ephemeral0 of %s", found_ephemeral)
-
- cc_modules_override = support_new_ephemeral(self.sys_cfg)
- if cc_modules_override:
- self.cfg['cloud_config_modules'] = cc_modules_override
-
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
-
- def get_config_obj(self):
- return self.cfg
-
- def check_instance_id(self, sys_cfg):
- # quickly check (local only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
-
-def count_files(mp):
- return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*'))
-
-
-def find_fabric_formatted_ephemeral_part():
- """
- Locate the first fabric formatted ephemeral device.
- """
- potential_locations = ['/dev/disk/cloud/azure_resource-part1',
- '/dev/disk/azure/resource-part1']
- device_location = None
- for potential_location in potential_locations:
- if os.path.exists(potential_location):
- device_location = potential_location
- break
- if device_location is None:
- return None
- ntfs_devices = util.find_devs_with("TYPE=ntfs")
- real_device = os.path.realpath(device_location)
- if real_device in ntfs_devices:
- return device_location
- return None
-
-
-def find_fabric_formatted_ephemeral_disk():
- """
- Get the ephemeral disk.
- """
- part_dev = find_fabric_formatted_ephemeral_part()
- if part_dev:
- return part_dev.split('-')[0]
- return None
-
-
-def support_new_ephemeral(cfg):
- """
- Windows Azure re-provisions ephemeral devices across boots; an ephemeral
- device may be presented as a fresh device, or not.
-
- Since the knowledge of when a disk is supposed to be plowed under is
- specific to Windows Azure, the logic resides here in the datasource. When a
- new ephemeral device is detected, cloud-init overrides the default
- frequency for both disk-setup and mounts for the current boot only.
- """
- device = find_fabric_formatted_ephemeral_part()
- if not device:
- LOG.debug("no default fabric formated ephemeral0.1 found")
- return None
- LOG.debug("fabric formated ephemeral0.1 device at %s", device)
-
- file_count = 0
- try:
- file_count = util.mount_cb(device, count_files)
- except Exception:
- return None
- LOG.debug("fabric prepared ephmeral0.1 has %s files on it", file_count)
-
- if file_count >= 1:
- LOG.debug("fabric prepared ephemeral0.1 will be preserved")
- return None
- else:
- # if device was already mounted, then we need to unmount it
- # race conditions could allow for a check-then-unmount
- # to have a false positive. so just unmount and then check.
- try:
- util.subp(['umount', device])
- except util.ProcessExecutionError as e:
- if device in util.mounts():
- LOG.warn("Failed to unmount %s, will not reformat.", device)
- LOG.debug("Failed umount: %s", e)
- return None
-
- LOG.debug("cloud-init will format ephemeral0.1 this boot.")
- LOG.debug("setting disk_setup and mounts modules 'always' for this boot")
-
- cc_modules = cfg.get('cloud_config_modules')
- if not cc_modules:
- return None
-
- mod_list = []
- for mod in cc_modules:
- if mod in ("disk_setup", "mounts"):
- mod_list.append([mod, PER_ALWAYS])
- LOG.debug("set module '%s' to 'always' for this boot", mod)
- else:
- mod_list.append(mod)
- return mod_list
-
-
-def perform_hostname_bounce(hostname, cfg, prev_hostname):
- # set the hostname to 'hostname' if it is not already set to that.
- # then, if policy is not off, bounce the interface using command
- command = cfg['command']
- interface = cfg['interface']
- policy = cfg['policy']
-
- msg = ("hostname=%s policy=%s interface=%s" %
- (hostname, policy, interface))
- env = os.environ.copy()
- env['interface'] = interface
- env['hostname'] = hostname
- env['old_hostname'] = prev_hostname
-
- if command == "builtin":
- command = BOUNCE_COMMAND
-
- LOG.debug("pubhname: publishing hostname [%s]", msg)
- shell = not isinstance(command, (list, tuple))
- # capture=False, see comments in bug 1202758 and bug 1206164.
- util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
-
-
-def crtfile_to_pubkey(fname, data=None):
- pipeline = ('openssl x509 -noout -pubkey < "$0" |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname],
- capture=True, data=data)
- return out.rstrip()
-
-
-def pubkeys_from_crt_files(flist):
- pubkeys = []
- errors = []
- for fname in flist:
- try:
- pubkeys.append(crtfile_to_pubkey(fname))
- except util.ProcessExecutionError:
- errors.append(fname)
-
- if errors:
- LOG.warn("failed to convert the crt files to pubkey: %s", errors)
-
- return pubkeys
-
-
-def wait_for_files(flist, maxwait=60, naplen=.5):
- need = set(flist)
- waited = 0
- while waited < maxwait:
- need -= set([f for f in need if os.path.exists(f)])
- if len(need) == 0:
- return []
- time.sleep(naplen)
- waited += naplen
- return need
-
-
-def write_files(datadir, files, dirmode=None):
-
- def _redact_password(cnt, fname):
- """Azure provides the UserPassword in plain text. So we redact it"""
- try:
- root = ET.fromstring(cnt)
- for elem in root.iter():
- if ('UserPassword' in elem.tag and
- elem.text != DEF_PASSWD_REDACTION):
- elem.text = DEF_PASSWD_REDACTION
- return ET.tostring(root)
- except Exception:
- LOG.critical("failed to redact userpassword in %s", fname)
- return cnt
-
- if not datadir:
- return
- if not files:
- files = {}
- util.ensure_dir(datadir, dirmode)
- for (name, content) in files.items():
- fname = os.path.join(datadir, name)
- if 'ovf-env.xml' in name:
- content = _redact_password(content, fname)
- util.write_file(filename=fname, content=content, mode=0o600)
-
-
-def invoke_agent(cmd):
- # this is a function itself to simplify patching it for test
- if cmd:
- LOG.debug("invoking agent: %s", cmd)
- util.subp(cmd, shell=(not isinstance(cmd, list)))
- else:
- LOG.debug("not invoking agent")
-
-
-def find_child(node, filter_func):
- ret = []
- if not node.hasChildNodes():
- return ret
- for child in node.childNodes:
- if filter_func(child):
- ret.append(child)
- return ret
-
-
-def load_azure_ovf_pubkeys(sshnode):
- # This parses an 'SSH' node formatted like below, and returns
- # a list of dicts.
- # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
- # 'path': 'where/to/go', 'value': ''}]
- #
- # <SSH><PublicKeys>
- # <PublicKey><Fingerprint>ABC</Fingerprint><Path>/ABC</Path></PublicKey>
- # ...
- # </PublicKeys></SSH>
- results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
- if len(results) == 0:
- return []
- if len(results) > 1:
- raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
- len(results))
-
- pubkeys_node = results[0]
- pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
-
- if len(pubkeys) == 0:
- return []
-
- found = []
- text_node = minidom.Document.TEXT_NODE
-
- for pk_node in pubkeys:
- if not pk_node.hasChildNodes():
- continue
-
- cur = {'fingerprint': "", 'path': "", 'value': ""}
- for child in pk_node.childNodes:
- if child.nodeType == text_node or not child.localName:
- continue
-
- name = child.localName.lower()
-
- if name not in cur.keys():
- continue
-
- if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
- continue
-
- cur[name] = child.childNodes[0].wholeText.strip()
- found.append(cur)
-
- return found
-
-
-def read_azure_ovf(contents):
- try:
- dom = minidom.parseString(contents)
- except Exception as e:
- raise BrokenAzureDataSource("invalid xml: %s" % e)
-
- results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
-
- if len(results) == 0:
- raise NonAzureDataSource("No ProvisioningSection")
- if len(results) > 1:
- raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
- len(results))
- provSection = results[0]
-
- lpcs_nodes = find_child(provSection,
- lambda n:
- n.localName == "LinuxProvisioningConfigurationSet")
-
- if len(lpcs_nodes) == 0:
- raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
- if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource("found '%d' %ss" %
- ("LinuxProvisioningConfigurationSet",
- len(lpcs_nodes)))
- lpcs = lpcs_nodes[0]
-
- if not lpcs.hasChildNodes():
- raise BrokenAzureDataSource("no child nodes of configuration set")
-
- md_props = 'seedfrom'
- md = {'azure_data': {}}
- cfg = {}
- ud = ""
- password = None
- username = None
-
- for child in lpcs.childNodes:
- if child.nodeType == dom.TEXT_NODE or not child.localName:
- continue
-
- name = child.localName.lower()
-
- simple = False
- value = ""
- if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
- simple = True
- value = child.childNodes[0].wholeText
-
- attrs = dict([(k, v) for k, v in child.attributes.items()])
-
- # we accept either UserData or CustomData. If both are present
- # then behavior is undefined.
- if name == "userdata" or name == "customdata":
- if attrs.get('encoding') in (None, "base64"):
- ud = base64.b64decode(''.join(value.split()))
- else:
- ud = value
- elif name == "username":
- username = value
- elif name == "userpassword":
- password = value
- elif name == "hostname":
- md['local-hostname'] = value
- elif name == "dscfg":
- if attrs.get('encoding') in (None, "base64"):
- dscfg = base64.b64decode(''.join(value.split()))
- else:
- dscfg = value
- cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
- elif name == "ssh":
- cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
- elif name == "disablesshpasswordauthentication":
- cfg['ssh_pwauth'] = util.is_false(value)
- elif simple:
- if name in md_props:
- md[name] = value
- else:
- md['azure_data'][name] = value
-
- defuser = {}
- if username:
- defuser['name'] = username
- if password and DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = encrypt_pass(password)
- defuser['lock_passwd'] = False
-
- if defuser:
- cfg['system_info'] = {'default_user': defuser}
-
- if 'ssh_pwauth' not in cfg and password:
- cfg['ssh_pwauth'] = True
-
- return (md, ud, cfg)
-
-
-def encrypt_pass(password, salt_id="$6$"):
- return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
-
-
-def list_possible_azure_ds_devs():
- # return a sorted list of devices that might have an Azure datasource
- devlist = []
- for fstype in ("iso9660", "udf"):
- devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
-
- devlist.sort(reverse=True)
- return devlist
-
-
-def load_azure_ds_dir(source_dir):
- ovf_file = os.path.join(source_dir, "ovf-env.xml")
-
- if not os.path.isfile(ovf_file):
- raise NonAzureDataSource("No ovf-env file found")
-
- with open(ovf_file, "rb") as fp:
- contents = fp.read()
-
- md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
-
-
-class BrokenAzureDataSource(Exception):
- pass
-
-
-class NonAzureDataSource(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
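encrypt_pass() above produces a SHA-512 crypt hash suitable for the
passwd field of /etc/shadow. A runnable sketch with a fixed salt for
reproducibility; the real code appends 16 random characters via
util.rand_str:

    import crypt  # Unix-only standard library module

    def encrypt_pass(password, salt='$6$examplesalt'):
        # '$6$' selects SHA-512 crypt
        return crypt.crypt(password, salt)

    print(encrypt_pass('s3cret'))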
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
deleted file mode 100644
index f80956a5..00000000
--- a/cloudinit/sources/DataSourceBigstep.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Copyright (C) 2015-2016 Bigstep Cloud Ltd.
-#
-# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com>
-#
-
-import errno
-import json
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceBigstep(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = {}
- self.vendordata_raw = ""
- self.userdata_raw = ""
-
- def get_data(self, apply_filter=False):
- url = get_url_from_file()
- if url is None:
- return False
- response = url_helper.readurl(url)
- decoded = json.loads(response.contents)
- self.metadata = decoded["metadata"]
- self.vendordata_raw = decoded["vendordata_raw"]
- self.userdata_raw = decoded["userdata_raw"]
- return True
-
-
-def get_url_from_file():
- try:
- content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
- except IOError as e:
- # If the file doesn't exist, then the server probably isn't a Bigstep
- # instance; otherwise, another problem exists which needs investigation
- if e.errno == errno.ENOENT:
- return None
- else:
- raise
- return content
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
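The Bigstep flow is: read a one-line URL from a seed file, fetch it, and
decode the JSON payload into metadata/vendordata/userdata. A Python 3
standard-library sketch of the same flow; cloud-init's url_helper adds
retries and timeouts on top of this:

    import json
    import urllib.request

    SEED = '/var/lib/cloud/data/seed/bigstep/url'

    def fetch_bigstep_data(url_file=SEED):
        try:
            with open(url_file) as fp:
                url = fp.read().strip()
        except FileNotFoundError:
            return None  # no seed file: probably not a Bigstep instance
        with urllib.request.urlopen(url) as resp:
            return json.loads(resp.read().decode())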
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
deleted file mode 100644
index d1f806d6..00000000
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 CloudSigma
-#
-# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from base64 import b64decode
-import os
-import re
-
-from cloudinit.cs_utils import Cepko
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceCloudSigma(sources.DataSource):
- """
- Uses cepko in order to gather the server context from the VM.
-
- For more information about CloudSigma's Server Context:
- http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
- """
- def __init__(self, sys_cfg, distro, paths):
- self.cepko = Cepko()
- self.ssh_public_key = ''
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
-
- def is_running_in_cloudsigma(self):
- """
- Uses dmi data to detect if this instance of cloud-init is running
- in the CloudSigma's infrastructure.
- """
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data is not available on ARM processors
- LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
- return False
-
- LOG.debug("determining hypervisor product name via dmi data")
- sys_product_name = util.read_dmi_data("system-product-name")
- if not sys_product_name:
- LOG.debug("system-product-name not available in dmi data")
- return False
-
- LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
-
- def get_data(self):
- """
- Metadata is the whole server context and /meta/cloud-config is used
- as userdata.
- """
- if not self.is_running_in_cloudsigma():
- return False
-
- try:
- server_context = self.cepko.all().result
- server_meta = server_context['meta']
- except Exception:
- # TODO: check for explicit "config on", and then warn
- # but since no explicit config is available now, just debug.
- LOG.debug("CloudSigma: Unable to read from serial port")
- return False
-
- self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
- self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
- self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
-
- self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
-
- return True
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- """
- Uses the server's name if it is set and contains only safe
- characters. Otherwise the first part of the uuid is used.
- """
- if re.match(r'^[A-Za-z0-9 _.-]+$', self.metadata['name']):
- return self.metadata['name'][:61]
- else:
- return self.metadata['uuid'].split('-')[0]
-
- def get_public_ssh_keys(self):
- return [self.ssh_public_key]
-
- def get_instance_id(self):
- return self.metadata['uuid']
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceCloudSigmaNet = DataSourceCloudSigma
-
-# Used to match classes to dependencies. Since this datasource uses the serial
-# port network is not really required, so it's okay to load without it, too.
-datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
-]
-
-
-def get_datasource_list(depends):
- """
- Return a list of data sources that match this set of dependencies
- """
- return sources.list_from_depends(depends, datasources)
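get_hostname() above reduces to a small, testable rule: keep the server
name when it contains only safe characters (truncated to 61 chars),
otherwise fall back to the first uuid segment. Isolated as a sketch:

    import re

    SAFE_NAME = re.compile(r'^[A-Za-z0-9 _.-]+$')

    def pick_hostname(name, uuid):
        if SAFE_NAME.match(name):
            return name[:61]
        return uuid.split('-')[0]

    print(pick_hostname('web-01', 'deadbeef-0000'))    # web-01
    print(pick_hostname('bad/name', 'deadbeef-0000'))  # deadbeef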
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
deleted file mode 100644
index 4de1f563..00000000
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Cosmin Luta
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2012 Gerard Dethier
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Cosmin Luta <q4break@gmail.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Gerard Dethier <g.dethier@gmail.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from socket import inet_ntoa
-from struct import pack
-import time
-
-from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudStackPasswordServerClient(object):
- """
- Implements password fetching from the CloudStack password server.
-
- http://cloudstack-administration.readthedocs.org/
- en/latest/templates.html#adding-password-management-to-your-templates
- has documentation about the system. This implementation is following that
- found at
- https://github.com/shankerbalan/cloudstack-scripts/
- blob/master/cloud-set-guest-password-debian
- """
-
- def __init__(self, virtual_router_address):
- self.virtual_router_address = virtual_router_address
-
- def _do_request(self, domu_request):
- # The password server was, in the past, a broken HTTP server, but is
- # now fixed. wget handles this seamlessly, so it's easier to shell out
- # to it than to write our own handling code.
- output, _ = util.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
- return output.strip()
-
- def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
- return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
- return password
-
-
-class DataSourceCloudStack(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
- # Cloudstack has its metadata/userdata URLs located at
- # http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
- self.vr_addr = get_vr_address()
- if not self.vr_addr:
- raise RuntimeError("No virtual router found!")
- self.metadata_address = "http://%s/" % (self.vr_addr,)
- self.cfg = {}
-
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
-
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- (max_wait, timeout) = self._get_url_settings()
- if max_wait <= 0:
- return False
-
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
- start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url)
- else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
-
- return bool(url)
-
- def get_config_obj(self):
- return self.cfg
-
- def get_data(self):
- seed_ret = {}
- if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
- LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
- return True
- try:
- if not self.wait_for_metadata_service():
- return False
- start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
- password_client = CloudStackPasswordServerClient(self.vr_addr)
- try:
- set_password = password_client.get_password()
- except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
- else:
- if set_password:
- self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
- },
- }
- return True
- except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
- return False
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- @property
- def availability_zone(self):
- return self.metadata['availability-zone']
-
-
-def get_default_gateway():
- # Returns the default gateway ip address in the dotted format.
- lines = util.load_file("/proc/net/route").splitlines()
- for line in lines:
- items = line.split("\t")
- if items[1] == "00000000":
- # Found the default route, get the gateway
- gw = inet_ntoa(pack("<L", int(items[2], 16)))
- LOG.debug("Found default route, gateway is %s", gw)
- return gw
- return None
-
-
-def get_dhclient_d():
- # find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp"]
- for d in supported_dirs:
- if os.path.exists(d):
- LOG.debug("Using %s lease directory", d)
- return d
- return None
-
-
-def get_latest_lease():
- # find latest lease file
- lease_d = get_dhclient_d()
- if not lease_d:
- return None
- lease_files = os.listdir(lease_d)
- latest_mtime = -1
- latest_file = None
- for file_name in lease_files:
- if file_name.startswith("dhclient.") and \
- (file_name.endswith(".lease") or file_name.endswith(".leases")):
- abs_path = os.path.join(lease_d, file_name)
- mtime = os.path.getmtime(abs_path)
- if mtime > latest_mtime:
- latest_mtime = mtime
- latest_file = abs_path
- return latest_file
-
-
-def get_vr_address():
- # Get the address of the virtual router via dhcp leases
- # see http://bit.ly/T76eKC for documentation on the virtual router.
- # If no virtual router is detected, fall back on the default gateway.
- lease_file = get_latest_lease()
- if not lease_file:
- LOG.debug("No lease file found, using default gateway")
- return get_default_gateway()
-
- latest_address = None
- with open(lease_file, "r") as fd:
- for line in fd:
- if "dhcp-server-identifier" in line:
- words = line.strip(" ;\r\n").split(" ")
- if len(words) > 2:
- dhcp = words[2]
- LOG.debug("Found DHCP identifier %s", dhcp)
- latest_address = dhcp
- if not latest_address:
- # No virtual router found, fall back on the default gateway
- LOG.debug("No DHCP found, using default gateway")
- return get_default_gateway()
- return latest_address
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
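get_default_gateway() above decodes /proc/net/route, where the gateway
field is a little-endian hex IPv4 value, hence the pack/inet_ntoa dance.
A standalone, Linux-only sketch of the same decode:

    from socket import inet_ntoa
    from struct import pack

    def default_gateway(route_file='/proc/net/route'):
        with open(route_file) as fp:
            for line in fp.readlines()[1:]:  # skip the header row
                fields = line.split('\t')
                if fields[1] == '00000000':  # destination 0.0.0.0/0
                    return inet_ntoa(pack('<L', int(fields[2], 16)))
        return None

    print(default_gateway())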
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
deleted file mode 100644
index 91d6ff13..00000000
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.net import eni
-
-from cloudinit.sources.helpers import openstack
-
-LOG = logging.getLogger(__name__)
-
-# Various defaults/constants...
-DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
-DEFAULT_METADATA = {
- "instance-id": DEFAULT_IID,
-}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2',)
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
-
-
-class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
- self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
- self.version = None
- self.ec2_metadata = None
- self._network_config = None
- self.network_json = None
- self.network_eni = None
- self.known_macs = None
- self.files = {}
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
- mstr += "[source=%s]" % (self.source)
- return mstr
-
- def get_data(self):
- found = None
- md = {}
- results = {}
- if os.path.isdir(self.seed_dir):
- try:
- results = read_config_drive(self.seed_dir)
- found = self.seed_dir
- except openstack.NonReadable:
- util.logexc(LOG, "Failed reading config drive from %s",
- self.seed_dir)
- if not found:
- for dev in find_candidate_devs():
- try:
- # Set mtype if freebsd and turn off sync
- if dev.startswith("/dev/cd"):
- mtype = "cd9660"
- sync = False
- else:
- mtype = None
- sync = True
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype, sync=sync)
- found = dev
- except openstack.NonReadable:
- pass
- except util.MountFailedError:
- pass
- except openstack.BrokenMetadata:
- util.logexc(LOG, "Broken config drive: %s", dev)
- if found:
- break
- if not found:
- return False
-
- md = results.get('metadata', {})
- md = util.mergemanydict([md, DEFAULT_METADATA])
-
- self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
- if prev_iid != cur_iid:
- # better would be to handle this centrally, allowing
- # the datasource to do something on new instance id
- # note, networking is only rendered here if dsmode is DSMODE_PASS
- # which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
-
- # This is legacy and sneaky. If dsmode is 'pass' then do not claim
- # the datasource was used, even though we did run on_first_boot above.
- if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
- return False
-
- self.source = found
- self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
-
- vd = results.get('vendordata')
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
- except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- # network_config is an /etc/network/interfaces formatted file and is
- # obsolete compared to networkdata (from network_data.json), but both
- # might be present.
- self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
- return True
-
- def check_instance_id(self, sys_cfg):
- # quickly check (local only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self.network_json is not None:
- LOG.debug("network config provided via network_json")
- self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
- elif self.network_eni is not None:
- self._network_config = eni.convert_eni_data(self.network_eni)
- LOG.debug("network config provided via converted eni data")
- else:
- LOG.debug("no network configuration available")
- return self._network_config
-
-
-def read_config_drive(source_dir):
- reader = openstack.ConfigDriveReader(source_dir)
- finders = [
- (reader.read_v2, [], {}),
- (reader.read_v1, [], {}),
- ]
- excps = []
- for (functor, args, kwargs) in finders:
- try:
- return functor(*args, **kwargs)
- except openstack.NonReadable as e:
- excps.append(e)
- raise excps[-1]
-
-
-def get_previous_iid(paths):
- # interestingly, for this purpose the "previous" instance-id is the current
- # instance-id. cloud-init hasn't moved them over yet as this datasource
- # hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
- try:
- return util.load_file(fname).rstrip("\n")
- except IOError:
- return None
-
-
-def on_first_boot(data, distro=None, network=True):
- """Performs any first-boot actions using data read from a config-drive."""
- if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
- if network:
- net_conf = data.get("network_config", '')
- if net_conf and distro:
- LOG.warn("Updating network interfaces from config drive")
- distro.apply_network(net_conf)
- write_injected_files(data.get('files'))
-
-
-def write_injected_files(files):
- if files:
- LOG.debug("Writing %s injected files", len(files))
- for (filename, content) in files.items():
- if not filename.startswith(os.sep):
- filename = os.sep + filename
- try:
- util.write_file(filename, content, mode=0o660)
- except IOError:
- util.logexc(LOG, "Failed writing file: %s", filename)
-
-
-def find_candidate_devs(probe_optical=True):
- """Return a list of devices that may contain the config drive.
-
- The returned list is sorted by search order, where the first item
- should be searched first (highest priority).
-
- config drive v1:
- Per documentation, this is "associated as the last available disk on the
- instance", and should be VFAT.
- Currently, we do not restrict search list to "last available disk"
-
- config drive v2:
- Disk should be:
- * either vfat or iso9660 formatted
- * labeled with 'config-2'
- """
- # query optical drive to get it in blkid cache for 2.6 kernels
- if probe_optical:
- for device in OPTICAL_DEVICES:
- try:
- util.find_devs_with(path=device)
- except util.ProcessExecutionError:
- pass
-
- by_fstype = []
- for fs_type in FS_TYPES:
- by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type)))
-
- by_label = []
- for label in LABEL_TYPES:
- by_label.extend(util.find_devs_with("LABEL=%s" % (label)))
-
- # give preference to "last available disk" (vdb over vda)
- # note, this is not a perfect rendition of that.
- by_fstype.sort(reverse=True)
- by_label.sort(reverse=True)
-
- # combine list of items by putting by-label items first
- # followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
-
- # We are looking for a block device or partition with necessary label or
- # an unpartitioned block device (ex sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
- return devices
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceConfigDriveNet = DataSourceConfigDrive
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceConfigDrive, (sources.DEP_FILESYSTEM,)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
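The device-ordering rule in find_candidate_devs() is worth isolating:
labeled devices first, then the remaining by-fstype devices, each
reverse-sorted so e.g. vdb is tried before vda. As a list-only sketch:

    def order_candidates(by_label, by_fstype):
        by_label = sorted(by_label, reverse=True)
        by_fstype = sorted(by_fstype, reverse=True)
        return by_label + [d for d in by_fstype if d not in by_label]

    print(order_candidates(['/dev/sr0'],
                           ['/dev/vda', '/dev/vdb', '/dev/sr0']))
    # ['/dev/sr0', '/dev/vdb', '/dev/vda']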
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
deleted file mode 100644
index 44a17a00..00000000
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Neal Shrader <neal@digitalocean.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import ec2_utils
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-import functools
-
-
-LOG = logging.getLogger(__name__)
-
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1/',
- 'mirrors_url': 'http://mirrors.digitalocean.com/'
-}
-MD_RETRIES = 0
-MD_TIMEOUT = 1
-
-
-class DataSourceDigitalOcean(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
-
- if self.ds_cfg.get('retries'):
- self.retries = self.ds_cfg['retries']
- else:
- self.retries = MD_RETRIES
-
- if self.ds_cfg.get('timeout'):
- self.timeout = self.ds_cfg['timeout']
- else:
- self.timeout = MD_TIMEOUT
-
- def get_data(self):
- caller = functools.partial(util.read_file_or_url,
- timeout=self.timeout, retries=self.retries)
-
- def mcaller(url):
- return caller(url).contents
-
- md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
- base_url=self.metadata_address,
- caller=mcaller)
-
- self.metadata = md.materialize()
-
- if self.metadata.get('id'):
- return True
- else:
- return False
-
- def get_userdata_raw(self):
- return "\n".join(self.metadata['user-data'])
-
- def get_vendordata_raw(self):
- return "\n".join(self.metadata['vendor-data'])
-
- def get_public_ssh_keys(self):
- public_keys = self.metadata['public-keys']
- if isinstance(public_keys, list):
- return public_keys
- else:
- return [public_keys]
-
- @property
- def availability_zone(self):
- return self.metadata['region']
-
- def get_instance_id(self):
- return self.metadata['id']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- return self.metadata['hostname']
-
- def get_package_mirror_info(self):
- return self.ds_cfg['mirrors_url']
-
- @property
- def launch_index(self):
- return None
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
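get_data() above uses functools.partial to pre-bind timeout and retries
so the metadata crawler only ever sees a URL. The same pattern, with a
stub fetcher standing in for util.read_file_or_url:

    import functools

    def fetch(url, timeout=1, retries=0):
        # stub for util.read_file_or_url
        return 'contents of %s (timeout=%s, retries=%s)' % (
            url, timeout, retries)

    caller = functools.partial(fetch, timeout=5, retries=3)
    print(caller('http://169.254.169.254/metadata/v1/'))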
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
deleted file mode 100644
index 6fe2a0bb..00000000
--- a/cloudinit/sources/DataSourceEc2.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-
-from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-DEF_MD_URL = "http://169.254.169.254"
-
-# Which version we are requesting of the ec2 metadata apis
-DEF_MD_VERSION = '2009-04-04'
-
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolvability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
-
-class DataSourceEc2(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata_address = DEF_MD_URL
- self.seed_dir = os.path.join(paths.seed_dir, "ec2")
- self.api_ver = DEF_MD_VERSION
-
- def get_data(self):
- seed_ret = {}
- if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
- LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
- return True
-
- try:
- if not self.wait_for_metadata_service():
- return False
- start_time = time.time()
- self.userdata_raw = \
- ec2.get_instance_userdata(self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
- return True
- except Exception:
- util.logexc(LOG, "Failed reading from metadata address %s",
- self.metadata_address)
- return False
-
- @property
- def launch_index(self):
- if not self.metadata:
- return None
- return self.metadata.get('ami-launch-index')
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- timeout = 50
- try:
- timeout = max(0, int(mcfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- mcfg = self.ds_cfg
-
- (max_wait, timeout) = self._get_url_settings()
- if max_wait <= 0:
- return False
-
- # Remove addresses from the list that won't resolve.
- mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
- filtered = [x for x in mdurls if util.is_resolvable_url(x)]
-
- if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
-
- if len(filtered):
- mdurls = filtered
- else:
- LOG.warn("Empty metadata url list! using default list")
- mdurls = DEF_MD_URLS
-
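-        # Each base url is probed via a versioned instance-id url; e.g. the
-        # default 'http://169.254.169.254' is checked as
-        # 'http://169.254.169.254/2009-04-04/meta-data/instance-id'.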
- urls = []
- url2base = {}
- for url in mdurls:
- cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
- urls.append(cur)
- url2base[cur] = url
-
- start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url2base[url])
- else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
-
- self.metadata_address = url2base.get(url)
- return bool(url)
-
- def device_name_to_device(self, name):
-        # Consult the metadata service, which has e.g.
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
- return None
-
- # Example:
- # 'block-device-mapping':
- # {'ami': '/dev/sda1',
- # 'ephemeral0': '/dev/sdb',
- # 'root': '/dev/sda1'}
- found = None
- bdm = self.metadata['block-device-mapping']
- for (entname, device) in bdm.items():
- if entname == name:
- found = device
- break
- # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
- if entname == "ephemeral" and name == "ephemeral0":
- found = device
-
- if found is None:
- LOG.debug("Unable to convert %s to a device", name)
- return None
-
- ofound = found
- if not found.startswith("/"):
- found = "/dev/%s" % found
-
- if os.path.exists(found):
- return found
-
- remapped = self._remap_device(os.path.basename(found))
- if remapped:
- LOG.debug("Remapped device name %s => %s", found, remapped)
- return remapped
-
-        # On t1.micro, ephemeral0 will appear in block-device-mapping from
-        # metadata, but it will not exist on disk (and never will).
-        # At this point, we've verified that the path does not exist;
-        # in the special case of 'ephemeral0', return None to avoid a
-        # bogus fstab entry (LP: #744019)
- if name == "ephemeral0":
- return None
- return ofound
-
- @property
- def availability_zone(self):
- try:
- return self.metadata['placement']['availability-zone']
- except KeyError:
- return None
-
- @property
- def region(self):
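-        # e.g. an availability zone of 'us-east-1a' yields region 'us-east-1'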
- az = self.availability_zone
- if az is not None:
- return az[:-1]
- return None
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
deleted file mode 100644
index c660a350..00000000
--- a/cloudinit/sources/DataSourceGCE.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Vaidas Jablonskis <jablonskis@gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-from base64 import b64decode
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/'
-}
-REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-
-
-class GoogleMetadataFetcher(object):
- headers = {'X-Google-Metadata-Request': True}
-
- def __init__(self, metadata_address):
- self.metadata_address = metadata_address
-
- def get_value(self, path, is_text):
- value = None
- try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=self.headers)
- except url_helper.UrlError as exc:
- msg = "url %s raised exception %s"
- LOG.debug(msg, path, exc)
- else:
- if resp.code == 200:
- if is_text:
- value = util.decode_binary(resp.contents)
- else:
- value = resp.contents
- else:
- LOG.debug("url %s returned code %s", path, resp.code)
- return value
-
-
-class DataSourceGCE(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
-
- # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
- # so we have to trim each key to remove the username part
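-    # (e.g. an illustrative 'bob:ssh-rsa AAAA... bob@host' is trimmed
-    # to 'ssh-rsa AAAA... bob@host')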
-    def _trim_key(self, public_key):
-        try:
-            index = public_key.index(':')
-            if index > 0:
-                return public_key[(index + 1):]
-        except ValueError:
-            pass
-        return public_key
-
- def get_data(self):
- # url_map: (our-key, path, required, is_text)
- url_map = [
- ('instance-id', ('instance/id',), True, True),
- ('availability-zone', ('instance/zone',), True, True),
- ('local-hostname', ('instance/hostname',), True, True),
- ('public-keys', ('project/attributes/sshKeys',
- 'instance/attributes/sshKeys'), False, True),
- ('user-data', ('instance/attributes/user-data',), False, False),
- ('user-data-encoding', ('instance/attributes/user-data-encoding',),
- False, True),
- ]
-
- # if we cannot resolve the metadata server, then no point in trying
- if not util.is_resolvable_url(self.metadata_address):
- LOG.debug("%s is not resolvable", self.metadata_address)
- return False
-
- metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
- # iterate over url_map keys to get metadata items
- running_on_gce = False
- for (mkey, paths, required, is_text) in url_map:
- value = None
- for path in paths:
- new_value = metadata_fetcher.get_value(path, is_text)
- if new_value is not None:
- value = new_value
- if value:
- running_on_gce = True
- if required and value is None:
- msg = "required key %s returned nothing. not GCE"
- if not running_on_gce:
- LOG.debug(msg, mkey)
- else:
- LOG.warn(msg, mkey)
- return False
- self.metadata[mkey] = value
-
- if self.metadata['public-keys']:
- lines = self.metadata['public-keys'].splitlines()
- self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
-
- if self.metadata['availability-zone']:
- self.metadata['availability-zone'] = self.metadata[
- 'availability-zone'].split('/')[-1]
-
- encoding = self.metadata.get('user-data-encoding')
- if encoding:
- if encoding == 'base64':
- self.metadata['user-data'] = b64decode(
- self.metadata['user-data'])
- else:
- LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
-
- return running_on_gce
-
- @property
- def launch_index(self):
-        # GCE does not provide a launch_index property
- return None
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- def get_public_ssh_keys(self):
- return self.metadata['public-keys']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
-        # GCE has long FQDNs and has asked for short hostnames
- return self.metadata['local-hostname'].split('.')[0]
-
- def get_userdata_raw(self):
- return self.metadata['user-data']
-
- @property
- def availability_zone(self):
- return self.metadata['availability-zone']
-
- @property
- def region(self):
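-        # e.g. a zone of 'us-central1-a' yields region 'us-central1'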
- return self.availability_zone.rsplit('-', 1)[0]
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
deleted file mode 100644
index d828f078..00000000
--- a/cloudinit/sources/DataSourceMAAS.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import errno
-import os
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-MD_VERSION = "2012-03-01"
-
-BINARY_FIELDS = ('user-data',)
-
-
-class DataSourceMAAS(sources.DataSource):
- """
- DataSourceMAAS reads instance information from MAAS.
- Given a config metadata_url, and oauth tokens, it expects to find
- files under the root named:
- instance-id
- user-data
- hostname
- """
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.base_url = None
- self.seed_dir = os.path.join(paths.seed_dir, 'maas')
- self.oauth_helper = self._get_helper()
-
- def _get_helper(self):
- mcfg = self.ds_cfg
- # If we are missing token_key, token_secret or consumer_key
- # then just do non-authed requests
- for required in ('token_key', 'token_secret', 'consumer_key'):
- if required not in mcfg:
- return url_helper.OauthUrlHelper()
-
- return url_helper.OauthUrlHelper(
- consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
- token_secret=mcfg['token_secret'],
- consumer_secret=mcfg.get('consumer_secret'))
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [%s]" % (root, self.base_url)
-
- def get_data(self):
- mcfg = self.ds_cfg
-
- try:
- (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
- self.userdata_raw = userdata
- self.metadata = metadata
- self.base_url = self.seed_dir
- return True
- except MAASSeedDirNone:
- pass
- except MAASSeedDirMalformed as exc:
- LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
- raise
-
- # If there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
- if not url:
- return False
-
- try:
-            # Doing this here actually has the side effect of getting
-            # the oauth time-fix in place; nowhere else would retry by
-            # default, so even if we could fix the timestamp we
-            # would not.
- if not self.wait_for_metadata_service(url):
- return False
-
- self.base_url = url
-
- (userdata, metadata) = read_maas_seed_url(
- self.base_url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1)
- self.userdata_raw = userdata
- self.metadata = metadata
- return True
- except Exception:
- util.logexc(LOG, "Failed fetching metadata from url %s", url)
- return False
-
- def wait_for_metadata_service(self, url):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- if max_wait == 0:
- return False
-
- timeout = 50
- try:
-            if 'timeout' in mcfg:
-                timeout = int(mcfg.get("timeout", timeout))
-        except Exception:
-            LOG.warn("Failed to get timeout, using %s", timeout)
-
- starttime = time.time()
- check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
- urls = [check_url]
- url = self.oauth_helper.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url)
- else:
- LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
-
- return bool(url)
-
-
-def read_maas_seed_dir(seed_d):
- """
- Return user-data and metadata for a maas seed dir in seed_d.
-    Expected contents of seed_d are the following files:
- * instance-id
- * local-hostname
- * user-data
- """
- if not os.path.isdir(seed_d):
- raise MAASSeedDirNone("%s: not a directory")
-
- files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
- md = {}
- for fname in files:
- try:
- md[fname] = util.load_file(os.path.join(seed_d, fname),
- decode=fname not in BINARY_FIELDS)
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
-
- return check_seed_contents(md, seed_d)
-
-
-def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
- version=MD_VERSION, paths=None, retries=None):
- """
- Read the maas datasource at seed_url.
- read_file_or_url is a method that should provide an interface
- like util.read_file_or_url
-
-    Expected format of seed_url is the following files:
- * <seed_url>/<version>/meta-data/instance-id
- * <seed_url>/<version>/meta-data/local-hostname
- * <seed_url>/<version>/user-data
- """
- base_url = "%s/%s" % (seed_url, version)
- file_order = [
- 'local-hostname',
- 'instance-id',
- 'public-keys',
- 'user-data',
- ]
- files = {
- 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
- 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
- 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
- 'user-data': "%s/%s" % (base_url, 'user-data'),
- }
-
- if read_file_or_url is None:
- read_file_or_url = util.read_file_or_url
-
- md = {}
- for name in file_order:
- url = files.get(name)
- if name == 'user-data':
- item_retries = 0
- else:
- item_retries = retries
-
- try:
- ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=item_retries,
- timeout=timeout, ssl_details=ssl_details)
- if resp.ok():
- if name in BINARY_FIELDS:
- md[name] = resp.contents
- else:
- md[name] = util.decode_binary(resp.contents)
- else:
- LOG.warn(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
- except url_helper.UrlError as e:
- if e.code != 404:
- raise
- return check_seed_contents(md, seed_url)
-
-
-def check_seed_contents(content, seed):
- """Validate if content is Is the content a dict that is valid as a
- return for a datasource.
- Either return a (userdata, metadata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
- """
- md_required = ('instance-id', 'local-hostname')
- if len(content) == 0:
- raise MAASSeedDirNone("%s: no data files found" % seed)
-
- found = list(content.keys())
- missing = [k for k in md_required if k not in found]
- if len(missing):
- raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
-
- userdata = content.get('user-data', b"")
- md = {}
- for (key, val) in content.items():
- if key == 'user-data':
- continue
- md[key] = val
-
- return (userdata, md)
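-
-# For example (illustrative values only):
-#   check_seed_contents({'instance-id': 'i-abc123',
-#                        'local-hostname': 'node1',
-#                        'user-data': b'#!/bin/sh'}, '/seed/maas')
-# returns (b'#!/bin/sh',
-#          {'instance-id': 'i-abc123', 'local-hostname': 'node1'}).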
-
-
-class MAASSeedDirNone(Exception):
- pass
-
-
-class MAASSeedDirMalformed(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
-
-
-if __name__ == "__main__":
- def main():
- """
- Call with single argument of directory or http or https url.
-        If a url is given, additional arguments are allowed, which will be
- interpreted as consumer_key, token_key, token_secret, consumer_secret
- """
- import argparse
- import pprint
-
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)",
- default=MD_VERSION)
-
- subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- subcmds.add_parser('crawl', help="crawl the datasource")
- subcmds.add_parser('get', help="do a single GET of provided url")
- subcmds.add_parser('check-seed', help="read andn verify seed at url")
-
- parser.add_argument("url", help="the data source to query")
-
- args = parser.parse_args()
-
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
-
- if args.config:
- cfg = util.read_conf(args.config)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
- for key in creds.keys():
- if key in cfg and creds[key] is None:
- creds[key] = cfg[key]
-
- oauth_helper = url_helper.OauthUrlHelper(**creds)
-
- def geturl(url):
- # the retry is to ensure that oauth timestamp gets fixed
- return oauth_helper.readurl(url, retries=1).contents
-
- def printurl(url):
- print("== %s ==\n%s\n" % (url, geturl(url).decode()))
-
- def crawl(url):
- if url.endswith("/"):
- for line in geturl(url).decode().splitlines():
- if line.endswith("/"):
- crawl("%s%s" % (url, line))
- elif line == "meta-data":
- # meta-data is a dir, it *should* end in a /
- crawl("%s%s" % (url, "meta-data/"))
- else:
- printurl("%s%s" % (url, line))
- else:
- printurl(url)
-
- if args.subcmd == "check-seed":
- readurl = oauth_helper.readurl
- if args.url[0] == "/" or args.url.startswith("file://"):
- readurl = None
- (userdata, metadata) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
- print("=== userdata ===")
- print(userdata.decode())
- print("=== metadata ===")
- pprint.pprint(metadata)
-
- elif args.subcmd == "get":
- printurl(args.url)
-
- elif args.subcmd == "crawl":
- if not args.url.endswith("/"):
- args.url = "%s/" % args.url
- crawl(args.url)
-
- main()
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
deleted file mode 100644
index cdc9eef5..00000000
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import os
-
-from cloudinit import log as logging
-from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNoCloud(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
- os.path.join(paths.seed_dir, 'nocloud-net')]
- self.seed_dir = None
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
-
- def get_data(self):
- defaults = {
- "instance-id": "nocloud",
- "dsmode": self.dsmode,
- }
-
- found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': {}}
-
- try:
- # Parse the kernel command line, getting data passed in
- md = {}
- if load_cmdline_data(md):
- found.append("cmdline")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
- except Exception:
- util.logexc(LOG, "Unable to parse command line data")
- return False
-
- # Check to see if the seed dir has data.
- pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data', 'network-config']}
-
- for path in self.seed_dirs:
- try:
- seeded = util.pathprefix2dict(path, **pp2d_kwargs)
- found.append(path)
- LOG.debug("Using seeded data from %s", path)
- mydata = _merge_new_seed(mydata, seeded)
- break
-            except ValueError:
-                pass
-
- # If the datasource config had a 'seedfrom' entry, then that takes
- # precedence over a 'seedfrom' that was found in a filesystem
- # but not over external media
- if self.ds_cfg.get('seedfrom'):
- found.append("ds_config_seedfrom")
- mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
-
- # fields appropriately named can also just come from the datasource
- # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
- if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
- mydata = _merge_new_seed(mydata, self.ds_cfg)
- found.append("ds_config")
-
- def _pp2d_callback(mp, data):
- return util.pathprefix2dict(mp, **data)
-
- label = self.ds_cfg.get('fs_label', "cidata")
- if label is not None:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
-
- label_list = util.find_devs_with("LABEL=%s" % label)
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
-
- for dev in devlist:
- try:
- LOG.debug("Attempting to use data from %s", dev)
-
- try:
- seeded = util.mount_cb(dev, _pp2d_callback,
- pp2d_kwargs)
-                    except ValueError:
-                        if dev in label_list:
-                            LOG.warn("device %s with label=%s not a "
-                                     "valid seed.", dev, label)
-                        continue
-
- mydata = _merge_new_seed(mydata, seeded)
-
- LOG.debug("Using data from %s", dev)
- found.append(dev)
- break
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for "
- "data", dev)
-
- # There was no indication on kernel cmdline or data
- # in the seeddir suggesting this handler should be used.
- if len(found) == 0:
- return False
-
- # The special argument "seedfrom" indicates we should
-        # attempt to seed the userdata / metadata from its value.
-        # Its primary value is in allowing the user to type less
- # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in mydata['meta-data']:
- seedfrom = mydata['meta-data']["seedfrom"]
- seedfound = False
- for proto in self.supported_seed_starts:
- if seedfrom.startswith(proto):
- seedfound = proto
- break
- if not seedfound:
- LOG.debug("Seed from %s not supported by %s", seedfrom, self)
- return False
-
- # This could throw errors, but the user told us to do it
- # so if errors are raised, let them raise
- (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- LOG.debug("Using seeded cache data from %s", seedfrom)
-
- # Values in the command line override those from the seed
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- md_seed])
- mydata['user-data'] = ud
- found.append(seedfrom)
-
- # Now that we have exhausted any other places merge in the defaults
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- defaults])
-
- self.dsmode = self._determine_dsmode(
- [mydata['meta-data'].get('dsmode')])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
- return False
-
- self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- self._network_eni = mydata['meta-data'].get('network-interfaces')
- return True
-
- def check_instance_id(self, sys_cfg):
-        # Quickly check (local only) whether self.instance_id is still
-        # valid; we consult the kernel command line or seed files.
- current = self.get_instance_id()
- if not current:
- return None
-
- # LP: #1568150 need getattr in the case that an old class object
-        # has been loaded from a pickled file and is now executing new source.
- dirs = getattr(self, 'seed_dirs', [self.seed_dir])
- quick_id = _quick_read_instance_id(dirs=dirs)
- if not quick_id:
- return None
- return quick_id == current
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self._network_eni is not None:
- self._network_config = eni.convert_eni_data(self._network_eni)
- return self._network_config
-
-
-def _quick_read_instance_id(dirs=None):
- if dirs is None:
- dirs = []
-
- iid_key = 'instance-id'
- fill = {}
- if load_cmdline_data(fill) and iid_key in fill:
- return fill[iid_key]
-
- for d in dirs:
- if d is None:
- continue
- try:
- data = util.pathprefix2dict(d, required=['meta-data'])
- md = util.load_yaml(data['meta-data'])
- if iid_key in md:
- return md[iid_key]
- except ValueError:
- pass
-
- return None
-
-
-def load_cmdline_data(fill, cmdline=None):
- pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
- ("ds=nocloud-net", sources.DSMODE_NETWORK)]
- for idstr, dsmode in pairs:
- if parse_cmdline_data(idstr, fill, cmdline):
-            # if dsmode was explicitly in the command line, then
- # prefer it to the dsmode based on the command line id
- if 'dsmode' not in fill:
- fill['dsmode'] = dsmode
- return True
- return False
-
-
-# Returns true or false indicating if cmdline indicated
-# that this module should be used. Updates dictionary 'fill'
-# with data that was found.
-# Example cmdline:
-# root=LABEL=uec-rootfs ro ds=nocloud
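-# A hypothetical full call:
-#   fill = {}
-#   parse_cmdline_data('ds=nocloud', fill,
-#                      'root=/dev/sda ro ds=nocloud;s=file:///seed/;h=myhost')
-# returns True and leaves fill as
-#   {'seedfrom': 'file:///seed/', 'local-hostname': 'myhost'}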
-def parse_cmdline_data(ds_id, fill, cmdline=None):
- if cmdline is None:
- cmdline = util.get_cmdline()
- cmdline = " %s " % cmdline
-
- if not (" %s " % ds_id in cmdline or " %s;" % ds_id in cmdline):
- return False
-
- argline = ""
- # cmdline can contain:
- # ds=nocloud[;key=val;key=val]
- for tok in cmdline.split():
- if tok.startswith(ds_id):
- argline = tok.split("=", 1)
-
- # argline array is now 'nocloud' followed optionally by
- # a ';' and then key=value pairs also terminated with ';'
- tmp = argline[1].split(";")
- if len(tmp) > 1:
- kvpairs = tmp[1:]
- else:
- kvpairs = ()
-
- # short2long mapping to save cmdline typing
- s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"}
- for item in kvpairs:
- if item == "":
- continue
- try:
- (k, v) = item.split("=", 1)
- except Exception:
- k = item
- v = None
- if k in s2l:
- k = s2l[k]
- fill[k] = v
-
- return True
-
-
-def _merge_new_seed(cur, seeded):
- ret = cur.copy()
-
- newmd = seeded.get('meta-data', {})
-    if not isinstance(newmd, dict):
-        newmd = util.load_yaml(newmd)
- ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
-
- if seeded.get('network-config'):
- ret['network-config'] = util.load_yaml(seeded['network-config'])
-
- if 'user-data' in seeded:
- ret['user-data'] = seeded['user-data']
- if 'vendor-data' in seeded:
- ret['vendor-data'] = seeded['vendor-data']
- return ret
-
-
-class DataSourceNoCloudNet(DataSourceNoCloud):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.supported_seed_starts = ("http://", "https://", "ftp://")
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
deleted file mode 100644
index d1a62b2a..00000000
--- a/cloudinit/sources/DataSourceNone.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import log as logging
-from cloudinit import sources
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNone(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
- sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
- self.metadata = {}
- self.userdata_raw = ''
-
- def get_data(self):
- # If the datasource config has any provided 'fallback'
- # userdata or metadata, use it...
- if 'userdata_raw' in self.ds_cfg:
- self.userdata_raw = self.ds_cfg['userdata_raw']
- if 'metadata' in self.ds_cfg:
- self.metadata = self.ds_cfg['metadata']
- return True
-
- def get_instance_id(self):
- return 'iid-datasource-none'
-
- @property
- def is_disconnected(self):
- return True
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- (DataSourceNone, []),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
deleted file mode 100644
index 43347cfb..00000000
--- a/cloudinit/sources/DataSourceOVF.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from xml.dom import minidom
-
-import base64
-import os
-import re
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
- enable_nics,
- get_nics_to_enable,
- set_customization_status
-)
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceOVF(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
- self.environment = None
- self.cfg = {}
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_data(self):
- found = []
- md = {}
- ud = ""
- vmwarePlatformFound = False
- vmwareImcConfigFilePath = ''
-
- defaults = {
- "instance-id": "iid-dsovf",
- }
-
- (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
-
- system_type = util.read_dmi_data("system-product-name")
- if system_type is None:
- LOG.debug("No system-product-name found")
-
- if seedfile:
- # Found a seed dir
- seed = os.path.join(self.paths.seed_dir, seedfile)
- (md, ud, cfg) = read_ovf_environment(contents)
- self.environment = contents
- found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
- LOG.debug("VMware Virtualization Platform found")
- if not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True):
- deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
- "libdeployPkgPlugin.so")
- if not deployPkgPluginPath:
- deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",
- "libdeployPkgPlugin.so")
- if deployPkgPluginPath:
- # When the VM is powered on, the "VMware Tools" daemon
- # copies the customization specification file to
- # /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory.
- vmwareImcConfigFilePath = util.log_time(
- logfunc=LOG.debug,
- msg="waiting for configuration file",
- func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg"))
-
- if vmwareImcConfigFilePath:
- LOG.debug("Found VMware DeployPkg Config File at %s" %
- vmwareImcConfigFilePath)
- else:
- LOG.debug("Did not find VMware DeployPkg Config File Path")
- else:
- LOG.debug("Customization for VMware platform is disabled.")
-
- if vmwareImcConfigFilePath:
- nics = ""
- try:
- cf = ConfigFile(vmwareImcConfigFilePath)
- conf = Config(cf)
- (md, ud, cfg) = read_vmware_imc(conf)
- dirpath = os.path.dirname(vmwareImcConfigFilePath)
- nics = get_nics_to_enable(dirpath)
- except Exception as e:
- LOG.debug("Error parsing the customization Config File")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- enable_nics(nics)
- return False
- finally:
- util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
-
- try:
- LOG.debug("Applying the Network customization")
- nicConfigurator = NicConfigurator(conf.nics)
- nicConfigurator.configure()
- except Exception as e:
- LOG.debug("Error applying the Network Configuration")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
- enable_nics(nics)
- return False
-
- vmwarePlatformFound = True
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
- enable_nics(nics)
- else:
- np = {'iso': transport_iso9660,
- 'vmware-guestd': transport_vmware_guestd, }
- name = None
- for (name, transfunc) in np.items():
- (contents, _dev, _fname) = transfunc()
- if contents:
- break
- if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
- self.environment = contents
- found.append(name)
-
-        # No OVF transports were found
- if len(found) == 0 and not vmwarePlatformFound:
- return False
-
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
- seedfound = False
- for proto in self.supported_seed_starts:
- if seedfrom.startswith(proto):
- seedfound = proto
- break
- if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
- return False
-
- (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- LOG.debug("Using seeded cache data from %s", seedfrom)
-
- md = util.mergemanydict([md, md_seed])
- found.append(seedfrom)
-
- # Now that we have exhausted any other places merge in the defaults
- md = util.mergemanydict([md, defaults])
-
- self.seed = ",".join(found)
- self.metadata = md
- self.userdata_raw = ud
- self.cfg = cfg
- return True
-
- def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
- return []
- pks = self.metadata['public-keys']
- if isinstance(pks, (list)):
- return pks
- else:
- return [pks]
-
- # The data sources' config_obj is a cloud-config formatted
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return self.cfg
-
-
-class DataSourceOVFNet(DataSourceOVF):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
-
-
-def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
- waited = 0
-
- while waited < maxwait:
- fileFullPath = search_file(dirpath, filename)
- if fileFullPath:
- return fileFullPath
- time.sleep(naplen)
- waited += naplen
- return None
-
-
-# This will return a tuple of
-# (metadata dict, user-data string, config dict)
-def read_vmware_imc(config):
- md = {}
- cfg = {}
- ud = ""
- if config.host_name:
- if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
- else:
- md['local-hostname'] = config.host_name
-
- if config.timezone:
- cfg['timezone'] = config.timezone
-
- return (md, ud, cfg)
-
-
-# This will return a tuple of
-# (metadata dict, user-data string, config dict)
-def read_ovf_environment(contents):
- props = get_properties(contents)
- md = {}
- cfg = {}
- ud = ""
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for (prop, val) in props.items():
- if prop == 'hostname':
- prop = "local-hostname"
- if prop in md_props:
- md[prop] = val
- elif prop in cfg_props:
- cfg[prop] = val
- elif prop == "user-data":
- try:
- ud = base64.decodestring(val)
- except Exception:
- ud = val
- return (md, ud, cfg)
-
-
-# Returns a tuple of (filename within 'dirname', contents of the file).
-# On "not found", returns 'None' for filename and False for contents.
-def get_ovf_env(dirname):
- env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
- for fname in env_names:
- full_fn = os.path.join(dirname, fname)
- if os.path.isfile(full_fn):
- try:
- contents = util.load_file(full_fn)
- return (fname, contents)
- except Exception:
- util.logexc(LOG, "Failed loading ovf file %s", full_fn)
- return (None, False)
-
-
-# Transport functions take no input and return
-# a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=True):
-
- # default_regex matches values in
- # /lib/udev/rules.d/60-cdrom_id.rules
- # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
- envname = "CLOUD_INIT_CDROM_DEV_REGEX"
- default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
-
- devname_regex = os.environ.get(envname, default_regex)
- cdmatch = re.compile(devname_regex)
-
- # Go through mounts to see if it was already mounted
- mounts = util.mounts()
- for (dev, info) in mounts.items():
- fstype = info['fstype']
- if fstype != "iso9660" and require_iso:
- continue
- if cdmatch.match(dev[5:]) is None: # take off '/dev/'
- continue
- mp = info['mountpoint']
- (fname, contents) = get_ovf_env(mp)
- if contents is not False:
- return (contents, dev, fname)
-
- if require_iso:
- mtype = "iso9660"
- else:
- mtype = None
-
- devs = os.listdir("/dev/")
- devs.sort()
- for dev in devs:
- fullp = os.path.join("/dev/", dev)
-
- if (fullp in mounts or
- not cdmatch.match(dev) or os.path.isdir(fullp)):
- continue
-
- try:
- # See if we can read anything at all...??
- util.peek_file(fullp, 512)
- except IOError:
- continue
-
- try:
- (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
- except util.MountFailedError:
- LOG.debug("%s not mountable as iso9660" % fullp)
- continue
-
- if contents is not False:
- return (contents, fullp, fname)
-
- return (False, None, None)
-
-
-def transport_vmware_guestd():
- # http://blogs.vmware.com/vapp/2009/07/ \
- # selfconfiguration-and-the-ovf-environment.html
- # try:
- # cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
- # (out, err) = subp(cmd)
- # return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
- # except:
- # # would need to error check here and see why this failed
- # # to know if log/error should be raised
- # return(False, None, None)
- return (False, None, None)
-
-
-def find_child(node, filter_func):
- ret = []
- if not node.hasChildNodes():
- return ret
- for child in node.childNodes:
- if filter_func(child):
- ret.append(child)
- return ret
-
-
-def get_properties(contents):
-
- dom = minidom.parseString(contents)
- if dom.documentElement.localName != "Environment":
- raise XmlError("No Environment Node")
-
- if not dom.documentElement.hasChildNodes():
- raise XmlError("No Child Nodes")
-
- envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
-
- # could also check here that elem.namespaceURI ==
- # "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
-
- if len(propSections) == 0:
- raise XmlError("No 'PropertySection's")
-
- props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
-
- for elem in propElems:
- key = elem.attributes.getNamedItemNS(envNsURI, "key").value
- val = elem.attributes.getNamedItemNS(envNsURI, "value").value
- props[key] = val
-
- return props
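-
-# A Property element in ovf-env.xml looks roughly like (illustrative):
-#   <Property oe:key="hostname" oe:value="vm1"/>
-# and get_properties() collects such pairs into {'hostname': 'vm1'}.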
-
-
-def search_file(dirpath, filename):
- if not dirpath or not filename:
- return None
-
- for root, dirs, files in os.walk(dirpath):
- if filename in files:
- return os.path.join(root, filename)
-
- return None
-
-
-class XmlError(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
- (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-)
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
deleted file mode 100644
index 7b3a76b9..00000000
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2012-2013 CERIT Scientific Cloud
-# Copyright (C) 2012-2013 OpenNebula.org
-# Copyright (C) 2014 Consejo Superior de Investigaciones Cientificas
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Vlastimil Holer <xholer@mail.muni.cz>
-# Author: Javier Fontan <jfontan@opennebula.org>
-# Author: Enol Fernandez <enolfc@ifca.unican.es>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import re
-import string
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
-CONTEXT_DISK_FILES = ["context.sh"]
-
-
-class DataSourceOpenNebula(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
-
- def get_data(self):
- defaults = {"instance-id": DEFAULT_IID}
- results = None
- seed = None
-
- # decide parseuser for context.sh shell reader
- parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
-
- candidates = [self.seed_dir]
- candidates.extend(find_candidate_devs())
- for cdev in candidates:
- try:
- if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
- elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
- except NonContextDiskDir:
- continue
- except BrokenContextDiskDir as exc:
- raise exc
- except util.MountFailedError:
- LOG.warn("%s was not mountable" % cdev)
-
- if results:
- seed = cdev
- LOG.debug("found datasource in %s", cdev)
- break
-
- if not seed:
- return False
-
- # merge fetched metadata with datasource defaults
- md = results['metadata']
- md = util.mergemanydict([md, defaults])
-
- # check for valid user specified dsmode
- self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- self.seed = seed
- self.network_eni = results.get("network_config")
- self.metadata = md
- self.userdata_raw = results.get('userdata')
- return True
-
- def get_hostname(self, fqdn=False, resolve_ip=None):
- if resolve_ip is None:
- if self.dsmode == sources.DSMODE_NETWORK:
- resolve_ip = True
- else:
- resolve_ip = False
- return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
-
-
-class NonContextDiskDir(Exception):
- pass
-
-
-class BrokenContextDiskDir(Exception):
- pass
-
-
-class OpenNebulaNetwork(object):
- REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
-
- def __init__(self, ip, context):
- self.ip = ip
- self.context = context
- self.ifaces = self.get_ifaces()
-
- def get_ifaces(self):
- return self.REG_DEV_MAC.findall(self.ip)
-
- def mac2ip(self, mac):
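-        # OpenNebula encodes the IP in the last four octets of the MAC;
-        # e.g. '02:00:c0:a8:01:02' -> ['192', '168', '1', '2']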
- components = mac.split(':')[2:]
- return [str(int(c, 16)) for c in components]
-
- def get_ip(self, dev, components):
- var_name = dev.upper() + '_IP'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components)
-
- def get_mask(self, dev):
- var_name = dev.upper() + '_MASK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '255.255.255.0'
-
- def get_network(self, dev, components):
- var_name = dev.upper() + '_NETWORK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components[:-1]) + '.0'
-
- def get_gateway(self, dev):
- var_name = dev.upper() + '_GATEWAY'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def get_dns(self, dev):
- var_name = dev.upper() + '_DNS'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def get_domain(self, dev):
- var_name = dev.upper() + '_DOMAIN'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def gen_conf(self):
- global_dns = []
- if 'DNS' in self.context:
- global_dns.append(self.context['DNS'])
-
- conf = []
- conf.append('auto lo')
- conf.append('iface lo inet loopback')
- conf.append('')
-
- for i in self.ifaces:
- dev = i[0]
- mac = i[1]
- ip_components = self.mac2ip(mac)
-
- conf.append('auto ' + dev)
- conf.append('iface ' + dev + ' inet static')
- conf.append(' address ' + self.get_ip(dev, ip_components))
- conf.append(' network ' + self.get_network(dev, ip_components))
- conf.append(' netmask ' + self.get_mask(dev))
-
- gateway = self.get_gateway(dev)
- if gateway:
- conf.append(' gateway ' + gateway)
-
- domain = self.get_domain(dev)
- if domain:
- conf.append(' dns-search ' + domain)
-
- # add global DNS servers to all interfaces
- dns = self.get_dns(dev)
- if global_dns or dns:
- all_dns = global_dns
- if dns:
- all_dns.append(dns)
- conf.append(' dns-nameservers ' + ' '.join(all_dns))
-
- conf.append('')
-
- return "\n".join(conf)
-
-
-def find_candidate_devs():
- """
- Return a list of devices that may contain the context disk.
- """
- combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
- devs = util.find_devs_with(f)
- devs.sort()
- for d in devs:
- if d not in combined:
- combined.append(d)
-
- return combined
-
-
-def switch_user_cmd(user):
- return ['sudo', '-u', user]
-
-
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
-
- if isinstance(bash, str):
- bash = [bash]
- elif bash is None:
- bash = ['bash', '-e']
-
- if switch_user_cb is None:
- switch_user_cb = switch_user_cmd
-
- # allvars expands to all existing variables by using '${!x*}' notation
- # where x is lower or upper case letters or '_'
- allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]
-
- keylist_in = keylist
- if keylist is None:
- keylist = allvars
- keylist_in = []
-
- setup = '\n'.join(('__v="";', '',))
-
- def varprinter(vlist):
-        # output '_start_\0' followed by 'key=value\0' for each var in vlist
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
-
- # the rendered 'bcmd' is bash syntax that does
- # setup: declare variables we use (so they show up in 'all')
- # varprinter(allvars): print all variables known at beginning
- # content: execute the provided content
- # varprinter(keylist): print all variables known after content
- #
- # output is then a null terminated array of:
- # literal '_start_'
- # key=value (for each preset variable)
- # literal '_start_'
- # key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
-
- cmd = []
- if asuser is not None:
- cmd = switch_user_cb(asuser)
-
- cmd.extend(bash)
-
- (output, _error) = util.subp(cmd, data=bcmd)
-
- # exclude vars in bash that change on their own or that we used
- excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
- preset = {}
- ret = {}
- target = None
- output = output[0:-1] # remove trailing null
-
- # go through output. First _start_ is for 'preset', second for 'target'.
-    # Add to target only things that were changed and are not in 'excluded'
- for line in output.split("\x00"):
- try:
- (key, val) = line.split("=", 1)
- if target is preset:
- target[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
- ret[key] = val
- except ValueError:
- if line != "_start_":
- raise
- if target is None:
- target = preset
- elif target is preset:
- target = ret
-
- return ret
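-
-# A minimal sketch of the round trip (illustrative content):
-#   parse_shell_config('HOSTNAME="vm1"\nSSH_KEY="ssh-rsa AAAA..."')
-# runs the content under bash and returns the variables it set, e.g.
-#   {'HOSTNAME': 'vm1', 'SSH_KEY': 'ssh-rsa AAAA...'}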
-
-
-def read_context_disk_dir(source_dir, asuser=None):
- """
- read_context_disk_dir(source_dir):
-    read source_dir and return a dict with 'metadata' and 'userdata'
-    populated.  If not a valid context dir, raise NonContextDiskDir.
- """
- found = {}
- for af in CONTEXT_DISK_FILES:
- fn = os.path.join(source_dir, af)
- if os.path.isfile(fn):
- found[af] = fn
-
- if not found:
- raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
-
- context = {}
- results = {'userdata': None, 'metadata': {}}
-
- if "context.sh" in found:
- if asuser is not None:
- try:
- pwd.getpwnam(asuser)
-            except KeyError:
-                raise BrokenContextDiskDir("configured user '%s' "
-                                           "does not exist" % asuser)
- try:
- path = os.path.join(source_dir, 'context.sh')
- content = util.load_file(path)
- context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
- raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
- except IOError as e:
- raise NonContextDiskDir("Error reading context.sh: %s" % (e))
- else:
- raise NonContextDiskDir("Missing context.sh")
-
- if not context:
- return results
-
- results['metadata'] = context
-
- # process single or multiple SSH keys
- ssh_key_var = None
- if "SSH_KEY" in context:
- ssh_key_var = "SSH_KEY"
- elif "SSH_PUBLIC_KEY" in context:
- ssh_key_var = "SSH_PUBLIC_KEY"
-
- if ssh_key_var:
- lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not
- l.startswith("#")]
-
-    # custom hostname -- try hostname or let cloud-init
-    # itself create a hostname from the IP address later
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
- if k in context:
- results['metadata']['local-hostname'] = context[k]
- break
-
- # raw user data
- if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
- elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
-
- # b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
- if encoding == "base64":
- try:
- results['userdata'] = util.b64d(results['userdata'])
- except TypeError:
- LOG.warn("Failed base64 decoding of userdata")
-
- # generate static /etc/network/interfaces
- # only if there are any required context variables
- # http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context:
- if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['/sbin/ip', 'link'])
- net = OpenNebulaNetwork(out, context)
- results['network-interfaces'] = net.gen_conf()
- break
-
- return results
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceOpenNebulaNet = DataSourceOpenNebula
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
deleted file mode 100644
index c06d17f3..00000000
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.sources.helpers import openstack
-
-LOG = logging.getLogger(__name__)
-
-# Various defaults/constants...
-DEF_MD_URL = "http://169.254.169.254"
-DEFAULT_IID = "iid-dsopenstack"
-DEFAULT_METADATA = {
- "instance-id": DEFAULT_IID,
-}
-
-
-class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
- self.metadata_address = None
- self.ssl_details = util.fetch_ssl_details(self.paths)
- self.version = None
- self.files = {}
- self.ec2_metadata = None
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
- return mstr
-
- def _get_url_settings(self):
- # TODO(harlowja): this is shared with ec2 datasource, we should just
- # move it to a shared location instead...
- # Note: the defaults here are different though.
-
- # max_wait < 0 indicates do not wait
- max_wait = -1
- timeout = 10
-
- try:
- max_wait = int(self.ds_cfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- try:
- timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
- filtered = [x for x in urls if util.is_resolvable_url(x)]
- if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
- if len(filtered):
- urls = filtered
- else:
- LOG.warn("Empty metadata url list! using default list")
- urls = [DEF_MD_URL]
-
- md_urls = []
- url2base = {}
- for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
- md_urls.append(md_url)
- url2base[md_url] = url
-
- (max_wait, timeout) = self._get_url_settings()
- start_time = time.time()
- avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
- timeout=timeout)
- if avail_url:
- LOG.debug("Using metadata source: '%s'", url2base[avail_url])
- else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
-
- self.metadata_address = url2base.get(avail_url)
- return bool(avail_url)
-
- def get_data(self, retries=5, timeout=5):
- try:
- if not self.wait_for_metadata_service():
- return False
- except IOError:
- return False
-
- try:
- results = util.log_time(LOG.debug,
- 'Crawl of openstack metadata service',
- read_metadata_service,
- args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': retries,
- 'timeout': timeout})
- except openstack.NonReadable:
- return False
- except (openstack.BrokenMetadata, IOError):
- util.logexc(LOG, "Broken metadata address %s",
- self.metadata_address)
- return False
-
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- md = results.get('metadata', {})
- md = util.mergemanydict([md, DEFAULT_METADATA])
- self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
-
- vd = results.get('vendordata')
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
- except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- return True
-
- def check_instance_id(self, sys_cfg):
- # quickly check (local only) whether self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
-
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
- return reader.read_v2()
-
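-# Illustrative standalone use of the crawler above (a sketch):
-#   results = read_metadata_service('http://169.254.169.254')
-#   md, ud = results['metadata'], results.get('userdata')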
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
deleted file mode 100644
index ccc86883..00000000
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ /dev/null
@@ -1,781 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Datasource for provisioning on SmartOS. This works on Joyent
-# and public/private Clouds using SmartOS.
-#
-# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
-# The meta-data is transmitted via key/value pairs made by
-# requests on the console. For example, to get the hostname, you
-# would send "GET hostname" on /dev/ttyS1.
-# For Linux Guests running in LX-Brand Zones on SmartOS hosts
-# a socket (/native/.zonecontrol/metadata.sock) is used instead
-# of a serial console.
-#
-# Certain behavior is defined by the DataDictionary
-# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
-# Comments with "@datadictionary" are snippets of the definition
-
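-# As an illustrative sketch (assuming pyserial semantics for the
-# serial wrapper used below), the simplest possible exchange is:
-#   import serial
-#   ser = serial.Serial('/dev/ttyS1', timeout=60)
-#   ser.write(b'GET hostname\n')
-#   print(ser.readline())
-# The JoyentMetadataClient classes below wrap this exchange in the
-# checksummed V2 framing of the protocol.
-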
-import base64
-import binascii
-import json
-import os
-import random
-import re
-import socket
-
-from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-SMARTOS_ATTRIB_MAP = {
- # Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
-}
-
-SMARTOS_ATTRIB_JSON = {
- # Cloud-init Key : (SmartOS Key known JSON)
- 'network-data': 'sdc:nics',
-}
-
-SMARTOS_ENV_LX_BRAND = "lx-brand"
-SMARTOS_ENV_KVM = "kvm"
-
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
-NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
- 'sdc:uuid',
- 'user-data',
- 'user-script',
-]
-
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
-SERIAL_TIMEOUT = 60
-
-# BUILT-IN DATASOURCE CONFIGURATION
-# The following is the built-in configuration. If the values
-# are not set via the system configuration, then these defaults
-# will be used:
-# serial_device: which serial device to use for the meta-data
-# serial_timeout: how long to wait on the device
-# no_base64_decode: values which are not base64 encoded and
-# are fetched directly from SmartOS, not meta-data values
-# base64_keys: meta-data keys that are delivered in base64
-# base64_all: with the exclusion of no_base64_decode values,
-# treat all meta-data as base64 encoded
-# disk_setup: describes how to partition the ephemeral drive
-# fs_setup: describes how to format the ephemeral drive
-#
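-# For example (a sketch), a system config fragment could override these:
-#   datasource:
-#     SmartOS:
-#       base64_all: True
-#       serial_timeout: 120
-#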
-BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
-}
-
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
- },
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext3',
- 'device': 'ephemeral0'}],
-}
-
-# builtin vendor-data is a boothook that writes a script into
-# /var/lib/cloud/scripts/per-boot. *That* script then handles
-# executing the 'operator-script' and 'user-script' files
-# that cloud-init writes into /var/lib/cloud/instance/data/
-# if they exist.
-#
-# This is all very indirect, but it's done this way so that at some
-# point in the future cloud-init might not need to do it at all; the
-# vendor could instead provide vendor-data that accomplishes the same
-# thing. (That is the point of vendor-data.)
-#
-# cloud-init does cheat a bit, and write the operator-script and user-script
-# itself. It could have the vendor-script do that, but it seems better
-# to not require the image to contain a tool (mdata-get) to read those
-# keys when we have a perfectly good one inside cloud-init.
-BUILTIN_VENDOR_DATA = """\
-#cloud-boothook
-#!/bin/sh
-fname="%(per_boot_d)s/01_smartos_vendor_data.sh"
-mkdir -p "${fname%%/*}"
-cat > "$fname" <<"END_SCRIPT"
-#!/bin/sh
-##
-# This file is written as part of the default vendor data for SmartOS.
-# The SmartOS datasource writes the listed file from the listed metadata key
-# sdc:operator-script -> %(operator_script)s
-# user-script -> %(user_script)s
-#
-# You can view content with 'mdata-get <key>'
-#
-for script in "%(operator_script)s" "%(user_script)s"; do
- [ -x "$script" ] || continue
- echo "executing '$script'" 1>&2
- "$script"
-done
-END_SCRIPT
-chmod +x "$fname"
-"""
-
-
-# @datadictionary: this is the legacy path for placing files from
-# metadata per the SmartOS location. It is not preferred, but is kept
-# for legacy reasons.
-LEGACY_USER_D = "/var/db"
-
-
-class DataSourceSmartOS(sources.DataSource):
- _unset = "_unset"
- smartos_type = _unset
- md_client = _unset
-
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
-
- self.metadata = {}
- self.network_data = None
- self._network_config = None
-
- self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
-
- self._init()
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [client=%s]" % (root, self.md_client)
-
- def _init(self):
- if self.smartos_type == self._unset:
- self.smartos_type = get_smartos_environ()
- if self.smartos_type is None:
- self.md_client = None
-
- if self.md_client == self._unset:
- self.md_client = jmc_client_factory(
- smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
-
- def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
-
- When run in a zone, the host OS will look for /var/svc/provisioning
- to be renamed as /var/svc/provision_success. This should be done
- after meta-data is successfully retrieved and from this point
- the host considers the provision of the zone to be a success and
- keeps the zone running.
- '''
-
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
-
- def get_data(self):
- self._init()
-
- md = {}
- ud = ""
-
- if not self.smartos_type:
- LOG.debug("Not running on smartos")
- return False
-
- if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
- return False
-
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
- smartos_noun, strip = attribute
- md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
-
- for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
- md[ci_noun] = self.md_client.get_json(smartos_noun)
-
- # @datadictionary: This key may contain a program that is written
- # to a file in the filesystem of the guest on each boot and then
- # executed. It may be of any format that would be considered
- # executable in the guest instance.
- #
- # We write 'user-script' and 'operator-script' into the
- # instance/data directory. The default vendor-data then handles
- # executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
- u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
-
- # @datadictionary: This key has no defined format, but its value
- # is written to the file /var/db/mdata-user-data on each boot prior
- # to the phase that runs user-script. This file is not to be executed.
- # This allows a configuration file of some kind to be injected into
- # the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
- u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
- write_boot_content(u_data, u_data_f)
-
- # Handle the cloud-init regular meta
- if not md['local-hostname']:
- md['local-hostname'] = md['instance-id']
-
- ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
- }
-
- self.metadata = util.mergemanydict([md, self.metadata])
- self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
-
- self._set_provisioned()
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
-
- def get_config_obj(self):
- if self.smartos_type == SMARTOS_ENV_KVM:
- return BUILTIN_CLOUD_CONFIG
- return {}
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(self.network_data))
- return self._network_config
-
-
-class JoyentMetadataFetchException(Exception):
- pass
-
-
-class JoyentMetadataClient(object):
- """
- A client implementing v2 of the Joyent Metadata Protocol Specification.
-
- The full specification can be found at
- http://eng.joyent.com/mdata/protocol.html
- """
- line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
-
- def __init__(self, smartos_type=None, fp=None):
- if smartos_type is None:
- smartos_type = get_smartos_environ()
- self.smartos_type = smartos_type
- self.fp = fp
-
- def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
-
- def _get_value_from_frame(self, expected_request_id, frame):
- frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
- raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
- raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
- raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
- return None
- value = util.b64d(frame_data['payload'])
- LOG.debug('Value "%s" found.', value)
- return value
-
- def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
- if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
- LOG.debug('Writing "%s" to metadata transport.', msg)
-
- need_close = False
- if not self.fp:
- self.open_transport()
- need_close = True
-
- self.fp.write(msg.encode('ascii'))
- self.fp.flush()
-
- response = bytearray()
- response.extend(self.fp.read(1))
- while response[-1:] != b'\n':
- response.extend(self.fp.read(1))
-
- if need_close:
- self.close_transport()
-
- response = response.rstrip().decode('ascii')
- LOG.debug('Read "%s" from metadata transport.', response)
-
- if 'SUCCESS' not in response:
- return None
-
- value = self._get_value_from_frame(request_id, response)
- return value
-
- def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
- if result is None:
- return default
- if result and strip:
- result = result.strip()
- return result
-
- def get_json(self, key, default=None):
- result = self.get(key, default=default)
- if result is None:
- return default
- return json.loads(result)
-
- def list(self):
- result = self.request(rtype='KEYS')
- if result:
- result = result.split('\n')
- return result
-
- def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
-
- def delete(self, key):
- return self.request(rtype='DELETE', param=key)
-
- def close_transport(self):
- if self.fp:
- self.fp.close()
- self.fp = None
-
- def __enter__(self):
- if self.fp:
- return self
- self.open_transport()
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.close_transport()
- return
-
- def open_transport(self):
- raise NotImplementedError
-
-
-class JoyentMetadataSocketClient(JoyentMetadataClient):
- def __init__(self, socketpath):
- self.socketpath = socketpath
-
- def open_transport(self):
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
-
- def exists(self):
- return os.path.exists(self.socketpath)
-
- def __repr__(self):
- return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
-
-
-class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=None):
- super(JoyentMetadataSerialClient, self).__init__(smartos_type)
- self.device = device
- self.timeout = timeout
-
- def exists(self):
- return os.path.exists(self.device)
-
- def open_transport(self):
- ser = serial.Serial(self.device, timeout=self.timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % self.device)
- self.fp = ser
-
- def __repr__(self):
- return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
-
-
-class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
- """V1 of the protocol was not safe for all values.
- Thus, we allowed the user to pass values in as base64 encoded.
- Users may still reasonably expect to be able to send base64 data
- and have it transparently decoded. So even though the V2 format is
- now used, and is safe (using base64 itself), we keep legacy support.
-
- The way for a user to do this was:
- a.) specify 'base64_keys' key whose value is a comma delimited
- list of keys that were base64 encoded.
- b.) base64_all: string interpreted as a boolean that indicates
- if all keys are base64 encoded.
- c.) set a key named b64-<keyname> with a boolean indicating that
- <keyname> is base64 encoded."""
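- # Illustrative metadata a user might set (hypothetical keys):
- #   base64_keys = "foo,bar"  -> decode the values of 'foo' and 'bar'
- #   base64_all = "true"      -> decode all keys not in no_base64_decode
- #   b64-baz = "true"         -> decode the value of 'baz'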
-
- def __init__(self, device, timeout=10, smartos_type=None):
- s = super(JoyentMetadataLegacySerialClient, self)
- s.__init__(device, timeout, smartos_type)
- self.base64_keys = None
- self.base64_all = None
-
- def _init_base64_keys(self, reset=False):
- if reset:
- self.base64_keys = None
- self.base64_all = None
-
- keys = None
- if self.base64_all is None:
- keys = self.list()
- if 'base64_all' in keys:
- self.base64_all = util.is_true(self._get("base64_all"))
- else:
- self.base64_all = False
-
- if self.base64_all:
- # short circuit if base64_all is true
- return
-
- if self.base64_keys is None:
- if keys is None:
- keys = self.list()
- b64_keys = set()
- if 'base64_keys' in keys:
- b64_keys = set(self._get("base64_keys").split(","))
-
- # now add any b64-<keyname> that has a true value
- for key in [k[3:] for k in keys if k.startswith("b64-")]:
- if util.is_true(self._get(key)):
- b64_keys.add(key)
- else:
- if key in b64_keys:
- b64_keys.remove(key)
-
- self.base64_keys = b64_keys
-
- def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
-
- def is_b64_encoded(self, key, reset=False):
- if key in NO_BASE64_DECODE:
- return False
-
- self._init_base64_keys(reset=reset)
- if self.base64_all:
- return True
-
- return key in self.base64_keys
-
- def get(self, key, default=None, strip=False):
- mdefault = object()
- val = self._get(key, strip=False, default=mdefault)
- if val is mdefault:
- return default
-
- if self.is_b64_encoded(key):
- try:
- val = base64.b64decode(val.encode()).decode()
- # Bogus input produces different errors in Python 2 and 3
- except (TypeError, binascii.Error):
- LOG.warn("Failed base64 decoding key '%s': %s", key, val)
-
- if strip:
- val = val.strip()
-
- return val
-
-
-def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
-
- if smartos_type is None:
- smartos_type = get_smartos_environ(uname_version)
-
- if smartos_type is None:
- return None
- elif smartos_type == SMARTOS_ENV_KVM:
- return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
- elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
-
- raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
-
-
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
- """
- Write the content to content_f. Under the following rules:
- 1. If no content, remove the file
- 2. Write the content
- 3. If executable and no file magic, add it
- 4. If there is a link, create it
-
- @param content: what to write
- @param content_f: the file name
- @param link: if defined, location to create a symlink to
- @param shebang: if no file magic, set shebang
- @param mode: file mode
-
- Because of the way that cloud-init executes scripts (no shell),
- a script will fail to execute if it does not have a shebang
- ("magic") line. If shebang=True, the script is checked for one
- and, when missing, given the SmartOS default of /bin/bash.
- """
-
- if not content and os.path.exists(content_f):
- os.unlink(content_f)
- if link and os.path.islink(link):
- os.unlink(link)
- if not content:
- return
-
- util.write_file(content_f, content, mode=mode)
-
- if shebang and not content.startswith("#!"):
- try:
- cmd = ["file", "--brief", "--mime-type", content_f]
- (f_type, _err) = util.subp(cmd)
- LOG.debug("script %s mime type is %s", content_f, f_type)
- if f_type.strip() == "text/plain":
- new_content = "\n".join(["#!/bin/bash", content])
- util.write_file(content_f, new_content, mode=mode)
- LOG.debug("added shebang to file %s", content_f)
-
- except Exception as e:
- util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
-
- if link:
- try:
- if os.path.islink(link):
- os.unlink(link)
- if content and os.path.exists(content_f):
- util.ensure_dir(os.path.dirname(link))
- os.symlink(content_f, link)
- except IOError as e:
- util.logexc(LOG, "failed establishing content link: %s", e)
-
-
-def get_smartos_environ(uname_version=None, product_name=None,
- uname_arch=None):
- uname = os.uname()
- if uname_arch is None:
- uname_arch = uname[4]
-
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- return None
-
- # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
- # report 'BrandZ virtual linux' as the kernel version
- if uname_version is None:
- uname_version = uname[3]
- if uname_version.lower() == 'brandz virtual linux':
- return SMARTOS_ENV_LX_BRAND
-
- if product_name is None:
- system_type = util.read_dmi_data("system-product-name")
- else:
- system_type = product_name
-
- if system_type and 'smartdc' in system_type.lower():
- return SMARTOS_ENV_KVM
-
- return None
-
-
-# Convert SmartOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None):
- """Return a dictionary of network_config by parsing provided
- SmartOS sdc:nics configuration data
-
- sdc:nics data is a dictionary of properties of a nic and the ip
- configuration desired. Additional nic dictionaries are appended
- to the list.
-
- Converting the format is straightforward though it does include
- duplicate information as well as data which appears to be relevant
- to the hostOS rather than the guest.
-
- For each entry in the nics list returned from querying sdc:nics, we
- create a type: physical entry, and extract the interface properties:
- 'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining
- keys are related to ip configuration. For each ip in the 'ips' list
- we create a subnet entry under 'subnets', pairing the ip with the
- corresponding entry in the 'gateways' list.
- """
-
- valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
- ],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'gateway',
- 'metric',
- 'netmask',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
- ],
- }
-
- config = []
- for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
-
- subnets = []
- for ip, gw in zip(nic['ips'], nic['gateways']):
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- 'gateway': gw,
- })
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
-
-
-if __name__ == "__main__":
- import sys
- jmc = jmc_client_factory()
- if jmc is None:
- print("Do not appear to be on smartos.")
- sys.exit(1)
- if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()))
- else:
- keys = sys.argv[1:]
-
- data = {}
- for key in keys:
- if key in SMARTOS_ATTRIB_JSON:
- keyname = SMARTOS_ATTRIB_JSON[key]
- data[key] = jmc.get_json(keyname)
- else:
- if key in SMARTOS_ATTRIB_MAP:
- keyname, strip = SMARTOS_ATTRIB_MAP[key]
- else:
- keyname, strip = (key, False)
- data[key] = jmc.get(keyname, strip=strip)
-
- print(json.dumps(data, indent=1))
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
deleted file mode 100644
index 87b8e524..00000000
--- a/cloudinit/sources/__init__.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import os
-
-import six
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import user_data as ud
-from cloudinit import util
-
-from cloudinit.filters import launch_index
-from cloudinit.reporting import events
-
-DSMODE_DISABLED = "disabled"
-DSMODE_LOCAL = "local"
-DSMODE_NETWORK = "net"
-DSMODE_PASS = "pass"
-
-VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
-
-DEP_FILESYSTEM = "FILESYSTEM"
-DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNotFoundException(Exception):
- pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
-
- dsmode = DSMODE_NETWORK
-
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
- self.sys_cfg = sys_cfg
- self.distro = distro
- self.paths = paths
- self.userdata = None
- self.metadata = None
- self.userdata_raw = None
- self.vendordata = None
- self.vendordata_raw = None
-
- # find the datasource config name.
- # remove 'DataSource' from classname on front, and remove 'Net' on end.
- # Both Foo and FooNet sources expect config in cfg['datasource']['Foo']
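- # (e.g. DataSourceOpenNebulaNet -> cfg['datasource']['OpenNebula'])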
- name = type_utils.obj_name(self)
- if name.startswith(DS_PREFIX):
- name = name[len(DS_PREFIX):]
- if name.endswith('Net'):
- name = name[0:-3]
-
- self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource", name), {})
- if not self.ds_cfg:
- self.ds_cfg = {}
-
- if not ud_proc:
- self.ud_proc = ud.UserDataProcessor(self.paths)
- else:
- self.ud_proc = ud_proc
-
- def __str__(self):
- return type_utils.obj_name(self)
-
- def get_userdata(self, apply_filter=False):
- if self.userdata is None:
- self.userdata = self.ud_proc.process(self.get_userdata_raw())
- if apply_filter:
- return self._filter_xdata(self.userdata)
- return self.userdata
-
- def get_vendordata(self):
- if self.vendordata is None:
- self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
- return self.vendordata
-
- @property
- def launch_index(self):
- if not self.metadata:
- return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
- return None
-
- def _filter_xdata(self, processed_ud):
- filters = [
- launch_index.Filter(util.safe_int(self.launch_index)),
- ]
- new_ud = processed_ud
- for f in filters:
- new_ud = f.apply(new_ud)
- return new_ud
-
- @property
- def is_disconnected(self):
- return False
-
- def get_userdata_raw(self):
- return self.userdata_raw
-
- def get_vendordata_raw(self):
- return self.vendordata_raw
-
- # the data sources' config_obj is a cloud-config formatted
- # object that came to it by ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return {}
-
- def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
-
- def _remap_device(self, short_name):
- # LP: #611137
- # the metadata service may believe that devices are named 'sda'
- # when the kernel named them 'vda' or 'xvda'
- # we want to return the correct value for what will actually
- # exist in this instance
- mappings = {"sd": ("vd", "xvd", "vtb")}
- for (nfrom, tlist) in mappings.items():
- if not short_name.startswith(nfrom):
- continue
- for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
- if os.path.exists(cand):
- return cand
- return None
-
- def device_name_to_device(self, _name):
- # translate a 'name' to a device
- # the primary function at this point is on ec2
- # to consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- return None
-
- def get_locale(self):
- return 'en_US.UTF-8'
-
- @property
- def availability_zone(self):
- return self.metadata.get('availability-zone',
- self.metadata.get('availability_zone'))
-
- @property
- def region(self):
- return self.metadata.get('region')
-
- def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
- # Return a magic "not really an instance id" string
- return "iid-datasource"
- return str(self.metadata['instance-id'])
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- defdomain = "localdomain"
- defhost = "localhost"
- domain = defdomain
-
- if not self.metadata or 'local-hostname' not in self.metadata:
- # this is somewhat questionable: the datasource was asked for
- # a hostname and didn't have one. raising an error might be more
- # appropriate, but instead we fall back to looking up the
- # system's existing hostname.
- toks = []
- hostname = util.get_hostname()
- fqdn = util.get_fqdn_from_hosts(hostname)
- if fqdn and fqdn.find(".") > 0:
- toks = str(fqdn).split(".")
- elif hostname:
- toks = [hostname, defdomain]
- else:
- toks = [defhost, defdomain]
- else:
- # if there is an ipv4 address in 'local-hostname', then
- # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
- toks = []
- if resolve_ip:
- toks = util.gethostbyaddr(lhost)
-
- if toks:
- toks = str(toks).split('.')
- else:
- toks = ["ip-%s" % lhost.replace(".", "-")]
- else:
- toks = lhost.split(".")
-
- if len(toks) > 1:
- hostname = toks[0]
- domain = '.'.join(toks[1:])
- else:
- hostname = toks[0]
-
- if fqdn:
- return "%s.%s" % (hostname, domain)
- else:
- return hostname
-
- def get_package_mirror_info(self):
- return self.distro.get_package_mirror_info(data_source=self)
-
- def check_instance_id(self, sys_cfg):
- # quickly check (local only) whether self.instance_id is still valid
- return False
-
- @staticmethod
- def _determine_dsmode(candidates, default=None, valid=None):
- # return the first candidate that is non None, warn if not valid
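- # e.g. _determine_dsmode([None, 'local']) -> 'local'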
- if default is None:
- default = DSMODE_NETWORK
-
- if valid is None:
- valid = VALID_DSMODES
-
- for candidate in candidates:
- if candidate is None:
- continue
- if candidate in valid:
- return candidate
- else:
- LOG.warn("invalid dsmode '%s', using default=%s",
- candidate, default)
- return default
-
- return default
-
- @property
- def network_config(self):
- return None
-
- @property
- def first_instance_boot(self):
- return
-
-
-def normalize_pubkey_data(pubkey_data):
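- # Accepts (illustrative) any of the shapes seen from metadata services:
- #   'ssh-rsa AAAA... user'         -> split into one key per line
- #   ['ssh-rsa AAAA...']            -> returned as a list unchanged
- #   {'name': ['ssh-rsa AAAA...']}  -> values flattened, empties dropped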
- keys = []
-
- if not pubkey_data:
- return keys
-
- if isinstance(pubkey_data, six.string_types):
- return str(pubkey_data).splitlines()
-
- if isinstance(pubkey_data, (list, set)):
- return list(pubkey_data)
-
- if isinstance(pubkey_data, (dict)):
- for (_keyname, klist) in pubkey_data.items():
- # lp:506332 uec metadata service responds with
- # data that makes boto populate a string for 'klist' rather
- # than a list.
- if isinstance(klist, six.string_types):
- klist = [klist]
- if isinstance(klist, (list, set)):
- for pkey in klist:
- # There is an empty string at
- # the end of the keylist, trim it
- if pkey:
- keys.append(pkey)
-
- return keys
-
-
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
- ds_list = list_sources(cfg_list, ds_deps, pkg_list)
- ds_names = [type_utils.obj_name(f) for f in ds_list]
- mode = "network" if DEP_NETWORK in ds_deps else "local"
- LOG.debug("Searching for %s data source in: %s", mode, ds_names)
-
- for name, cls in zip(ds_names, ds_list):
- myrep = events.ReportEventStack(
- name="search-%s" % name.replace("DataSource", ""),
- description="searching for %s data from %s" % (mode, name),
- message="no %s data found from %s" % (mode, name),
- parent=reporter)
- try:
- with myrep:
- LOG.debug("Seeing if we can get any data from %s", cls)
- s = cls(sys_cfg, distro, paths)
- if s.get_data():
- myrep.message = "found %s data from %s" % (mode, name)
- return (s, type_utils.obj_name(cls))
- except Exception:
- util.logexc(LOG, "Getting data from %s failed", cls)
-
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
- raise DataSourceNotFoundException(msg)
-
-
-# Return a list of classes that have the same depends as 'depends'
-# iterate through cfg_list, loading "DataSource*" modules
-# and calling their "get_datasource_list".
-# Return an ordered list of classes that match (if any)
-def list_sources(cfg_list, depends, pkg_list):
- src_list = []
- LOG.debug(("Looking for for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
- for ds_name in cfg_list:
- if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
- for m_loc in m_locs:
- mod = importer.import_module(m_loc)
- lister = getattr(mod, "get_datasource_list")
- matches = lister(depends)
- if matches:
- src_list.extend(matches)
- break
- return src_list
-
-
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
-# quickly check (local only) whether the given instance_id is still
-# valid, by comparing it against the value read from DMI data.
- if not instance_id:
- return False
-
- dmi_value = util.read_dmi_data(field)
- if not dmi_value:
- return False
- return instance_id.lower() == dmi_value.lower()
-
-
-# 'depends' is a list of dependencies (DEP_FILESYSTEM)
-# ds_list is a list of 2-item tuples:
-# ds_list = [
-#   ( class, ( depends-that-this-class-needs ) ),
-# ]
-# It returns a list of 'class' that matched these deps exactly
-# It mainly is a helper function for DataSourceCollections
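-# e.g. a class registered with (DEP_FILESYSTEM, DEP_NETWORK) is returned
-# only for exactly that dependency set, not for (DEP_FILESYSTEM,) alone.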
-def list_from_depends(depends, ds_list):
- ret_list = []
- depset = set(depends)
- for (cls, deps) in ds_list:
- if depset == set(deps):
- ret_list.append(cls)
- return ret_list
diff --git a/cloudinit/sources/helpers/__init__.py b/cloudinit/sources/helpers/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
deleted file mode 100644
index 63ccf10e..00000000
--- a/cloudinit/sources/helpers/azure.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import logging
-import os
-import re
-import socket
-import struct
-import tempfile
-import time
-
-from contextlib import contextmanager
-from xml.etree import ElementTree
-
-from cloudinit import util
-
-
-LOG = logging.getLogger(__name__)
-
-
-@contextmanager
-def cd(newdir):
- prevdir = os.getcwd()
- os.chdir(os.path.expanduser(newdir))
- try:
- yield
- finally:
- os.chdir(prevdir)
-
-
-class AzureEndpointHttpClient(object):
-
- headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
- }
-
- def __init__(self, certificate):
- self.extra_secure_headers = {
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": certificate,
- }
-
- def get(self, url, secure=False):
- headers = self.headers
- if secure:
- headers = self.headers.copy()
- headers.update(self.extra_secure_headers)
- return util.read_file_or_url(url, headers=headers)
-
- def post(self, url, data=None, extra_headers=None):
- headers = self.headers
- if extra_headers is not None:
- headers = self.headers.copy()
- headers.update(extra_headers)
- return util.read_file_or_url(url, data=data, headers=headers)
-
-
-class GoalState(object):
-
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
-
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
-
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
-
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
-
- @property
- def instance_id(self):
- return self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
-
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
-
-
-class OpenSSLManager(object):
-
- certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
- }
-
- def __init__(self):
- self.tmpdir = tempfile.mkdtemp()
- self.certificate = None
- self.generate_certificate()
-
- def clean_up(self):
- util.del_dir(self.tmpdir)
-
- def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
- if self.certificate is not None:
- LOG.debug('Certificate already generated.')
- return
- with cd(self.tmpdir):
- util.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
- if "CERTIFICATE" not in line:
- certificate += line.rstrip()
- self.certificate = certificate
- LOG.debug('New certificate generated.')
-
- def parse_certificates(self, certificates_xml):
- tag = ElementTree.fromstring(certificates_xml).find(
- './/Data')
- certificates_content = tag.text
- lines = [
- b'MIME-Version: 1.0',
- b'Content-Disposition: attachment; filename="Certificates.p7m"',
- b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
- ]
- with cd(self.tmpdir):
- with open('Certificates.p7m', 'wb') as f:
- f.write(b'\n'.join(lines))
- out, _ = util.subp(
- 'openssl cms -decrypt -in Certificates.p7m -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True)
- private_keys, certificates = [], []
- current = []
- for line in out.splitlines():
- current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
- private_keys.append('\n'.join(current))
- current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificates.append('\n'.join(current))
- current = []
- keys = []
- for certificate in certificates:
- with cd(self.tmpdir):
- public_key, _ = util.subp(
- 'openssl x509 -noout -pubkey |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
- data=certificate,
- shell=True)
- keys.append(public_key)
- return keys
-
-
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
-
- def __init__(self):
- LOG.debug('WALinuxAgentShim instantiated...')
- self.endpoint = self.find_endpoint()
- self.openssl_manager = None
- self.values = {}
-
- def clean_up(self):
- if self.openssl_manager is not None:
- self.openssl_manager.clean_up()
-
- @staticmethod
- def get_ip_from_lease_value(lease_value):
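- # A worked example: dhclient may record option unknown-245 as
- # 'a8:3f:81:10'; the hex pairs pack to 0xa83f8110, which inet_ntoa
- # renders as the well-known endpoint 168.63.129.16.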
- unescaped_value = lease_value.replace('\\', '')
- if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
- if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
- hex_string += hex_pair
- packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
- else:
- packed_bytes = unescaped_value.encode('utf-8')
- return socket.inet_ntoa(packed_bytes)
-
- @staticmethod
- def find_endpoint():
- LOG.debug('Finding Azure endpoint...')
- content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
- value = None
- for line in content.splitlines():
- if 'unknown-245' in line:
- value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
- if value is None:
- raise ValueError('No endpoint found in DHCP config.')
- endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
- LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
- return endpoint_ip_address
-
- def register_with_azure_and_fetch_data(self):
- self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
- LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- raise
- else:
- break
- attempts += 1
- LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- public_keys = []
- if goal_state.certificates_xml is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
- public_keys = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
- data = {
- 'public-keys': public_keys,
- }
- self._report_ready(goal_state, http_client)
- return data
-
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- LOG.info('Reported ready to Azure fabric.')
-
-
-def get_metadata_from_fabric():
- shim = WALinuxAgentShim()
- try:
- return shim.register_with_azure_and_fetch_data()
- finally:
- shim.clean_up()
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
deleted file mode 100644
index 2e7a1d47..00000000
--- a/cloudinit/sources/helpers/openstack.py
+++ /dev/null
@@ -1,648 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import base64
-import copy
-import functools
-import os
-
-import six
-
-from cloudinit import ec2_utils
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-# For reference: http://tinyurl.com/laora4c
-
-LOG = logging.getLogger(__name__)
-
-FILES_V1 = {
- # Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
-}
-KEY_COPIES = (
- # Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
-)
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
-# keep this in chronological order. new supported versions go at the end.
-OS_VERSIONS = (
- OS_FOLSOM,
- OS_GRIZZLY,
- OS_HAVANA,
- OS_LIBERTY,
-)
-
-
-class NonReadable(IOError):
- pass
-
-
-class BrokenMetadata(IOError):
- pass
-
-
-class SourceMixin(object):
- def _ec2_name_to_device(self, name):
- if not self.ec2_metadata:
- return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
- for (ent_name, device) in bdm.items():
- if name == ent_name:
- return device
- return None
-
- def get_public_ssh_keys(self):
- name = "public_keys"
- if self.version == 1:
- name = "public-keys"
- return sources.normalize_pubkey_data(self.metadata.get(name))
-
- def _os_name_to_device(self, name):
- device = None
- try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
- dev_entries = util.find_devs_with(criteria)
- if dev_entries:
- device = dev_entries[0]
- except util.ProcessExecutionError:
- pass
- return device
-
- def _validate_device_name(self, device):
- if not device:
- return None
- if not device.startswith("/"):
- device = "/dev/%s" % device
- if os.path.exists(device):
- return device
- # Durn, try adjusting the mapping
- remapped = self._remap_device(os.path.basename(device))
- if remapped:
- LOG.debug("Remapped device name %s => %s", device, remapped)
- return remapped
- return None
-
- def device_name_to_device(self, name):
- # Translate a 'name' to a 'physical' device
- if not name:
- return None
- # Try the ec2 mapping first
- names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
- device = None
- LOG.debug("Using ec2 style lookup to find device %s", names)
- for n in names:
- device = self._ec2_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Try the openstack way second
- if not device:
- LOG.debug("Using openstack style lookup to find device %s", names)
- for n in names:
- device = self._os_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Ok give up...
- if not device:
- return None
- else:
- LOG.debug("Mapped %s to device %s", name, device)
- return device
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
-
- def __init__(self, base_path):
- self.base_path = base_path
-
- @abc.abstractmethod
- def _path_join(self, base, *add_ons):
- pass
-
- @abc.abstractmethod
- def _path_read(self, path, decode=False):
- pass
-
- @abc.abstractmethod
- def _fetch_available_versions(self):
- pass
-
- @abc.abstractmethod
- def _read_ec2_metadata(self):
- pass
-
- def _find_working_version(self):
- try:
- versions_available = self._fetch_available_versions()
- except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
- versions_available = []
-
- # openstack.OS_VERSIONS is stored in chronological order, so
- # reverse it to check newest first.
- supported = list(reversed(OS_VERSIONS))
- selected_version = OS_LATEST
-
- for potential_version in supported:
- if potential_version not in versions_available:
- continue
- selected_version = potential_version
- break
-
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
- return selected_version
-
- def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
- path_pieces = path.split("/")
- valid_pieces = [p for p in path_pieces if len(p)]
- if not valid_pieces:
- raise BrokenMetadata("Item %s has no valid content path" % (item))
- path = self._path_join(self.base_path, "openstack", *path_pieces)
- return self._path_read(path, decode=decode)
-
- def read_v2(self):
- """Reads a version 2 formatted location.
-
- Return a dict with metadata, userdata, ec2-metadata, dsmode,
- network_config, files and version (2).
-
- If not a valid location, raise a NonReadable exception.
- """
-
- load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list) + six.string_types)
-
- def datafiles(version):
- files = {}
- files['metadata'] = (
- # File path to read
- self._path_join("openstack", version, 'meta_data.json'),
- # Is it required?
- True,
- # Translator function (applied after loading)
- util.load_json,
- )
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
- False,
- lambda x: x,
- )
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
- False,
- load_json_anytype,
- )
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
- False,
- load_json_anytype,
- )
- return files
-
- results = {
- 'userdata': '',
- 'version': 2,
- }
- data = datafiles(self._find_working_version())
- for (name, (path, required, translator)) in data.items():
- path = self._path_join(self.base_path, path)
- data = None
- found = False
- try:
- data = self._path_read(path)
- except IOError as e:
- if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
- else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
- else:
- found = True
- if required and not found:
- raise NonReadable("Missing mandatory path: %s" % path)
- if found and translator:
- try:
- data = translator(data)
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
- if found:
- results[name] = data
-
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
- try:
- metadata['random_seed'] = base64.b64decode(random_seed)
- except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
-
- # load any files that were provided
- files = {}
- metadata_files = metadata.get('files', [])
- for item in metadata_files:
- if 'path' not in item:
- continue
- path = item['path']
- try:
- files[path] = self._read_content_path(item)
- except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
- results['files'] = files
-
- # The 'network_config' item in metadata is a content pointer
- # to the network config that should be applied. It is just a
- # ubuntu/debian '/etc/network/interfaces' file.
- net_item = metadata.get("network_config", None)
- if net_item:
- try:
- content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
- except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
-
- # In openstack, a user can specify meta ('nova boot --meta=key=value')
- # and those will appear under metadata['meta'].
- # if they specify 'dsmode' they are indicating the mode that they
- # intend for this datasource to operate in.
- try:
- results['dsmode'] = metadata['meta']['dsmode']
- except KeyError:
- pass
-
- # Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
-
- # Perform some misc. metadata key renames...
- for (target_key, source_key, is_required) in KEY_COPIES:
- if is_required and source_key not in metadata:
- raise BrokenMetadata("No '%s' entry in metadata" % source_key)
- if source_key in metadata:
- metadata[target_key] = metadata.get(source_key)
- return results
-
-
-class ConfigDriveReader(BaseReader):
- def __init__(self, base_path):
- super(ConfigDriveReader, self).__init__(base_path)
- self._versions = None
-
- def _path_join(self, base, *add_ons):
- components = [base] + list(add_ons)
- return os.path.join(*components)
-
- def _path_read(self, path, decode=False):
- return util.load_file(path, decode=decode)
-
- def _fetch_available_versions(self):
- if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path, d))]
- self._versions = sorted(found)
- return self._versions
-
- def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
- if not os.path.exists(path):
- return {}
- else:
- try:
- return util.load_json(self._path_read(path))
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
-
- def read_v1(self):
- """Reads a version 1 formatted location.
-
- Return a dict with metadata, userdata, dsmode, files and version (1).
-
- If not a valid path, raise a NonReadable exception.
- """
-
- found = {}
- for name in FILES_V1.keys():
- path = self._path_join(self.base_path, name)
- if os.path.exists(path):
- found[name] = path
- if len(found) == 0:
- raise NonReadable("%s: no files found" % (self.base_path))
-
- md = {}
- for (name, (key, translator, default)) in FILES_V1.items():
- if name in found:
- path = found[name]
- try:
- contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
- try:
- md[key] = translator(contents)
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
- else:
- md[key] = copy.deepcopy(default)
-
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
-
- # keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
- if keydata:
- lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
-
- # config-drive-v1 has no way for openstack to provide the instance-id
- # so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
-
- results = {
- 'version': 1,
- 'metadata': md,
- }
-
- # allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
-
- # config-drive-v1 has no way of specifying user-data, so the user has
- # to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
-
- # this implementation does not support files other than
- # network/interfaces and authorized_keys...
- results['files'] = {}
-
- return results
-
-
-class MetadataReader(BaseReader):
- def __init__(self, base_url, ssl_details=None, timeout=5, retries=5):
- super(MetadataReader, self).__init__(base_url)
- self.ssl_details = ssl_details
- self.timeout = float(timeout)
- self.retries = int(retries)
- self._versions = None
-
- def _fetch_available_versions(self):
- # <baseurl>/openstack/ returns a newline separated list of versions
- if self._versions is not None:
- return self._versions
- found = []
- version_path = self._path_join(self.base_path, "openstack")
- content = self._path_read(version_path)
- for line in content.splitlines():
- line = line.strip()
- if not line:
- continue
- found.append(line)
- self._versions = found
- return self._versions
-
- def _path_read(self, path, decode=False):
-
- def should_retry_cb(_request_args, cause):
- try:
- code = int(cause.code)
- if code >= 400:
- return False
- except (TypeError, ValueError):
- # Older versions of requests didn't have a code.
- pass
- return True
-
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
- if decode:
- return response.contents.decode()
- else:
- return response.contents
-
- def _path_join(self, base, *add_ons):
- return url_helper.combine_url(base, *add_ons)
-
- def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
-
-
-# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
-def convert_net_json(network_json=None, known_macs=None):
-    """Return a dictionary of network_config by parsing the provided
-    OpenStack ConfigDrive NetworkData (network_data.json) format.
-
-    OpenStack network_data.json provides a three-element dictionary:
-      - "links" (links are network devices, physical or virtual)
-      - "networks" (networks are ip network configurations for one or more
-        links)
-      - "services" (non-ip services, like dns)
-
-    networks and links are combined via network items referencing specific
-    links via a 'link' field, which maps to a link's 'id' field.
-
-    To convert this format to network_config yaml, we first iterate over the
-    links and then walk the network list to determine if any of the networks
-    utilize the current link; if so, we generate a subnet entry for the
-    device.
-
-    We also need to map network_data.json fields to network_config fields.
-    For example, the network_data link 'id' field is equivalent to the
-    network_config 'name' field for devices. We apply more of this mapping
-    to the various link types that we encounter.
-
-    There are additional fields that are populated in the network_data.json
-    from OpenStack that are not relevant to network_config yaml, so we
-    enumerate a dictionary of valid keys for network_yaml and apply filtering
-    to drop these superfluous keys from the network_config yaml.
-    """
- if network_json is None:
- return None
-
- # dict of network_config key for filtering network_json
- valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
- ],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
- ],
- }
-
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
-
- config = []
- for link in links:
- subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
- # 'name' is not in openstack spec yet, but we will support it if it is
- # present. The 'id' in the spec is currently implemented as the host
- # nic's name, meaning something like 'tap-adfasdffd'. We do not want
- # to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
- if 'dhcp' in network['type']:
- t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
- subnet.update({
- 'type': t,
- })
- else:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
- cfg.update({
- 'type': 'physical',
- 'mac_address': link['ethernet_mac_address']})
- elif link['type'] in ['bond']:
- params = {}
- for k, v in link.items():
- if k == 'bond_links':
- continue
- elif k.startswith('bond'):
- params.update({k: v})
- cfg.update({
- 'bond_interfaces': copy.deepcopy(link['bond_links']),
- 'params': params,
- })
- elif link['type'] in ['vlan']:
- cfg.update({
- 'name': "%s.%s" % (link['vlan_link'],
- link['vlan_id']),
- 'vlan_link': link['vlan_link'],
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
-
- config.append(cfg)
-
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
-
- if need_names:
- if known_macs is None:
- known_macs = net.get_interfaces_by_mac()
-
- for d in need_names:
- mac = d.get('mac_address')
- if not mac:
- raise ValueError("No mac_address or name entry for %s" % d)
- if mac not in known_macs:
- raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
-
- for service in services:
- cfg = service
- cfg.update({'type': 'nameserver'})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
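-# A hedged illustration of the conversion above; the link id, MAC, device
-# name, and DNS address below are made-up sample values:
-#
-#   sample = {
-#       'links': [{'id': 'tap-abc123', 'type': 'ethernet',
-#                  'ethernet_mac_address': 'aa:bb:cc:dd:ee:ff'}],
-#       'networks': [{'link': 'tap-abc123', 'type': 'ipv4_dhcp'}],
-#       'services': [{'type': 'dns', 'address': '8.8.8.8'}],
-#   }
-#   convert_net_json(sample, known_macs={'aa:bb:cc:dd:ee:ff': 'eth0'})
-#   # -> {'version': 1, 'config': [
-#   #      {'type': 'physical', 'name': 'eth0',
-#   #       'mac_address': 'aa:bb:cc:dd:ee:ff',
-#   #       'subnets': [{'type': 'dhcp4'}]},
-#   #      {'type': 'nameserver', 'address': '8.8.8.8'}]}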
-
-def convert_vendordata_json(data, recurse=True):
- """data: a loaded json *object* (strings, arrays, dicts).
- return something suitable for cloudinit vendordata_raw.
-
- if data is:
- None: return None
- string: return string
- list: return data
- the list is then processed in UserDataProcessor
- dict: return convert_vendordata_json(data.get('cloud-init'))
- """
- if not data:
- return None
- if isinstance(data, six.string_types):
- return data
- if isinstance(data, list):
- return copy.deepcopy(data)
- if isinstance(data, dict):
- if recurse is True:
- return convert_vendordata_json(data.get('cloud-init'),
- recurse=False)
- raise ValueError("vendordata['cloud-init'] cannot be dict")
- raise ValueError("Unknown data type for vendordata: %s" % type(data))
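-# A quick sketch of the dispatch above (inputs are illustrative):
-#   convert_vendordata_json(None)                    -> None
-#   convert_vendordata_json("#cloud-config\n...")    -> the string, unchanged
-#   convert_vendordata_json(["part1", "part2"])      -> a copy of the list
-#   convert_vendordata_json({'cloud-init': 'data'})  -> 'data'
-#   convert_vendordata_json({'cloud-init': {'k': 'v'}}) raises ValueError,
-#   since a nested dict is rejected on recursion.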
diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/vmware/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/vmware/imc/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
deleted file mode 100644
index fb53ec1d..00000000
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class BootProtoEnum(object):
- """Specifies the NIC Boot Settings."""
-
- DHCP = 'dhcp'
- STATIC = 'static'
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
deleted file mode 100644
index d645c497..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .nic import Nic
-
-
-class Config(object):
- """
-    Stores the contents specified in the customization
-    specification file.
- """
-
- DNS = 'DNS|NAMESERVER|'
- SUFFIX = 'DNS|SUFFIX|'
- PASS = 'PASSWORD|-PASS'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- HOSTNAME = 'NETWORK|HOSTNAME'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
-
- def __init__(self, configFile):
- self._configFile = configFile
-
- @property
- def host_name(self):
- """Return the hostname."""
- return self._configFile.get(Config.HOSTNAME, None)
-
- @property
- def domain_name(self):
- """Return the domain name."""
- return self._configFile.get(Config.DOMAINNAME, None)
-
- @property
- def timezone(self):
- """Return the timezone."""
- return self._configFile.get(Config.TIMEZONE, None)
-
- @property
- def utc(self):
- """Retrieves whether to set time to UTC or Local."""
- return self._configFile.get(Config.UTC, None)
-
- @property
- def admin_password(self):
- """Return the root password to be set."""
- return self._configFile.get(Config.PASS, None)
-
- @property
- def name_servers(self):
- """Return the list of DNS servers."""
- res = []
- cnt = self._configFile.get_count_with_prefix(Config.DNS)
- for i in range(1, cnt + 1):
- key = Config.DNS + str(i)
- res.append(self._configFile[key])
-
- return res
-
- @property
- def dns_suffixes(self):
- """Return the list of DNS Suffixes."""
- res = []
- cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
- for i in range(1, cnt + 1):
- key = Config.SUFFIX + str(i)
- res.append(self._configFile[key])
-
- return res
-
- @property
- def nics(self):
- """Return the list of associated NICs."""
- res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
- res.append(Nic(nic, self._configFile))
-
- return res
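-
-# A hedged usage sketch; the ConfigFile instance and its key/values are
-# hypothetical:
-#   cf = ConfigFile('cust.cfg')   # with 'NETWORK|HOSTNAME' = 'vm01'
-#                                 # and 'NIC-CONFIG|NICS' = 'NIC1,NIC2'
-#   conf = Config(cf)
-#   conf.host_name  -> 'vm01'
-#   conf.nics       -> [Nic('NIC1', cf), Nic('NIC2', cf)]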
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
deleted file mode 100644
index bb9fb7dc..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
-from .config_source import ConfigSource
-
-logger = logging.getLogger(__name__)
-
-
-class ConfigFile(ConfigSource, dict):
- """ConfigFile module to load the content from a specified source."""
-
-    def __init__(self, filename):
-        self._loadConfigFile(filename)
-
- def _insertKey(self, key, val):
- """
- Inserts a Key Value pair.
-
- Keyword arguments:
- key -- The key to insert
- val -- The value to insert for the key
-
- """
- key = key.strip()
- val = val.strip()
-
- if key.startswith('-') or '|-' in key:
- canLog = False
- else:
- canLog = True
-
- # "sensitive" settings shall not be logged
- if canLog:
- logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
- else:
- logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
-
- self[key] = val
-
- def _loadConfigFile(self, filename):
- """
- Parses properties from the specified config file.
-
- Any previously available properties will be removed.
-        Sensitive data will not be logged if the key starts
-        with '-'.
-
-        Keyword arguments:
-        filename -- The full path to the config file.
- """
- logger.info('Parsing the config file %s.' % filename)
-
- config = configparser.ConfigParser()
- config.optionxform = str
- config.read(filename)
-
- self.clear()
-
- for category in config.sections():
- logger.debug("FOUND CATEGORY = '%s'" % category)
-
- for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
-
- def should_keep_current_value(self, key):
- """
- Determines whether a value for a property must be kept.
-
-        If the property is missing, it is treated as if it should not be
-        changed by the engine.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "empty" value which is used to indicate
- # "removal"
- return key not in self
-
- def should_remove_current_value(self, key):
- """
- Determines whether a value for the property must be removed.
-
-        If the value for the specified key is empty, it is treated as if it
-        should be removed by the engine.
-
- Return true if the value can be removed, false otherwise.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "missing" value which is used to indicate
- # "keeping unchanged"
- if key in self:
- return not bool(self[key])
- else:
- return False
-
- def get_count_with_prefix(self, prefix):
- """
- Return the total count of keys that start with the specified prefix.
-
- Keyword arguments:
- prefix -- prefix of the key
- """
- return len([key for key in self if key.startswith(prefix)])
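-
-# A hedged sketch of the flattening above; the file contents are made up.
-# For an INI-style file containing
-#     [NETWORK]
-#     HOSTNAME = myhost
-# each (section, key) pair becomes a 'SECTION|KEY' entry, so:
-#     cf = ConfigFile('cust.cfg')
-#     cf['NETWORK|HOSTNAME']                    -> 'myhost'
-#     cf.get_count_with_prefix('NETWORK|')      -> 1
-#     cf.should_keep_current_value('NETWORK|X') -> True (key is absent)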
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
deleted file mode 100644
index b28830f5..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .config_source import ConfigSource
-
-
-class ConfigNamespace(ConfigSource):
- """Specifies the Config Namespace."""
- pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
deleted file mode 100644
index 511cc918..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import os
-import re
-
-from cloudinit import util
-
-logger = logging.getLogger(__name__)
-
-
-class NicConfigurator(object):
- def __init__(self, nics):
- """
- Initialize the Nic Configurator
- @param nics (list) an array of nics to configure
- """
- self.nics = nics
- self.mac2Name = {}
- self.ipv4PrimaryGateway = None
- self.ipv6PrimaryGateway = None
- self.find_devices()
- self._primaryNic = self.get_primary_nic()
-
- def get_primary_nic(self):
- """
- Retrieve the primary nic if it exists
- @return (NicBase): the primary nic if exists, None otherwise
- """
- primary_nics = [nic for nic in self.nics if nic.primary]
- if not primary_nics:
- return None
- elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
- else:
- return primary_nics[0]
-
- def find_devices(self):
- """
-        Create the mac2Name dictionary.
-        The MAC addresses used as keys are lower-cased.
- """
- cmd = ['ip', 'addr', 'show']
- (output, err) = util.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
-
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
- for section in sections:
- match = re.search(macPat, section)
- if not match: # Only keep info about nics
- continue
- mac = match.group(1).lower()
- name = section.split(':', 1)[0]
- self.mac2Name[mac] = name
-
- def gen_one_nic(self, nic):
- """
- Return the lines needed to configure a nic
- @return (str list): the string list to configure the nic
- @param nic (NicBase): the nic to configure
- """
- lines = []
- name = self.mac2Name.get(nic.mac.lower())
- if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
-
- if nic.onboot:
- lines.append('auto %s' % name)
-
- # Customize IPv4
- lines.extend(self.gen_ipv4(name, nic))
-
- # Customize IPv6
- lines.extend(self.gen_ipv6(name, nic))
-
- lines.append('')
-
- return lines
-
- def gen_ipv4(self, name, nic):
- """
- Return the lines needed to configure the IPv4 setting of a nic
- @return (str list): the string list to configure the gateways
- @param name (str): name of the nic
- @param nic (NicBase): the nic to configure
- """
- lines = []
-
- bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
- lines.append('iface %s inet %s' % (name, bootproto))
-
- if bootproto != 'static':
- return lines
-
- # Static Ipv4
- v4 = nic.staticIpv4
- if v4.ip:
- lines.append(' address %s' % v4.ip)
- if v4.netmask:
- lines.append(' netmask %s' % v4.netmask)
-
- # Add the primary gateway
- if nic.primary and v4.gateways:
- self.ipv4PrimaryGateway = v4.gateways[0]
- lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway)
- return lines
-
- # Add routes if there is no primary nic
- if not self._primaryNic:
- lines.extend(self.gen_ipv4_route(nic, v4.gateways))
-
- return lines
-
- def gen_ipv4_route(self, nic, gateways):
- """
- Return the lines needed to configure additional Ipv4 route
- @return (str list): the string list to configure the gateways
- @param nic (NicBase): the nic to configure
- @param gateways (str list): the list of gateways
- """
- lines = []
-
- for gateway in gateways:
- lines.append(' up route add default gw %s metric 10000' %
- gateway)
-
- return lines
-
- def gen_ipv6(self, name, nic):
- """
- Return the lines needed to configure the gateways for a nic
- @return (str list): the string list to configure the gateways
- @param name (str): name of the nic
- @param nic (NicBase): the nic to configure
- """
- lines = []
-
- if not nic.staticIpv6:
- return lines
-
- # Static Ipv6
- addrs = nic.staticIpv6
- lines.append('iface %s inet6 static' % name)
- lines.append(' address %s' % addrs[0].ip)
- lines.append(' netmask %s' % addrs[0].netmask)
-
- for addr in addrs[1:]:
- lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
- addr.netmask))
- # Add the primary gateway
- if nic.primary:
- for addr in addrs:
- if addr.gateway:
- self.ipv6PrimaryGateway = addr.gateway
- lines.append(' gateway %s' % self.ipv6PrimaryGateway)
- return lines
-
- # Add routes if there is no primary nic
- if not self._primaryNic:
- lines.extend(self._genIpv6Route(name, nic, addrs))
-
- return lines
-
- def _genIpv6Route(self, name, nic, addrs):
- lines = []
-
- for addr in addrs:
- lines.append(' up route -A inet6 add default gw '
- '%s metric 10000' % addr.gateway)
-
- return lines
-
- def generate(self):
-        """Return the lines that are needed to configure the nics."""
- lines = []
- lines.append('iface lo inet loopback')
- lines.append('auto lo')
- lines.append('')
-
- for nic in self.nics:
- lines.extend(self.gen_one_nic(nic))
-
- return lines
-
- def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
-
-        # Ignore return code 1 (no dhclient process found to kill).
-        util.subp(["pkill", "dhclient"], rcs=[0, 1])
-        # The glob must be expanded by a shell; a plain argv list would pass
-        # the '*' through to rm literally.
-        util.subp("rm -f /var/lib/dhcp/*", shell=True)
-
- def if_down_up(self):
- names = []
- for nic in self.nics:
- name = self.mac2Name.get(nic.mac.lower())
- names.append(name)
-
- for name in names:
-            logger.info('Bringing down interface %s' % name)
- util.subp(["ifdown", "%s" % name])
-
- self.clear_dhcp()
-
- for name in names:
-            logger.info('Bringing up interface %s' % name)
- util.subp(["ifup", "%s" % name])
-
- def configure(self):
- """
-        Configure /etc/network/interfaces.
-        Make a backup of the original.
- """
- containingDir = '/etc/network'
-
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
-
- if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
- os.rename(interfaceFile, originalFile)
-
- lines = self.generate()
- with open(interfaceFile, 'w') as fp:
- for line in lines:
- fp.write('%s\n' % line)
-
- self.if_down_up()
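-
-# A hedged sketch of what configure() writes for a single onboot DHCP nic
-# whose MAC resolves to 'eth0' (the device name is illustrative):
-#
-#   iface lo inet loopback
-#   auto lo
-#
-#   auto eth0
-#   iface eth0 inet dhcp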
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
deleted file mode 100644
index 28ef306a..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ConfigSource(object):
- """Specifies a source for the Config Content."""
- pass
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
deleted file mode 100644
index d1546852..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class GuestCustErrorEnum(object):
- """Specifies different errors of Guest Customization engine"""
-
- GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
deleted file mode 100644
index ce90c898..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class GuestCustEventEnum(object):
- """Specifies different types of Guest Customization Events"""
-
- GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
- GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101
- GUESTCUST_EVENT_ENABLE_NICS = 103
- GUESTCUST_EVENT_QUERY_NICS = 104
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
deleted file mode 100644
index 422a096d..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class GuestCustStateEnum(object):
- """Specifies different states of Guest Customization engine"""
-
- GUESTCUST_STATE_RUNNING = 4
- GUESTCUST_STATE_DONE = 5
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
deleted file mode 100644
index c07c5949..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import os
-import time
-
-from cloudinit import util
-
-from .guestcust_event import GuestCustEventEnum
-from .guestcust_state import GuestCustStateEnum
-
-logger = logging.getLogger(__name__)
-
-
-CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
-QUERY_NICS_SUPPORTED = "queryNicsSupported"
-NICS_STATUS_CONNECTED = "connected"
-
-
-# This will send an RPC command to the underlying
-# VMware Virtualization Platform.
-def send_rpc(rpc):
- if not rpc:
- return None
-
- out = ""
- err = "Error sending the RPC command"
-
- try:
- logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
- # Remove the trailing newline in the output.
- if out:
- out = out.rstrip()
- except Exception as e:
- logger.debug("Failed to send RPC command")
- logger.exception(e)
-
- return (out, err)
-
-
-# This will send the customization status to the
-# underlying VMware Virtualization Platform.
-def set_customization_status(custstate, custerror, errormessage=None):
- message = ""
-
- if errormessage:
- message = CLOUDINIT_LOG_FILE + "@" + errormessage
- else:
- message = CLOUDINIT_LOG_FILE
-
- rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message)
- (out, err) = send_rpc(rpc)
- return (out, err)
-
-
-# This will read the file nics.txt in the specified directory
-# and return the content
-def get_nics_to_enable(dirpath):
- if not dirpath:
- return None
-
- NICS_SIZE = 1024
- nicsfilepath = os.path.join(dirpath, "nics.txt")
- if not os.path.exists(nicsfilepath):
- return None
-
- with open(nicsfilepath, 'r') as fp:
- nics = fp.read(NICS_SIZE)
-
- return nics
-
-
-# This will send an RPC command to the underlying VMware Virtualization
-# platform and enable nics.
-def enable_nics(nics):
- if not nics:
-        logger.warning("No NICs found")
- return
-
- enableNicsWaitRetries = 5
- enableNicsWaitCount = 5
- enableNicsWaitSeconds = 1
-
- for attempt in range(0, enableNicsWaitRetries):
- logger.debug("Trying to connect interfaces, attempt %d", attempt)
- (out, err) = set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
- if not out:
- time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
- continue
-
- if out != QUERY_NICS_SUPPORTED:
-            logger.warning("NIC connection status query is not supported")
- return
-
- for count in range(0, enableNicsWaitCount):
- (out, err) = set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
- if out and out == NICS_STATUS_CONNECTED:
-                logger.info("NICs are connected after %d second(s)", count)
- return
-
- time.sleep(enableNicsWaitSeconds)
-
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
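-
-# A hedged sketch of the RPC strings produced above; the nics payload is
-# illustrative:
-#   set_customization_status(
-#       GuestCustStateEnum.GUESTCUST_STATE_RUNNING,      # 4
-#       GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,  # 103
-#       "<nics payload>")
-# sends, via vmware-rpctool:
-#   deployPkg.update.state 4 103 /var/log/cloud-init.log@<nics payload>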
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
deleted file mode 100644
index 873ddc3b..00000000
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class Ipv4ModeEnum(object):
- """
- The IPv4 configuration mode which directly represents the user's goal.
-
-    This mode effectively acts as a contract with the in-guest customization
-    engine. It must be set based on what the user has requested and should
-    not be changed by intermediate layers. It's up to the in-guest engine to
-    interpret and materialize the user's request.
- """
-
- # The legacy mode which only allows dhcp/static based on whether IPv4
- # addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
-
- # IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
-
- # IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
-
- # IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
-
- # IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
deleted file mode 100644
index b5d704ea..00000000
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .boot_proto import BootProtoEnum
-from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
-
-
-class Nic(NicBase):
- """
- Holds the information about each NIC specified
- in the customization specification file
- """
-
- def __init__(self, name, configFile):
- self._name = name
- self._configFile = configFile
-
- def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
-
- def _get_count_with_prefix(self, prefix):
- return self._configFile.get_count_with_prefix(self.name + prefix)
-
- @property
- def name(self):
- return self._name
-
- @property
- def mac(self):
- return self._get('MACADDR').lower()
-
- @property
- def primary(self):
- value = self._get('PRIMARY')
- if value:
- value = value.lower()
- return value == 'yes' or value == 'true'
- else:
- return False
-
- @property
- def onboot(self):
- value = self._get('ONBOOT')
- if value:
- value = value.lower()
- return value == 'yes' or value == 'true'
- else:
- return False
-
- @property
- def bootProto(self):
- value = self._get('BOOTPROTO')
- if value:
- return value.lower()
- else:
- return ""
-
- @property
- def ipv4_mode(self):
- value = self._get('IPv4_MODE')
- if value:
- return value.lower()
- else:
- return ""
-
- @property
- def staticIpv4(self):
- """
- Checks the BOOTPROTO property and returns StaticIPv4Addr
- configuration object if STATIC configuration is set.
- """
- if self.bootProto == BootProtoEnum.STATIC:
- return [StaticIpv4Addr(self)]
- else:
- return None
-
- @property
- def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
-
- if not cnt:
- return None
-
- result = []
- for index in range(1, cnt + 1):
- result.append(StaticIpv6Addr(self, index))
-
- return result
-
-
-class StaticIpv4Addr(StaticIpv4Base):
- """Static IPV4 Setting."""
-
- def __init__(self, nic):
- self._nic = nic
-
- @property
- def ip(self):
- return self._nic._get('IPADDR')
-
- @property
- def netmask(self):
- return self._nic._get('NETMASK')
-
- @property
- def gateways(self):
- value = self._nic._get('GATEWAY')
- if value:
- return [x.strip() for x in value.split(',')]
- else:
- return None
-
-
-class StaticIpv6Addr(StaticIpv6Base):
- """Static IPV6 Address."""
-
- def __init__(self, nic, index):
- self._nic = nic
- self._index = index
-
- @property
- def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
-
- @property
- def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
-
- @property
- def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
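-
-# A hedged sketch of the flattened keys a Nic named 'NIC1' reads from the
-# ConfigFile (all values below are made up):
-#   NIC1|MACADDR    = 00:50:56:aa:bb:cc
-#   NIC1|ONBOOT     = yes
-#   NIC1|BOOTPROTO  = static
-#   NIC1|IPADDR     = 10.20.87.154
-#   NIC1|NETMASK    = 255.255.252.0
-#   NIC1|IPv6ADDR|1 = fc00:10:20:87::154
-# so Nic('NIC1', cf).mac -> '00:50:56:aa:bb:cc', and staticIpv6 returns one
-# StaticIpv6Addr built from the '|1'-suffixed keys.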
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
deleted file mode 100644
index 3c892db0..00000000
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class NicBase(object):
- """
-    Defines what is expected of each nic.
- The following properties should be provided in an implementation class.
- """
-
- @property
- def mac(self):
- """
- Retrieves the mac address of the nic
- @return (str) : the MACADDR setting
- """
- raise NotImplementedError('MACADDR')
-
- @property
- def primary(self):
- """
- Retrieves whether the nic is the primary nic
- Indicates whether NIC will be used to define the default gateway.
- If none of the NICs is configured to be primary, default gateway won't
- be set.
- @return (bool): the PRIMARY setting
- """
- raise NotImplementedError('PRIMARY')
-
- @property
- def onboot(self):
- """
- Retrieves whether the nic should be up at the boot time
- @return (bool) : the ONBOOT setting
- """
- raise NotImplementedError('ONBOOT')
-
- @property
- def bootProto(self):
- """
- Retrieves the boot protocol of the nic
- @return (str): the BOOTPROTO setting, valid values: dhcp and static.
- """
- raise NotImplementedError('BOOTPROTO')
-
- @property
- def ipv4_mode(self):
- """
- Retrieves the IPv4_MODE
- @return (str): the IPv4_MODE setting, valid values:
- backwards_compatible, static, dhcp, disabled, as_is
- """
- raise NotImplementedError('IPv4_MODE')
-
- @property
- def staticIpv4(self):
- """
- Retrieves the static IPv4 configuration of the nic
- @return (StaticIpv4Base list): the static ipv4 setting
- """
- raise NotImplementedError('Static IPv4')
-
- @property
- def staticIpv6(self):
- """
- Retrieves the IPv6 configuration of the nic
- @return (StaticIpv6Base list): the static ipv6 setting
- """
- raise NotImplementedError('Static Ipv6')
-
- def validate(self):
- """
-        Validate the object.
-        For example, the staticIpv4 property is required and should not be
-        empty when ipv4_mode is STATIC.
- """
- raise NotImplementedError('Check constraints on properties')
-
-
-class StaticIpv4Base(object):
- """
-    Defines what is expected of a static IPv4 setting.
- The following properties should be provided in an implementation class.
- """
-
- @property
- def ip(self):
- """
- Retrieves the Ipv4 address
- @return (str): the IPADDR setting
- """
- raise NotImplementedError('Ipv4 Address')
-
- @property
- def netmask(self):
- """
- Retrieves the Ipv4 NETMASK setting
- @return (str): the NETMASK setting
- """
- raise NotImplementedError('Ipv4 NETMASK')
-
- @property
- def gateways(self):
- """
- Retrieves the gateways on this Ipv4 subnet
- @return (str list): the GATEWAY setting
- """
- raise NotImplementedError('Ipv4 GATEWAY')
-
-
-class StaticIpv6Base(object):
-    """Defines what is expected of a static IPv6 setting.
- The following properties should be provided in an implementation class.
- """
-
- @property
- def ip(self):
- """
- Retrieves the Ipv6 address
- @return (str): the IPv6ADDR setting
- """
- raise NotImplementedError('Ipv6 Address')
-
- @property
- def netmask(self):
- """
- Retrieves the Ipv6 NETMASK setting
- @return (str): the IPv6NETMASK setting
- """
- raise NotImplementedError('Ipv6 NETMASK')
-
- @property
- def gateway(self):
- """
- Retrieves the Ipv6 GATEWAY setting
- @return (str): the IPv6GATEWAY setting
- """
- raise NotImplementedError('Ipv6 GATEWAY')
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
deleted file mode 100644
index c74a7ae2..00000000
--- a/cloudinit/ssh_util.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-# See: man sshd_config
-DEF_SSHD_CFG = "/etc/ssh/sshd_config"
-
-# taken from openssh source key.c/key_type_from_name
-VALID_KEY_TYPES = (
- "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
-    "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
- "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
- "ecdsa-sha2-nistp256-cert-v01@openssh.com",
- "ecdsa-sha2-nistp384-cert-v01@openssh.com",
- "ecdsa-sha2-nistp521-cert-v01@openssh.com")
-
-
-class AuthKeyLine(object):
- def __init__(self, source, keytype=None, base64=None,
- comment=None, options=None):
- self.base64 = base64
- self.comment = comment
- self.options = options
- self.keytype = keytype
- self.source = source
-
- def valid(self):
- return (self.base64 and self.keytype)
-
- def __str__(self):
- toks = []
- if self.options:
- toks.append(self.options)
- if self.keytype:
- toks.append(self.keytype)
- if self.base64:
- toks.append(self.base64)
- if self.comment:
- toks.append(self.comment)
- if not toks:
- return self.source
- else:
- return ' '.join(toks)
-
-
-class AuthKeyLineParser(object):
- """
-    AUTHORIZED_KEYS FILE FORMAT
-    AuthorizedKeysFile specifies the file containing public keys for public
-    key authentication; if none is specified, the default is
-    ~/.ssh/authorized_keys.  Each line of the file contains one key (empty
-    lines and lines starting with a '#' are ignored as comments).  Lines are
-    usually several hundred bytes long (because of the size of the public
-    key encoding), up to a limit of 8 kilobytes, which permits DSA keys up
-    to 8 kilobits and RSA keys up to 16 kilobits.  You don't want to type
-    them in; instead, copy the identity.pub, id_dsa.pub, or the id_rsa.pub
-    file and edit it.
-
-    sshd enforces a minimum RSA key modulus size for protocol 1 and protocol
-    2 keys of 768 bits.
-
-    The options (if present) consist of comma-separated option
-    specifications.  No spaces are permitted, except within double quotes.
-    The following option specifications are supported (note that option
-    keywords are case-insensitive):
- """
-
- def _extract_options(self, ent):
- """
-        The options (if present) consist of comma-separated option
-        specifications.  No spaces are permitted, except within double
-        quotes.  Note that option keywords are case-insensitive.
- """
- quoted = False
- i = 0
- while (i < len(ent) and
- ((quoted) or (ent[i] not in (" ", "\t")))):
- curc = ent[i]
- if i + 1 >= len(ent):
- i = i + 1
- break
- nextc = ent[i + 1]
- if curc == "\\" and nextc == '"':
- i = i + 1
- elif curc == '"':
- quoted = not quoted
- i = i + 1
-
- options = ent[0:i]
-
- # Return the rest of the string in 'remain'
- remain = ent[i:].lstrip()
- return (options, remain)
-
- def parse(self, src_line, options=None):
-        # modeled after openssh's auth2-pubkey.c:user_key_allowed2
- line = src_line.rstrip("\r\n")
- if line.startswith("#") or line.strip() == '':
- return AuthKeyLine(src_line)
-
- def parse_ssh_key(ent):
-            # return keytype, key, [comment]
- toks = ent.split(None, 2)
- if len(toks) < 2:
-                raise TypeError("Too few fields: %s" % len(toks))
- if toks[0] not in VALID_KEY_TYPES:
- raise TypeError("Invalid keytype %s" % toks[0])
-
- # valid key type and 2 or 3 fields:
- if len(toks) == 2:
- # no comment in line
- toks.append("")
-
- return toks
-
- ent = line.strip()
- try:
- (keytype, base64, comment) = parse_ssh_key(ent)
- except TypeError:
- (keyopts, remain) = self._extract_options(ent)
- if options is None:
- options = keyopts
-
- try:
- (keytype, base64, comment) = parse_ssh_key(remain)
- except TypeError:
- return AuthKeyLine(src_line)
-
- return AuthKeyLine(src_line, keytype=keytype, base64=base64,
- comment=comment, options=options)
-
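-# A hedged sketch of the parser above; the key material is truncated and
-# illustrative:
-#   parser = AuthKeyLineParser()
-#   key = parser.parse('command="/bin/true" ssh-rsa AAAAB3Nz... host1')
-#   key.options  -> 'command="/bin/true"'
-#   key.keytype  -> 'ssh-rsa'
-#   key.base64   -> 'AAAAB3Nz...'
-#   key.comment  -> 'host1'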
-
-def parse_authorized_keys(fname):
- lines = []
- try:
- if os.path.isfile(fname):
- lines = util.load_file(fname).splitlines()
- except (IOError, OSError):
- util.logexc(LOG, "Error reading lines from %s", fname)
- lines = []
-
- parser = AuthKeyLineParser()
- contents = []
- for line in lines:
- contents.append(parser.parse(line))
- return contents
-
-
-def update_authorized_keys(old_entries, keys):
- to_add = list(keys)
-
- for i in range(0, len(old_entries)):
- ent = old_entries[i]
- if not ent.valid():
- continue
- # Replace those with the same base64
- for k in keys:
- if not ent.valid():
- continue
- if k.base64 == ent.base64:
- # Replace it with our better one
- ent = k
- # Don't add it later
- if k in to_add:
- to_add.remove(k)
- old_entries[i] = ent
-
- # Now append any entries we did not match above
- for key in to_add:
- old_entries.append(key)
-
- # Now format them back to strings...
- lines = [str(b) for b in old_entries]
-
- # Ensure it ends with a newline
- lines.append('')
- return '\n'.join(lines)
-
-
-def users_ssh_info(username):
- pw_ent = pwd.getpwnam(username)
- if not pw_ent or not pw_ent.pw_dir:
- raise RuntimeError("Unable to get ssh info for user %r" % (username))
- return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
-
-
-def extract_authorized_keys(username):
- (ssh_dir, pw_ent) = users_ssh_info(username)
- auth_key_fn = None
- with util.SeLinuxGuard(ssh_dir, recursive=True):
- try:
- # The 'AuthorizedKeysFile' may contain tokens
- # of the form %T which are substituted during connection set-up.
- # The following tokens are defined: %% is replaced by a literal
- # '%', %h is replaced by the home directory of the user being
- # authenticated and %u is replaced by the username of that user.
- ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
- auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
- if not auth_key_fn:
- auth_key_fn = "%h/.ssh/authorized_keys"
- auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
- auth_key_fn = auth_key_fn.replace("%u", username)
- auth_key_fn = auth_key_fn.replace("%%", '%')
- if not auth_key_fn.startswith('/'):
- auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
- except (IOError, OSError):
- # Give up and use a default key filename
- auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
- util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
- "config from %r, using 'AuthorizedKeysFile' file "
- "%r instead", DEF_SSHD_CFG, auth_key_fn)
- return (auth_key_fn, parse_authorized_keys(auth_key_fn))
-
-
-def setup_user_keys(keys, username, options=None):
- # Make sure the users .ssh dir is setup accordingly
- (ssh_dir, pwent) = users_ssh_info(username)
- if not os.path.isdir(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0o700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
-
- # Turn the 'update' keys given into actual entries
- parser = AuthKeyLineParser()
- key_entries = []
- for k in keys:
- key_entries.append(parser.parse(str(k), options=options))
-
- # Extract the old and make the new
- (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
- with util.SeLinuxGuard(ssh_dir, recursive=True):
- content = update_authorized_keys(auth_key_entries, key_entries)
- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
- util.write_file(auth_key_fn, content, mode=0o600)
- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
-
-
-class SshdConfigLine(object):
- def __init__(self, line, k=None, v=None):
- self.line = line
- self._key = k
- self.value = v
-
- @property
- def key(self):
- if self._key is None:
- return None
- # Keywords are case-insensitive
- return self._key.lower()
-
- def __str__(self):
- if self._key is None:
- return str(self.line)
- else:
- v = str(self._key)
- if self.value:
- v += " " + str(self.value)
- return v
-
-
-def parse_ssh_config(fname):
- # See: man sshd_config
- # The file contains keyword-argument pairs, one per line.
- # Lines starting with '#' and empty lines are interpreted as comments.
- # Note: key-words are case-insensitive and arguments are case-sensitive
- lines = []
- if not os.path.isfile(fname):
- return lines
- for line in util.load_file(fname).splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- lines.append(SshdConfigLine(line))
- continue
- try:
- key, val = line.split(None, 1)
- except ValueError:
- key, val = line.split('=', 1)
- lines.append(SshdConfigLine(line, key, val))
- return lines
-
-
-def parse_ssh_config_map(fname):
- lines = parse_ssh_config(fname)
- if not lines:
- return {}
- ret = {}
- for line in lines:
- if not line.key:
- continue
- ret[line.key] = line.value
- return ret
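-
-# A hedged sketch (the sshd_config contents are illustrative).
-# For a file containing
-#     # a comment
-#     AuthorizedKeysFile %h/.ssh/authorized_keys
-#     PasswordAuthentication no
-# parse_ssh_config_map() lower-cases keywords and keeps values as-is:
-#     {'authorizedkeysfile': '%h/.ssh/authorized_keys',
-#      'passwordauthentication': 'no'}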
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
deleted file mode 100644
index 47deac6e..00000000
--- a/cloudinit/stages.py
+++ /dev/null
@@ -1,890 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import os
-import sys
-
-import six
-from six.moves import cPickle as pickle
-
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
-
-from cloudinit import handlers
-
-# Default handlers (used if not overridden)
-from cloudinit.handlers import boot_hook as bh_part
-from cloudinit.handlers import cloud_config as cc_part
-from cloudinit.handlers import shell_script as ss_part
-from cloudinit.handlers import upstart_job as up_part
-
-from cloudinit import cloud
-from cloudinit import config
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net import cmdline
-from cloudinit.reporting import events
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-NULL_DATA_SOURCE = None
-NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
-
-
-class Init(object):
- def __init__(self, ds_deps=None, reporter=None):
- if ds_deps is not None:
- self.ds_deps = ds_deps
- else:
- self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- # Created on first use
- self._cfg = None
- self._paths = None
- self._distro = None
- # Changed only when a fetch occurs
- self.datasource = NULL_DATA_SOURCE
- self.ds_restored = False
- self._previous_iid = None
-
- if reporter is None:
- reporter = events.ReportEventStack(
- name="init-reporter", description="init-desc",
- reporting_enabled=False)
- self.reporter = reporter
-
- def _reset(self, reset_ds=False):
- # Recreated on access
- self._cfg = None
- self._paths = None
- self._distro = None
- if reset_ds:
- self.datasource = NULL_DATA_SOURCE
- self.ds_restored = False
-
- @property
- def distro(self):
- if not self._distro:
- # Try to find the right class to use
- system_config = self._extract_cfg('system')
- distro_name = system_config.pop('distro', 'ubuntu')
- distro_cls = distros.fetch(distro_name)
- LOG.debug("Using distro class %s", distro_cls)
- self._distro = distro_cls(distro_name, system_config, self.paths)
- # If we have an active datasource we need to adjust
- # said datasource and move its distro/system config
- # from whatever it was to a new set...
- if self.datasource is not NULL_DATA_SOURCE:
- self.datasource.distro = self._distro
- self.datasource.sys_cfg = system_config
- return self._distro
-
- @property
- def cfg(self):
- return self._extract_cfg('restricted')
-
- def _extract_cfg(self, restriction):
- # Ensure actually read
- self.read_cfg()
- # Nobody gets the real config
- ocfg = copy.deepcopy(self._cfg)
- if restriction == 'restricted':
- ocfg.pop('system_info', None)
- elif restriction == 'system':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info',), {})
- elif restriction == 'paths':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info', 'paths'), {})
- if not isinstance(ocfg, (dict)):
- ocfg = {}
- return ocfg
-
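-    # A hedged sketch of the restriction levels above; the config values
-    # are illustrative:
-    #   _cfg = {'system_info': {'paths': {'cloud_dir': '/var/lib/cloud'}},
-    #           'datasource_list': ['ConfigDrive']}
-    #   _extract_cfg('restricted') -> {'datasource_list': ['ConfigDrive']}
-    #   _extract_cfg('system')     -> {'paths': {'cloud_dir': '/var/lib/cloud'}}
-    #   _extract_cfg('paths')      -> {'cloud_dir': '/var/lib/cloud'}
-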
- @property
- def paths(self):
- if not self._paths:
- path_info = self._extract_cfg('paths')
- self._paths = helpers.Paths(path_info, self.datasource)
- return self._paths
-
- def _initial_subdirs(self):
- c_dir = self.paths.cloud_dir
- initial_dirs = [
- c_dir,
- os.path.join(c_dir, 'scripts'),
- os.path.join(c_dir, 'scripts', 'per-instance'),
- os.path.join(c_dir, 'scripts', 'per-once'),
- os.path.join(c_dir, 'scripts', 'per-boot'),
- os.path.join(c_dir, 'scripts', 'vendor'),
- os.path.join(c_dir, 'seed'),
- os.path.join(c_dir, 'instances'),
- os.path.join(c_dir, 'handlers'),
- os.path.join(c_dir, 'sem'),
- os.path.join(c_dir, 'data'),
- ]
- return initial_dirs
-
- def purge_cache(self, rm_instance_lnk=False):
- rm_list = []
- rm_list.append(self.paths.boot_finished)
- if rm_instance_lnk:
- rm_list.append(self.paths.instance_link)
- for f in rm_list:
- util.del_file(f)
- return len(rm_list)
-
- def initialize(self):
- self._initialize_filesystem()
-
- def _initialize_filesystem(self):
- util.ensure_dirs(self._initial_subdirs())
- log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
- if log_file:
- util.ensure_file(log_file)
- perms = self.cfg.get('syslog_fix_perms')
- if not perms:
- perms = {}
- if not isinstance(perms, list):
- perms = [perms]
-
- error = None
- for perm in perms:
- u, g = util.extract_usergroup(perm)
- try:
- util.chownbyname(log_file, u, g)
- return
- except OSError as e:
- error = e
-
- LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
- log_file, ','.join(perms), error)
-
- def read_cfg(self, extra_fns=None):
- # None check so that we don't keep on re-loading if empty
- if self._cfg is None:
- self._cfg = self._read_cfg(extra_fns)
- # LOG.debug("Loaded 'init' config %s", self._cfg)
-
- def _read_cfg(self, extra_fns):
- no_cfg_paths = helpers.Paths({}, self.datasource)
- merger = helpers.ConfigMerger(paths=no_cfg_paths,
- datasource=self.datasource,
- additional_fns=extra_fns,
- base_cfg=fetch_base_config())
- return merger.cfg
-
- def _restore_from_cache(self):
- # We try to restore from a current link and static path
- # by using the instance link; if purge_cache was called
- # the file won't exist.
- return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
-
- def _write_to_cache(self):
- if self.datasource is NULL_DATA_SOURCE:
- return False
- return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
-
- def _get_datasources(self):
- # Any config provided?
- pkg_list = self.cfg.get('datasource_pkg_list') or []
- # Add the defaults at the end
- for n in ['', type_utils.obj_name(sources)]:
- if n not in pkg_list:
- pkg_list.append(n)
- cfg_list = self.cfg.get('datasource_list') or []
- return (cfg_list, pkg_list)
-
- def _restore_from_checked_cache(self, existing):
- if existing not in ("check", "trust"):
- raise ValueError("Unexpected value for existing: %s" % existing)
-
- ds = self._restore_from_cache()
- if not ds:
- return (None, "no cache found")
-
- run_iid_fn = self.paths.get_runpath('instance_id')
- if os.path.exists(run_iid_fn):
- run_iid = util.load_file(run_iid_fn).strip()
- else:
- run_iid = None
-
- if run_iid == ds.get_instance_id():
- return (ds, "restored from cache with run check: %s" % ds)
- elif existing == "trust":
- return (ds, "restored from cache: %s" % ds)
- else:
- if (hasattr(ds, 'check_instance_id') and
- ds.check_instance_id(self.cfg)):
- return (ds, "restored from checked cache: %s" % ds)
- else:
- return (None, "cache invalid in datasource: %s" % ds)
-
- def _get_data_source(self, existing):
- if self.datasource is not NULL_DATA_SOURCE:
- return self.datasource
-
- with events.ReportEventStack(
- name="check-cache",
- description="attempting to read from cache [%s]" % existing,
- parent=self.reporter) as myrep:
-
- ds, desc = self._restore_from_checked_cache(existing)
- myrep.description = desc
- self.ds_restored = bool(ds)
- LOG.debug(myrep.description)
-
- if not ds:
- util.del_file(self.paths.instance_link)
- (cfg_list, pkg_list) = self._get_datasources()
- # Deep copy so that user-data handlers cannot modify it
- # (which would affect user-data handlers down the line...)
- (ds, dsname) = sources.find_source(self.cfg,
- self.distro,
- self.paths,
- copy.deepcopy(self.ds_deps),
- cfg_list,
- pkg_list, self.reporter)
- LOG.info("Loaded datasource %s - %s", dsname, ds)
- self.datasource = ds
- # Ensure we adjust our paths member's datasource
- # now that we have one (thus allowing ipath to be used)
- self._reset()
- return ds
-
- def _get_instance_subdirs(self):
- return ['handlers', 'scripts', 'sem']
-
- def _get_ipath(self, subname=None):
- # Force a check to see if anything
- # actually comes back; if not,
- # then a datasource has not been assigned...
- instance_dir = self.paths.get_ipath(subname)
- if not instance_dir:
- raise RuntimeError(("No instance directory is available."
- " Has a datasource been fetched??"))
- return instance_dir
-
- def _reflect_cur_instance(self):
- # Remove the old symlink and attach a new one so
- # that further reads/writes connect into the right location
- idir = self._get_ipath()
- util.del_file(self.paths.instance_link)
- util.sym_link(idir, self.paths.instance_link)
-
- # Ensures these dirs exist
- dir_list = []
- for d in self._get_instance_subdirs():
- dir_list.append(os.path.join(idir, d))
- util.ensure_dirs(dir_list)
-
- # Write out information on what is being used for the current instance
- # and what may have been used for a previous instance...
- dp = self.paths.get_cpath('data')
-
- # Write what the datasource was and is..
- ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
- previous_ds = None
- ds_fn = os.path.join(idir, 'datasource')
- try:
- previous_ds = util.load_file(ds_fn).strip()
- except Exception:
- pass
- if not previous_ds:
- previous_ds = ds
- util.write_file(ds_fn, "%s\n" % ds)
- util.write_file(os.path.join(dp, 'previous-datasource'),
- "%s\n" % (previous_ds))
-
- # What the instance id was and is...
- iid = self.datasource.get_instance_id()
- iid_fn = os.path.join(dp, 'instance-id')
-
- previous_iid = self.previous_iid()
- util.write_file(iid_fn, "%s\n" % iid)
- util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
- util.write_file(os.path.join(dp, 'previous-instance-id'),
- "%s\n" % (previous_iid))
-
- self._write_to_cache()
- # Ensure needed components are regenerated
- # after change of instance which may cause
- # change of configuration
- self._reset()
- return iid
-
- def previous_iid(self):
- if self._previous_iid is not None:
- return self._previous_iid
-
- dp = self.paths.get_cpath('data')
- iid_fn = os.path.join(dp, 'instance-id')
- try:
- self._previous_iid = util.load_file(iid_fn).strip()
- except Exception:
- self._previous_iid = NO_PREVIOUS_INSTANCE_ID
-
- LOG.debug("previous iid found to be %s", self._previous_iid)
- return self._previous_iid
-
- def is_new_instance(self):
- previous = self.previous_iid()
- ret = (previous == NO_PREVIOUS_INSTANCE_ID or
- previous != self.datasource.get_instance_id())
- return ret
-
- def fetch(self, existing="check"):
- return self._get_data_source(existing=existing)
-
- def instancify(self):
- return self._reflect_cur_instance()
-
- def cloudify(self):
- # Form the needed options to cloudify our members
- return cloud.Cloud(self.datasource,
- self.paths, self.cfg,
- self.distro, helpers.Runners(self.paths),
- reporter=self.reporter)
-
- def update(self):
- self._store_userdata()
- self._store_vendordata()
-
- def _store_userdata(self):
- raw_ud = self.datasource.get_userdata_raw()
- if raw_ud is None:
- raw_ud = b''
- util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
- # processed userdata is a MIME message, so write it as a string.
- processed_ud = self.datasource.get_userdata()
- if processed_ud is None:
- processed_ud = ''
- util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
-
- def _store_vendordata(self):
- raw_vd = self.datasource.get_vendordata_raw()
- if raw_vd is None:
- raw_vd = b''
- util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
- # processed vendor data is a MIME message, so write it as a string.
- processed_vd = self.datasource.get_vendordata()
- if processed_vd is None:
- processed_vd = ''
- util.write_file(self._get_ipath('vendordata'), str(processed_vd),
- 0o600)
-
- def _default_handlers(self, opts=None):
- if opts is None:
- opts = {}
-
- opts.update({
- 'paths': self.paths,
- 'datasource': self.datasource,
- })
- # TODO(harlowja) Hmmm, should we dynamically import these??
- def_handlers = [
- cc_part.CloudConfigPartHandler(**opts),
- ss_part.ShellScriptPartHandler(**opts),
- bh_part.BootHookPartHandler(**opts),
- up_part.UpstartJobPartHandler(**opts),
- ]
- return def_handlers
-
- def _default_userdata_handlers(self):
- return self._default_handlers()
-
- def _default_vendordata_handlers(self):
- return self._default_handlers(
- opts={'script_path': 'vendor_scripts',
- 'cloud_config_path': 'vendor_cloud_config'})
-
- def _do_handlers(self, data_msg, c_handlers_list, frequency,
- excluded=None):
- """
- Generalized handlers suitable for use with either vendordata
- or userdata
- """
- if excluded is None:
- excluded = []
-
- cdir = self.paths.get_cpath("handlers")
- idir = self._get_ipath("handlers")
-
- # Add the path to the plugins dir to the top of our list for importing
- # new handlers.
- #
- # Note(harlowja): instance dir should be read before cloud-dir
- for d in [cdir, idir]:
- if d and d not in sys.path:
- sys.path.insert(0, d)
-
- def register_handlers_in_dir(path):
- # Attempts to register any handler modules under the given path.
- if not path or not os.path.isdir(path):
- return
- potential_handlers = util.find_modules(path)
- for (fname, mod_name) in potential_handlers.items():
- try:
- mod_locs, looked_locs = importer.find_module(
- mod_name, [''], ['list_types', 'handle_part'])
- if not mod_locs:
- LOG.warn("Could not find a valid user-data handler"
- " named %s in file %s (searched %s)",
- mod_name, fname, looked_locs)
- continue
- mod = importer.import_module(mod_locs[0])
- mod = handlers.fixup_handler(mod)
- types = c_handlers.register(mod)
- if types:
- LOG.debug("Added custom handler for %s [%s] from %s",
- types, mod, fname)
- except Exception:
- util.logexc(LOG, "Failed to register handler from %s",
- fname)
-
- # This keeps track of all the active handlers
- c_handlers = helpers.ContentHandlers()
-
- # Add any handlers in the cloud-dir
- register_handlers_in_dir(cdir)
-
- # Register any other handlers that come from the default set. This
- # is done after the cloud-dir handlers so that the cdir modules can
- # take over the default user-data handler content-types.
- for mod in c_handlers_list:
- types = c_handlers.register(mod, overwrite=False)
- if types:
- LOG.debug("Added default handler for %s from %s", types, mod)
-
- # Form our cloud interface
- data = self.cloudify()
-
- def init_handlers():
- # Init the handlers first
- for (_ctype, mod) in c_handlers.items():
- if mod in c_handlers.initialized:
- # Avoid initing the same module twice (if said module
- # is registered to more than one content-type).
- continue
- handlers.call_begin(mod, data, frequency)
- c_handlers.initialized.append(mod)
-
- def walk_handlers(excluded):
- # Walk the user data
- part_data = {
- 'handlers': c_handlers,
- # Any new handlers that are encountered get written here
- 'handlerdir': idir,
- 'data': data,
- # The default frequency if handlers don't have one
- 'frequency': frequency,
- # This will be used when new handlers are found
- # to help write their contents to files with numbered
- # names...
- 'handlercount': 0,
- 'excluded': excluded,
- }
- handlers.walk(data_msg, handlers.walker_callback, data=part_data)
-
- def finalize_handlers():
- # Give callbacks opportunity to finalize
- for (_ctype, mod) in c_handlers.items():
- if mod not in c_handlers.initialized:
- # Said module was never inited in the first place, so let's
- # not attempt to finalize those that never got called.
- continue
- c_handlers.initialized.remove(mod)
- try:
- handlers.call_end(mod, data, frequency)
- except Exception:
- util.logexc(LOG, "Failed to finalize handler: %s", mod)
-
- try:
- init_handlers()
- walk_handlers(excluded)
- finally:
- finalize_handlers()
-
- def consume_data(self, frequency=PER_INSTANCE):
- # Consume the userdata first, because we want to let the part
- # handlers run first (for merging stuff)
- with events.ReportEventStack("consume-user-data",
- "reading and applying user-data",
- parent=self.reporter):
- self._consume_userdata(frequency)
- with events.ReportEventStack("consume-vendor-data",
- "reading and applying vendor-data",
- parent=self.reporter):
- self._consume_vendordata(frequency)
-
- # Perform post-consumption adjustments so that
- # modules that run during the init stage reflect
- # this consumed set.
- #
- # They will be recreated on future access...
- self._reset()
- # Note(harlowja): the 'active' datasource will have
- # references to the previous config, distro, paths
- # objects before the load of the userdata happened,
- # this is expected.
-
- def _consume_vendordata(self, frequency=PER_INSTANCE):
- """
- Consume the vendordata and run the part handlers on it
- """
- # User-data should have been consumed first.
- # So we merge the other available cloud-configs (everything except
- # vendor provided), and check whether or not we should consume
- # vendor data at all. That gives the user or system a chance to override.
- if not self.datasource.get_vendordata_raw():
- LOG.debug("no vendordata from datasource")
- return
-
- _cc_merger = helpers.ConfigMerger(paths=self._paths,
- datasource=self.datasource,
- additional_fns=[],
- base_cfg=self.cfg,
- include_vendor=False)
- vdcfg = _cc_merger.cfg.get('vendor_data', {})
-
- if not isinstance(vdcfg, dict):
- vdcfg = {'enabled': False}
- LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
-
- enabled = vdcfg.get('enabled')
- no_handlers = vdcfg.get('disabled_handlers', None)
-
- if not util.is_true(enabled):
- LOG.debug("vendordata consumption is disabled.")
- return
-
- LOG.debug("vendor data will be consumed. disabled_handlers=%s",
- no_handlers)
-
- # Ensure vendordata source fetched before activation (just in case)
- vendor_data_msg = self.datasource.get_vendordata()
-
- # This keeps track of all the active handlers, excluding those the
- # user doesn't want run, e.g. boot_hook, cloud_config, shell_script
- c_handlers_list = self._default_vendordata_handlers()
-
- # Run the handlers
- self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
- excluded=no_handlers)
-
- def _consume_userdata(self, frequency=PER_INSTANCE):
- """
- Consume the userdata and run the part handlers
- """
-
- # Ensure datasource fetched before activation (just in case)
- user_data_msg = self.datasource.get_userdata(True)
-
- # This keeps track of all the active handlers
- c_handlers_list = self._default_handlers()
-
- # Run the handlers
- self._do_handlers(user_data_msg, c_handlers_list, frequency)
-
- def _find_networking_config(self):
- disable_file = os.path.join(
- self.paths.get_cpath('data'), 'upgraded-network')
- if os.path.exists(disable_file):
- return (None, disable_file)
-
- cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config())
- dscfg = ('ds', None)
- if self.datasource and hasattr(self.datasource, 'network_config'):
- dscfg = ('ds', self.datasource.network_config)
- sys_cfg = ('system_cfg', self.cfg.get('network'))
-
- for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg):
- if net.is_disabled_cfg(ncfg):
- LOG.debug("network config disabled by %s", loc)
- return (None, loc)
- if ncfg:
- return (ncfg, loc)
- return (net.generate_fallback_config(), "fallback")
-
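- # Note (added for clarity; not in the original file): the lookup above
- # checks kernel cmdline config first, then system config, then the
- # datasource, and generates a fallback config only when none of those
- # provide (or disable) networking configuration.
-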
- def apply_network_config(self, bring_up):
- netcfg, src = self._find_networking_config()
- if netcfg is None:
- LOG.info("network config is disabled by %s", src)
- return
-
- try:
- LOG.debug("applying net config names for %s" % netcfg)
- self.distro.apply_network_config_names(netcfg)
- except Exception as e:
- LOG.warn("Failed to rename devices: %s", e)
-
- if (self.datasource is not NULL_DATA_SOURCE and
- not self.is_new_instance()):
- LOG.debug("not a new instance. network config is not applied.")
- return
-
- LOG.info("Applying network configuration from %s bringup=%s: %s",
- src, bring_up, netcfg)
- try:
- return self.distro.apply_network_config(netcfg, bring_up=bring_up)
- except NotImplementedError:
- LOG.warn("distro '%s' does not implement apply_network_config. "
- "networking may not be configured properly." %
- self.distro)
- return
-
-
-class Modules(object):
- def __init__(self, init, cfg_files=None, reporter=None):
- self.init = init
- self.cfg_files = cfg_files
- # Created on first use
- self._cached_cfg = None
- if reporter is None:
- reporter = events.ReportEventStack(
- name="module-reporter", description="module-desc",
- reporting_enabled=False)
- self.reporter = reporter
-
- @property
- def cfg(self):
- # None check to avoid empty case causing re-reading
- if self._cached_cfg is None:
- merger = helpers.ConfigMerger(paths=self.init.paths,
- datasource=self.init.datasource,
- additional_fns=self.cfg_files,
- base_cfg=self.init.cfg)
- self._cached_cfg = merger.cfg
- # LOG.debug("Loading 'module' config %s", self._cached_cfg)
- # Only give out a copy so that others can't modify this...
- return copy.deepcopy(self._cached_cfg)
-
- def _read_modules(self, name):
- module_list = []
- if name not in self.cfg:
- return module_list
- cfg_mods = self.cfg[name]
- # Create 'module_list', a list of dicts where
- # dict['mod'] = module name
- # dict['freq'] = frequency
- # dict['args'] = arguments
- for item in cfg_mods:
- if not item:
- continue
- if isinstance(item, six.string_types):
- module_list.append({
- 'mod': item.strip(),
- })
- elif isinstance(item, (list)):
- contents = {}
- # Meant to fall through...
- if len(item) >= 1:
- contents['mod'] = item[0].strip()
- if len(item) >= 2:
- contents['freq'] = item[1].strip()
- if len(item) >= 3:
- contents['args'] = item[2:]
- if contents:
- module_list.append(contents)
- elif isinstance(item, (dict)):
- contents = {}
- valid = False
- if 'name' in item:
- contents['mod'] = item['name'].strip()
- valid = True
- if 'frequency' in item:
- contents['freq'] = item['frequency'].strip()
- if 'args' in item:
- contents['args'] = item['args'] or []
- if contents and valid:
- module_list.append(contents)
- else:
- raise TypeError(("Failed to read '%s' item in config,"
- " unknown type %s") %
- (item, type_utils.obj_name(item)))
- return module_list
-
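- # Illustrative sketch (not in the original file): the three item forms
- # parsed above, as they might appear in a config section such as
- # 'cloud_config_modules' (module names here are examples):
- #
- # cloud_config_modules:
- #  - ssh                              # plain string
- #  - ['runcmd', 'always']             # [name, frequency, args...]
- #  - {name: byobu, frequency: once}   # dict with name/frequency/args
-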
- def _fixup_modules(self, raw_mods):
- mostly_mods = []
- for raw_mod in raw_mods:
- raw_name = raw_mod['mod']
- freq = raw_mod.get('freq')
- run_args = raw_mod.get('args') or []
- mod_name = config.form_module_name(raw_name)
- if not mod_name:
- continue
- if freq and freq not in FREQUENCIES:
- LOG.warn(("Config specified module %s"
- " has an unknown frequency %s"), raw_name, freq)
- # Reset it so when ran it will get set to a known value
- freq = None
- mod_locs, looked_locs = importer.find_module(
- mod_name, ['', type_utils.obj_name(config)], ['handle'])
- if not mod_locs:
- LOG.warn("Could not find module named %s (searched %s)",
- mod_name, looked_locs)
- continue
- mod = config.fixup_module(importer.import_module(mod_locs[0]))
- mostly_mods.append([mod, raw_name, freq, run_args])
- return mostly_mods
-
- def _run_modules(self, mostly_mods):
- cc = self.init.cloudify()
- # Return which ones ran
- # and which ones failed + the exception of why it failed
- failures = []
- which_ran = []
- for (mod, name, freq, args) in mostly_mods:
- try:
- # Try the module's frequency, otherwise fall back to a known one
- if not freq:
- freq = mod.frequency
- if freq not in FREQUENCIES:
- freq = PER_INSTANCE
- LOG.debug("Running module %s (%s) with frequency %s",
- name, mod, freq)
-
- # Use the config module's logger and not our own
- # TODO(harlowja): possibly check the module
- # for having a LOG attr and just give it back
- # its own logger?
- func_args = [name, self.cfg,
- cc, config.LOG, args]
- # Mark it as having started running
- which_ran.append(name)
- # This name will affect the semaphore name created
- run_name = "config-%s" % (name)
-
- desc = "running %s with frequency %s" % (run_name, freq)
- myrep = events.ReportEventStack(
- name=run_name, description=desc, parent=self.reporter)
-
- with myrep:
- ran, _r = cc.run(run_name, mod.handle, func_args,
- freq=freq)
- if ran:
- myrep.message = "%s ran successfully" % run_name
- else:
- myrep.message = "%s previously ran" % run_name
-
- except Exception as e:
- util.logexc(LOG, "Running module %s (%s) failed", name, mod)
- failures.append((name, e))
- return (which_ran, failures)
-
- def run_single(self, mod_name, args=None, freq=None):
- # Form the users module 'specs'
- mod_to_be = {
- 'mod': mod_name,
- 'args': args,
- 'freq': freq,
- }
- # Now resume doing the normal fixups and running
- raw_mods = [mod_to_be]
- mostly_mods = self._fixup_modules(raw_mods)
- return self._run_modules(mostly_mods)
-
- def run_section(self, section_name):
- raw_mods = self._read_modules(section_name)
- mostly_mods = self._fixup_modules(raw_mods)
- d_name = self.init.distro.name
-
- skipped = []
- forced = []
- overridden = self.cfg.get('unverified_modules', [])
- for (mod, name, _freq, _args) in mostly_mods:
- worked_distros = set(mod.distros)
- worked_distros.update(
- distros.Distro.expand_osfamily(mod.osfamilies))
-
- # module does not declare 'distros' or lists this distro
- if not worked_distros or d_name in worked_distros:
- continue
-
- if name in overridden:
- forced.append(name)
- else:
- skipped.append(name)
-
- if skipped:
- LOG.info("Skipping modules %s because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.", skipped, d_name)
- if forced:
- LOG.info("running unverified_modules: %s", forced)
-
- return self._run_modules(mostly_mods)
-
-
-def fetch_base_config():
- base_cfgs = []
- default_cfg = util.get_builtin_cfg()
-
- # Anything in the conf.d location
- # or the 'default' cloud.cfg location?
- base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
-
- # Kernel/cmdline parameters override system config
- kern_contents = util.read_cc_from_cmdline()
- if kern_contents:
- base_cfgs.append(util.load_yaml(kern_contents, default={}))
-
- # And finally the default gets to play
- if default_cfg:
- base_cfgs.append(default_cfg)
-
- return util.mergemanydict(base_cfgs)
-
-
-def _pkl_store(obj, fname):
- try:
- pk_contents = pickle.dumps(obj)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource %s", obj)
- return False
- try:
- util.write_file(fname, pk_contents, omode="wb", mode=0o400)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource to %s", fname)
- return False
- return True
-
-
-def _pkl_load(fname):
- pickle_contents = None
- try:
- pickle_contents = util.load_file(fname, decode=False)
- except Exception as e:
- if os.path.isfile(fname):
- LOG.warn("failed loading pickle in %s: %s" % (fname, e))
- pass
-
- # This is allowed so just return nothing successfully loaded...
- if not pickle_contents:
- return None
- try:
- return pickle.loads(pickle_contents)
- except Exception:
- util.logexc(LOG, "Failed loading pickled blob from %s", fname)
- return None
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
deleted file mode 100644
index 41ef27e3..00000000
--- a/cloudinit/templater.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2016 Amazon.com, Inc. or its affiliates.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Andrew Jorgensen <ajorgens@amazon.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import collections
-import re
-
-try:
- from Cheetah.Template import Template as CTemplate
- CHEETAH_AVAILABLE = True
-except (ImportError, AttributeError):
- CHEETAH_AVAILABLE = False
-
-try:
- import jinja2
- from jinja2 import Template as JTemplate
- JINJA_AVAILABLE = True
-except (ImportError, AttributeError):
- JINJA_AVAILABLE = False
-
-from cloudinit import log as logging
-from cloudinit import type_utils as tu
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
-BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
-
-
-def basic_render(content, params):
- """This does simple replacement of bash variable like templates.
-
- It identifies patterns like ${a} or $a and can also identify patterns like
- ${a.b} or $a.b which will look for a key 'b' in the dictionary rooted
- by key 'a'.
- """
-
- def replacer(match):
- # Only 1 of the 2 groups will actually have a valid entry.
- name = match.group(1)
- if name is None:
- name = match.group(2)
- if name is None:
- raise RuntimeError("Match encountered but no valid group present")
- path = collections.deque(name.split("."))
- selected_params = params
- while len(path) > 1:
- key = path.popleft()
- if not isinstance(selected_params, dict):
- raise TypeError("Can not traverse into"
- " non-dictionary '%s' of type %s while"
- " looking for subkey '%s'"
- % (selected_params,
- tu.obj_name(selected_params),
- key))
- selected_params = selected_params[key]
- key = path.popleft()
- if not isinstance(selected_params, dict):
- raise TypeError("Can not extract key '%s' from non-dictionary"
- " '%s' of type %s"
- % (key, selected_params,
- tu.obj_name(selected_params)))
- return str(selected_params[key])
-
- return BASIC_MATCHER.sub(replacer, content)
-
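- # Illustrative usage sketch (not in the original file); values are
- # hypothetical:
- #
- # params = {'name': 'vm1', 'meta': {'region': 'us-east-1'}}
- # basic_render("host=${name} region=$meta.region", params)
- # # -> 'host=vm1 region=us-east-1'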
-
-def detect_template(text):
-
- def cheetah_render(content, params):
- return CTemplate(content, searchList=[params]).respond()
-
- def jinja_render(content, params):
- # keep_trailing_newline is in jinja2 2.7+, not 2.6
- add = "\n" if content.endswith("\n") else ""
- return JTemplate(content,
- undefined=jinja2.StrictUndefined,
- trim_blocks=True).render(**params) + add
-
- if text.find("\n") != -1:
- ident, rest = text.split("\n", 1)
- else:
- ident = text
- rest = ''
- type_match = TYPE_MATCHER.match(ident)
- if not type_match:
- if CHEETAH_AVAILABLE:
- LOG.debug("Using Cheetah as the renderer for unknown template.")
- return ('cheetah', cheetah_render, text)
- else:
- return ('basic', basic_render, text)
- else:
- template_type = type_match.group(1).lower().strip()
- if template_type not in ('jinja', 'cheetah', 'basic'):
- raise ValueError("Unknown template rendering type '%s' requested"
- % template_type)
- if template_type == 'jinja' and not JINJA_AVAILABLE:
- LOG.warn("Jinja not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'jinja' and JINJA_AVAILABLE:
- return ('jinja', jinja_render, rest)
- if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
- LOG.warn("Cheetah not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
- return ('cheetah', cheetah_render, rest)
- # Only thing left over is the basic renderer (it is always available).
- return ('basic', basic_render, rest)
-
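- # Illustrative sketch (not in the original file): a template selects its
- # renderer with a '## template:' first line, e.g.:
- #
- # ## template: jinja
- # hostname: {{ hostname }}
- #
- # Without such a line, Cheetah is used when available, else basic.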
-
-def render_from_file(fn, params):
- if not params:
- params = {}
- template_type, renderer, content = detect_template(util.load_file(fn))
- LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
- return renderer(content, params)
-
-
-def render_to_file(fn, outfn, params, mode=0o644):
- contents = render_from_file(fn, params)
- util.write_file(outfn, contents, mode=mode)
-
-
-def render_string_to_file(content, outfn, params, mode=0o644):
- contents = render_string(content, params)
- util.write_file(outfn, contents, mode=mode)
-
-
-def render_string(content, params):
- if not params:
- params = {}
- template_type, renderer, content = detect_template(content)
- return renderer(content, params)
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
deleted file mode 100644
index b93efd6a..00000000
--- a/cloudinit/type_utils.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import types
-
-import six
-
-
-if six.PY3:
- _NAME_TYPES = (
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- type,
- )
-else:
- _NAME_TYPES = (
- types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- types.ClassType,
- )
-
-
-def obj_name(obj):
- if isinstance(obj, _NAME_TYPES):
- return six.text_type(obj.__name__)
- else:
- if not hasattr(obj, '__class__'):
- return repr(obj)
- else:
- return obj_name(obj.__class__)
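-
- # Illustrative sketch (not in the original file):
- #
- # obj_name(six)              # -> 'six' (module)
- # obj_name(obj_name)         # -> 'obj_name' (function)
- # obj_name(ValueError('x'))  # -> 'ValueError' (instance -> its class)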
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
deleted file mode 100644
index c05e9d90..00000000
--- a/cloudinit/url_helper.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import requests
-import six
-import time
-
-from email.utils import parsedate
-from functools import partial
-
-import oauthlib.oauth1 as oauth1
-from requests import exceptions
-
-from six.moves.urllib.parse import (
- urlparse, urlunparse,
- quote as urlquote)
-
-from cloudinit import log as logging
-from cloudinit import version
-
-LOG = logging.getLogger(__name__)
-
-if six.PY2:
- import httplib
- NOT_FOUND = httplib.NOT_FOUND
-else:
- import http.client
- NOT_FOUND = http.client.NOT_FOUND
-
-
-# Check if requests has ssl support (added in requests >= 0.8.8)
-SSL_ENABLED = False
-CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
-_REQ_VER = None
-try:
- from distutils.version import LooseVersion
- import pkg_resources
- _REQ = pkg_resources.get_distribution('requests')
- _REQ_VER = LooseVersion(_REQ.version)
- if _REQ_VER >= LooseVersion('0.8.8'):
- SSL_ENABLED = True
- if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
- CONFIG_ENABLED = True
-except ImportError:
- pass
-
-
-def _cleanurl(url):
- parsed_url = list(urlparse(url, scheme='http'))
- if not parsed_url[1] and parsed_url[2]:
- # Swap these since this seems to be a common
- # occurrence when given urls like 'www.google.com'
- parsed_url[1] = parsed_url[2]
- parsed_url[2] = ''
- return urlunparse(parsed_url)
-
-
-def combine_url(base, *add_ons):
-
- def combine_single(url, add_on):
- url_parsed = list(urlparse(url))
- path = url_parsed[2]
- if path and not path.endswith("/"):
- path += "/"
- path += urlquote(str(add_on), safe="/:")
- url_parsed[2] = path
- return urlunparse(url_parsed)
-
- url = base
- for add_on in add_ons:
- url = combine_single(url, add_on)
- return url
-
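- # Illustrative sketch (not in the original file); the url is hypothetical:
- #
- # combine_url('http://169.254.169.254', 'latest', 'meta-data/')
- # # -> 'http://169.254.169.254/latest/meta-data/'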
-
- # Made to have the same accessors as UrlResponse so that
- # read_file_or_url can return either object and the 'user'
- # of those objects will not need to know the difference.
-class StringResponse(object):
- def __init__(self, contents, code=200):
- self.code = code
- self.headers = {}
- self.contents = contents
- self.url = None
-
- def ok(self, *args, **kwargs):
- if self.code != 200:
- return False
- return True
-
- def __str__(self):
- return self.contents
-
-
-class FileResponse(StringResponse):
- def __init__(self, path, contents, code=200):
- StringResponse.__init__(self, contents, code=code)
- self.url = path
-
-
-class UrlResponse(object):
- def __init__(self, response):
- self._response = response
-
- @property
- def contents(self):
- return self._response.content
-
- @property
- def url(self):
- return self._response.url
-
- def ok(self, redirects_ok=False):
- upper = 300
- if redirects_ok:
- upper = 400
- return 200 <= self.code < upper
-
- @property
- def headers(self):
- return self._response.headers
-
- @property
- def code(self):
- return self._response.status_code
-
- def __str__(self):
- return self._response.text
-
-
-class UrlError(IOError):
- def __init__(self, cause, code=None, headers=None, url=None):
- IOError.__init__(self, str(cause))
- self.cause = cause
- self.code = code
- self.headers = headers
- if self.headers is None:
- self.headers = {}
- self.url = url
-
-
-def _get_ssl_args(url, ssl_details):
- ssl_args = {}
- scheme = urlparse(url).scheme
- if scheme == 'https' and ssl_details:
- if not SSL_ENABLED:
- LOG.warn("SSL is not supported in requests v%s, "
- "cert. verification can not occur!", _REQ_VER)
- else:
- if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
- ssl_args['verify'] = ssl_details['ca_certs']
- else:
- ssl_args['verify'] = True
- if 'cert_file' in ssl_details and 'key_file' in ssl_details:
- ssl_args['cert'] = [ssl_details['cert_file'],
- ssl_details['key_file']]
- elif 'cert_file' in ssl_details:
- ssl_args['cert'] = str(ssl_details['cert_file'])
- return ssl_args
-
-
-def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None):
- url = _cleanurl(url)
- req_args = {
- 'url': url,
- }
- req_args.update(_get_ssl_args(url, ssl_details))
- req_args['allow_redirects'] = allow_redirects
- req_args['method'] = 'GET'
- if timeout is not None:
- req_args['timeout'] = max(float(timeout), 0)
- if data:
- req_args['method'] = 'POST'
- # The 'config' argument only exists in some requests versions
- # (>= 0.7 and < 1.0), so we need to do the retries manually
- # when it isn't available...
- if CONFIG_ENABLED:
- req_config = {
- 'store_cookies': False,
- }
- # Don't use the retry support built-in
- # since it doesn't allow for 'sleep_times'
- # in between tries....
- # if retries:
- # req_config['max_retries'] = max(int(retries), 0)
- req_args['config'] = req_config
- manual_tries = 1
- if retries:
- manual_tries = max(int(retries) + 1, 1)
-
- def_headers = {
- 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
- }
- if headers:
- def_headers.update(headers)
- headers = def_headers
-
- if not headers_cb:
- def _cb(url):
- return headers
- headers_cb = _cb
- if data:
- req_args['data'] = data
- if sec_between is None:
- sec_between = -1
-
- excps = []
- # Handle retrying ourselves since the built-in support
- # doesn't handle sleeping between tries...
- for i in range(0, manual_tries):
- req_args['headers'] = headers_cb(url)
- filtered_req_args = {}
- for (k, v) in req_args.items():
- if k == 'data':
- continue
- filtered_req_args[k] = v
- try:
- LOG.debug("[%s/%s] open '%s' with %s configuration", i,
- manual_tries, url, filtered_req_args)
-
- r = requests.request(**req_args)
- if check_status:
- r.raise_for_status()
- LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
- r.status_code, len(r.content), (i + 1))
- # Doesn't seem like we can make it use a different
- # subclass for responses, so add our own backward-compat
- # attrs
- return UrlResponse(r)
- except exceptions.RequestException as e:
- if (isinstance(e, (exceptions.HTTPError)) and
- hasattr(e, 'response') and # This appeared in v 0.10.8
- hasattr(e.response, 'status_code')):
- excps.append(UrlError(e, code=e.response.status_code,
- headers=e.response.headers,
- url=url))
- else:
- excps.append(UrlError(e, url=url))
- if SSL_ENABLED and isinstance(e, exceptions.SSLError):
- # ssl exceptions are not going to get fixed by waiting a
- # few seconds
- break
- if exception_cb and exception_cb(req_args.copy(), excps[-1]):
- # if an exception callback was given it should return None
- # a true-ish value means to break and re-raise the exception
- break
- if i + 1 < manual_tries and sec_between > 0:
- LOG.debug("Please wait %s seconds while we wait to try again",
- sec_between)
- time.sleep(sec_between)
- if excps:
- raise excps[-1]
- return None # Should throw before this...
-
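- # Illustrative usage sketch (not in the original file); url and values
- # are hypothetical:
- #
- # resp = readurl('http://169.254.169.254/latest/meta-data/instance-id',
- #                retries=3, sec_between=2, timeout=5)
- # if resp.ok():
- #     instance_id = resp.contents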
-
-def wait_for_url(urls, max_wait=None, timeout=None,
- status_cb=None, headers_cb=None, sleep_time=1,
- exception_cb=None):
- """
- urls: a list of urls to try
- max_wait: roughly the maximum time to wait before giving up
- The max time is *actually* len(urls)*timeout as each url will
- be tried once and given the timeout provided.
- a number <= 0 will always result in only one try
- timeout: the timeout provided to urlopen
- status_cb: call method with string message when a url is not available
- headers_cb: call method with single argument of url to get headers
- for request.
- exception_cb: call method with 2 arguments 'msg' (per status_cb) and
- 'exception', the exception that occurred.
-
- The idea of this routine is to wait for the EC2 metadata service to
- come up. On both Eucalyptus and EC2 we have seen the case where
- the instance hit the MD before the MD service was up. EC2 seems
- to have permanently fixed this, though.
-
- In openstack, the metadata service might be painfully slow, and
- unable to avoid hitting a timeout of even up to 10 seconds or more
- (LP: #894279) for a simple GET.
-
- Offset those needs with the need to not hang forever (and block boot)
- on a system where cloud-init is configured to look for EC2 Metadata
- service but is not going to find one. It is possible that the instance
- data host (169.254.169.254) may be firewalled off entirely for a system,
- meaning that the connection will block forever unless a timeout is set.
- """
- start_time = time.time()
-
- def log_status_cb(msg, exc=None):
- LOG.debug(msg)
-
- if status_cb is None:
- status_cb = log_status_cb
-
- def timeup(max_wait, start_time):
- return ((max_wait <= 0 or max_wait is None) or
- (time.time() - start_time > max_wait))
-
- loop_n = 0
- while True:
- sleep_time = int(loop_n / 5) + 1
- for url in urls:
- now = time.time()
- if loop_n != 0:
- if timeup(max_wait, start_time):
- break
- if timeout and (now + timeout > (start_time + max_wait)):
- # shorten timeout to not run way over max_wait
- timeout = int((start_time + max_wait) - now)
-
- reason = ""
- url_exc = None
- try:
- if headers_cb is not None:
- headers = headers_cb(url)
- else:
- headers = {}
-
- response = readurl(url, headers=headers, timeout=timeout,
- check_status=False)
- if not response.contents:
- reason = "empty response [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
- elif not response.ok():
- reason = "bad status code [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
- else:
- return url
- except UrlError as e:
- reason = "request error [%s]" % e
- url_exc = e
- except Exception as e:
- reason = "unexpected error [%s]" % e
- url_exc = e
-
- time_taken = int(time.time() - start_time)
- status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
- time_taken,
- max_wait,
- reason)
- status_cb(status_msg)
- if exception_cb:
- # This can be used to alter the headers that will be sent
- # in the future, for example this is what the MAAS datasource
- # does.
- exception_cb(msg=status_msg, exception=url_exc)
-
- if timeup(max_wait, start_time):
- break
-
- loop_n = loop_n + 1
- LOG.debug("Please wait %s seconds while we wait to try again",
- sleep_time)
- time.sleep(sleep_time)
-
- return False
-
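- # Illustrative usage sketch (not in the original file); url is
- # hypothetical:
- #
- # url = wait_for_url(['http://169.254.169.254/latest/meta-data/'],
- #                    max_wait=120, timeout=5)
- # # -> the first url that responded, or False if max_wait ran out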
-
-class OauthUrlHelper(object):
- def __init__(self, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None,
- skew_data_file="/run/oauth_skew.json"):
- self.consumer_key = consumer_key
- self.consumer_secret = consumer_secret or ""
- self.token_key = token_key
- self.token_secret = token_secret
- self.skew_data_file = skew_data_file
- self._do_oauth = True
- self.skew_change_limit = 5
- required = (self.token_key, self.token_secret, self.consumer_key)
- if not any(required):
- self._do_oauth = False
- elif not all(required):
- raise ValueError("all or none of token_key, token_secret, or "
- "consumer_key can be set")
-
- old = self.read_skew_file()
- self.skew_data = old or {}
-
- def read_skew_file(self):
- if self.skew_data_file and os.path.isfile(self.skew_data_file):
- with open(self.skew_data_file, mode="r") as fp:
- return json.load(fp)
- return None
-
- def update_skew_file(self, host, value):
- # this is not atomic
- if not self.skew_data_file:
- return
- cur = self.read_skew_file()
- if cur is None:
- cur = {}
- cur[host] = value
- with open(self.skew_data_file, mode="w") as fp:
- fp.write(json.dumps(cur))
-
- def exception_cb(self, msg, exception):
- if not (isinstance(exception, UrlError) and
- (exception.code == 403 or exception.code == 401)):
- return
-
- if 'date' not in exception.headers:
- LOG.warn("Missing header 'date' in %s response", exception.code)
- return
-
- date = exception.headers['date']
- try:
- remote_time = time.mktime(parsedate(date))
- except Exception as e:
- LOG.warn("Failed to convert datetime '%s': %s", date, e)
- return
-
- skew = int(remote_time - time.time())
- host = urlparse(exception.url).netloc
- old_skew = self.skew_data.get(host, 0)
- if abs(old_skew - skew) > self.skew_change_limit:
- self.update_skew_file(host, skew)
- LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
- self.skew_data[host] = skew
-
- return
-
- def headers_cb(self, url):
- if not self._do_oauth:
- return {}
-
- timestamp = None
- host = urlparse(url).netloc
- if self.skew_data and host in self.skew_data:
- timestamp = int(time.time()) + self.skew_data[host]
-
- return oauth_headers(
- url=url, consumer_key=self.consumer_key,
- token_key=self.token_key, token_secret=self.token_secret,
- consumer_secret=self.consumer_secret, timestamp=timestamp)
-
- def _wrapped(self, wrapped_func, args, kwargs):
- kwargs['headers_cb'] = partial(
- self._headers_cb, kwargs.get('headers_cb'))
- kwargs['exception_cb'] = partial(
- self._exception_cb, kwargs.get('exception_cb'))
- return wrapped_func(*args, **kwargs)
-
- def wait_for_url(self, *args, **kwargs):
- return self._wrapped(wait_for_url, args, kwargs)
-
- def readurl(self, *args, **kwargs):
- return self._wrapped(readurl, args, kwargs)
-
- def _exception_cb(self, extra_exception_cb, msg, exception):
- ret = None
- try:
- if extra_exception_cb:
- ret = extra_exception_cb(msg, exception)
- finally:
- self.exception_cb(msg, exception)
- return ret
-
- def _headers_cb(self, extra_headers_cb, url):
- headers = {}
- if extra_headers_cb:
- headers = extra_headers_cb(url)
- headers.update(self.headers_cb(url))
- return headers
-
-
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
- timestamp=None):
- if timestamp:
- timestamp = str(timestamp)
- else:
- timestamp = None
-
- client = oauth1.Client(
- consumer_key,
- client_secret=consumer_secret,
- resource_owner_key=token_key,
- resource_owner_secret=token_secret,
- signature_method=oauth1.SIGNATURE_PLAINTEXT,
- timestamp=timestamp)
- uri, signed_headers, body = client.sign(url)
- return signed_headers
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
deleted file mode 100644
index 393bf0bb..00000000
--- a/cloudinit/user_data.py
+++ /dev/null
@@ -1,356 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from email.mime.base import MIMEBase
-from email.mime.multipart import MIMEMultipart
-from email.mime.nonmultipart import MIMENonMultipart
-from email.mime.text import MIMEText
-
-import six
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-# Constants copied in from the handler module
-NOT_MULTIPART_TYPE = handlers.NOT_MULTIPART_TYPE
-PART_FN_TPL = handlers.PART_FN_TPL
-OCTET_TYPE = handlers.OCTET_TYPE
-
-# Saves typing errors
-CONTENT_TYPE = 'Content-Type'
-
-# Various special content types that cause special actions
-TYPE_NEEDED = ["text/plain", "text/x-not-multipart"]
-INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
-ARCHIVE_TYPES = ["text/cloud-config-archive"]
-UNDEF_TYPE = "text/plain"
-ARCHIVE_UNDEF_TYPE = "text/cloud-config"
-ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
-
- # This seems to hit most of the possible gzip content types.
-DECOMP_TYPES = [
- 'application/gzip',
- 'application/gzip-compressed',
- 'application/gzipped',
- 'application/x-compress',
- 'application/x-compressed',
- 'application/x-gunzip',
- 'application/x-gzip',
- 'application/x-gzip-compressed',
-]
-
-# Msg header used to track attachments
-ATTACHMENT_FIELD = 'Number-Attachments'
-
- # Only the following content types can have their launch index examined
- # in their payload; every other content type can still provide a header
-EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"]
-
-
-def _replace_header(msg, key, value):
- del msg[key]
- msg[key] = value
-
-
-def _set_filename(msg, filename):
- del msg['Content-Disposition']
- msg.add_header('Content-Disposition',
- 'attachment', filename=str(filename))
-
-
-class UserDataProcessor(object):
- def __init__(self, paths):
- self.paths = paths
- self.ssl_details = util.fetch_ssl_details(paths)
-
- def process(self, blob):
- accumulating_msg = MIMEMultipart()
- if isinstance(blob, list):
- for b in blob:
- self._process_msg(convert_string(b), accumulating_msg)
- else:
- self._process_msg(convert_string(blob), accumulating_msg)
- return accumulating_msg
-
- def _process_msg(self, base_msg, append_msg):
-
- def find_ctype(payload):
- return handlers.type_from_starts_with(payload)
-
- for part in base_msg.walk():
- if is_skippable(part):
- continue
-
- ctype = None
- ctype_orig = part.get_content_type()
- payload = util.fully_decoded_payload(part)
- was_compressed = False
-
- # When the message states it is of a gzipped content type ensure
- # that we attempt to decode said payload so that the decompressed
- # data can be examined (instead of the compressed data).
- if ctype_orig in DECOMP_TYPES:
- try:
- payload = util.decomp_gzip(payload, quiet=False)
- # At this point we don't know what the content-type is
- # since we just decompressed it.
- ctype_orig = None
- was_compressed = True
- except util.DecompressionError as e:
- LOG.warn("Failed decompressing payload from %s of length"
- " %s due to: %s", ctype_orig, len(payload), e)
- continue
-
- # Attempt to figure out the payloads content-type
- if not ctype_orig:
- ctype_orig = UNDEF_TYPE
- if ctype_orig in TYPE_NEEDED:
- ctype = find_ctype(payload)
- if ctype is None:
- ctype = ctype_orig
-
- # In the case where the data was compressed, we want to make sure
- # that we create a new message that contains the found content
- # type with the uncompressed content since later traversals of the
- # messages will expect a part not compressed.
- if was_compressed:
- maintype, subtype = ctype.split("/", 1)
- n_part = MIMENonMultipart(maintype, subtype)
- n_part.set_payload(payload)
- # Copy various headers from the old part to the new one,
- # but don't include all the headers since some are not useful
- # after decoding and decompression.
- if part.get_filename():
- _set_filename(n_part, part.get_filename())
- for h in ('Launch-Index',):
- if h in part:
- _replace_header(n_part, h, str(part[h]))
- part = n_part
-
- if ctype != ctype_orig:
- _replace_header(part, CONTENT_TYPE, ctype)
-
- if ctype in INCLUDE_TYPES:
- self._do_include(payload, append_msg)
- continue
-
- if ctype in ARCHIVE_TYPES:
- self._explode_archive(payload, append_msg)
- continue
-
- # TODO(harlowja): Should this be happening, shouldn't
- # the part header be modified and not the base?
- _replace_header(base_msg, CONTENT_TYPE, ctype)
-
- self._attach_part(append_msg, part)
-
- def _attach_launch_index(self, msg):
- header_idx = msg.get('Launch-Index', None)
- payload_idx = None
- if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
- try:
- # See if it has a launch-index field
- # that might affect the final header
- payload = util.load_yaml(msg.get_payload(decode=True))
- if payload:
- payload_idx = payload.get('launch-index')
- except Exception:
- pass
- # The header, when present, overrides anything found in the payload;
- # when the header is absent, any value found in the payload is kept.
- if header_idx is not None:
- payload_idx = header_idx
- if payload_idx is not None:
- try:
- msg.add_header('Launch-Index', str(int(payload_idx)))
- except (ValueError, TypeError):
- pass
-
- def _get_include_once_filename(self, entry):
- entry_fn = util.hash_blob(entry, 'md5', 64)
- return os.path.join(self.paths.get_ipath_cur('data'),
- 'urlcache', entry_fn)
-
- def _process_before_attach(self, msg, attached_id):
- if not msg.get_filename():
- _set_filename(msg, PART_FN_TPL % (attached_id))
- self._attach_launch_index(msg)
-
- def _do_include(self, content, append_msg):
- # Include a list of urls, one per line
- # also support '#include <url here>'
- # or '#include-once <url here>'
- include_once_on = False
- for line in content.splitlines():
- lc_line = line.lower()
- if lc_line.startswith("#include-once"):
- line = line[len("#include-once"):].lstrip()
- # Every following include will now
- # not be refetched, but will instead be
- # re-read from a local urlcache (if it worked)
- include_once_on = True
- elif lc_line.startswith("#include"):
- line = line[len("#include"):].lstrip()
- # Disable the include once if it was on
- # if it wasn't, then this has no effect.
- include_once_on = False
- if line.startswith("#"):
- continue
- include_url = line.strip()
- if not include_url:
- continue
-
- include_once_fn = None
- content = None
- if include_once_on:
- include_once_fn = self._get_include_once_filename(include_url)
- if include_once_on and os.path.isfile(include_once_fn):
- content = util.load_file(include_once_fn)
- else:
- resp = util.read_file_or_url(include_url,
- ssl_details=self.ssl_details)
- if include_once_on and resp.ok():
- util.write_file(include_once_fn, resp.contents, mode=0o600)
- if resp.ok():
- content = resp.contents
- else:
- LOG.warn(("Fetching from %s resulted in"
- " a invalid http code of %s"),
- include_url, resp.code)
-
- if content is not None:
- new_msg = convert_string(content)
- self._process_msg(new_msg, append_msg)
-
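- # Illustrative sketch (not in the original file): user-data handled by
- # _do_include above might look like this (urls are hypothetical):
- #
- # #include
- # http://example.com/common-config.txt
- # #include-once
- # http://example.com/one-time-secret.txt
-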
- def _explode_archive(self, archive, append_msg):
- entries = util.load_yaml(archive, default=[], allowed=(list, set))
- for ent in entries:
- # ent can be one of:
- # dict { 'filename' : 'value', 'content' :
- # 'value', 'type' : 'value' }
- # ('filename' and 'type' may not be present)
- # or
- # scalar(payload)
- if isinstance(ent, six.string_types):
- ent = {'content': ent}
- if not isinstance(ent, (dict)):
- # TODO(harlowja) raise?
- continue
-
- content = ent.get('content', '')
- mtype = ent.get('type')
- if not mtype:
- default = ARCHIVE_UNDEF_TYPE
- if isinstance(content, six.binary_type):
- default = ARCHIVE_UNDEF_BINARY_TYPE
- mtype = handlers.type_from_starts_with(content, default)
-
- maintype, subtype = mtype.split('/', 1)
- if maintype == "text":
- if isinstance(content, six.binary_type):
- content = content.decode()
- msg = MIMEText(content, _subtype=subtype)
- else:
- msg = MIMEBase(maintype, subtype)
- msg.set_payload(content)
-
- if 'filename' in ent:
- _set_filename(msg, ent['filename'])
- if 'launch-index' in ent:
- msg.add_header('Launch-Index', str(ent['launch-index']))
-
- for header in list(ent.keys()):
- if header.lower() in ('content', 'filename', 'type',
- 'launch-index', 'content-disposition',
- ATTACHMENT_FIELD.lower(),
- CONTENT_TYPE.lower()):
- continue
- msg.add_header(header, ent[header])
-
- self._attach_part(append_msg, msg)
-
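- # Illustrative sketch (not in the original file): a cloud-config-archive
- # payload handled by _explode_archive above might look like this
- # (contents are hypothetical):
- #
- # #cloud-config-archive
- # - type: text/cloud-config
- #   content: |
- #     runcmd: ['echo hello']
- # - type: text/x-shellscript
- #   filename: part-script.sh
- #   content: "#!/bin/sh\necho done\n"
-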
- def _multi_part_count(self, outer_msg, new_count=None):
- """
- Return the number of attachments to this MIMEMultipart by looking
- at its 'Number-Attachments' header.
- """
- if ATTACHMENT_FIELD not in outer_msg:
- outer_msg[ATTACHMENT_FIELD] = '0'
-
- if new_count is not None:
- _replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))
-
- fetched_count = 0
- try:
- fetched_count = int(outer_msg.get(ATTACHMENT_FIELD))
- except (ValueError, TypeError):
- _replace_header(outer_msg, ATTACHMENT_FIELD, str(fetched_count))
- return fetched_count
-
- def _attach_part(self, outer_msg, part):
- """
- Attach a message to an outer message. outer_msg must be a MIMEMultipart.
- Modifies a header in the outer message to keep track of number of
- attachments.
- """
- part_count = self._multi_part_count(outer_msg)
- self._process_before_attach(part, part_count + 1)
- outer_msg.attach(part)
- self._multi_part_count(outer_msg, part_count + 1)
-
-
-def is_skippable(part):
- # multipart/* are just containers
- part_maintype = part.get_content_maintype() or ''
- if part_maintype.lower() == 'multipart':
- return True
- return False
-
-
- # Converts a raw string into a MIME message
-def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
- if not raw_data:
- raw_data = ''
-
- def create_binmsg(data, content_type):
- maintype, subtype = content_type.split("/", 1)
- msg = MIMEBase(maintype, subtype)
- msg.set_payload(data)
- return msg
-
- try:
- data = util.decode_binary(util.decomp_gzip(raw_data))
- if "mime-version:" in data[0:4096].lower():
- msg = util.message_from_string(data)
- else:
- msg = create_binmsg(data, content_type)
- except UnicodeDecodeError:
- msg = create_binmsg(raw_data, content_type)
-
- return msg
diff --git a/cloudinit/util.py b/cloudinit/util.py
deleted file mode 100644
index e5dd61a0..00000000
--- a/cloudinit/util.py
+++ /dev/null
@@ -1,2246 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import contextlib
-import copy as obj_copy
-import ctypes
-import email
-import errno
-import glob
-import grp
-import gzip
-import hashlib
-import json
-import os
-import os.path
-import platform
-import pwd
-import random
-import re
-import shutil
-import socket
-import stat
-import string
-import subprocess
-import sys
-import tempfile
-import time
-
-from base64 import b64decode, b64encode
-from six.moves.urllib import parse as urlparse
-
-import six
-import yaml
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import safeyaml
-from cloudinit import type_utils
-from cloudinit import url_helper
-from cloudinit import version
-
-from cloudinit.settings import (CFG_BUILTIN)
-
-
-_DNS_REDIRECT_IP = None
-LOG = logging.getLogger(__name__)
-
- # Helps clean up filenames to ensure they aren't FS incompatible
-FN_REPLACEMENTS = {
- os.sep: '_',
-}
-FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
-
-TRUE_STRINGS = ('true', '1', 'on', 'yes')
-FALSE_STRINGS = ('off', '0', 'no', 'false')
-
-
-# Helper utils to see if running in a container
-CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
- ['running-in-container'],
- ['lxc-is-container'])
-
-PROC_CMDLINE = None
-
-
-def decode_binary(blob, encoding='utf-8'):
- # Converts a binary type into a text type using given encoding.
- if isinstance(blob, six.text_type):
- return blob
- return blob.decode(encoding)
-
-
-def encode_text(text, encoding='utf-8'):
- # Converts a text string into a binary type using given encoding.
- if isinstance(text, six.binary_type):
- return text
- return text.encode(encoding)
-
-
-def b64d(source):
- # Base64 decode some data, accepting bytes or unicode/str, and returning
- # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
- decoded = b64decode(source)
- try:
- return decoded.decode('utf-8')
- except UnicodeDecodeError:
- return decoded
-
-
-def b64e(source):
- # Base64 encode some data, accepting bytes or unicode/str, and returning
- # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
- if not isinstance(source, bytes):
- source = source.encode('utf-8')
- return b64encode(source).decode('utf-8')
-
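
(For illustration, a doctest-style sketch of the two helpers above, assuming
this module is importable as cloudinit.util as the diff path suggests:)

    >>> from cloudinit import util
    >>> util.b64e('hi')                  # str input is utf-8 encoded first
    'aGk='
    >>> util.b64d('aGk=')                # utf-8 compatible result -> str
    'hi'
    >>> util.b64d(util.b64e(b'\xff'))    # undecodable result stays bytes
    b'\xff'
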
-
-def fully_decoded_payload(part):
- # In Python 3, decoding the payload will ironically hand us a bytes object.
- # 'decode' means to decode according to Content-Transfer-Encoding, not
- # according to any charset in the Content-Type. So, if we end up with
- # bytes, first try to decode to str via CT charset, and failing that, try
- # utf-8 using surrogate escapes.
- cte_payload = part.get_payload(decode=True)
- if (six.PY3 and
- part.get_content_maintype() == 'text' and
- isinstance(cte_payload, bytes)):
- charset = part.get_charset()
- if charset and charset.input_codec:
- encoding = charset.input_codec
- else:
- encoding = 'utf-8'
- return cte_payload.decode(encoding, errors='surrogateescape')
- return cte_payload
-
-
-# Path for DMI Data
-DMI_SYS_PATH = "/sys/class/dmi/id"
-
-# dmidecode and /sys/class/dmi/id/* use different names for the same value,
-# this allows us to refer to them by one canonical name
-DMIDECODE_TO_DMI_SYS_MAPPING = {
- 'baseboard-asset-tag': 'board_asset_tag',
- 'baseboard-manufacturer': 'board_vendor',
- 'baseboard-product-name': 'board_name',
- 'baseboard-serial-number': 'board_serial',
- 'baseboard-version': 'board_version',
- 'bios-release-date': 'bios_date',
- 'bios-vendor': 'bios_vendor',
- 'bios-version': 'bios_version',
- 'chassis-asset-tag': 'chassis_asset_tag',
- 'chassis-manufacturer': 'chassis_vendor',
- 'chassis-serial-number': 'chassis_serial',
- 'chassis-version': 'chassis_version',
- 'system-manufacturer': 'sys_vendor',
- 'system-product-name': 'product_name',
- 'system-serial-number': 'product_serial',
- 'system-uuid': 'product_uuid',
- 'system-version': 'product_version',
-}
-
-
-class ProcessExecutionError(IOError):
-
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)r\n'
- 'Stderr: %(stderr)r')
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
- if not cmd:
- self.cmd = '-'
- else:
- self.cmd = cmd
-
- if not description:
- self.description = 'Unexpected error while running command.'
- else:
- self.description = description
-
- if not isinstance(exit_code, six.integer_types):
- self.exit_code = '-'
- else:
- self.exit_code = exit_code
-
- if not stderr:
- self.stderr = ''
- else:
- self.stderr = stderr
-
- if not stdout:
- self.stdout = ''
- else:
- self.stdout = stdout
-
- if reason:
- self.reason = reason
- else:
- self.reason = '-'
-
- self.errno = errno
- message = self.MESSAGE_TMPL % {
- 'description': self.description,
- 'cmd': self.cmd,
- 'exit_code': self.exit_code,
- 'stdout': self.stdout,
- 'stderr': self.stderr,
- 'reason': self.reason,
- }
- IOError.__init__(self, message)
- # For backward compatibility with Python 2.
- if not hasattr(self, 'message'):
- self.message = message
-
-
-class SeLinuxGuard(object):
- def __init__(self, path, recursive=False):
- # Late import since it might not always
- # be possible to use this
- try:
- self.selinux = importer.import_module('selinux')
- except ImportError:
- self.selinux = None
- self.path = path
- self.recursive = recursive
-
- def __enter__(self):
- if self.selinux and self.selinux.is_selinux_enabled():
- return True
- else:
- return False
-
- def __exit__(self, excp_type, excp_value, excp_traceback):
- if not self.selinux or not self.selinux.is_selinux_enabled():
- return
- if not os.path.lexists(self.path):
- return
-
- path = os.path.realpath(self.path)
- # path should be a string, not unicode
- if six.PY2:
- path = str(path)
- try:
- stats = os.lstat(path)
- self.selinux.matchpathcon(path, stats[stat.ST_MODE])
- except OSError:
- return
-
- LOG.debug("Restoring selinux mode for %s (recursive=%s)",
- path, self.recursive)
- self.selinux.restorecon(path, recursive=self.recursive)
-
-
-class MountFailedError(Exception):
- pass
-
-
-class DecompressionError(Exception):
- pass
-
-
-def ExtendedTemporaryFile(**kwargs):
- fh = tempfile.NamedTemporaryFile(**kwargs)
- # Replace its unlink with a quiet version
- # that does not raise errors when the
-    # file to unlink has already been unlinked elsewhere.
- LOG.debug("Created temporary file %s", fh.name)
- fh.unlink = del_file
-
- # Add a new method that will unlink
- # right 'now' but still lets the exit
- # method attempt to remove it (which will
- # not throw due to our del file being quiet
- # about files that are not there)
- def unlink_now():
- fh.unlink(fh.name)
-
- setattr(fh, 'unlink_now', unlink_now)
- return fh
-
-
-def fork_cb(child_cb, *args, **kwargs):
- fid = os.fork()
- if fid == 0:
- try:
- child_cb(*args, **kwargs)
- os._exit(0)
- except Exception:
- logexc(LOG, "Failed forking and calling callback %s",
- type_utils.obj_name(child_cb))
- os._exit(1)
- else:
- LOG.debug("Forked child %s who will run callback %s",
- fid, type_utils.obj_name(child_cb))
-
-
-def is_true(val, addons=None):
- if isinstance(val, (bool)):
- return val is True
- check_set = TRUE_STRINGS
- if addons:
- check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
- return True
- return False
-
-
-def is_false(val, addons=None):
- if isinstance(val, (bool)):
- return val is False
- check_set = FALSE_STRINGS
- if addons:
- check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
- return True
- return False
-
-
-def translate_bool(val, addons=None):
- if not val:
- # This handles empty lists and false and
- # other things that python believes are false
- return False
-    # If it's already a boolean, skip
- if isinstance(val, (bool)):
- return val
- return is_true(val, addons)
-
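
(A short sketch of how the truthiness helpers above treat strings; results
follow from the TRUE_STRINGS/FALSE_STRINGS tuples defined earlier in this
file:)

    >>> from cloudinit import util
    >>> util.translate_bool('On')        # matching is case-insensitive
    True
    >>> util.translate_bool('0')
    False
    >>> util.is_true('enabled', addons=['enabled'])
    True
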
-
-def rand_str(strlen=32, select_from=None):
- if not select_from:
- select_from = string.ascii_letters + string.digits
- return "".join([random.choice(select_from) for _x in range(0, strlen)])
-
-
-def rand_dict_key(dictionary, postfix=None):
- if not postfix:
- postfix = ""
- while True:
- newkey = rand_str(strlen=8) + "_" + postfix
- if newkey not in dictionary:
- break
- return newkey
-
-
-def read_conf(fname):
- try:
- return load_yaml(load_file(fname), default={})
- except IOError as e:
- if e.errno == errno.ENOENT:
- return {}
- else:
- raise
-
-
-# Merges X lists, and then keeps the
-# unique ones, but orders by sort order
-# instead of by the original order
-def uniq_merge_sorted(*lists):
- return sorted(uniq_merge(*lists))
-
-
-# Merges X lists and then iterates over those
-# and only keeps the unique items (order preserving)
-# and returns that merged and uniqued list as the
-# final result.
-#
-# Note: if any entry is a string it will be
-# split on commas, and empty entries will be
-# removed before merging.
-def uniq_merge(*lists):
- combined_list = []
- for a_list in lists:
- if isinstance(a_list, six.string_types):
- a_list = a_list.strip().split(",")
- # Kickout the empty ones
- a_list = [a for a in a_list if len(a)]
- combined_list.extend(a_list)
- return uniq_list(combined_list)
-
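
(For illustration, how string entries are comma-split and de-duplicated while
preserving first-seen order:)

    >>> from cloudinit import util
    >>> util.uniq_merge('a,b', ['b', 'c'], 'c,,d')
    ['a', 'b', 'c', 'd']
    >>> util.uniq_merge_sorted('z,a', 'm')
    ['a', 'm', 'z']
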
-
-def clean_filename(fn):
- for (k, v) in FN_REPLACEMENTS.items():
- fn = fn.replace(k, v)
- removals = []
- for k in fn:
- if k not in FN_ALLOWED:
- removals.append(k)
- for k in removals:
- fn = fn.replace(k, '')
- fn = fn.strip()
- return fn
-
-
-def decomp_gzip(data, quiet=True, decode=True):
- try:
- buf = six.BytesIO(encode_text(data))
- with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
- if decode:
- return decode_binary(gh.read())
- else:
- return gh.read()
- except Exception as e:
- if quiet:
- return data
- else:
- raise DecompressionError(six.text_type(e))
-
-
-def extract_usergroup(ug_pair):
- if not ug_pair:
- return (None, None)
- ug_parted = ug_pair.split(':', 1)
- u = ug_parted[0].strip()
- if len(ug_parted) == 2:
- g = ug_parted[1].strip()
- else:
- g = None
- if not u or u == "-1" or u.lower() == "none":
- u = None
- if not g or g == "-1" or g.lower() == "none":
- g = None
- return (u, g)
-
-
-def find_modules(root_dir):
- entries = dict()
- for fname in glob.glob(os.path.join(root_dir, "*.py")):
- if not os.path.isfile(fname):
- continue
- modname = os.path.basename(fname)[0:-3]
- modname = modname.strip()
- if modname and modname.find(".") == -1:
- entries[fname] = modname
- return entries
-
-
-def multi_log(text, console=True, stderr=True,
- log=None, log_level=logging.DEBUG):
- if stderr:
- sys.stderr.write(text)
- if console:
- conpath = "/dev/console"
- if os.path.exists(conpath):
- with open(conpath, 'w') as wfh:
- wfh.write(text)
- wfh.flush()
- else:
- # A container may lack /dev/console (arguably a container bug). If
-            # it does not exist, then write output to stdout. This will result
-            # in duplicate stderr and stdout messages if stderr was True.
-            #
-            # Even though upstart or systemd might have set up output to go to
- # /dev/console, the user may have configured elsewhere via
- # cloud-config 'output'. If there is /dev/console, messages will
- # still get there.
- sys.stdout.write(text)
- if log:
- if text[-1] == "\n":
- log.log(log_level, text[:-1])
- else:
- log.log(log_level, text)
-
-
-def load_json(text, root_types=(dict,)):
- decoded = json.loads(decode_binary(text))
- if not isinstance(decoded, tuple(root_types)):
- expected_types = ", ".join([str(t) for t in root_types])
- raise TypeError("(%s) root types expected, got %s instead"
- % (expected_types, type(decoded)))
- return decoded
-
-
-def is_ipv4(instr):
- """determine if input string is a ipv4 address. return boolean."""
- toks = instr.split('.')
- if len(toks) != 4:
- return False
-
- try:
- toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
- except Exception:
- return False
-
- return len(toks) == 4
-
-
-def get_cfg_option_bool(yobj, key, default=False):
- if key not in yobj:
- return default
- return translate_bool(yobj[key])
-
-
-def get_cfg_option_str(yobj, key, default=None):
- if key not in yobj:
- return default
- val = yobj[key]
- if not isinstance(val, six.string_types):
- val = str(val)
- return val
-
-
-def get_cfg_option_int(yobj, key, default=0):
- return int(get_cfg_option_str(yobj, key, default=default))
-
-
-def system_info():
- return {
- 'platform': platform.platform(),
- 'release': platform.release(),
- 'python': platform.python_version(),
- 'uname': platform.uname(),
- 'dist': platform.linux_distribution(),
- }
-
-
-def get_cfg_option_list(yobj, key, default=None):
- """
- Gets the C{key} config option from C{yobj} as a list of strings. If the
- key is present as a single string it will be returned as a list with one
- string arg.
-
- @param yobj: The configuration object.
- @param key: The configuration key to get.
- @param default: The default to return if key is not found.
- @return: The configuration option as a list of strings or default if key
- is not found.
- """
- if key not in yobj:
- return default
- if yobj[key] is None:
- return []
- val = yobj[key]
- if isinstance(val, (list)):
- cval = [v for v in val]
- return cval
- if not isinstance(val, six.string_types):
- val = str(val)
- return [val]
-
-
-# get a cfg entry by its path array
-# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
-def get_cfg_by_path(yobj, keyp, default=None):
- cur = yobj
- for tok in keyp:
- if tok not in cur:
- return default
- cur = cur[tok]
- return cur
-
-
-def fixup_output(cfg, mode):
- (outfmt, errfmt) = get_output_cfg(cfg, mode)
- redirect_output(outfmt, errfmt)
- return (outfmt, errfmt)
-
-
-# redirect_output(outfmt, errfmt, orig_out, orig_err)
-# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
-# fmt can be:
-# > FILEPATH
-# >> FILEPATH
-# | program [ arg1 [ arg2 [ ... ] ] ]
-#
-# with a '|', arguments are passed to shell, so one level of
-# shell escape is required.
-#
-# if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non-empty and true
-# value then output will not be redirected (useful for debugging).
-#
-def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
-
- if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
- LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
- return
-
- if not o_out:
- o_out = sys.stdout
- if not o_err:
- o_err = sys.stderr
-
- if outfmt:
- LOG.debug("Redirecting %s to %s", o_out, outfmt)
- (mode, arg) = outfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("Invalid type for output format: %s" % outfmt)
-
- if o_out:
- os.dup2(new_fp.fileno(), o_out.fileno())
-
- if errfmt == outfmt:
- LOG.debug("Redirecting %s to %s", o_err, outfmt)
- os.dup2(new_fp.fileno(), o_err.fileno())
- return
-
- if errfmt:
- LOG.debug("Redirecting %s to %s", o_err, errfmt)
- (mode, arg) = errfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("Invalid type for error format: %s" % errfmt)
-
- if o_err:
- os.dup2(new_fp.fileno(), o_err.fileno())
-
-
-def make_url(scheme, host, port=None,
- path='', params='', query='', fragment=''):
-
- pieces = []
- pieces.append(scheme or '')
-
- netloc = ''
- if host:
- netloc = str(host)
-
- if port is not None:
- netloc += ":" + "%s" % (port)
-
- pieces.append(netloc or '')
- pieces.append(path or '')
- pieces.append(params or '')
- pieces.append(query or '')
- pieces.append(fragment or '')
-
- return urlparse.urlunparse(pieces)
-
-
-def mergemanydict(srcs, reverse=False):
- if reverse:
- srcs = reversed(srcs)
- merged_cfg = {}
- for cfg in srcs:
- if cfg:
- # Figure out which mergers to apply...
- mergers_to_apply = mergers.dict_extract_mergers(cfg)
- if not mergers_to_apply:
- mergers_to_apply = mergers.default_mergers()
- merger = mergers.construct(mergers_to_apply)
- merged_cfg = merger.merge(merged_cfg, cfg)
- return merged_cfg
-
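
(A hedged sketch of precedence: with the default mergers, keys already merged
from earlier sources are not replaced by later ones, so:)

    >>> from cloudinit import util
    >>> util.mergemanydict([{'a': 1}, {'a': 2, 'b': 3}])
    {'a': 1, 'b': 3}
    >>> util.mergemanydict([{'a': 1}, {'a': 2, 'b': 3}], reverse=True)
    {'a': 2, 'b': 3}
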
-
-@contextlib.contextmanager
-def chdir(ndir):
- curr = os.getcwd()
- try:
- os.chdir(ndir)
- yield ndir
- finally:
- os.chdir(curr)
-
-
-@contextlib.contextmanager
-def umask(n_msk):
- old = os.umask(n_msk)
- try:
- yield old
- finally:
- os.umask(old)
-
-
-@contextlib.contextmanager
-def tempdir(**kwargs):
- # This seems like it was only added in python 3.2
-    # Make it since it's useful...
- # See: http://bugs.python.org/file12970/tempdir.patch
- tdir = tempfile.mkdtemp(**kwargs)
- try:
- yield tdir
- finally:
- del_dir(tdir)
-
-
-def center(text, fill, max_len):
- return '{0:{fill}{align}{size}}'.format(text, fill=fill,
- align="^", size=max_len)
-
-
-def del_dir(path):
- LOG.debug("Recursively deleting %s", path)
- shutil.rmtree(path)
-
-
-def runparts(dirp, skip_no_exist=True, exe_prefix=None):
- if skip_no_exist and not os.path.isdir(dirp):
- return
-
- failed = []
- attempted = []
-
- if exe_prefix is None:
- prefix = []
- elif isinstance(exe_prefix, str):
- prefix = [str(exe_prefix)]
- elif isinstance(exe_prefix, list):
- prefix = exe_prefix
- else:
- raise TypeError("exe_prefix must be None, str, or list")
-
- for exe_name in sorted(os.listdir(dirp)):
- exe_path = os.path.join(dirp, exe_name)
- if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
- attempted.append(exe_path)
- try:
- subp(prefix + [exe_path], capture=False)
- except ProcessExecutionError as e:
- logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
- failed.append(e)
-
- if failed and attempted:
- raise RuntimeError('Runparts: %s failures in %s attempted commands'
- % (len(failed), len(attempted)))
-
-
-# read_optional_seed
-# returns boolean indicating success or failure (presence of files)
-# if files are present, populates 'fill' dictionary with 'user-data' and
-# 'meta-data' entries
-def read_optional_seed(fill, base="", ext="", timeout=5):
- try:
- (md, ud) = read_seeded(base, ext, timeout)
- fill['user-data'] = ud
- fill['meta-data'] = md
- return True
- except url_helper.UrlError as e:
- if e.code == url_helper.NOT_FOUND:
- return False
- raise
-
-
-def fetch_ssl_details(paths=None):
- ssl_details = {}
- # Lookup in these locations for ssl key/cert files
- ssl_cert_paths = [
- '/var/lib/cloud/data/ssl',
- '/var/lib/cloud/instance/data/ssl',
- ]
- if paths:
- ssl_cert_paths.extend([
- os.path.join(paths.get_ipath_cur('data'), 'ssl'),
- os.path.join(paths.get_cpath('data'), 'ssl'),
- ])
- ssl_cert_paths = uniq_merge(ssl_cert_paths)
- ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
- cert_file = None
- for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'cert.pem')):
- cert_file = os.path.join(d, 'cert.pem')
- break
- key_file = None
- for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'key.pem')):
- key_file = os.path.join(d, 'key.pem')
- break
- if cert_file and key_file:
- ssl_details['cert_file'] = cert_file
- ssl_details['key_file'] = key_file
- elif cert_file:
- ssl_details['cert_file'] = cert_file
- return ssl_details
-
-
-def read_file_or_url(url, timeout=5, retries=10,
- headers=None, data=None, sec_between=1, ssl_details=None,
- headers_cb=None, exception_cb=None):
- url = url.lstrip()
- if url.startswith("/"):
- url = "file://%s" % url
- if url.lower().startswith("file://"):
- if data:
- LOG.warn("Unable to post data to file resource %s", url)
- file_path = url[len("file://"):]
- try:
- contents = load_file(file_path, decode=False)
- except IOError as e:
- code = e.errno
- if e.errno == errno.ENOENT:
- code = url_helper.NOT_FOUND
- raise url_helper.UrlError(cause=e, code=code, headers=None,
- url=url)
- return url_helper.FileResponse(file_path, contents=contents)
- else:
- return url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- headers_cb=headers_cb,
- data=data,
- sec_between=sec_between,
- ssl_details=ssl_details,
- exception_cb=exception_cb)
-
-
-def load_yaml(blob, default=None, allowed=(dict,)):
- loaded = default
- blob = decode_binary(blob)
- try:
- LOG.debug("Attempting to load yaml from string "
- "of length %s with allowed root types %s",
- len(blob), allowed)
- converted = safeyaml.load(blob)
- if not isinstance(converted, allowed):
-            # Yes this will just be caught, but that's ok for now...
- raise TypeError(("Yaml load allows %s root types,"
- " but got %s instead") %
- (allowed, type_utils.obj_name(converted)))
- loaded = converted
- except (yaml.YAMLError, TypeError, ValueError):
- if len(blob) == 0:
- LOG.debug("load_yaml given empty string, returning default")
- else:
- logexc(LOG, "Failed loading yaml blob")
- return loaded
-
-
-def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
- if base.startswith("/"):
- base = "file://%s" % base
-
-    # default retries for file is 0; for network it is 10
- if base.startswith("file://"):
- retries = file_retries
-
- if base.find("%s") >= 0:
- ud_url = base % ("user-data" + ext)
- md_url = base % ("meta-data" + ext)
- else:
- ud_url = "%s%s%s" % (base, "user-data", ext)
- md_url = "%s%s%s" % (base, "meta-data", ext)
-
-    md_resp = read_file_or_url(md_url, timeout, retries)
- md = None
- if md_resp.ok():
- md = load_yaml(decode_binary(md_resp.contents), default={})
-
-    ud_resp = read_file_or_url(ud_url, timeout, retries)
- ud = None
- if ud_resp.ok():
- ud = ud_resp.contents
-
- return (md, ud)
-
-
-def read_conf_d(confd):
-    # Get reverse sorted list (files later in sort order take precedence)
- confs = sorted(os.listdir(confd), reverse=True)
-
- # Remove anything not ending in '.cfg'
- confs = [f for f in confs if f.endswith(".cfg")]
-
- # Remove anything not a file
- confs = [f for f in confs
- if os.path.isfile(os.path.join(confd, f))]
-
- # Load them all so that they can be merged
- cfgs = []
- for fn in confs:
- cfgs.append(read_conf(os.path.join(confd, fn)))
-
- return mergemanydict(cfgs)
-
-
-def read_conf_with_confd(cfgfile):
- cfg = read_conf(cfgfile)
-
- confd = False
- if "conf_d" in cfg:
- confd = cfg['conf_d']
- if confd:
- if not isinstance(confd, six.string_types):
- raise TypeError(("Config file %s contains 'conf_d' "
- "with non-string type %s") %
- (cfgfile, type_utils.obj_name(confd)))
- else:
- confd = str(confd).strip()
- elif os.path.isdir("%s.d" % cfgfile):
- confd = "%s.d" % cfgfile
-
- if not confd or not os.path.isdir(confd):
- return cfg
-
- # Conf.d settings override input configuration
- confd_cfg = read_conf_d(confd)
- return mergemanydict([confd_cfg, cfg])
-
-
-def read_cc_from_cmdline(cmdline=None):
- # this should support reading cloud-config information from
- # the kernel command line. It is intended to support content of the
- # format:
- # cc: <yaml content here> [end_cc]
- # this would include:
- # cc: ssh_import_id: [smoser, kirkland]\\n
- # cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
- # cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
- if cmdline is None:
- cmdline = get_cmdline()
-
- tag_begin = "cc:"
- tag_end = "end_cc"
- begin_l = len(tag_begin)
- end_l = len(tag_end)
- clen = len(cmdline)
- tokens = []
- begin = cmdline.find(tag_begin)
- while begin >= 0:
- end = cmdline.find(tag_end, begin + begin_l)
- if end < 0:
- end = clen
- tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
- "\n"))
-
- begin = cmdline.find(tag_begin, end + end_l)
-
- return '\n'.join(tokens)
-
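
(For illustration, a minimal sketch of extracting the cc:...end_cc payload;
note the token keeps any whitespace that preceded end_cc:)

    >>> from cloudinit import util
    >>> util.read_cc_from_cmdline(
    ...     "root=/dev/sda cc: ssh_import_id: [smoser]\\nruncmd: [ls] end_cc")
    'ssh_import_id: [smoser]\nruncmd: [ls] '
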
-
-def dos2unix(contents):
- # find first end of line
- pos = contents.find('\n')
- if pos <= 0 or contents[pos - 1] != '\r':
- return contents
- return contents.replace('\r\n', '\n')
-
-
-def get_hostname_fqdn(cfg, cloud):
- # return the hostname and fqdn from 'cfg'. If not found in cfg,
- # then fall back to data from cloud
- if "fqdn" in cfg:
- # user specified a fqdn. Default hostname then is based off that
- fqdn = cfg['fqdn']
- hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
- else:
- if "hostname" in cfg and cfg['hostname'].find('.') > 0:
- # user specified hostname, and it had '.' in it
- # be nice to them. set fqdn and hostname from that
- fqdn = cfg['hostname']
- hostname = cfg['hostname'][:fqdn.find('.')]
- else:
- # no fqdn set, get fqdn from cloud.
- # get hostname from cfg if available otherwise cloud
- fqdn = cloud.get_hostname(fqdn=True)
- if "hostname" in cfg:
- hostname = cfg['hostname']
- else:
- hostname = cloud.get_hostname()
- return (hostname, fqdn)
-
-
-def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
- """
- For each host a single line should be present with
- the following information:
-
- IP_address canonical_hostname [aliases...]
-
- Fields of the entry are separated by any number of blanks and/or tab
- characters. Text from a "#" character until the end of the line is a
- comment, and is ignored. Host names may contain only alphanumeric
- characters, minus signs ("-"), and periods ("."). They must begin with
- an alphabetic character and end with an alphanumeric character.
- Optional aliases provide for name changes, alternate spellings, shorter
- hostnames, or generic hostnames (for example, localhost).
- """
- fqdn = None
- try:
- for line in load_file(filename).splitlines():
- hashpos = line.find("#")
- if hashpos >= 0:
- line = line[0:hashpos]
- line = line.strip()
- if not line:
- continue
-
-            # If there are fewer than 3 entries
- # (IP_address, canonical_hostname, alias)
- # then ignore this line
- toks = line.split()
- if len(toks) < 3:
- continue
-
- if hostname in toks[2:]:
- fqdn = toks[1]
- break
- except IOError:
- pass
- return fqdn
-
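
(A minimal sketch of the /etc/hosts lookup described above, using a temporary
file so the snippet is self-contained:)

    import tempfile
    from cloudinit import util

    with tempfile.NamedTemporaryFile(mode='w') as fh:
        fh.write("127.0.1.1 box.example.com box  # our own entry\n")
        fh.flush()
        # 'box' appears in the alias columns, so its canonical name is used
        print(util.get_fqdn_from_hosts('box', filename=fh.name))
    # -> box.example.com
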
-
-def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts=b"#cloud-config", cmdline=None):
- if cmdline is None:
- cmdline = get_cmdline()
-
- data = keyval_str_to_dict(cmdline)
- url = None
- key = None
- for key in names:
- if key in data:
- url = data[key]
- break
-
- if not url:
- return (None, None, None)
-
- resp = read_file_or_url(url)
- # allow callers to pass starts as text when comparing to bytes contents
- starts = encode_text(starts)
- if resp.ok() and resp.contents.startswith(starts):
- return (key, url, resp.contents)
-
- return (key, url, None)
-
-
-def is_resolvable(name):
-    """determine if a name is resolvable, return a boolean
-    This also attempts to be resilient against dns redirection.
-
- Note, that normal nsswitch resolution is used here. So in order
- to avoid any utilization of 'search' entries in /etc/resolv.conf
- we have to append '.'.
-
- The top level 'invalid' domain is invalid per RFC. And example.com
- should also not exist. The random entry will be resolved inside
- the search list.
- """
- global _DNS_REDIRECT_IP
- if _DNS_REDIRECT_IP is None:
- badips = set()
- badnames = ("does-not-exist.example.com.", "example.invalid.",
- rand_str())
- badresults = {}
- for iname in badnames:
- try:
- result = socket.getaddrinfo(iname, None, 0, 0,
- socket.SOCK_STREAM,
- socket.AI_CANONNAME)
- badresults[iname] = []
- for (_fam, _stype, _proto, cname, sockaddr) in result:
- badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
- badips.add(sockaddr[0])
- except (socket.gaierror, socket.error):
- pass
- _DNS_REDIRECT_IP = badips
- if badresults:
- LOG.debug("detected dns redirection: %s", badresults)
-
- try:
- result = socket.getaddrinfo(name, None)
- # check first result's sockaddr field
- addr = result[0][4][0]
- if addr in _DNS_REDIRECT_IP:
- return False
- return True
- except (socket.gaierror, socket.error):
- return False
-
-
-def get_hostname():
- hostname = socket.gethostname()
- return hostname
-
-
-def gethostbyaddr(ip):
- try:
- return socket.gethostbyaddr(ip)[0]
- except socket.herror:
- return None
-
-
-def is_resolvable_url(url):
- """determine if this url is resolvable (existing or ip)."""
- return is_resolvable(urlparse.urlparse(url).hostname)
-
-
-def search_for_mirror(candidates):
- """
- Search through a list of mirror urls for one that works
- This needs to return quickly.
- """
- for cand in candidates:
- try:
- if is_resolvable_url(cand):
- return cand
- except Exception:
- pass
- return None
-
-
-def close_stdin():
- """
- reopen stdin as /dev/null so even subprocesses or other os level things get
- /dev/null as input.
-
- if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
- value then input will not be closed (useful for debugging).
- """
- if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
- return
- with open(os.devnull) as fp:
- os.dup2(fp.fileno(), sys.stdin.fileno())
-
-
-def find_devs_with(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
- """
- find devices matching given criteria (via blkid)
- criteria can be *one* of:
- TYPE=<filesystem>
- LABEL=<label>
- UUID=<uuid>
- """
- blk_id_cmd = ['blkid']
- options = []
- if criteria:
- # Search for block devices with tokens named NAME that
- # have the value 'value' and display any devices which are found.
- # Common values for NAME include TYPE, LABEL, and UUID.
- # If there are no devices specified on the command line,
- # all block devices will be searched; otherwise,
- # only search the devices specified by the user.
- options.append("-t%s" % (criteria))
- if tag:
- # For each (specified) device, show only the tags that match tag.
- options.append("-s%s" % (tag))
- if no_cache:
- # If you want to start with a clean cache
- # (i.e. don't report devices previously scanned
- # but not necessarily available at this time), specify /dev/null.
- options.extend(["-c", "/dev/null"])
- if oformat:
- # Display blkid's output using the specified format.
- # The format parameter may be:
- # full, value, list, device, udev, export
- options.append('-o%s' % (oformat))
- if path:
- options.append(path)
- cmd = blk_id_cmd + options
-    # See man blkid for why return code 2 is allowed (nothing was found)
- try:
- (out, _err) = subp(cmd, rcs=[0, 2])
- except ProcessExecutionError as e:
- if e.errno == errno.ENOENT:
- # blkid not found...
- out = ""
- else:
- raise
- entries = []
- for line in out.splitlines():
- line = line.strip()
- if line:
- entries.append(line)
- return entries
-
-
-def peek_file(fname, max_bytes):
- LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
- with open(fname, 'rb') as ifh:
- return ifh.read(max_bytes)
-
-
-def uniq_list(in_list):
- out_list = []
- for i in in_list:
- if i in out_list:
- continue
- else:
- out_list.append(i)
- return out_list
-
-
-def load_file(fname, read_cb=None, quiet=False, decode=True):
- LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
- ofh = six.BytesIO()
- try:
- with open(fname, 'rb') as ifh:
- pipe_in_out(ifh, ofh, chunk_cb=read_cb)
- except IOError as e:
- if not quiet:
- raise
- if e.errno != errno.ENOENT:
- raise
- contents = ofh.getvalue()
- LOG.debug("Read %s bytes from %s", len(contents), fname)
- if decode:
- return decode_binary(contents)
- else:
- return contents
-
-
-def get_cmdline():
- if 'DEBUG_PROC_CMDLINE' in os.environ:
- return os.environ["DEBUG_PROC_CMDLINE"]
-
- global PROC_CMDLINE
- if PROC_CMDLINE is not None:
- return PROC_CMDLINE
-
- if is_container():
- try:
- contents = load_file("/proc/1/cmdline")
- # replace nulls with space and drop trailing null
- cmdline = contents.replace("\x00", " ")[:-1]
- except Exception as e:
- LOG.warn("failed reading /proc/1/cmdline: %s", e)
- cmdline = ""
- else:
- try:
- cmdline = load_file("/proc/cmdline").strip()
- except Exception:
- cmdline = ""
-
- PROC_CMDLINE = cmdline
- return cmdline
-
-
-def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
- bytes_piped = 0
- while True:
- data = in_fh.read(chunk_size)
- if len(data) == 0:
- break
- else:
- out_fh.write(data)
- bytes_piped += len(data)
- if chunk_cb:
- chunk_cb(bytes_piped)
- out_fh.flush()
- return bytes_piped
-
-
-def chownbyid(fname, uid=None, gid=None):
- if uid in [None, -1] and gid in [None, -1]:
- # Nothing to do
- return
- LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
- os.chown(fname, uid, gid)
-
-
-def chownbyname(fname, user=None, group=None):
- uid = -1
- gid = -1
- try:
- if user:
- uid = pwd.getpwnam(user).pw_uid
- if group:
- gid = grp.getgrnam(group).gr_gid
- except KeyError as e:
- raise OSError("Unknown user or group: %s" % (e))
- chownbyid(fname, uid, gid)
-
-
-# Always returns well-formatted values
-# cfg is expected to have an entry 'output' in it, which is a dictionary
-# that includes entries for 'init', 'config', 'final' or 'all'
-# init: /var/log/cloud.out
-# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
-# final:
-# output: "| logger -p"
-# error: "> /dev/null"
-# this returns the specific 'mode' entry, cleanly formatted, with value
-def get_output_cfg(cfg, mode):
- ret = [None, None]
- if not cfg or 'output' not in cfg:
- return ret
-
- outcfg = cfg['output']
- if mode in outcfg:
- modecfg = outcfg[mode]
- else:
- if 'all' not in outcfg:
- return ret
- # if there is a 'all' item in the output list
- # then it applies to all users of this (init, config, final)
- modecfg = outcfg['all']
-
- # if value is a string, it specifies stdout and stderr
- if isinstance(modecfg, str):
- ret = [modecfg, modecfg]
-
- # if its a list, then we expect (stdout, stderr)
- if isinstance(modecfg, list):
- if len(modecfg) > 0:
- ret[0] = modecfg[0]
- if len(modecfg) > 1:
- ret[1] = modecfg[1]
-
- # if it is a dictionary, expect 'out' and 'error'
- # items, which indicate out and error
- if isinstance(modecfg, dict):
- if 'output' in modecfg:
- ret[0] = modecfg['output']
- if 'error' in modecfg:
- ret[1] = modecfg['error']
-
- # if err's entry == "&1", then make it same as stdout
- # as in shell syntax of "echo foo >/dev/null 2>&1"
- if ret[1] == "&1":
- ret[1] = ret[0]
-
- swlist = [">>", ">", "|"]
- for i in range(len(ret)):
- if not ret[i]:
- continue
- val = ret[i].lstrip()
- found = False
- for s in swlist:
- if val.startswith(s):
- val = "%s %s" % (s, val[len(s):].strip())
- found = True
- break
- if not found:
- # default behavior is append
- val = "%s %s" % (">>", val.strip())
- ret[i] = val
-
- return ret
-
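
(For illustration, the accepted value shapes resolve as follows; note how
bare paths are normalized to append (">>") and "&1" copies stdout:)

    >>> from cloudinit import util
    >>> cfg = {'output': {'init': '/var/log/cloud-init.out',
    ...                   'all': {'output': '| tee -a /var/log/cloud.out',
    ...                           'error': '&1'}}}
    >>> util.get_output_cfg(cfg, 'init')
    ['>> /var/log/cloud-init.out', '>> /var/log/cloud-init.out']
    >>> util.get_output_cfg(cfg, 'config')   # falls back to the 'all' entry
    ['| tee -a /var/log/cloud.out', '| tee -a /var/log/cloud.out']
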
-
-def logexc(log, msg, *args):
-    # Set the level here so it is easy to change;
-    # an error level is not always wanted, nor is it
-    # always desirable to have that much junk
-    # coming out to a non-debug stream
- if msg:
- log.warn(msg, *args)
- # Debug gets the full trace. However, nose has a bug whereby its
- # logcapture plugin doesn't properly handle the case where there is no
- # actual exception. To avoid tracebacks during the test suite then, we'll
- # do the actual exc_info extraction here, and if there is no exception in
- # flight, we'll just pass in None.
- exc_info = sys.exc_info()
- if exc_info == (None, None, None):
- exc_info = None
- log.debug(msg, exc_info=exc_info, *args)
-
-
-def hash_blob(blob, routine, mlen=None):
- hasher = hashlib.new(routine)
- hasher.update(encode_text(blob))
- digest = hasher.hexdigest()
-    # Don't get too long now
- if mlen is not None:
- return digest[0:mlen]
- else:
- return digest
-
-
-def is_user(name):
- try:
- if pwd.getpwnam(name):
- return True
- except KeyError:
- return False
-
-
-def is_group(name):
- try:
- if grp.getgrnam(name):
- return True
- except KeyError:
- return False
-
-
-def rename(src, dest):
- LOG.debug("Renaming %s to %s", src, dest)
- # TODO(harlowja) use a se guard here??
- os.rename(src, dest)
-
-
-def ensure_dirs(dirlist, mode=0o755):
- for d in dirlist:
- ensure_dir(d, mode)
-
-
-def read_write_cmdline_url(target_fn):
- if not os.path.exists(target_fn):
- try:
- (key, url, content) = get_cmdline_url()
- except Exception:
- logexc(LOG, "Failed fetching command line url")
- return
- try:
- if key and content:
- write_file(target_fn, content, mode=0o600)
- LOG.debug(("Wrote to %s with contents of command line"
- " url %s (len=%s)"), target_fn, url, len(content))
- elif key and not content:
- LOG.debug(("Command line key %s with url"
- " %s had no contents"), key, url)
- except Exception:
- logexc(LOG, "Failed writing url content to %s", target_fn)
-
-
-def yaml_dumps(obj, explicit_start=True, explicit_end=True):
- return yaml.safe_dump(obj,
- line_break="\n",
- indent=4,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- default_flow_style=False)
-
-
-def ensure_dir(path, mode=None):
- if not os.path.isdir(path):
- # Make the dir and adjust the mode
- with SeLinuxGuard(os.path.dirname(path), recursive=True):
- os.makedirs(path)
- chmod(path, mode)
- else:
- # Just adjust the mode
- chmod(path, mode)
-
-
-@contextlib.contextmanager
-def unmounter(umount):
- try:
- yield umount
- finally:
- if umount:
- umount_cmd = ["umount", umount]
- subp(umount_cmd)
-
-
-def mounts():
- mounted = {}
- try:
- # Go through mounts to see what is already mounted
- if os.path.exists("/proc/mounts"):
- mount_locs = load_file("/proc/mounts").splitlines()
- method = 'proc'
- else:
- (mountoutput, _err) = subp("mount")
- mount_locs = mountoutput.splitlines()
- method = 'mount'
- mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
- for mpline in mount_locs:
- # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
- # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
- try:
- if method == 'proc':
- (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
- else:
- m = re.search(mountre, mpline)
- dev = m.group(1)
- mp = m.group(2)
- fstype = m.group(3)
- opts = m.group(4)
- except Exception:
- continue
- # If the name of the mount point contains spaces these
- # can be escaped as '\040', so undo that..
- mp = mp.replace("\\040", " ")
- mounted[dev] = {
- 'fstype': fstype,
- 'mountpoint': mp,
- 'opts': opts,
- }
- LOG.debug("Fetched %s mounts from %s", mounted, method)
- except (IOError, OSError):
- logexc(LOG, "Failed fetching mount points")
- return mounted
-
-
-def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
- """
- Mount the device, call method 'callback' passing the directory
- in which it was mounted, then unmount. Return whatever 'callback'
- returned. If data != None, also pass data to callback.
-
- mtype is a filesystem type. it may be a list, string (a single fsname)
- or a list of fsnames.
- """
-
-    if isinstance(mtype, str):
-        mtypes = [mtype]
-    elif isinstance(mtype, (list, tuple)):
-        mtypes = list(mtype)
-    elif mtype is None:
-        mtypes = None
-    else:
-        raise TypeError(
-            'Unsupported type provided for mtype parameter: %s' %
-            (type_utils.obj_name(mtype)))
-
- # clean up 'mtype' input a bit based on platform.
- platsys = platform.system().lower()
- if platsys == "linux":
- if mtypes is None:
- mtypes = ["auto"]
- elif platsys.endswith("bsd"):
- if mtypes is None:
- mtypes = ['ufs', 'cd9660', 'vfat']
- for index, mtype in enumerate(mtypes):
- if mtype == "iso9660":
- mtypes[index] = "cd9660"
- else:
- # we cannot do a smart "auto", so just call 'mount' once with no -t
- mtypes = ['']
-
- mounted = mounts()
- with tempdir() as tmpd:
- umount = False
- if os.path.realpath(device) in mounted:
- mountpoint = mounted[os.path.realpath(device)]['mountpoint']
- else:
- failure_reason = None
- for mtype in mtypes:
- mountpoint = None
- try:
- mountcmd = ['mount']
- mountopts = []
- if rw:
- mountopts.append('rw')
- else:
- mountopts.append('ro')
- if sync:
- # This seems like the safe approach to do
- # (ie where this is on by default)
- mountopts.append("sync")
- if mountopts:
- mountcmd.extend(["-o", ",".join(mountopts)])
- if mtype:
- mountcmd.extend(['-t', mtype])
- mountcmd.append(device)
- mountcmd.append(tmpd)
- subp(mountcmd)
- umount = tmpd # This forces it to be unmounted (when set)
- mountpoint = tmpd
- break
- except (IOError, OSError) as exc:
- LOG.debug("Failed mount of '%s' as '%s': %s",
- device, mtype, exc)
- failure_reason = exc
- if not mountpoint:
- raise MountFailedError("Failed mounting %s to %s due to: %s" %
- (device, tmpd, failure_reason))
-
- # Be nice and ensure it ends with a slash
- if not mountpoint.endswith("/"):
- mountpoint += "/"
- with unmounter(umount):
- if data is None:
- ret = callback(mountpoint)
- else:
- ret = callback(mountpoint, data)
- return ret
-
-
-def get_builtin_cfg():
- # Deep copy so that others can't modify
- return obj_copy.deepcopy(CFG_BUILTIN)
-
-
-def sym_link(source, link, force=False):
- LOG.debug("Creating symbolic link from %r => %r", link, source)
- if force and os.path.exists(link):
- del_file(link)
- os.symlink(source, link)
-
-
-def del_file(path):
- LOG.debug("Attempting to remove %s", path)
- try:
- os.unlink(path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise e
-
-
-def copy(src, dest):
- LOG.debug("Copying %s to %s", src, dest)
- shutil.copy(src, dest)
-
-
-def time_rfc2822():
- try:
- ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
- except Exception:
- ts = "??"
- return ts
-
-
-def uptime():
- uptime_str = '??'
- method = 'unknown'
- try:
- if os.path.exists("/proc/uptime"):
- method = '/proc/uptime'
- contents = load_file("/proc/uptime")
- if contents:
- uptime_str = contents.split()[0]
- else:
- method = 'ctypes'
- libc = ctypes.CDLL('/lib/libc.so.7')
- size = ctypes.c_size_t()
- buf = ctypes.c_int()
- size.value = ctypes.sizeof(buf)
-            # the sysctl name must be bytes under Python 3
-            libc.sysctlbyname(b"kern.boottime", ctypes.byref(buf),
-                              ctypes.byref(size), None, 0)
- now = time.time()
- bootup = buf.value
- uptime_str = now - bootup
-
- except Exception:
- logexc(LOG, "Unable to read uptime using method: %s" % method)
- return uptime_str
-
-
-def append_file(path, content):
- write_file(path, content, omode="ab", mode=None)
-
-
-def ensure_file(path, mode=0o644):
- write_file(path, content='', omode="ab", mode=mode)
-
-
-def safe_int(possible_int):
- try:
- return int(possible_int)
- except (ValueError, TypeError):
- return None
-
-
-def chmod(path, mode):
- real_mode = safe_int(mode)
- if path and real_mode:
- with SeLinuxGuard(path):
- os.chmod(path, real_mode)
-
-
-def write_file(filename, content, mode=0o644, omode="wb"):
- """
- Writes a file with the given content and sets the file mode as specified.
-    Restores the SELinux context if possible.
-
- @param filename: The full path of the file to write.
- @param content: The content to write to the file.
- @param mode: The filesystem mode to set on the file.
- @param omode: The open mode used when opening the file (w, wb, a, etc.)
- """
- ensure_dir(os.path.dirname(filename))
- if 'b' in omode.lower():
- content = encode_text(content)
- write_type = 'bytes'
- else:
- content = decode_binary(content)
- write_type = 'characters'
- LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode, len(content), write_type)
- with SeLinuxGuard(path=filename):
- with open(filename, omode) as fh:
- fh.write(content)
- fh.flush()
- chmod(filename, mode)
-
-
-def delete_dir_contents(dirname):
- """
- Deletes all contents of a directory without deleting the directory itself.
-
- @param dirname: The directory whose contents should be deleted.
- """
- for node in os.listdir(dirname):
- node_fullpath = os.path.join(dirname, node)
- if os.path.isdir(node_fullpath):
- del_dir(node_fullpath)
- else:
- del_file(node_fullpath)
-
-
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
- logstring=False):
- if rcs is None:
- rcs = [0]
- try:
-
- if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
- else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
-
- if not capture:
- stdout = None
- stderr = None
- else:
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- stdin = subprocess.PIPE
- kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- if six.PY3:
- # Use this so subprocess output will be (Python 3) str, not bytes.
- kws['universal_newlines'] = True
- sp = subprocess.Popen(args, **kws)
- (out, err) = sp.communicate(data)
- except OSError as e:
- raise ProcessExecutionError(cmd=args, reason=e,
- errno=e.errno)
- rc = sp.returncode
- if rc not in rcs:
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
- # Just ensure blank instead of none?? (iff capturing)
- if not out and capture:
- out = ''
- if not err and capture:
- err = ''
- return (out, err)
-
-
-def make_header(comment_char="#", base='created'):
- ci_ver = version.version_string()
- header = str(comment_char)
- header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
- header += " on %s" % time_rfc2822()
- return header
-
-
-def abs_join(*paths):
- return os.path.abspath(os.path.join(*paths))
-
-
-# shellify, takes a list of commands
-# for each entry in the list
-# if it is an array, shell protect it (with single ticks)
-# if it is a string, do nothing
-def shellify(cmdlist, add_header=True):
- content = ''
- if add_header:
- content += "#!/bin/sh\n"
- escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
- cmds_made = 0
- for args in cmdlist:
- # If the item is a list, wrap all items in single tick.
- # If its not, then just write it directly.
- if isinstance(args, list):
- fixed = []
- for f in args:
- fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
- content = "%s%s\n" % (content, ' '.join(fixed))
- cmds_made += 1
- elif isinstance(args, six.string_types):
- content = "%s%s\n" % (content, args)
- cmds_made += 1
- else:
- raise RuntimeError(("Unable to shellify type %s"
- " which is not a list or string")
- % (type_utils.obj_name(args)))
- LOG.debug("Shellified %s commands.", cmds_made)
- return content
-
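
(A quick sketch of the quoting behavior; list entries are single-quoted with
embedded ticks escaped, string entries pass through verbatim:)

    from cloudinit import util

    print(util.shellify([['echo', "it's ok"], 'date > /tmp/now']))
    # #!/bin/sh
    # 'echo' 'it'\''s ok'
    # date > /tmp/now
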
-
-def strip_prefix_suffix(line, prefix=None, suffix=None):
- if prefix and line.startswith(prefix):
- line = line[len(prefix):]
- if suffix and line.endswith(suffix):
- line = line[:-len(suffix)]
- return line
-
-
-def is_container():
- """
-    Checks to see if this code is running in a container of some sort
- """
-
- for helper in CONTAINER_TESTS:
- try:
- # try to run a helper program. if it returns true/zero
- # then we're inside a container. otherwise, no
- subp(helper)
- return True
- except (IOError, OSError):
- pass
-
- # this code is largely from the logic in
- # ubuntu's /etc/init/container-detect.conf
- try:
- # Detect old-style libvirt
- # Detect OpenVZ containers
- pid1env = get_proc_env(1)
- if "container" in pid1env:
- return True
- if "LIBVIRT_LXC_UUID" in pid1env:
- return True
- except (IOError, OSError):
- pass
-
- # Detect OpenVZ containers
- if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
- return True
-
- try:
- # Detect Vserver containers
- lines = load_file("/proc/self/status").splitlines()
- for line in lines:
- if line.startswith("VxID:"):
- (_key, val) = line.strip().split(":", 1)
- if val != "0":
- return True
- except (IOError, OSError):
- pass
-
- return False
-
-
-def get_proc_env(pid):
- """
- Return the environment in a dict that a given process id was started with.
- """
-
- env = {}
- fn = os.path.join("/proc/", str(pid), "environ")
- try:
- contents = load_file(fn)
- toks = contents.split("\x00")
- for tok in toks:
- if tok == "":
- continue
- (name, val) = tok.split("=", 1)
- if name:
- env[name] = val
- except (IOError, OSError):
- pass
- return env
-
-
-def keyval_str_to_dict(kvstring):
- ret = {}
- for tok in kvstring.split():
- try:
- (key, val) = tok.split("=", 1)
- except ValueError:
- key = tok
- val = True
- ret[key] = val
- return ret
-
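
(For illustration: valueless tokens map to True, mirroring kernel command
line flags:)

    >>> from cloudinit import util
    >>> d = util.keyval_str_to_dict("root=/dev/sda1 quiet ds=nocloud")
    >>> d['root'], d['quiet'], d['ds']
    ('/dev/sda1', True, 'nocloud')
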
-
-def is_partition(device):
- if device.startswith("/dev/"):
- device = device[5:]
-
- return os.path.isfile("/sys/class/block/%s/partition" % device)
-
-
-def expand_package_list(version_fmt, pkgs):
- # we will accept tuples, lists of tuples, or just plain lists
- if not isinstance(pkgs, list):
- pkgs = [pkgs]
-
- pkglist = []
- for pkg in pkgs:
- if isinstance(pkg, six.string_types):
- pkglist.append(pkg)
- continue
-
- if isinstance(pkg, (tuple, list)):
- if len(pkg) < 1 or len(pkg) > 2:
- raise RuntimeError("Invalid package & version tuple.")
-
- if len(pkg) == 2 and pkg[1]:
- pkglist.append(version_fmt % tuple(pkg))
- continue
-
- pkglist.append(pkg[0])
-
- else:
- raise RuntimeError("Invalid package type.")
-
- return pkglist
-
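
(A minimal sketch of the accepted shapes; the apt-style "%s=%s" version_fmt
here is only an example value:)

    >>> from cloudinit import util
    >>> util.expand_package_list('%s=%s', ['vim', ('httpd', '2.4'), ['git']])
    ['vim', 'httpd=2.4', 'git']
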
-
-def parse_mount_info(path, mountinfo_lines, log=LOG):
- """Return the mount information for PATH given the lines from
- /proc/$$/mountinfo."""
-
- path_elements = [e for e in path.split('/') if e]
- devpth = None
- fs_type = None
- match_mount_point = None
- match_mount_point_elements = None
- for i, line in enumerate(mountinfo_lines):
- parts = line.split()
-
- # Completely fail if there is anything in any line that is
- # unexpected, as continuing to parse past a bad line could
- # cause an incorrect result to be returned, so it's better
- # return nothing than an incorrect result.
-
- # The minimum number of elements in a valid line is 10.
- if len(parts) < 10:
-            log.debug("Line %d has too few columns (%d): %s",
- i + 1, len(parts), line)
- return None
-
- mount_point = parts[4]
- mount_point_elements = [e for e in mount_point.split('/') if e]
-
- # Ignore mounts deeper than the path in question.
- if len(mount_point_elements) > len(path_elements):
- continue
-
- # Ignore mounts where the common path is not the same.
-        common_len = min(len(mount_point_elements), len(path_elements))
-        if mount_point_elements[0:common_len] != path_elements[0:common_len]:
- continue
-
- # Ignore mount points higher than an already seen mount
- # point.
- if (match_mount_point_elements is not None and
- len(match_mount_point_elements) > len(mount_point_elements)):
- continue
-
- # Find the '-' which terminates a list of optional columns to
- # find the filesystem type and the path to the device. See
- # man 5 proc for the format of this file.
-        try:
-            # Use a separate name so the loop index 'i' still refers to
-            # the line number in the log messages below.
-            sep = parts.index('-')
-        except ValueError:
-            log.debug("Did not find column named '-' in line %d: %s",
-                      i + 1, line)
-            return None
-
-        # Get the path to the device.
-        try:
-            fs_type = parts[sep + 1]
-            devpth = parts[sep + 2]
-        except IndexError:
-            log.debug("Too few columns after '-' column in line %d: %s",
-                      i + 1, line)
- return None
-
- match_mount_point = mount_point
- match_mount_point_elements = mount_point_elements
-
- if devpth and fs_type and match_mount_point:
- return (devpth, fs_type, match_mount_point)
- else:
- return None
-
-
-def parse_mtab(path):
- """On older kernels there's no /proc/$$/mountinfo, so use mtab."""
- for line in load_file("/etc/mtab").splitlines():
- devpth, mount_point, fs_type = line.split()[:3]
- if mount_point == path:
- return devpth, fs_type, mount_point
- return None
-
-
-def parse_mount(path):
- (mountoutput, _err) = subp("mount")
- mount_locs = mountoutput.splitlines()
- for line in mount_locs:
-        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
-        if not m:
-            # Skip lines (tmpfs, proc, ...) that do not match the pattern.
-            continue
-        devpth = m.group(1)
-        mount_point = m.group(2)
-        fs_type = m.group(3)
- if mount_point == path:
- return devpth, fs_type, mount_point
- return None
-
-
-def get_mount_info(path, log=LOG):
- # Use /proc/$$/mountinfo to find the device where path is mounted.
- # This is done because with a btrfs filesystem using os.stat(path)
- # does not return the ID of the device.
- #
- # Here, / has a device of 18 (decimal).
- #
- # $ stat /
- # File: '/'
- # Size: 234 Blocks: 0 IO Block: 4096 directory
- # Device: 12h/18d Inode: 256 Links: 1
- # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
- # Access: 2013-01-13 07:31:04.358011255 +0000
- # Modify: 2013-01-13 18:48:25.930011255 +0000
- # Change: 2013-01-13 18:48:25.930011255 +0000
- # Birth: -
- #
- # Find where / is mounted:
- #
- # $ mount | grep ' / '
- # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
- #
- # And the device ID for /dev/vda1 is not 18:
- #
- # $ ls -l /dev/vda1
- # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
- #
- # So use /proc/$$/mountinfo to find the device underlying the
- # input path.
- mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
- if os.path.exists(mountinfo_path):
- lines = load_file(mountinfo_path).splitlines()
- return parse_mount_info(path, lines, log)
- elif os.path.exists("/etc/mtab"):
- return parse_mtab(path)
- else:
- return parse_mount(path)
-
-
-def which(program):
- # Return path of program for execution if found in path
- def is_exe(fpath):
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
- _fpath, _ = os.path.split(program)
- if _fpath:
- if is_exe(program):
- return program
- else:
- for path in os.environ.get("PATH", "").split(os.pathsep):
- path = path.strip('"')
- exe_file = os.path.join(path, program)
- if is_exe(exe_file):
- return exe_file
-
- return None
-
-
-def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
- if args is None:
- args = []
- if kwargs is None:
- kwargs = {}
-
- start = time.time()
-
- ustart = None
- if get_uptime:
- try:
- ustart = float(uptime())
- except ValueError:
- pass
-
- try:
- ret = func(*args, **kwargs)
- finally:
- delta = time.time() - start
- udelta = None
- if ustart is not None:
- try:
- udelta = float(uptime()) - ustart
- except ValueError:
- pass
-
- tmsg = " took %0.3f seconds" % delta
- if get_uptime:
- if isinstance(udelta, (float)):
- tmsg += " (%0.2f)" % udelta
- else:
- tmsg += " (N/A)"
- try:
- logfunc(msg + tmsg)
- except Exception:
- pass
- return ret
-
-
-def expand_dotted_devname(dotted):
- toks = dotted.rsplit(".", 1)
-    if len(toks) > 1:
-        # return a tuple in both cases for a consistent return type
-        return tuple(toks)
- else:
- return (dotted, None)
-
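
(For illustration, splitting a vlan-style dotted device name; with the
consistent tuple return noted above:)

    >>> from cloudinit import util
    >>> util.expand_dotted_devname('eth0.100')
    ('eth0', '100')
    >>> util.expand_dotted_devname('eth0')
    ('eth0', None)
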
-
-def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
- # return a dictionary populated with keys in 'required' and 'optional'
- # by reading files in prefix + delim + entry
- if required is None:
- required = []
- if optional is None:
- optional = []
-
- missing = []
- ret = {}
- for f in required + optional:
- try:
- ret[f] = load_file(base + delim + f, quiet=False, decode=False)
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
- if f in required:
- missing.append(f)
-
- if len(missing):
-        raise ValueError("Missing required files: %s" % ','.join(missing))
-
- return ret
-
-
-def read_meminfo(meminfo="/proc/meminfo", raw=False):
- # read a /proc/meminfo style file and return
- # a dict with 'total', 'free', and 'available'
- mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
- kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
- 'MemAvailable:': 'available'}
- ret = {}
- for line in load_file(meminfo).splitlines():
- try:
- key, value, unit = line.split()
- except ValueError:
- key, value = line.split()
- unit = 'B'
- if raw:
- ret[key] = int(value) * mpliers[unit]
- elif key in kmap:
- ret[kmap[key]] = int(value) * mpliers[unit]
-
- return ret
-
-
-def human2bytes(size):
- """Convert human string or integer to size in bytes
- 10M => 10485760
- .5G => 536870912
- """
-    size_in = size
-    # accept ints, as documented, by coercing to str first
-    size = str(size)
-    if size.endswith("B"):
-        size = size[:-1]
-
- mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
-
- num = size
- mplier = 'B'
- for m in mpliers:
- if size.endswith(m):
- mplier = m
- num = size[0:-len(m)]
-
- try:
- num = float(num)
- except ValueError:
- raise ValueError("'%s' is not valid input." % size_in)
-
- if num < 0:
- raise ValueError("'%s': cannot be negative" % size_in)
-
- return int(num * mpliers[mplier])
-
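
(Doctest-style examples matching the docstring above; an optional trailing
"B" is stripped before the multiplier suffix is applied:)

    >>> from cloudinit import util
    >>> util.human2bytes('10M')
    10485760
    >>> util.human2bytes('.5G')
    536870912
    >>> util.human2bytes('1024')    # bare numbers are taken as bytes
    1024
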
-
-def _read_dmi_syspath(key):
- """
-    Reads dmi data from /sys/class/dmi/id
- """
- if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
- return None
- mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
- dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
- LOG.debug("querying dmi data %s", dmi_key_path)
- try:
- if not os.path.exists(dmi_key_path):
- LOG.debug("did not find %s", dmi_key_path)
- return None
-
- key_data = load_file(dmi_key_path, decode=False)
- if not key_data:
- LOG.debug("%s did not return any data", dmi_key_path)
- return None
-
-        # uninitialized dmi values show as all \xff and /sys appends a '\n'.
-        # in that event, return an empty string.
- if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
- key_data = b""
-
- str_data = key_data.decode('utf8').strip()
- LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
- return str_data
-
- except Exception:
- logexc(LOG, "failed read of %s", dmi_key_path)
- return None
-
-
-def _call_dmidecode(key, dmidecode_path):
- """
- Calls out to dmidecode to get the data out. This is mostly for supporting
- OS's without /sys/class/dmi/id support.
- """
- try:
- cmd = [dmidecode_path, "--string", key]
- (result, _err) = subp(cmd)
- LOG.debug("dmidecode returned '%s' for '%s'", result, key)
- result = result.strip()
- if result.replace(".", "") == "":
- return ""
- return result
- except (IOError, OSError) as _err:
- LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
- return None
-
-
-def read_dmi_data(key):
- """
- Wrapper for reading DMI data.
-
-    This will do the following (returning the first that produces a
-    result):
-    1) Use a mapping to translate `key` from dmidecode naming to
-       sysfs naming and look in /sys/class/dmi/... for a value.
-    2) Fall-back to passing `key` to `dmidecode --string`.
-
- If all of the above fail to find a value, None will be returned.
- """
- syspath_value = _read_dmi_syspath(key)
- if syspath_value is not None:
- return syspath_value
-
- dmidecode_path = which('dmidecode')
- if dmidecode_path:
- return _call_dmidecode(key, dmidecode_path)
-
- LOG.warn("did not find either path %s or dmidecode command",
- DMI_SYS_PATH)
- return None
-
-
-def message_from_string(string):
- if sys.version_info[:2] < (2, 7):
- return email.message_from_file(six.StringIO(string))
- return email.message_from_string(string)
diff --git a/cloudinit/version.py b/cloudinit/version.py
deleted file mode 100644
index 3d1d1d23..00000000
--- a/cloudinit/version.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from distutils import version as vr
-
-
-def version():
- return vr.StrictVersion("0.7.7")
-
-
-def version_string():
- return str(version())
diff --git a/config/cloud.cfg b/config/cloud.cfg
deleted file mode 100644
index a6afcc83..00000000
--- a/config/cloud.cfg
+++ /dev/null
@@ -1,115 +0,0 @@
-# The top level settings are used as module
-# and system configuration.
-
-# A set of users which may be applied and/or used by various modules.
-# When a 'default' entry is found it will reference the 'default_user'
-# from the distro configuration specified below.
-users:
- - default
-
-# If this is set, 'root' will not be able to ssh in and will
-# instead get a message telling them to log in as the above $user (ubuntu)
-disable_root: true
-
-# This will cause the set+update hostname module to not operate (if true)
-preserve_hostname: false
-
-# Example datasource config
-# datasource:
-# Ec2:
-# metadata_urls: [ 'blah.com' ]
-# timeout: 5 # (defaults to 50 seconds)
-# max_wait: 10 # (defaults to 120 seconds)
-
-# The modules that run in the 'init' stage
-cloud_init_modules:
- - migrator
- - ubuntu-init-switch
- - seed_random
- - bootcmd
- - write-files
- - growpart
- - resizefs
- - set_hostname
- - update_hostname
- - update_etc_hosts
- - ca-certs
- - rsyslog
- - users-groups
- - ssh
-
-# The modules that run in the 'config' stage
-cloud_config_modules:
-# Emit the cloud config ready event
-# this can be used by upstart jobs for 'start on cloud-config'.
- - emit_upstart
- - disk_setup
- - mounts
- - ssh-import-id
- - locale
- - set-passwords
- - snappy
- - grub-dpkg
- - apt-pipelining
- - apt-configure
- - package-update-upgrade-install
- - fan
- - landscape
- - timezone
- - lxd
- - puppet
- - chef
- - salt-minion
- - mcollective
- - disable-ec2-metadata
- - runcmd
- - byobu
-
-# The modules that run in the 'final' stage
-cloud_final_modules:
- - rightscale_userdata
- - scripts-vendor
- - scripts-per-once
- - scripts-per-boot
- - scripts-per-instance
- - scripts-user
- - ssh-authkey-fingerprints
- - keys-to-console
- - phone-home
- - final-message
- - power-state-change
-
-# System and/or distro specific settings
-# (not accessible to handlers/transforms)
-system_info:
- # This will affect which distro class gets used
- distro: ubuntu
- # Default user name + that default users groups (if added/used)
- default_user:
- name: ubuntu
- lock_passwd: True
- gecos: Ubuntu
- groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
- sudo: ["ALL=(ALL) NOPASSWD:ALL"]
- shell: /bin/bash
- # Other config here will be given to the distro class and/or path classes
- paths:
- cloud_dir: /var/lib/cloud/
- templates_dir: /etc/cloud/templates/
- upstart_dir: /etc/init/
- package_mirrors:
- - arches: [i386, amd64]
- failsafe:
- primary: http://archive.ubuntu.com/ubuntu
- security: http://security.ubuntu.com/ubuntu
- search:
- primary:
- - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
- - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
- - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/
- security: []
- - arches: [armhf, armel, default]
- failsafe:
- primary: http://ports.ubuntu.com/ubuntu-ports
- security: http://ports.ubuntu.com/ubuntu-ports
- ssh_svcname: ssh
diff --git a/config/cloud.cfg-freebsd b/config/cloud.cfg-freebsd
deleted file mode 100644
index be664f5d..00000000
--- a/config/cloud.cfg-freebsd
+++ /dev/null
@@ -1,88 +0,0 @@
-# The top level settings are used as module
-# and system configuration.
-
-syslog_fix_perms: root:wheel
-
-# This should not be required, but leave it in place until the real cause of
-# not being able to find -any- datasources is resolved.
-datasource_list: ['ConfigDrive', 'OpenStack', 'Ec2']
-
-# A set of users which may be applied and/or used by various modules.
-# When a 'default' entry is found it will reference the 'default_user'
-# from the distro configuration specified below.
-users:
- - default
-
-# If this is set, 'root' will not be able to ssh in and will
-# instead get a message telling them to log in as the above $user (freebsd)
-disable_root: false
-
-# This will cause the set+update hostname module to not operate (if true)
-preserve_hostname: false
-
-# Example datasource config
-# datasource:
-# Ec2:
-# metadata_urls: [ 'blah.com' ]
-# timeout: 5 # (defaults to 50 seconds)
-# max_wait: 10 # (defaults to 120 seconds)
-
-# The modules that run in the 'init' stage
-cloud_init_modules:
-# - migrator
- - seed_random
- - bootcmd
-# - write-files
- - growpart
- - resizefs
- - set_hostname
- - update_hostname
-# - update_etc_hosts
-# - ca-certs
-# - rsyslog
- - users-groups
- - ssh
-
-# The modules that run in the 'config' stage
-cloud_config_modules:
-# - disk_setup
-# - mounts
- - ssh-import-id
- - locale
- - set-passwords
- - package-update-upgrade-install
-# - landscape
- - timezone
-# - puppet
-# - chef
-# - salt-minion
-# - mcollective
- - disable-ec2-metadata
- - runcmd
-# - byobu
-
-# The modules that run in the 'final' stage
-cloud_final_modules:
- - rightscale_userdata
- - scripts-vendor
- - scripts-per-once
- - scripts-per-boot
- - scripts-per-instance
- - scripts-user
- - ssh-authkey-fingerprints
- - keys-to-console
- - phone-home
- - final-message
- - power-state-change
-
-# System and/or distro specific settings
-# (not accessible to handlers/transforms)
-system_info:
- distro: freebsd
- default_user:
- name: freebsd
- lock_passwd: True
- gecos: FreeBSD
- groups: [wheel]
- sudo: ["ALL=(ALL) NOPASSWD:ALL"]
- shell: /bin/tcsh
diff --git a/config/cloud.cfg.d/05_logging.cfg b/config/cloud.cfg.d/05_logging.cfg
deleted file mode 100644
index 2e180730..00000000
--- a/config/cloud.cfg.d/05_logging.cfg
+++ /dev/null
@@ -1,66 +0,0 @@
-## This yaml formatted config file handles setting
-## logger information. The values that need to be set
-## are at the bottom. The top '_log' entries are only used to remove
-## redundancy in the syslog and fallback-to-file case.
-##
-## The 'log_cfgs' entry defines a list of logger configs
-## Each entry in the list is tried, and the first one that
-## works is used. If a log_cfg list entry is an array, it will
-## be joined with '\n'.
-_log:
- - &log_base |
- [loggers]
- keys=root,cloudinit
-
- [handlers]
- keys=consoleHandler,cloudLogHandler
-
- [formatters]
- keys=simpleFormatter,arg0Formatter
-
- [logger_root]
- level=DEBUG
- handlers=consoleHandler,cloudLogHandler
-
- [logger_cloudinit]
- level=DEBUG
- qualname=cloudinit
- handlers=
- propagate=1
-
- [handler_consoleHandler]
- class=StreamHandler
- level=WARNING
- formatter=arg0Formatter
- args=(sys.stderr,)
-
- [formatter_arg0Formatter]
- format=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s
-
- [formatter_simpleFormatter]
- format=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s
- - &log_file |
- [handler_cloudLogHandler]
- class=FileHandler
- level=DEBUG
- formatter=arg0Formatter
- args=('/var/log/cloud-init.log',)
- - &log_syslog |
- [handler_cloudLogHandler]
- class=handlers.SysLogHandler
- level=DEBUG
- formatter=simpleFormatter
- args=("/dev/log", handlers.SysLogHandler.LOG_USER)
-
-log_cfgs:
-# These will be joined into a string that defines the configuration
- - [ *log_base, *log_syslog ]
-# These will be joined into a string that defines the configuration
- - [ *log_base, *log_file ]
-# A file path can also be used
-# - /etc/log.conf
-
-# this tells cloud-init to redirect its stdout and stderr to
-# 'tee -a /var/log/cloud-init-output.log' so the user can see output
-# there without needing to look on the console.
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
diff --git a/config/cloud.cfg.d/README b/config/cloud.cfg.d/README
deleted file mode 100644
index 60702e9d..00000000
--- a/config/cloud.cfg.d/README
+++ /dev/null
@@ -1,3 +0,0 @@
-# All files in this directory will be read by cloud-init
-# They are read in lexical order. Later files overwrite values in
-# earlier files.
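-# For example (hypothetical file names): a setting in 99-local.cfg
-# would override the same setting in 05_logging.cfg.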
diff --git a/doc/README b/doc/README
deleted file mode 100644
index 83559192..00000000
--- a/doc/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This project is cloud-init. It is hosted on Launchpad at
-https://launchpad.net/cloud-init
-
-The package was previously named ec2-init.
diff --git a/doc/examples/cloud-config-add-apt-repos.txt b/doc/examples/cloud-config-add-apt-repos.txt
deleted file mode 100644
index be9d5472..00000000
--- a/doc/examples/cloud-config-add-apt-repos.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-#cloud-config
-
-# Add apt repositories
-#
-# Default: auto select based on cloud metadata
-# in ec2, the default is <region>.archive.ubuntu.com
-# apt_mirror:
-# use the provided mirror
-# apt_mirror_search:
-# search the list for the first mirror.
-# this is currently very limited, only verifying that
-# the mirror is dns resolvable or an IP address
-#
-# if neither apt_mirror nor apt_mirror_search is set (the default)
-# then use the mirror provided by the DataSource found.
-# In EC2, that means using <region>.ec2.archive.ubuntu.com
-#
-# if no mirror is provided by the DataSource, and 'apt_mirror_search_dns' is
-# true, then search for dns names '<distro>-mirror' in each of
-# - fqdn of this host per cloud metadata
-# - localdomain
-# - no domain (which would search domains listed in /etc/resolv.conf)
-# If there is a dns entry for <distro>-mirror, then it is assumed that there
-# is a distro mirror at http://<distro>-mirror.<domain>/<distro>
-#
-# That gives the cloud provider the opportunity to set mirrors of a distro
-# up and expose them only by creating dns entries.
-#
-# if none of that is found, then the default distro mirror is used
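-#
-# As an illustration (hostnames here are hypothetical): on an Ubuntu
-# instance whose metadata reports the fqdn host.example.com, cloud-init
-# would try 'ubuntu-mirror.example.com', 'ubuntu-mirror.localdomain' and
-# plain 'ubuntu-mirror', and on the first hit use
-# http://ubuntu-mirror.example.com/ubuntu as the mirror.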
-apt_mirror: http://us.archive.ubuntu.com/ubuntu/
-apt_mirror_search:
- - http://local-mirror.mydomain
- - http://archive.ubuntu.com
-apt_mirror_search_dns: False
diff --git a/doc/examples/cloud-config-archive-launch-index.txt b/doc/examples/cloud-config-archive-launch-index.txt
deleted file mode 100644
index e2ac2869..00000000
--- a/doc/examples/cloud-config-archive-launch-index.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-#cloud-config-archive
-
-# This is an example of a cloud archive
-# format which includes a set of launch indexes
-# that will be filtered on (thus only showing
-# up in instances with that launch index). This
-# is done by adding the 'launch-index' key, which
-# maps to the integer 'launch-index' that the
-# corresponding content should be used with.
-#
-# It is possible to leave this value out which
-# will mean that the content will be applicable
-# for all instances
-
-- type: foo/wark
- filename: bar
- content: |
- This is my payload
- hello
- launch-index: 1 # I will only be used on launch-index 1
-- this is also payload
-- |
- multi line payload
- here
--
- type: text/upstart-job
- filename: my-upstart.conf
- content: |
- whats this, yo?
- launch-index: 0 # I will only be used on launch-index 0
diff --git a/doc/examples/cloud-config-archive.txt b/doc/examples/cloud-config-archive.txt
deleted file mode 100644
index 23b1024c..00000000
--- a/doc/examples/cloud-config-archive.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-#cloud-config-archive
-- type: foo/wark
- filename: bar
- content: |
- This is my payload
- hello
-- this is also payload
-- |
- multi line payload
- here
--
- type: text/upstart-job
- filename: my-upstart.conf
- content: |
- whats this, yo?
-
diff --git a/doc/examples/cloud-config-boot-cmds.txt b/doc/examples/cloud-config-boot-cmds.txt
deleted file mode 100644
index 3e59755d..00000000
--- a/doc/examples/cloud-config-boot-cmds.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-#cloud-config
-
-# boot commands
-# default: none
-# this is very similar to runcmd, but commands run very early
-# in the boot process, only slightly after a 'boothook' would run.
-# bootcmd should really only be used for things that could not be
-# done later in the boot process. bootcmd is very much like
-# boothook, but possibly more friendly.
-# - bootcmd will run on every boot
-# - the INSTANCE_ID variable will be set to the current instance id.
-# - you can use the 'cloud-init-per' command to help run only once
-bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
diff --git a/doc/examples/cloud-config-ca-certs.txt b/doc/examples/cloud-config-ca-certs.txt
deleted file mode 100644
index 5e9115a0..00000000
--- a/doc/examples/cloud-config-ca-certs.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-#
-# This is an example file to configure an instance's trusted CA certificates
-# system-wide for SSL/TLS trust establishment when the instance boots for the
-# first time.
-#
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-
-ca-certs:
- # If present and set to True, the 'remove-defaults' parameter will remove
- # all the default trusted CA certificates that are normally shipped with
- # Ubuntu.
- # This is mainly for paranoid admins - most users will not need this
- # functionality.
- remove-defaults: true
-
- # If present, the 'trusted' parameter should contain a certificate (or list
- # of certificates) to add to the system as trusted CA certificates.
- # Pay close attention to the YAML multiline list syntax. The example shown
- # here is for a list of multiline certificates.
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
-
diff --git a/doc/examples/cloud-config-chef-oneiric.txt b/doc/examples/cloud-config-chef-oneiric.txt
deleted file mode 100644
index 2e5f4b16..00000000
--- a/doc/examples/cloud-config-chef-oneiric.txt
+++ /dev/null
@@ -1,90 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically install chef-client and run a
-# list of recipes when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-#
-# This example assumes the instance is 11.10 (oneiric)
-
-
-# The default is to install from packages.
-
-# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
-apt_sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
-
-chef:
-
- # 11.10 will fail if install_type is "gems" (LP: #960576)
- install_type: "packages"
-
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
-
- # value of validation_cert is not used if validation_key defined,
- # but variable needs to be defined (LP: #960547)
- validation_cert: "unused"
- validation_key: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
- apache:
- prefork:
- maxclients: 100
- keepalive: "off"
-
-
-# Capture all subprocess output into a logfile
-# Useful for troubleshooting cloud-init issues
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
deleted file mode 100644
index b886cba2..00000000
--- a/doc/examples/cloud-config-chef.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically install chef-client and run a
-# list of recipes when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-#
-# This example assumes the instance is 12.04 (precise)
-
-
-# The default is to install from packages.
-
-# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
-apt_sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
-
-chef:
-
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
- apache:
- prefork:
- maxclients: 100
- keepalive: "off"
-
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.opscode.com/chef/install.sh"
-
-
-# Capture all subprocess output into a logfile
-# Useful for troubleshooting cloud-init issues
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
deleted file mode 100644
index 2651c027..00000000
--- a/doc/examples/cloud-config-datasources.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-# Documentation on data sources configuration options
-datasource:
- # Ec2
- Ec2:
- # timeout: the timeout value for a request at metadata service
- timeout : 50
- # The length in seconds to wait before giving up on the metadata
- # service. The actual total wait could be up to
- # len(resolvable_metadata_urls)*timeout
- max_wait : 120
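- # For instance, with the two metadata_urls below and timeout 50, a
- # single pass over the urls could take up to 2 * 50 = 100 seconds,
- # while max_wait 120 bounds the total time before giving up.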
-
- #metadata_url: a list of URLs to check for metadata services
- metadata_urls:
- - http://169.254.169.254:80
- - http://instance-data:8773
-
- MAAS:
- timeout : 50
- max_wait : 120
-
- # there are no default values for metadata_url or oauth credentials
- # If no credentials are present, non-authed attempts will be made.
- metadata_url: http://maas-host.localdomain/source
- consumer_key: Xh234sdkljf
- token_key: kjfhgb3n
- token_secret: 24uysdfx1w4
-
- NoCloud:
- # default seedfrom is None
- # if found, then it should contain a url with:
- # <url>/user-data and <url>/meta-data
- # seedfrom: http://my.example.com/i-abcde
- seedfrom: None
-
- # fs_label: the label on filesystems to be searched for NoCloud source
- fs_label: cidata
-
- # these are optional, but allow you to basically provide a datasource
- # right here
- user-data: |
- # This is the user-data verbatim
- meta-data:
- instance-id: i-87018aed
- local-hostname: myhost.internal
-
- Azure:
- agent_command: [service, walinuxagent, start]
- set_hostname: True
- hostname_bounce:
- interface: eth0
- policy: on # [can be 'on', 'off' or 'force']
-
- SmartOS:
- # For KVM guests:
- # Smart OS datasource works over a serial console interacting with
- # a server on the other end. By default, the second serial console is the
- # device. SmartOS also uses a serial timeout of 60 seconds.
- serial_device: /dev/ttyS1
- serial_timeout: 60
-
- # For LX-Brand Zones guests:
- # Smart OS datasource works over a socket interacting with
- # the host on the other end. By default, the socket file is in
- # the native .zonecontrol directory.
- metadata_sockfile: /native/.zonecontrol/metadata.sock
-
- # a list of keys that will not be base64 decoded even if base64_all is set
- no_base64_decode: ['root_authorized_keys', 'motd_sys_info',
- 'iptables_disable']
- # a plaintext, comma delimited list of keys whose values are b64 encoded
- base64_keys: []
- # a boolean indicating that all keys not in 'no_base64_decode' are encoded
- base64_all: False
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
deleted file mode 100644
index 3e46a22e..00000000
--- a/doc/examples/cloud-config-disk-setup.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-# Cloud-init supports the creation of simple partition tables and file systems
-# on devices.
-
-# Default disk definitions for AWS
-# --------------------------------
-# (Not implemented yet, but provided for future documentation)
-
-disk_setup:
- ephemeral0:
- table_type: 'mbr'
- layout: True
- overwrite: False
-
-fs_setup:
- - label: None
- filesystem: ext3
- device: ephemeral0
- partition: auto
-
-# Default disk definitions for Windows Azure
-# ------------------------------------------
-
-device_aliases: {'ephemeral0': '/dev/sdb'}
-disk_setup:
- ephemeral0:
- table_type: mbr
- layout: True
- overwrite: False
-
-fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.1
- replace_fs: ntfs
-
-
-# Default disk definitions for SmartOS
-# ------------------------------------
-
-device_aliases: {'ephemeral0': '/dev/sdb'}
-disk_setup:
- ephemeral0:
- table_type: mbr
- layout: False
- overwrite: False
-
-fs_setup:
- - label: ephemeral0
- filesystem: ext3
- device: ephemeral0.0
-
-# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
-# not be automatically added to the mounts.
-
-
-# The default definition is used to make sure that the ephemeral storage is
-# set up properly.
-
-# "disk_setup": disk partitioning
-# --------------------------------
-
-# The disk_setup directive instructs Cloud-init to partition a disk. The format is:
-
-disk_setup:
- ephemeral0:
- table_type: 'mbr'
- layout: 'auto'
- /dev/xvdh:
- table_type: 'mbr'
- layout:
- - 33
- - [33, 82]
- - 33
- overwrite: True
-
-# The format is a dict of dicts. Each key names the
-# device and the values under it define how to create and lay out the
-# partition table.
-# The general format is:
-# disk_setup:
-# <DEVICE>:
-# table_type: 'mbr'
-# layout: <LAYOUT|BOOL>
-# overwrite: <BOOL>
-#
-# Where:
-# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
-# values which are specific to the cloud. For these devices
-# Cloud-init will look up what the real device is and then
-# use it.
-#
-# For other devices, the kernel device name is used. At this
-# time only simple kernel devices are supported, meaning
-# that device mapper and other targets may not work.
-#
-# Note: At this time, there is no handling or setup of
-# device mapper targets.
-#
-# table_type=<TYPE>: Currently the following are supported:
-# 'mbr': the default; sets up an MS-DOS partition table
-#
-# Note: At this time only 'mbr' partition tables are allowed.
-# It is anticipated that we'll have GPT as an
-# option in the future, or even "RAID" to create an mdadm
-# RAID.
-#
-# layout={...}: The device layout. This is a list of values, with the
-# percentage of disk that partition will take.
-# Valid options are:
-# [<SIZE>, [<SIZE>, <PART_TYPE>]]
-#
-# Where <SIZE> is the _percentage_ of the disk to use, while
-# <PART_TYPE> is the numerical value of the partition type.
-#
-# The following sets up two partitions, with the first
-# partition having a swap label, taking 1/3 of the disk space
-# and the remainder being used as the second partition.
-# '/dev/xvdh':
-# table_type: 'mbr'
-# layout:
-# - [33,82]
-# - 66
-# overwrite: True
-#
-# When layout is "true" it means single partition the entire
-# device.
-#
-# When layout is "false" it means don't partition or ignore
-# existing partitioning.
-#
-# If layout is set to "true" and overwrite is set to "false",
-# it will skip partitioning the device without a failure.
-#
-# overwrite=<BOOL>: This describes whether to ride with the safety on and
-# everything holstered.
-#
-# 'false' is the default, which means that:
-# 1. The device will be checked for a partition table
-# 2. The device will be checked for a file system
-# 3. If either a partition or file system is found, then
-# the operation will be _skipped_.
-#
-# 'true' is cowboy mode. There are no checks and things are
-# done blindly. USE with caution, you can do things you
-# really, really don't want to do.
-#
-#
-# fs_setup: Setup the file system
-# -------------------------------
-#
-# fs_setup describes how the file systems are supposed to look.
-
-fs_setup:
- - label: ephemeral0
- filesystem: 'ext3'
- device: 'ephemeral0'
- partition: 'auto'
- - label: mylabel2
- filesystem: 'ext4'
- device: '/dev/xvda1'
- - special:
- cmd: mkfs -t %(FILESYSTEM)s -L %(LABEL)s %(DEVICE)s
- filesystem: 'btrfs'
- device: '/dev/xvdh'
-
-# The general format is:
-# fs_setup:
-# - label: <LABEL>
-# filesystem: <FS_TYPE>
-# device: <DEVICE>
-# partition: <PART_VALUE>
-# overwrite: <OVERWRITE>
-# replace_fs: <FS_TYPE>
-#
-# Where:
-# <LABEL>: The file system label to be used. If set to None, no label is
-# used.
-#
-# <FS_TYPE>: The file system type. It is assumed that there
-# will be a "mkfs.<FS_TYPE>" that behaves like "mkfs". On a standard
-# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
-# and vfat by default.
-#
-# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
-# are allowed and the actual device is acquired from the cloud datasource.
-# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
-# label as 'ephemeralX' otherwise there may be issues with the mounting
-# of the ephemeral storage layer.
-#
-# If you define the device as 'ephemeralX.Y' then Y will be interpreted
-# as a partition value. However, ephemeralX.0 is the _same_ as ephemeralX.
-#
-# <PART_VALUE>:
-# Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
-#
-# The valid options are:
-# "auto|any": tell cloud-init not to care whether there is a partition
-# or not. Auto will use the first partition that does not contain a
-# file system already. In the absence of a partition table, it will
-# put it directly on the disk.
-#
-# "auto": If a file system that matches the specification in terms of
-# label, type and device exists, then cloud-init will skip the creation of
-# the file system.
-#
-# "any": If a file system that matches the file system type and device,
-# then cloud-init will skip the creation of the file system.
-#
-# Devices are selected based on first-detected, starting with partitions
-# and then the raw disk. Consider the following:
-# NAME FSTYPE LABEL
-# xvdb
-# |-xvdb1 ext4
-# |-xvdb2
-# |-xvdb3 btrfs test
-# \-xvdb4 ext4 test
-#
-# If you ask for 'auto', a label of 'test', and a file system of 'ext4'
-# then cloud-init will select the 2nd partition, even though there
-# is a partition match at the 4th partition.
-#
-# If you ask for 'any' and a label of 'test', then cloud-init will
-# select the 1st partition.
-#
-# If you ask for 'auto' and don't define label, then cloud-init will
-# select the 1st partition.
-#
-# In general, if you have a specific partition configuration in mind,
-# you should define either the device or the partition number. 'auto'
-# and 'any' are specifically intended for formatting ephemeral storage or
-# for simple schemes.
-#
-# "none": Put the file system directly on the device.
-#
-# <NUM>: where NUM is the actual partition number.
-#
-# <OVERWRITE>: Defines whether or not to overwrite any existing
-# filesystem.
-#
-# "true": Indiscriminately destroy any pre-existing file system. Use at
-# your own peril.
-#
-# "false": If an existing file system exists, skip the creation.
-#
-# <REPLACE_FS>: This is a special directive, used for Windows Azure that
-# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
-# unless you define a label, this requires the use of the 'any' partition
-# directive.
-#
-# Behavior Caveat: The default behavior is to _check_ if the file system exists.
-# If a file system matches the specification, then the operation is a no-op.
diff --git a/doc/examples/cloud-config-final-message.txt b/doc/examples/cloud-config-final-message.txt
deleted file mode 100644
index 0ce31467..00000000
--- a/doc/examples/cloud-config-final-message.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-
-# final_message
-# default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
-# this message is written by cloud-final when the system has finished
-# its first boot
-final_message: "The system is finally up, after $UPTIME seconds"
diff --git a/doc/examples/cloud-config-gluster.txt b/doc/examples/cloud-config-gluster.txt
deleted file mode 100644
index f8183e77..00000000
--- a/doc/examples/cloud-config-gluster.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-#cloud-config
-# vim: syntax=yaml
-# Mounts volfile exported by glusterfsd running on
-# "volfile-server-hostname" onto the local mount point '/mnt/data'
-#
-# In reality, replace 'volfile-server-hostname' with one of your nodes
-# running glusterfsd.
-#
-packages:
- - glusterfs-client
-
-mounts:
- - [ 'volfile-server-hostname:6996', /mnt/data, glusterfs, "defaults,nobootwait", "0", "2" ]
-
-runcmd:
- - [ modprobe, fuse ]
- - [ mkdir, '-p', /mnt/data ]
- - [ mount, '-a' ]
diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt
deleted file mode 100644
index 393d5164..00000000
--- a/doc/examples/cloud-config-growpart.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-#
-# growpart entry is a dict. If it is not present at all
-# in config, then the default is used ({'mode': 'auto', 'devices': ['/']})
-#
-# mode:
-# values:
-# * auto: use any option possible (any available)
-# if none are available, do not warn, only log at debug level.
-# * growpart: use growpart to grow partitions
-# if growpart is not available, this is an error.
-# * off, false
-#
-# devices:
-# a list of things to resize.
-# items can be filesystem paths or devices (in /dev)
-# examples:
-# devices: [/, /dev/vdb1]
-#
-# ignore_growroot_disabled:
-# a boolean, default is false.
-# if the file /etc/growroot-disabled exists, then cloud-init will not grow
-# the root partition. This is to allow a single file to disable both
-# cloud-initramfs-growroot and cloud-init's growroot support.
-#
-# true indicates that /etc/growroot-disabled should be ignored
-#
-growpart:
- mode: auto
- devices: ['/']
- ignore_growroot_disabled: false
diff --git a/doc/examples/cloud-config-install-packages.txt b/doc/examples/cloud-config-install-packages.txt
deleted file mode 100644
index 2edc63da..00000000
--- a/doc/examples/cloud-config-install-packages.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-#cloud-config
-
-# Install additional packages on first boot
-#
-# Default: none
-#
-# if packages are specified, this apt_update will be set to true
-#
-# packages may be supplied as a single package name or as a list
-# with the format [<package>, <version>] wherein the specific
-# package version will be installed.
-packages:
- - pwgen
- - pastebinit
- - [libpython2.7, 2.7.3-0ubuntu3.1]
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
deleted file mode 100644
index d7ff8ef8..00000000
--- a/doc/examples/cloud-config-landscape.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Landscape-client configuration
-#
-# Anything under the top 'landscape: client' entry
-# will be basically rendered into a ConfigObj formatted file
-# under the '[client]' section of /etc/landscape/client.conf
-#
-# Note: 'tags' should be specified as a comma delimited string
-# rather than a list.
-#
-# You can get example key/values by running 'landscape-config',
-# answer its questions, then look at /etc/landscape/client.conf
-landscape:
- client:
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: footitle
- https_proxy: fooproxy
- registration_key: fookey
- account_name: fooaccount
diff --git a/doc/examples/cloud-config-launch-index.txt b/doc/examples/cloud-config-launch-index.txt
deleted file mode 100644
index e7dfdc0c..00000000
--- a/doc/examples/cloud-config-launch-index.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-#cloud-config
-# vim: syntax=yaml
-
-#
-# This is the configuration syntax that can be provided to have
-# a given set of cloud config data show up on a certain launch
-# index (and not other launches) by providing a key here which
-# will act as a filter on the instance's userdata. When
-# this key is left out (or non-integer) then the content
-# of this file will always be used for all launch-indexes
-# (ie the previous behavior).
-launch-index: 5
-
-# Upgrade the instance on first boot
-# (ie run apt-get upgrade)
-#
-# Default: false
-#
-apt_upgrade: true
-
-# Other yaml keys below...
-# .......
-# .......
diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt
deleted file mode 100644
index e96f314b..00000000
--- a/doc/examples/cloud-config-lxd.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-#cloud-config
-
-# configure lxd
-# default: none
-# all options default to none if not specified
-# lxd: config sections for lxd
-# init: dict of options for lxd init, see 'man lxd'
-# network_address: address for lxd to listen on
-# network_port: port for lxd to listen on
-# storage_backend: either 'zfs' or 'dir'
-# storage_create_device: device based storage using specified device
-# storage_create_loop: set up loop based storage with size in GB
-# storage_pool: name of storage pool to use or create
-# trust_password: password required to add new clients
-# bridge: dict of options for the lxd bridge
-# mode: one of "new", "existing" or "none". Defaults to "new"
-# name: the name of the bridge. Defaults to "lxdbr0"
-# ipv4_address: an IPv4 address (e.g. 10.0.8.1)
-# ipv4_netmask: a CIDR mask value (e.g. 24)
-# ipv4_dhcp_first: the first IP of the DHCP range (e.g. 10.0.8.2)
-# ipv4_dhcp_last: the last IP of the DHCP range (e.g. 10.0.8.254)
-# ipv4_dhcp_leases: the size of the DHCP pool (e.g. 250)
-# ipv4_nat: either "true" or "false"
-# ipv6_address: an IPv6 address (e.g. fd98:9e0:3744::1)
-# ipv6_netmask: a CIDR mask value (e.g. 64)
-# ipv6_nat: either "true" or "false"
-# domain: domain name to use for the bridge
-
-
-lxd:
- init:
- network_address: 0.0.0.0
- network_port: 8443
- storage_backend: zfs
- storage_pool: datapool
- storage_create_loop: 10
- bridge:
- mode: new
- name: lxdbr0
- ipv4_address: 10.0.8.1
- ipv4_netmask: 24
- ipv4_dhcp_first: 10.0.8.2
- ipv4_dhcp_last: 10.0.8.3
- ipv4_dhcp_leases: 250
- ipv4_nat: true
- ipv6_address: fd98:9e0:3744::1
- ipv6_netmask: 64
- ipv6_nat: true
- domain: lxd
-
-
-# The simplest working configuration is
-# lxd:
-# init:
-# storage_backend: dir
diff --git a/doc/examples/cloud-config-mcollective.txt b/doc/examples/cloud-config-mcollective.txt
deleted file mode 100644
index 67735682..00000000
--- a/doc/examples/cloud-config-mcollective.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically setup and run mcollective
-# when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-mcollective:
- # Every key present in the conf object will be added to server.cfg:
- # key: value
- #
- # For example the configuration below will have the following key
- # added to server.cfg:
- # plugin.stomp.host: dbhost
- conf:
- plugin.stomp.host: dbhost
- # This will add ssl certs to mcollective
- # WARNING WARNING WARNING
- # The ec2 metadata service is a network service, and thus is readable
- # by non-root users on the system (ie: 'ec2metadata --user-data')
- # If you want security for this, please use include-once + SSL urls
- public-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
- private-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
-
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
deleted file mode 100644
index aa676c24..00000000
--- a/doc/examples/cloud-config-mount-points.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-#cloud-config
-
-# set up mount points
-# 'mounts' contains a list of lists
-# the inner list are entries for an /etc/fstab line
-# ie : [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]
-#
-# default:
-# mounts:
-# - [ ephemeral0, /mnt ]
-# - [ swap, none, swap, sw, 0, 0 ]
-#
-# in order to remove a previously listed mount (ie, one from defaults)
-# list only the fs_spec. For example, to override the default, of
-# mounting swap:
-# - [ swap ]
-# or
-# - [ swap, null ]
-#
-# - if a device does not exist at the time, an entry will still be
-# written to /etc/fstab.
-# - '/dev' can be omitted for device names that begin with: xvd, sd, hd, vd
-# - if an entry does not have all 6 fields, they will be filled in
-# with values from 'mount_default_fields' below.
-#
-# Note that you should set 'nobootwait' (see man fstab) for volumes that may
-# not be attached at instance boot (or reboot)
-#
-mounts:
- - [ ephemeral0, /mnt, auto, "defaults,noexec" ]
- - [ sdc, /opt/data ]
- - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
- - [ dd, /dev/zero ]
-
-# mount_default_fields
-# These values are used to fill in any entries in 'mounts' that are not
-# complete. This must be an array, and must have 6 fields.
-mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
-
-
-# swap can also be set up by the 'mounts' module
-# default is to not create any swap files, because 'size' is set to 0
-swap:
- filename: /swap.img
- size: "auto" # or size in bytes
- maxsize: size in bytes
diff --git a/doc/examples/cloud-config-phone-home.txt b/doc/examples/cloud-config-phone-home.txt
deleted file mode 100644
index 7f2b69f7..00000000
--- a/doc/examples/cloud-config-phone-home.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-#cloud-config
-
-# phone_home: if this dictionary is present, then the phone_home
-# cloud-config module will post specified data back to the given
-# url
-# default: none
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
-#
-phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
deleted file mode 100644
index b470153d..00000000
--- a/doc/examples/cloud-config-power-state.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-#cloud-config
-
-## poweroff or reboot system after finished
-# default: none
-#
-# power_state can be used to make the system shutdown, reboot or
-# halt after boot is finished. This same thing can be achieved by
-# user-data scripts or by runcmd by simply invoking 'shutdown'.
-#
-# Doing it this way ensures that cloud-init is entirely finished with
-# modules that would be executed, and avoids any error/log messages
-# that may go to the console as a result of system services like
-# syslog being taken down while cloud-init is running.
-#
-# If you delay '+5' (5 minutes) and have a timeout of
-# 120 (2 minutes), then the max time until shutdown will be 7 minutes.
-# cloud-init will invoke 'shutdown +5' after the process finishes, or
-# when 'timeout' seconds have elapsed.
-#
-# delay: form accepted by shutdown. default is 'now'. other format
-# accepted is +m (m in minutes)
-# mode: required. must be one of 'poweroff', 'halt', 'reboot'
-# message: provided as the message argument to 'shutdown'. default is none.
-# timeout: the amount of time to give the cloud-init process to finish
-# before executing shutdown.
-# condition: apply state change only if condition is met.
-# May be boolean True (always met), or False (never met),
-# or a command string or list to be executed.
-# command's exit code indicates:
-# 0: condition met
-# 1: condition not met
-# other exit codes will result in 'not met', but are reserved
-# for future use.
-#
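-# For example (the path here is hypothetical), a command condition
-# could be:
-#   condition: ['sh', '-c', 'test -e /run/power-state-ok']
-# which applies the state change only if that file exists when
-# cloud-init finishes.
-#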
-power_state:
- delay: "+30"
- mode: poweroff
- message: Bye Bye
- timeout: 30
- condition: True
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
deleted file mode 100644
index cd3c2f8e..00000000
--- a/doc/examples/cloud-config-puppet.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically setup and run puppetd
-# when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
-# The puppetmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetmaster.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt
deleted file mode 100644
index ee00078f..00000000
--- a/doc/examples/cloud-config-reporting.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-#cloud-config
-##
-## The following sets up 2 reporting end points.
-## A 'webhook' and a 'log' type.
-## It also disables the built in default 'log'
-reporting:
- smtest:
- type: webhook
- endpoint: "http://myhost:8000/"
- consumer_key: "ckey_foo"
- consumer_secret: "csecret_foo"
- token_key: "tkey_foo"
- token_secret: "tkey_foo"
- smlogger:
- type: log
- level: WARN
- log: null
diff --git a/doc/examples/cloud-config-resolv-conf.txt b/doc/examples/cloud-config-resolv-conf.txt
deleted file mode 100644
index 37ffc91a..00000000
--- a/doc/examples/cloud-config-resolv-conf.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically configure resolv.conf when the
-# instance boots for the first time.
-#
-# Ensure that your yaml is valid and pass this as user-data when starting
-# the instance. Also be sure that your cloud.cfg file includes this
-# configuration module in the appropriate section.
-#
-manage-resolv-conf: true
-
-resolv_conf:
- nameservers: ['8.8.4.4', '8.8.8.8']
- searchdomains:
- - foo.example.com
- - bar.example.com
- domain: example.com
- options:
- rotate: true
- timeout: 1
diff --git a/doc/examples/cloud-config-rh_subscription.txt b/doc/examples/cloud-config-rh_subscription.txt
deleted file mode 100644
index be121338..00000000
--- a/doc/examples/cloud-config-rh_subscription.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-
-# register your Red Hat Enterprise Linux based operating system
-#
-# this cloud-init plugin is capable of registering by username
-# and password *or* activation key and org. Following a successful
-# registration you can:
-# - auto-attach subscriptions
-# - set the service level
-# - add subscriptions based on pool ID
-# - enable yum repositories based on repo id
-# - disable yum repositories based on repo id
-# - alter the rhsm_baseurl and server-hostname in the
-# /etc/rhsm/rhsm.conf file
-
-rh_subscription:
- username: joe@foo.bar
-
- ## Quote your password if it has symbols to be safe
- password: '1234abcd'
-
- ## If you prefer, you can use the activation key and
- ## org instead of username and password. Be sure to
- ## comment out username and password
-
- #activation-key: foobar
- #org: 12345
-
- ## Uncomment to auto-attach subscriptions to your system
- #auto-attach: True
-
- ## Uncomment to set the service level for your
- ## subscriptions
- #service-level: self-support
-
- ## Uncomment to add pools (needs to be a list of IDs)
- #add-pool: []
-
- ## Uncomment to add or remove yum repos
- ## (needs to be a list of repo IDs)
- #enable-repo: []
- #disable-repo: []
-
- ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
- #rhsm-baseurl: http://url
-
- ## Uncomment to alter the server hostname in
- ## /etc/rhsm/rhsm.conf
- #server-hostname: foo.bar.com
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
deleted file mode 100644
index 28ea1f16..00000000
--- a/doc/examples/cloud-config-rsyslog.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-## the rsyslog module allows you to configure the system's syslog.
-## configuration of syslog is under the top level cloud-config
-## entry 'rsyslog'.
-##
-## Example:
-#cloud-config
-rsyslog:
- remotes:
- # udp to host 'maas.mydomain' port 514
- maashost: maas.mydomain
- # udp to ipv4 host on port 514
- maas: "@[10.5.1.56]:514"
- # tcp to ipv6 host on port 555
- maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- config_dir: /etc/rsyslog.d
- config_filename: 20-cloud-config.conf
- service_reload_command: [your, syslog, reload, command]
-
-## Additionally the following legacy format is supported
-## it is converted into the format above before use.
-## rsyslog_filename -> rsyslog/config_filename
-## rsyslog_dir -> rsyslog/config_dir
-## rsyslog -> rsyslog/configs
-# rsyslog:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
-# rsyslog_filename: 20-cloud-config.conf
-# rsyslog_dir: /etc/rsyslog.d
-
-## to configure rsyslog to accept remote logging on Ubuntu
-## write the following into /etc/rsyslog.d/20-remote-udp.conf
-## $ModLoad imudp
-## $UDPServerRun 514
-## $template LogRemote,"/var/log/maas/rsyslog/%HOSTNAME%/messages"
-## :fromhost-ip, !isequal, "127.0.0.1" ?LogRemote
-## then:
-## sudo service rsyslog restart
diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt
deleted file mode 100644
index 3bb06864..00000000
--- a/doc/examples/cloud-config-run-cmds.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-#cloud-config
-
-# run commands
-# default: none
-# runcmd contains a list of either lists or strings
-# each item will be executed in order at rc.local like level with
-# output to the console
-# - runcmd only runs during the first boot
-# - if the item is a list, the items will be properly executed as if
-# passed to execve(3) (with the first arg as the command).
-# - if the item is a string, it will be simply written to the file and
-# will be interpreted by 'sh'
-#
-# Note that the list has to be proper yaml, so you have to quote
-# any characters yaml would eat (':' can be problematic)
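-# For example (illustrative): the unquoted item
-#   - echo hello: world
-# would be parsed by yaml as a mapping rather than a string, while
-#   - "echo 'hello: world'"
-# stays a string that 'sh' can run.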
-runcmd:
- - [ ls, -l, / ]
- - [ sh, -xc, "echo $(date) ': hello world!'" ]
- - [ sh, -c, echo "=========hello world'=========" ]
- - ls -l /root
- - [ wget, "http://slashdot.org", -O, /tmp/index.html ]
-
diff --git a/doc/examples/cloud-config-salt-minion.txt b/doc/examples/cloud-config-salt-minion.txt
deleted file mode 100644
index 939fdc8b..00000000
--- a/doc/examples/cloud-config-salt-minion.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically setup and run a salt
-# minion when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-
-salt_minion:
- # conf contains all the directives to be assigned in /etc/salt/minion.
-
- conf:
- # Set the location of the salt master server, if the master server cannot be
- # resolved, then the minion will fail to start.
-
- master: salt.example.com
-
- # Salt keys are manually generated by: salt-key --gen-keys=GEN_KEYS,
- # where GEN_KEYS is the name of the keypair, e.g. 'minion'. The keypair
- # will be copied to /etc/salt/pki on the minion instance.
-
- public_key: |
- -----BEGIN PUBLIC KEY-----
- MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEAwI4yqk1Y12zVmu9Ejlua
- h2FD6kjrt+N9XfGqZUUVNeRb7CA0Sj5Q6NtgoaiXuIrSea2sLda6ivqAGmtxMMrP
- zpf3FwsYWxBUNF7D4YeLmYjvcTbfr3bCOIRnPNXZ+4isuvvEiM02u2cO0okZSgeb
- dofNa1NbTLYAQr9jZZb7GPKrTO4CKy0xzBih/A+sl6dL9PNDmqXQEjyJS6PXG1Vj
- PvD5jpSrxuIl5Ms/+2Ro3ALgvC8dgoY/3m3csnd06afumGKv5YOGtf+bnWLhc0bf
- 6Sk8Q6i5t0Bl+HAULSPr+B9x/I0rN76ZnPvTj1+hJ0zTof4d0hOLx/K5OQyt7AKo
- 4wIBAQ==
- -----END PUBLIC KEY-----
-
- private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- Proc-Type: 4,ENCRYPTED
- DEK-Info: AES-128-CBC,ECE30DBBA56E2DF06B7BC415F8870994
-
- YQOE5HIsghqjRsxPQqiWMH/VHmyFH6xIpBcmzxzispEHwBojlvLXviwvR66YhgNw
- 7smwE10Ik4/cwwiHTZqCk++jPATPygBiqQkUijCWzcT9kfaxmqdP4PL+hu9g7kGC
- KrD2Bm8/oO08s957aThuHC1sABRcJ1V3FRzJT6Za4fwweyvHVYRnmgaDA6zH0qV8
- NqBSB2hnNXKEdh6UFz9QGcrQxnRjfdIaW64zoEX7jT7gYYL7FkGXBa3XdMOA4fnl
- adRwLFMs0jfilisZv8oUbPdZ6J6x3o8p8LVecCF8tdZt1zkcLSIXKnoDFpHSISGs
- BD9aqD+E4ejynM/tPaVFq4IHzT8viN6h6WcH8fbpClFZ66Iyy9XL3/CjAY7Jzhh9
- fnbc4Iq28cdbmO/vkR7JyVOgEMWe1BcSqtro70XoUNRY8uDJUPqohrhm/9AigFRA
- Pwyf3LqojxRnwXjHsZtGltUtEAPZzgh3fKJnx9MyRR7DPXBRig7TAHU7n2BFRhHA
- TYThy29bK6NkIc/cKc2kEQVo98Cr04PO8jVxZM332FlhiVlP0kpAp+tFj7aMzPTG
- sJumb9kPbMsgpEuTCONm3yyoufGEBFMrIJ+Po48M2RlYOh50VkO09pI+Eu7FPtVB
- H4gKzoJIpZZ/7vYXQ3djM8s9hc5gD5CVExTZV4drbsXt6ITiwHuxZ6CNHRBPL5AY
- wmF8QZz4oivv1afdSe6E6OGC3uVmX3Psn5CVq2pE8VlRDKFy1WqfU2enRAijSS2B
- rtJs263fOJ8ZntDzMVMPgiAlzzfA285KUletpAeUmz+peR1gNzkE0eKSG6THOCi0
- rfmR8SeEzyNvin0wQ3qgYiiHjHbbFhJIMAQxoX+0hDSooM7Wo5wkLREULpGuesTg
- A6Fe3CiOivMDraNGA7H6Yg==
- -----END RSA PRIVATE KEY-----
-
diff --git a/doc/examples/cloud-config-seed-random.txt b/doc/examples/cloud-config-seed-random.txt
deleted file mode 100644
index 08f69a9f..00000000
--- a/doc/examples/cloud-config-seed-random.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#cloud-config
-#
-# random_seed is a dictionary.
-#
-# The config module will write seed data from the datasource
-# to 'file' described below.
-#
-# Entries in this dictionary are:
-# file: the file to write random data to (default is /dev/urandom)
-# data: this data will be written to 'file' before data from
-# the datasource
-# encoding: this will be used to decode 'data' provided.
-# allowed values are 'raw', 'base64', 'b64',
-# 'gzip', or 'gz'. Default is 'raw'
-#
-# command: execute this command to seed random.
-# the command will have RANDOM_SEED_FILE in its environment
-# set to the value of 'file' above.
-# command_required: default False
-# if true, and 'command' is not available to be run
-# then an exception is raised and cloud-init will record failure.
-# Otherwise, only a debug message is logged.
-#
-# Note: command could be ['pollinate',
-# '--server=http://local.pollinate.server']
-# which would have pollinate populate /dev/urandom from provided server
-seed_random:
- file: '/dev/urandom'
- data: 'my random string'
- encoding: 'raw'
- command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE']
- command_required: True
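For reference, a minimal Python 3 sketch of the decode-and-write behaviour documented above (illustrative only, not the cc_seed_random module itself; RANDOM_SEED_FILE is the environment variable name documented in the example):

    import base64
    import gzip
    import os
    import subprocess

    def decode(data, encoding='raw'):
        # 'raw' (the default) passes the data through untouched
        raw = data.encode() if isinstance(data, str) else data
        if encoding in (None, '', 'raw'):
            return raw
        if encoding in ('base64', 'b64'):
            return base64.b64decode(raw)
        if encoding in ('gzip', 'gz'):
            return gzip.decompress(raw)
        raise ValueError('unknown encoding: %s' % encoding)

    def seed(cfg):
        fname = cfg.get('file', '/dev/urandom')
        with open(fname, 'ab') as fp:
            fp.write(decode(cfg.get('data', ''), cfg.get('encoding', 'raw')))
        command = cfg.get('command')
        if command:
            # the documented contract: RANDOM_SEED_FILE is set to 'file'
            env = dict(os.environ, RANDOM_SEED_FILE=fname)
            subprocess.check_call(command, env=env)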
diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt
deleted file mode 100644
index 235a114f..00000000
--- a/doc/examples/cloud-config-ssh-keys.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-#cloud-config
-
-# add each entry to ~/.ssh/authorized_keys for the configured user or the
-# first user defined in the user definition directive.
-ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
-
-# Send pre-generated ssh private keys to the server
-# If these are present, they will be written to /etc/ssh and
-# new random keys will not be generated
-# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
-ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x
- 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb
- 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo
- PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg
- L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W
- p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w
- ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9
- luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO
- W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP
- REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE
- -----END RSA PRIVATE KEY-----
-
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost
-
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT
- pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX
- DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR
- 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa
- LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY
- d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH
- bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3
- 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC
- /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv
- 99iziAH0KBMVbxy03Trz
- -----END DSA PRIVATE KEY-----
-
- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
-
-
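As an aside, the colon-separated MD5 fingerprint that cloud-init prints to the console for keys like these can be computed from the base64 blob alone; a small illustrative sketch (standard fingerprint arithmetic, not cloud-init's own code), applied here to the first authorized key above:

    import base64
    import hashlib

    def md5_fingerprint(pubkey_line):
        # authorized_keys format: '<type> <base64-blob> [comment]'
        blob = base64.b64decode(pubkey_line.split()[1])
        digest = hashlib.md5(blob).hexdigest()
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

    key_line = ('ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXluk'
                'KoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMr'
                'JnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host')
    print(md5_fingerprint(key_line))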
diff --git a/doc/examples/cloud-config-update-apt.txt b/doc/examples/cloud-config-update-apt.txt
deleted file mode 100644
index a83ce3f7..00000000
--- a/doc/examples/cloud-config-update-apt.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-# Update apt database on first boot
-# (ie run apt-get update)
-#
-# Default: true
-# Aliases: apt_update
-package_update: false
diff --git a/doc/examples/cloud-config-update-packages.txt b/doc/examples/cloud-config-update-packages.txt
deleted file mode 100644
index 56b72c63..00000000
--- a/doc/examples/cloud-config-update-packages.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-#cloud-config
-
-# Upgrade the instance on first boot
-# (ie run apt-get upgrade)
-#
-# Default: false
-# Aliases: apt_upgrade
-package_upgrade: true
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
deleted file mode 100644
index 0e8ed243..00000000
--- a/doc/examples/cloud-config-user-groups.txt
+++ /dev/null
@@ -1,109 +0,0 @@
-# Add groups to the system
-# The following example adds the ubuntu group with members foo and bar and
-# the group cloud-users.
-groups:
- - ubuntu: [foo,bar]
- - cloud-users
-
-# Add users to the system. Users are added after groups are added.
-users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary-group: foobar
- groups: users
- selinux-user: staff_u
- expiredate: 2012-09-01
- ssh-import-id: foobar
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: users, admin
- ssh-import-id: None
- lock_passwd: true
- ssh-authorized-keys:
- - <ssh pub key 1>
- - <ssh pub key 2>
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: true
- system: true
-
-# Valid Values:
-# name: The user's login name
-# gecos: The user name's real name, i.e. "Bob B. Smith"
-# homedir: Optional. Set to the local path you want to use. Defaults to
-# /home/<username>
-# primary-group: define the primary group. Defaults to a new group created
-# named after the user.
-# groups: Optional. Additional groups to add the user to. Defaults to none
-# selinux-user: Optional. The SELinux user for the user's login, such as
-# "staff_u". When this is omitted the system will select the default
-# SELinux user.
-# lock_passwd: Defaults to true. Lock the password to disable password login
-# inactive: Create the user as inactive
-# passwd: The hash -- not the password itself -- of the password you want
-# to use for this user. You can generate a safe hash via:
-# mkpasswd --method=SHA-512 --rounds=4096
-#       (the above command reads the password from stdin and produces a
-#       SHA-512 password hash with 4096 salt rounds)
-#
-# Please note: while the use of a hashed password is better than
-# plain text, the use of this feature is not ideal. Also,
-# using a high number of salting rounds will help, but it should
-# not be relied upon.
-#
-# To highlight this risk, running John the Ripper against the
-# example hash above, with a readily available wordlist, revealed
-#       the true password in 12 seconds on an i7-2620QM.
-#
-# In other words, this feature is a potential security risk and is
-# provided for your convenience only. If you do not fully trust the
-# medium over which your cloud-config will be transmitted, then you
-# should use SSH authentication only.
-#
-# You have thus been warned.
-# no-create-home: When set to true, do not create home directory.
-# no-user-group: When set to true, do not create a group named after the user.
-# no-log-init: When set to true, do not initialize lastlog and faillog database.
-# ssh-import-id: Optional. Import SSH ids
-# ssh-authorized-keys: Optional. [list] Add keys to user's authorized keys file
-# sudo: Defaults to none. Set to the sudo string you want to use, i.e.
-# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following
-# format.
-# sudo:
-# - ALL=(ALL) NOPASSWD:/bin/mysql
-# - ALL=(ALL) ALL
-# Note: Please double check your syntax and make sure it is valid.
-# cloud-init does not parse/check the syntax of the sudo
-# directive.
-# system: Create the user as a system user. This means no home directory.
-#
-
-# Default user creation:
-#
-# Unless you define users, you will get an 'ubuntu' user on ubuntu systems with
-# the legacy permissions (passwordless sudo, locked password, etc). If, however,
-# you want the 'ubuntu' user in addition to other users, you need to instruct
-# cloud-init that you also want the default user. To do this use the following
-# syntax:
-# users:
-# - default
-# - bob
-# - ....
-# foobar: ...
-#
-# users[0] (the first user in users) overrides the user directive.
-#
-# The 'default' user above references the distro's config:
-# system_info:
-# default_user:
-# name: Ubuntu
-# plain_text_passwd: 'ubuntu'
-# home: /home/ubuntu
-# shell: /bin/bash
-# lock_passwd: True
-# gecos: Ubuntu
-# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
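On a glibc-based system, the same kind of hash that mkpasswd produces above can also be generated from Python's standard library; a sketch (the crypt module is glibc-backed and Linux-only, and has been deprecated in the newest Python releases; the rounds/salt parameters mirror the mkpasswd invocation shown earlier):

    import crypt
    import secrets
    import string

    def sha512_crypt(password, rounds=4096):
        # '$6$' selects SHA-512 crypt; 'rounds=' is honoured by glibc
        alphabet = string.ascii_letters + string.digits + './'
        salt = ''.join(secrets.choice(alphabet) for _ in range(16))
        return crypt.crypt(password, '$6$rounds=%d$%s' % (rounds, salt))

    print(sha512_crypt('passw0rd'))

The output drops straight into the 'passwd:' field shown above; the same security caveats apply.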
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
deleted file mode 100644
index 7f90847b..00000000
--- a/doc/examples/cloud-config-vendor-data.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-#cloud-config
-#
-# This explains how to control vendordata via a cloud-config
-#
-# On select Datasources, vendors have a channel for the consumption
-# of all supported user-data types via a special channel called
-# vendordata. Users of the end system are given ultimate control.
-#
-vendor_data:
- enabled: True
- prefix: /usr/bin/ltrace
-
-# enabled: whether it is enabled or not
-# prefix: the command to run before any vendor scripts.
-# Note: this is a fairly weak method of containment. It should
-#    be used to profile a script, not to prevent it from running
diff --git a/doc/examples/cloud-config-write-files.txt b/doc/examples/cloud-config-write-files.txt
deleted file mode 100644
index ec98bc93..00000000
--- a/doc/examples/cloud-config-write-files.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-#cloud-config
-# vim: syntax=yaml
-#
-# This is the configuration syntax that the write_files module
-# will know how to understand. encoding can be 'b64', 'gzip', or 'gz+b64'.
-# The content will be decoded accordingly and then written to the path that is
-# provided.
-#
-# Note: Content strings here are truncated for example purposes.
-write_files:
-- encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
-- content: |
- # My new /etc/sysconfig/samba file
-
- SMBDOPTIONS="-D"
- path: /etc/sysconfig/samba
-- content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- ....
- path: /bin/arch
- permissions: '0555'
-- encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /usr/bin/hello
- permissions: '0755'
-
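To produce 'content' values for entries like these, the encodings compose exactly as named; a hypothetical helper sketch (it covers the b64 and gz+b64 forms; plain gzip content would instead be raw bytes embedded via yaml !!binary):

    import base64
    import gzip

    def encode_for_write_files(raw, encoding):
        # returns the string to place in 'content' for a given 'encoding'
        data = raw.encode() if isinstance(raw, str) else raw
        if encoding == 'b64':
            return base64.b64encode(data).decode()
        if encoding == 'gz+b64':
            return base64.b64encode(gzip.compress(data)).decode()
        raise ValueError('unhandled encoding: %s' % encoding)

    print(encode_for_write_files('# my new config file\n', 'gz+b64'))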
diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt
deleted file mode 100644
index ab2c031e..00000000
--- a/doc/examples/cloud-config-yum-repo.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-#cloud-config
-# vim: syntax=yaml
-#
-# Add yum repository configuration to the system
-#
-# The following example adds the file /etc/yum.repos.d/epel_testing.repo
-# which can then subsequently be used by yum for later operations.
-yum_repos:
- # The name of the repository
- epel-testing:
- # Any repository configuration options
- # See: man yum.conf
- #
- # This one is required!
- baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
- enabled: false
- failovermethod: priority
- gpgcheck: true
- gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
- name: Extra Packages for Enterprise Linux 5 - Testing
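The yum_repos dictionary above maps naturally onto INI-style .repo files; a sketch of that rendering with Python's configparser (illustrative only, not the cc_yum_add_repo implementation):

    import configparser
    import io

    def render_repo(repo_id, options):
        parser = configparser.ConfigParser()
        # configparser wants strings; booleans become 'true'/'false'
        parser[repo_id] = {k: str(v).lower() if isinstance(v, bool) else str(v)
                           for k, v in options.items()}
        buf = io.StringIO()
        parser.write(buf)
        return buf.getvalue()

    print(render_repo('epel-testing', {
        'baseurl': 'http://download.fedoraproject.org/pub/epel/testing/5/$basearch',
        'enabled': False,
        'gpgcheck': True,
        'name': 'Extra Packages for Enterprise Linux 5 - Testing',
    }))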
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
deleted file mode 100644
index 3cc9c055..00000000
--- a/doc/examples/cloud-config.txt
+++ /dev/null
@@ -1,752 +0,0 @@
-#cloud-config
-# Update apt database on first boot
-# (ie run apt-get update)
-#
-# Default: true
-# Aliases: apt_update
-package_update: false
-
-# Upgrade the instance on first boot
-# (ie run apt-get upgrade)
-#
-# Default: false
-# Aliases: apt_upgrade
-package_upgrade: true
-
-# Reboot after package install/update if necessary
-# Default: false
-# Aliases: apt_reboot_if_required
-package_reboot_if_required: true
-
-# Add apt repositories
-#
-# Default: auto select based on cloud metadata
-# in ec2, the default is <region>.archive.ubuntu.com
-# apt_mirror:
-# use the provided mirror
-# apt_mirror_search:
-# search the list for the first mirror.
-# this is currently very limited, only verifying that
-# the mirror is dns resolvable or an IP address
-#
-# if neither apt_mirror nor apt_mirror_search is set (the default)
-# then use the mirror provided by the DataSource found.
-# In EC2, that means using <region>.ec2.archive.ubuntu.com
-#
-# if no mirror is provided by the DataSource, and 'apt_mirror_search_dns' is
-# true, then search for dns names '<distro>-mirror' in each of
-# - fqdn of this host per cloud metadata
-# - localdomain
-# - no domain (which would search domains listed in /etc/resolv.conf)
-# If there is a dns entry for <distro>-mirror, then it is assumed that there
-# is a distro mirror at http://<distro>-mirror.<domain>/<distro>
-#
-# That gives the cloud provider the opportunity to set mirrors of a distro
-# up and expose them only by creating dns entries.
-#
-# if none of that is found, then the default distro mirror is used
-apt_mirror: http://us.archive.ubuntu.com/ubuntu/
-apt_mirror_search:
- - http://local-mirror.mydomain
- - http://archive.ubuntu.com
-
-apt_mirror_search_dns: False
-
-# apt_proxy (configure Acquire::HTTP::Proxy)
-# 'apt_http_proxy' is an alias for 'apt_proxy'.
-# Also, available are 'apt_ftp_proxy' and 'apt_https_proxy'.
-# These affect Acquire::FTP::Proxy and Acquire::HTTPS::Proxy respectively
-apt_proxy: http://my.apt.proxy:3128
-
-# apt_pipelining (configure Acquire::http::Pipeline-Depth)
-# Default: disables HTTP pipelining. Certain web servers, such
-# as S3, do not pipeline properly (LP: #948461).
-# Valid options:
-# False/default: Disables pipelining for APT
-# None/Unchanged: Use OS default
-# Number: Set pipelining to some number (not recommended)
-apt_pipelining: False
-
-# Preserve existing /etc/apt/sources.list
-# Default: overwrite sources_list with mirror. If this is true
-# then apt_mirror above will have no effect
-apt_preserve_sources_list: true
-
-# Provide a custom template for rendering sources.list
-# Default: a default template for Ubuntu/Debian will be used as packaged in
-# Ubuntu: /etc/cloud/templates/sources.list.ubuntu.tmpl
-# Debian: /etc/cloud/templates/sources.list.debian.tmpl
-# Others: n/a
-# This will follow the normal mirror/codename replacement rules before
-# being written to disk.
-apt_custom_sources_list: |
- ## template:jinja
- ## Note, this file is written by cloud-init on first boot of an instance
- ## modifications made here will not survive a re-bundle.
- ## if you wish to make changes you can:
- ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
- ## or do the same in user-data
- ## b.) add sources in /etc/apt/sources.list.d
- ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
- deb {{mirror}} {{codename}} main restricted
- deb-src {{mirror}} {{codename}} main restricted
-
- # could drop some of the usually used entries
-
- # could refer to other mirrors
- deb http://ddebs.ubuntu.com {{codename}} main restricted universe multiverse
- deb http://ddebs.ubuntu.com {{codename}}-updates main restricted universe multiverse
- deb http://ddebs.ubuntu.com {{codename}}-proposed main restricted universe multiverse
-
- # or even more uncommon examples like local or NFS mounted repos,
- # eventually whatever is compatible with sources.list syntax
- deb file:/home/apt/debian unstable main contrib non-free
-
-# 'source' entries in apt-sources that match this python regex
-# expression will be passed to add-apt-repository
-add_apt_repo_match: '^[\w-]+:\w'
-
-# 'apt_sources' is a dictionary
-# The key is the filename and will be prepended by /etc/apt/sources.list.d/ if
-# it doesn't start with a '/'.
-# There are certain cases (where no content is written into a source.list
-# file) in which the filename will be ignored - yet it can still be used
-# as an index for merging.
-# The value it maps to is a dictionary with the following optional entries:
-# source: a sources.list entry (some variable replacements apply)
-# keyid: providing a key to import via shortid or fingerprint
-# key: providing a raw PGP key
-# keyserver: keyserver to fetch keys from, default is keyserver.ubuntu.com
-# filename: for compatibility with the older format (now the key to this
-# dictionary is the filename). If specified this overwrites the
-# filename given as key.
-
-# the new "filename: {specification-dictionary}, filename2: ..." format allows
-# better merging between multiple input files than a list like:
-# cloud-config1
-# sources:
-# s1: {'key': 'key1', 'source': 'source1'}
-# cloud-config2
-# sources:
-# s2: {'key': 'key2'}
-# s1: {filename: 'foo'}
-# this would be merged to
-#sources:
-# s1:
-# filename: foo
-# key: key1
-# source: source1
-# s2:
-# key: key2
-# Be aware that this style of merging is not the default (for backward
-# compatibility reasons). You should specify the following merge_how to get
-# this more complete and modern merging behaviour:
-# merge_how: "list()+dict()+str()"
-# This would then also be equivalent to the config merging used in curtin
-# (https://launchpad.net/curtin).
-
-# for more details see below in the various examples
-
-apt_sources:
- byobu-ppa.list:
- source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
- keyid: F430BBA5 # GPG key ID published on a key server
- # adding a source.list line, importing a gpg key for a given key id and
- # storing it in the file /etc/apt/sources.list.d/byobu-ppa.list
-
- # PPA shortcut:
- # * Setup correct apt sources.list line
- # * Import the signing key from LP
- #
- # See https://help.launchpad.net/Packaging/PPA for more information
- # this requires 'add-apt-repository'
-  # because of that, the filename key is ignored in this case
- ignored1:
- source: "ppa:smoser/ppa" # Quote the string
-
- # Custom apt repository:
- # * all that is required is 'source'
- # * Creates a file in /etc/apt/sources.list.d/ for the sources list entry
- # * [optional] Import the apt signing key from the keyserver
- # * Defaults:
- # + keyserver: keyserver.ubuntu.com
- #
- # See sources.list man page for more information about the format
- my-repo.list:
- source: deb http://archive.ubuntu.com/ubuntu karmic-backports main universe multiverse restricted
-
- # sources can use $MIRROR and $RELEASE and they will be replaced
- # with the local mirror for this cloud, and the running release
- # the entry below would be possibly turned into:
- # source: deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu natty multiverse
- my-repo.list:
- source: deb $MIRROR $RELEASE multiverse
-
- # this would have the same end effect as 'ppa:byobu/ppa'
- my-repo.list:
- source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
- keyid: F430BBA5 # GPG key ID published on a key server
- filename: byobu-ppa.list
-
- # this would only import the key without adding a ppa or other source spec
- # since this doesn't generate a source.list file the filename key is ignored
- ignored2:
- keyid: F430BBA5 # GPG key ID published on a key server
-
-  # In general keyids can also be specified via their long fingerprints
- # since this doesn't generate a source.list file the filename key is ignored
- ignored3:
- keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
-
- # Custom apt repository:
- # * The apt signing key can also be specified
- # by providing a pgp public key block
- # * Providing the PGP key here is the most robust method for
- # specifying a key, as it removes dependency on a remote key server
- my-repo.list:
- source: deb http://ppa.launchpad.net/alestic/ppa/ubuntu karmic main
- key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
-
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
-
- # Custom gpg key:
- # * As with keyid, a key may also be specified without a related source.
- # * all other facts mentioned above still apply
- # since this doesn't generate a source.list file the filename key is ignored
- ignored4:
- key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
-
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
-
-
-## apt config via system_info:
-# under the 'system_info', you can further customize cloud-init's interaction
-# with apt.
-# system_info:
-# apt_get_command: [command, argument, argument]
-# apt_get_upgrade_subcommand: dist-upgrade
-#
-# apt_get_command:
-# To specify a different 'apt-get' command, set 'apt_get_command'.
-# This must be a list, and the subcommand (update, upgrade) is appended to it.
-# default is:
-# ['apt-get', '--option=Dpkg::Options::=--force-confold',
-# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
-#
-# apt_get_upgrade_subcommand:
-# Specify a different subcommand for 'upgrade'. The default is 'dist-upgrade'.
-# This is the subcommand that is invoked if package_upgrade is set to true above.
-#
-# apt_get_wrapper:
-# command: eatmydata
-# enabled: [True, False, "auto"]
-#
-
-# Install additional packages on first boot
-#
-# Default: none
-#
-# if packages are specified, then apt_update will be set to true
-#
-packages:
- - pwgen
- - pastebinit
-
-# set up mount points
-# 'mounts' contains a list of lists
-# the inner list are entries for an /etc/fstab line
-# ie : [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, fs_passno ]
-#
-# default:
-# mounts:
-# - [ ephemeral0, /mnt ]
-# - [ swap, none, swap, sw, 0, 0 ]
-#
-# in order to remove a previously listed mount (ie, one from defaults)
-# list only the fs_spec. For example, to override the default of
-# mounting swap:
-# - [ swap ]
-# or
-# - [ swap, null ]
-#
-# - if a device does not exist at the time, an entry will still be
-# written to /etc/fstab.
-# - '/dev' can be omitted for device names that begin with: xvd, sd, hd, vd
-# - if an entry does not have all 6 fields, they will be filled in
-# with values from 'mount_default_fields' below.
-#
-# Note that you should set 'nobootwait' (see man fstab) for volumes that may
-# not be attached at instance boot (or reboot)
-#
-mounts:
- - [ ephemeral0, /mnt, auto, "defaults,noexec" ]
- - [ sdc, /opt/data ]
- - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
- - [ dd, /dev/zero ]
-
-# mount_default_fields
-# These values are used to fill in any entries in 'mounts' that are not
-# complete. This must be an array, and must have 6 fields.
-mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
-
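A sketch of the fill-in behaviour just described: short 'mounts' entries are padded from mount_default_fields and rendered as fstab lines (an illustration of the documented semantics, assuming the usual 6-field fstab layout, not cloud-init's cc_mounts code):

    MOUNT_DEFAULT_FIELDS = [None, None, 'auto', 'defaults,nobootwait', '0', '2']

    def fstab_line(entry):
        # pad a short entry with the corresponding default fields
        full = list(entry) + MOUNT_DEFAULT_FIELDS[len(entry):]
        spec = str(full[0])
        # '/dev' may be omitted for names beginning with xvd, sd, hd or vd
        if spec.startswith(('xvd', 'sd', 'hd', 'vd')):
            spec = '/dev/' + spec
        return '\t'.join(str(f) for f in [spec] + full[1:])

    for m in (['ephemeral0', '/mnt', 'auto', 'defaults,noexec'],
              ['sdc', '/opt/data']):
        print(fstab_line(m))
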
-# add each entry to ~/.ssh/authorized_keys for the configured user or the
-# first user defined in the user definition directive.
-ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
-
-# Send pre-generated ssh private keys to the server
-# If these are present, they will be written to /etc/ssh and
-# new random keys will not be generated
-# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
-ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x
- 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb
- 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo
- PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg
- L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W
- p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w
- ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9
- luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO
- W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP
- REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE
- -----END RSA PRIVATE KEY-----
-
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost
-
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT
- pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX
- DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR
- 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa
- LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY
- d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH
- bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3
- 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC
- /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv
- 99iziAH0KBMVbxy03Trz
- -----END DSA PRIVATE KEY-----
-
- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
-
-
-# remove access to the ec2 metadata service early in boot via null route
-# the null route can be removed (by root) with:
-# route del -host 169.254.169.254 reject
-# default: false (service available)
-disable_ec2_metadata: true
-
-# run commands
-# default: none
-# runcmd contains a list of either lists or a string
-# each item will be executed in order at rc.local like level with
-# output to the console
-# - if the item is a list, the items will be properly executed as if
-# passed to execve(3) (with the first arg as the command).
-# - if the item is a string, it will be simply written to the file and
-# will be interpreted by 'sh'
-#
-# Note that the list has to be proper yaml, so you have to escape
-# any characters yaml would eat (':' can be problematic)
-runcmd:
- - [ ls, -l, / ]
- - [ sh, -xc, "echo $(date) ': hello world!'" ]
- - [ sh, -c, echo "=========hello world'=========" ]
- - ls -l /root
- - [ wget, "http://slashdot.org", -O, /tmp/index.html ]
-
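A sketch of the list-versus-string distinction above (an illustration of the semantics only; cloud-init itself writes the items into a shell script):

    import subprocess

    def run_item(item):
        if isinstance(item, list):
            # list form: executed like an execve() argv, no shell parsing
            subprocess.check_call([str(a) for a in item])
        else:
            # string form: handed to 'sh -c', so shell syntax applies
            subprocess.check_call(['sh', '-c', item])

    run_item(['ls', '-l', '/'])
    run_item('echo $(date) ": hello world!"')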
-
-# boot commands
-# default: none
-# this is very similar to runcmd above, but commands run very early
-# in the boot process, only slightly after a 'boothook' would run.
-# bootcmd should really only be used for things that could not be
-# done later in the boot process. bootcmd is very much like a
-# boothook, but somewhat more friendly.
-# * bootcmd will run on every boot
-# * the INSTANCE_ID variable will be set to the current instance id.
-# * you can use 'cloud-init-per' command to help only run once
-bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
-
-# cloud_config_modules:
-# default:
-# cloud_config_modules:
-# - mounts
-# - ssh
-# - apt-update-upgrade
-# - puppet
-# - updates-check
-# - disable-ec2-metadata
-# - runcmd
-#
-# This is an array of arrays or strings.
-# if item is a string, then it is read as a module name
-# if the item is an array it is of the form:
-# name, frequency, arguments
-# where 'frequency' is one of:
-# once-per-instance
-# always
-# a python file in the CloudConfig/ module directory named
-# cc_<name>.py
-# example:
-cloud_config_modules:
- - mounts
- - ssh-import-id
- - ssh
- - grub-dpkg
- - [ apt-update-upgrade, always ]
- - puppet
- - updates-check
- - disable-ec2-metadata
- - runcmd
- - byobu
-
-# unverified_modules: []
-# if a config module declares a set of distros as supported then it will be
-# skipped if running on a different distro. to override this sanity check,
-# provide a list of modules that should be run anyway in 'unverified_modules'.
-# The default is an empty list (ie, trust modules).
-#
-# Example:
-# unverified_modules: ['apt-update-upgrade']
-# default: []
-
-# ssh_import_id: [ user1, user2 ]
-# ssh_import_id will feed the list in that variable to
-# ssh-import-id, so that public keys stored in launchpad
-# can easily be imported into the configured user
-# This can be a single string ('smoser') or a list ([smoser, kirkland])
-ssh_import_id: [smoser]
-
-# Provide debconf answers / debian preseed values
-#
-# See debconf-set-selections man page.
-#
-# Default: none
-#
-debconf_selections: | # Need to preserve newlines
- # Force debconf priority to critical.
- debconf debconf/priority select critical
-
- # Override default frontend to readline, but allow user to select.
- debconf debconf/frontend select readline
- debconf debconf/frontend seen false
-
-# manage byobu defaults
-# byobu_by_default:
-# 'user' or 'enable-user': set byobu 'launch-by-default' for the default user
-# 'system' or 'enable-system' or 'enable':
-# enable 'launch-by-default' for all users, do not modify default user
-# 'disable': disable both default user and system
-# 'disable-system': disable system
-# 'disable-user': disable for default user
-# not-set: no changes made
-byobu_by_default: system
-
-# disable ssh access as root.
-# if you want to be able to ssh in to the system as the root user
-# rather than as the 'ubuntu' user, then you must set this to false
-# default: true
-disable_root: false
-
-# disable_root_opts: the value of this variable will prefix the
-# respective key in /root/.ssh/authorized_keys if disable_root is true
-# see 'man authorized_keys' for more information on what you can do here
-#
-# The string '$USER' will be replaced with the username of the default user
-#
-# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"root\".';echo;sleep 10"
-
-
-# set the locale to a given locale
-# default: en_US.UTF-8
-locale: en_US.UTF-8
-# render template default-locale.tmpl to locale_configfile
-locale_configfile: /etc/default/locale
-
-# add entries to rsyslog configuration
-# The first occurrence of a given filename will truncate.
-# subsequent entries will append.
-# if value is a scalar, its content is assumed to be 'content', and the
-# default filename is used.
-# if filename is not provided, it will default to 'rsyslog_filename'
-# if filename does not start with a '/', it will be put in 'rsyslog_dir'
-# rsyslog_dir default: /etc/rsyslog.d
-# rsyslog_filename default: 20-cloud-config.conf
-rsyslog:
- - ':syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-foo.log'
- - content: "*.* @@192.0.2.1:10514"
- - filename: 01-examplecom.conf
- content: "*.* @@syslogd.example.com"
-
-# resize_rootfs: should the / filesystem be resized on first boot
-# this allows you to launch an instance with a larger disk / partition
-# and have the instance automatically grow / to accommodate it
-# set to 'False' to disable
-# by default, the resizefs is done early in boot, and blocks
-# if resize_rootfs is set to 'noblock', then it will be run in parallel
-resize_rootfs: True
-
-## hostname and /etc/hosts management
-# cloud-init can handle updating some entries in /etc/hosts,
-# and can set your hostname for you.
-#
-# if you do nothing you'll end up with:
-# * /etc/hostname (and `hostname`) managed via: 'preserve_hostname: false'
-# if you do not change /etc/hostname, it will be updated with the cloud
-# provided hostname on each boot. If you make a change, then manual
-# maintenance takes over, and cloud-init will not modify it.
-#
-# * /etc/hosts managed via: 'manage_etc_hosts: false'
-# cloud-init will not manage /etc/hosts at all. It is in full manual
-# maintenance mode.
-#
-# You can change the above behavior with the following config variables:
-# Remember that these can be set in cloud-config via user-data,
-# /etc/cloud/cloud.cfg or any file in /etc/cloud/cloud.cfg.d/
-#
-# == Hostname management (via /etc/hostname) ==
-# * preserve_hostname:
-# default: False
-#     If this option is set to True, then /etc/hostname will never be updated
-# The default behavior is to update it if it has not been modified by
-# the user.
-#
-# * hostname:
-# this option will be used wherever the 'hostname' is needed
-# simply substitute it in the description above.
-# ** If you wish to set your hostname, set it here **
-# default: 'hostname' as returned by the metadata service
-# on EC2, the hostname portion of 'local-hostname' is used
-# which is something like 'ip-10-244-170-199'
-#
-# * fqdn:
-# this option will be used wherever 'fqdn' is needed.
-#     simply substitute it in the description above.
-# default: fqdn as returned by the metadata service. on EC2 'hostname'
-# is used, so this is like: ip-10-244-170-199.ec2.internal
-#
-# == /etc/hosts management ==
-#
-# The cloud-config variable that covers management of /etc/hosts is
-# 'manage_etc_hosts'
-#
-# By default, its value is 'false' (boolean False)
-#
-# * manage_etc_hosts:
-# default: false
-#
-# false:
-# cloud-init will not modify /etc/hosts at all.
-# * Whatever is present at instance boot time will be present after boot.
-# * User changes will not be overwritten
-#
-# true or 'template':
-# on every boot, /etc/hosts will be re-written from
-# /etc/cloud/templates/hosts.tmpl.
-# The strings '$hostname' and '$fqdn' are replaced in the template
-# with the appropriate values.
-#     To make modifications persistent across a reboot, you must make
-#     modifications to /etc/cloud/templates/hosts.tmpl
-#
-# localhost:
-# This option ensures that an entry is present for fqdn as described in
-# section 5.1.2 of the debian manual
-# http://www.debian.org/doc/manuals/debian-reference/ch05.en.html
-#
-# cloud-init will generally own the 127.0.1.1 entry, and will update
-# it to the hostname and fqdn on every boot. All other entries will
-# be left as is. 'ping `hostname`' will ping 127.0.1.1
-#
-# If you want a fqdn entry with aliases other than 'hostname' to resolve
-# to a localhost interface, you'll need to use something other than
-# 127.0.1.1. For example:
-# 127.0.1.2 myhost.fqdn.example.com myhost whatup.example.com
-
-# final_message
-# default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
-# this message is written by cloud-final when the system has finished
-# its first boot.
-# This message is rendered as if it were a template. If you
-# want jinja, you have to start the line with '## template:jinja\n'
-final_message: "The system is finally up, after $UPTIME seconds"
-
-# configure where output will go
-# 'output' entry is a dict with 'init', 'config', 'final' or 'all'
-# entries. Each one defines where
-# cloud-init, cloud-config, cloud-config-final or all output will go
-# each entry in the dict can be a string, list or dict.
-# if it is a string, it refers to stdout and stderr
-# if it is a list, entry 0 is stdout, entry 1 is stderr
-# if it is a dict, it is expected to have 'output' and 'error' fields
-# default is to write to console only
-# the special entry "&1" for an error means "same location as stdout"
-# (Note that '&1' has meaning in yaml, so it must be quoted)
-output:
- init: "> /var/log/my-cloud-init.log"
- config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
- final:
- output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
- error: "&1"
-
-
-# phone_home: if this dictionary is present, then the phone_home
-# cloud-config module will post specified data back to the given
-# url
-# default: none
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
-#
-phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
-
-# timezone: set the timezone for this instance
-# the value of 'timezone' must exist in /usr/share/zoneinfo
-timezone: US/Eastern
-
-# def_log_file and syslog_fix_perms work together
-# if
-# - logging is set to go to a log file 'L' both with and without syslog
-# - and 'L' does not exist
-# - and syslog is configured to write to 'L'
-# then 'L' will be initially created with root:root ownership (during
-# cloud-init), and then at cloud-config time (when syslog is available)
-# the syslog daemon will be unable to write to the file.
-#
-# to remedy this situation, 'def_log_file' can be set to a filename
-# and syslog_fix_perms to a string containing "<user>:<group>"
-# if syslog_fix_perms is a list, it will iterate through and use the
-# first pair that does not raise error.
-#
-# the default values are '/var/log/cloud-init.log' and 'syslog:adm'
-# the value of 'def_log_file' should match what is configured in logging
-# if either is empty, then no change of ownership will be done
-def_log_file: /var/log/my-logging-file.log
-syslog_fix_perms: syslog:root
-
-# you can set passwords for a user or multiple users
-# this is off by default.
-# to set the default user's password, use the 'password' option.
-# if set to 'R' or 'RANDOM', then a random password will be
-# generated and written to stdout (the console)
-# password: passw0rd
-#
-# also note, that this will expire the password, forcing a change
-# on first login. If you do not want to expire, see 'chpasswd' below.
-#
-# By default in the UEC images password authentication is disabled
-# Thus, simply setting 'password' as above will only allow you to login
-# via the console.
-#
-# in order to enable password login via ssh you must set
-# 'ssh_pwauth'.
-# If it is set to 'True' or 'False', then sshd_config will be updated
-# to ensure the desired function. If not set, or set to '' or 'unchanged'
-# then sshd_config will not be updated.
-# ssh_pwauth: True
-#
-# there is also an option to set multiple users' passwords, using 'chpasswd'
-# That looks like the following, with 'expire' set to 'True' by default.
-# to not expire users' passwords, set 'expire' to 'False':
-# chpasswd:
-# list: |
-# user1:password1
-# user2:RANDOM
-# expire: True
-# ssh_pwauth: [ True, False, "" or "unchanged" ]
-#
-# So, a simple working example to allow login via ssh, and not expire
-# for the default user would look like:
-password: passw0rd
-chpasswd: { expire: False }
-ssh_pwauth: True
-
-# manual cache clean.
-# By default, the link from /var/lib/cloud/instance to
-# the specific instance in /var/lib/cloud/instances/ is removed on every
-# boot. The cloud-init code then searches for a DataSource on every boot
-# if your DataSource will not be present on every boot, then you can set
-# this option to 'True', and maintain (remove) that link yourself before
-# the image is booted as a new instance.
-# default is False
-manual_cache_clean: False
-
-# When cloud-init is finished running including having run
-# cloud_init_modules, then it will run this command. The default
-# is to emit an upstart signal as shown below. If the value is a
-# list, it will be passed to Popen. If it is a string, it will be
-# invoked through 'sh -c'.
-#
-# default value:
-# cc_ready_cmd: [ initctl, emit, cloud-config, CLOUD_CFG=/var/lib/instance//cloud-config.txt ]
-# example:
-# cc_ready_cmd: [ sh, -c, 'echo HI MOM > /tmp/file' ]
-
-## configure interaction with ssh server
-# ssh_svcname: ssh
-#    set the name of the service used with 'service restart'
-# in order to restart the ssh daemon. For fedora, use 'sshd'
-# default: ssh
-# ssh_deletekeys: True
-# boolean indicating if existing ssh keys should be deleted on a
-# per-instance basis. On a public image, this should absolutely be set
-# to 'True'
-# ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa']
-# a list of the ssh key types that should be generated
-# These are passed to 'ssh-keygen -t'
-
-## configuration of ssh keys output to console
-# ssh_fp_console_blacklist: []
-# ssh_key_console_blacklist: [ssh-dss]
-# A list of key types (first token of a /etc/ssh/ssh_key_*.pub file)
-# that should be skipped when outputting key fingerprints and keys
-# to the console respectively.
-
-## poweroff or reboot system after finished
-# default: none
-#
-# power_state can be used to make the system shutdown, reboot or
-# halt after boot is finished. This same thing can be achieved by
-# user-data scripts or by runcmd by simply invoking 'shutdown'.
-#
-# Doing it this way ensures that cloud-init is entirely finished with
-# modules that would be executed, and avoids any error/log messages
-# that may go to the console as a result of system services like
-# syslog being taken down while cloud-init is running.
-#
-# delay: form accepted by shutdown. default is 'now'. other format
-# accepted is +m (m in minutes)
-# mode: required. must be one of 'poweroff', 'halt', 'reboot'
-# message: provided as the message argument to 'shutdown'. default is none.
-power_state:
- delay: 30
- mode: poweroff
- message: Bye Bye
diff --git a/doc/examples/include-once.txt b/doc/examples/include-once.txt
deleted file mode 100644
index 0cf74e5e..00000000
--- a/doc/examples/include-once.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#include-once
-# entries are one url per line. comment lines beginning with '#' are allowed
-# urls are passed to urllib.urlopen, so the format must be supported there
-# These entries will be processed only ONE TIME by cloud-init; any further
-# iterations won't process this file
-http://www.ubuntu.com/robots.txt
-http://www.w3schools.com/html/lastpage.htm
diff --git a/doc/examples/include.txt b/doc/examples/include.txt
deleted file mode 100644
index 5bdc7991..00000000
--- a/doc/examples/include.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-#include
-# entries are one url per line. comment lines beginning with '#' are allowed
-# urls are passed to urllib.urlopen, so the format must be supported there
-http://www.ubuntu.com/robots.txt
-http://www.w3schools.com/html/lastpage.htm
diff --git a/doc/examples/kernel-cmdline.txt b/doc/examples/kernel-cmdline.txt
deleted file mode 100644
index f043baef..00000000
--- a/doc/examples/kernel-cmdline.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-cloud-config can be provided via the kernel command line.
-configuration that comes from the kernel command line has higher priority
-than configuration in /etc/cloud/cloud.cfg
-
-The format is:
- cc: <yaml content here> [end_cc]
-
-cloud-config will consider any content after 'cc:' to be cloud-config
-data. If an 'end_cc' string is present, then it will stop reading there.
-# Otherwise it considers everything after 'cc:' to be cloud-config content.
-
-# In order to allow newlines, you must enter '\\n' literally on the
-# command line: two backslashes followed by the letter 'n'.
-
-Here are some examples:
- root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
- root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
- cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
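A sketch of extracting such content from a kernel command line according to the rules above (an illustrative parser, not cloud-init's own):

    import re

    def extract_cc(cmdline):
        # everything after 'cc:' up to an optional 'end_cc', with the
        # documented literal two-backslash 'n' turned into real newlines
        configs = []
        for match in re.finditer(r'cc:(.*?)(?:end_cc|$)', cmdline):
            configs.append(match.group(1).replace(r'\\n', '\n').strip())
        return configs

    line = (r'root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\n'
            r'runcmd: [ [ ls, -l ], echo hi ] end_cc')
    print(extract_cc(line))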
diff --git a/doc/examples/part-handler-v2.txt b/doc/examples/part-handler-v2.txt
deleted file mode 100644
index 554c34a5..00000000
--- a/doc/examples/part-handler-v2.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-#part-handler
-# vi: syntax=python ts=4
-# this is an example of a version 2 part handler.
-# the differences between the initial part-handler version
-# and v2 is:
-# * handle_part receives a 5th argument, 'frequency'
-# frequency will be either 'always' or 'per-instance'
-# * handler_version must be set
-#
-# A handler declaring version 2 will be called on all instance boots, with a
-# different 'frequency' argument.
-
-handler_version = 2
-
-def list_types():
- # return a list of mime-types that are handled by this module
- return(["text/plain", "text/go-cubs-go"])
-
-def handle_part(data, ctype, filename, payload, frequency):
-    # data: the cloudinit object
-    # ctype: '__begin__', '__end__', or the specific mime-type of the part
-    # filename: the filename for the part, or a dynamically generated name
-    #           if no filename attribute is present
-    # payload: the content of the part (empty for begin or end)
-    # frequency: the frequency that this cloud-init run is running for
-    #            this is either 'per-instance' or 'always'.  'per-instance'
-    #            parts are invoked only on the first boot; 'always' parts
-    #            are invoked on every boot.
-    if ctype == "__begin__":
-        print("my handler is beginning, frequency=%s" % frequency)
-        return
-    if ctype == "__end__":
-        print("my handler is ending, frequency=%s" % frequency)
-        return
-
-    print("==== received ctype=%s filename=%s ====" % (ctype, filename))
-    print(payload)
-    print("==== end ctype=%s filename=%s" % (ctype, filename))
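Handlers like this one reach cloud-init as one part of MIME multipart user-data; a sketch of assembling such a blob with Python's standard email package (text/part-handler is the conventional content type; the filenames and part contents here are illustrative):

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    def build_user_data(parts):
        # parts: iterable of (content, mime-subtype, filename)
        outer = MIMEMultipart()
        for content, subtype, filename in parts:
            msg = MIMEText(content, subtype)
            msg.add_header('Content-Disposition', 'attachment',
                           filename=filename)
            outer.attach(msg)
        return outer.as_string()

    handler_src = '#part-handler\n# (handler source as shown above)\n'
    print(build_user_data([
        (handler_src, 'part-handler', 'part-handler.py'),
        ('some text for the handler', 'go-cubs-go', 'cubs.txt'),
    ]))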
diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt
deleted file mode 100644
index a6e66415..00000000
--- a/doc/examples/part-handler.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-#part-handler
-# vi: syntax=python ts=4
-
-def list_types():
- # return a list of mime-types that are handled by this module
- return(["text/plain", "text/go-cubs-go"])
-
-def handle_part(data, ctype, filename, payload):
-    # data: the cloudinit object
-    # ctype: '__begin__', '__end__', or the specific mime-type of the part
-    # filename: the filename for the part, or a dynamically generated name
-    #           if no filename attribute is present
-    # payload: the content of the part (empty for begin or end)
-    if ctype == "__begin__":
-        print("my handler is beginning")
-        return
-    if ctype == "__end__":
-        print("my handler is ending")
-        return
-
-    print("==== received ctype=%s filename=%s ====" % (ctype, filename))
-    print(payload)
-    print("==== end ctype=%s filename=%s" % (ctype, filename))
diff --git a/doc/examples/plain-ignored.txt b/doc/examples/plain-ignored.txt
deleted file mode 100644
index fb2b59dc..00000000
--- a/doc/examples/plain-ignored.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-#ignored
-Nothing will be done with this part by the UserDataHandler
diff --git a/doc/examples/seed/README b/doc/examples/seed/README
deleted file mode 100644
index cc15839e..00000000
--- a/doc/examples/seed/README
+++ /dev/null
@@ -1,22 +0,0 @@
-This directory is an example of a 'seed' directory.
-
-
-Copying these files into an instance's
- /var/lib/cloud/seed/nocloud
-or
- /var/lib/cloud/seed/nocloud-net
-
-will cause the 'DataSourceNoCloud' and 'DataSourceNoCloudNet' modules
-to enable and read the given data.
-
-The directory must have both files.
-
-- user-data:
- This is the user data, as would be consumed from ec2's metadata service
- see examples in doc/examples.
-- meta-data:
-  This file is yaml formatted data similar to what is in the ec2 metadata
- service under meta-data/. See the example, or, on an ec2 instance,
- run:
- python -c 'import boto.utils, yaml; print(
- yaml.dump(boto.utils.get_instance_metadata()))'
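A sketch that lays down such a seed directory (the paths are the documented ones; PyYAML is assumed for writing the meta-data, and root privileges for the path):

    import os
    import yaml

    def write_seed(seed_dir, meta_data, user_data):
        # both files must be present for the NoCloud datasource to activate
        os.makedirs(seed_dir, exist_ok=True)
        with open(os.path.join(seed_dir, 'meta-data'), 'w') as fp:
            yaml.safe_dump(meta_data, fp, default_flow_style=False)
        with open(os.path.join(seed_dir, 'user-data'), 'w') as fp:
            fp.write(user_data)

    write_seed('/var/lib/cloud/seed/nocloud-net',
               {'instance-id': 'i-seed-example',
                'local-hostname': 'seeded-host'},
               '#cloud-config\nruncmd:\n - [ sh, -c, "echo HI WORLD" ]\n')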
diff --git a/doc/examples/seed/meta-data b/doc/examples/seed/meta-data
deleted file mode 100644
index d0551448..00000000
--- a/doc/examples/seed/meta-data
+++ /dev/null
@@ -1,30 +0,0 @@
-# this is yaml formatted data
-# it is expected to be roughly what you would get from running the following
-# on an ec2 instance:
-# python -c 'import boto.utils, yaml; print(yaml.dump(boto.utils.get_instance_metadata()))'
-ami-id: ami-fd4aa494
-ami-launch-index: '0'
-ami-manifest-path: ubuntu-images-us/ubuntu-lucid-10.04-amd64-server-20100427.1.manifest.xml
-block-device-mapping: {ami: sda1, ephemeral0: sdb, ephemeral1: sdc, root: /dev/sda1}
-hostname: domU-12-31-38-07-19-44.compute-1.internal
-instance-action: none
-instance-id: i-87018aed
-instance-type: m1.large
-kernel-id: aki-c8b258a1
-local-hostname: domU-12-31-38-07-19-44.compute-1.internal
-local-ipv4: 10.223.26.178
-placement: {availability-zone: us-east-1d}
-public-hostname: ec2-184-72-174-120.compute-1.amazonaws.com
-public-ipv4: 184.72.174.120
-public-keys:
- ec2-keypair.us-east-1: [ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCD9dlT00vOUC8Ttq6YH8RzUCVqPQl6HaSfWSTKYnZiVCpTBj1CaRZPLRLmkSB9Nziy4aRJa/LZMbBHXytQKnB1psvNknqC2UNlrXXMk+Vx5S4vg21MXYYimK4uZEY0Qz29QUiTyNsx18jpAaF4ocUpTpRhxPEBCcSCDmMbc27MU2XuTbasM2NjW/w0bBF3ZFhdH68dZICXdTxS2jUrtrCnc1D/QXVZ5kQO3jsmSyJg8E0nE+6Onpx2YRoVRSwjpGzVZ+BlXPnN5xBREBG8XxzhNFHJbek+RgK5TfL+k4yD4XhnVZuZu53cBAFhj+xPKhtisSd+YmaEq+Jt9uS0Ekd5
- ec2-keypair.us-east-1, '']
-reservation-id: r-e2225889
-security-groups: default
-
-# of the fields above:
-# required:
-# instance-id
-# suggested:
-# local-hostname
-# public-keys
diff --git a/doc/examples/seed/user-data b/doc/examples/seed/user-data
deleted file mode 100644
index 2bc87c0b..00000000
--- a/doc/examples/seed/user-data
+++ /dev/null
@@ -1,3 +0,0 @@
-#cloud-config
-runcmd:
- - [ sh, -c, 'echo ==== $(date) ====; echo HI WORLD; echo =======' ]
diff --git a/doc/examples/upstart-cloud-config.txt b/doc/examples/upstart-cloud-config.txt
deleted file mode 100644
index 1fcec34d..00000000
--- a/doc/examples/upstart-cloud-config.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-#upstart-job
-description "My test job"
-
-start on cloud-config
-console output
-task
-
-script
-echo "====BEGIN======="
-echo "HELLO WORLD: $UPSTART_JOB"
-echo "=====END========"
-end script
diff --git a/doc/examples/upstart-rclocal.txt b/doc/examples/upstart-rclocal.txt
deleted file mode 100644
index 5cd049a9..00000000
--- a/doc/examples/upstart-rclocal.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-#upstart-job
-description "a test upstart job"
-
-start on stopped rc RUNLEVEL=[2345]
-console output
-task
-
-script
-echo "====BEGIN======="
-echo "HELLO RC.LOCAL LIKE WORLD: $UPSTART_JOB"
-echo "=====END========"
-end script
diff --git a/doc/examples/user-script.txt b/doc/examples/user-script.txt
deleted file mode 100644
index 6a87cad5..00000000
--- a/doc/examples/user-script.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-cat <<EOF
-============================
-My name is ${0}
-I was input via user data
-============================
-EOF
diff --git a/doc/merging.rst b/doc/merging.rst
deleted file mode 100644
index afe1a6dd..00000000
--- a/doc/merging.rst
+++ /dev/null
@@ -1,194 +0,0 @@
-Overview
---------
-
-This was implemented because it has been a common feature request that there be
-a way to specify how cloud-config yaml "dictionaries" provided as user-data are
-merged together when there are multiple yamls to merge together (say when
-performing an #include).
-
-Previously the merging algorithm was very simple: it would only overwrite and
-never append lists, strings, and so on. It was therefore decided to create a
-new and improved way to merge dictionaries (and their contained objects)
-together in a way that is customizable, thus allowing users who provide
-cloud-config user-data to determine exactly how their objects will be merged.
-
-For example:
-
-.. code-block:: yaml
-
- #cloud-config (1)
- run_cmd:
- - bash1
- - bash2
-
- #cloud-config (2)
- run_cmd:
- - bash3
- - bash4
-
-The previous way of merging the above 2 objects would result in a final
-cloud-config object that contains the following.
-
-.. code-block:: yaml
-
- #cloud-config (merged)
- run_cmd:
- - bash3
- - bash4
-
-Typically this is not what users want; instead, they would likely prefer:
-
-.. code-block:: yaml
-
- #cloud-config (merged)
- run_cmd:
- - bash1
- - bash2
- - bash3
- - bash4
-
-This way makes it easier to combine the various cloud-config objects you have
-into a more useful list, thus reducing duplication that would have had to
-occur in the previous method to accomplish the same result.
-
-Customizability
----------------
-
-Since the above merging algorithm may not always be the desired merging
-algorithm (like how the previous merging algorithm was not always the preferred
-one), the ability to customize how merging is done was introduced through
-a new concept called 'merge classes'.
-
-A merge class is a class definition which provides functions that can be used
-to merge a given type with another given type.
-
-An example of one of these merging classes is the following:
-
-.. code-block:: python
-
- class Merger(object):
- def __init__(self, merger, opts):
- self._merger = merger
- self._overwrite = 'overwrite' in opts
-
- # This merging algorithm will attempt to merge with
- # another dictionary, on encountering any other type of object
- # it will not merge with said object, but will instead return
- # the original value
- #
- # On encountering a dictionary, it will create a new dictionary
- # composed of the original and the one to merge with, if 'overwrite'
- # is enabled then keys that exist in the original will be overwritten
- # by keys in the one to merge with (and associated values). Otherwise
- # if not in overwrite mode the 2 conflicting keys themselves will
- # be merged.
- def _on_dict(self, value, merge_with):
- if not isinstance(merge_with, (dict)):
- return value
- merged = dict(value)
- for (k, v) in merge_with.items():
- if k in merged:
- if not self._overwrite:
- merged[k] = self._merger.merge(merged[k], v)
- else:
- merged[k] = v
- else:
- merged[k] = v
- return merged
-
-As you can see, there is a '_on_dict' method here that will be given a source
-value and a value to merge with. The result will be the merged object. This
-code itself is called by another merging class which 'directs' the merging to
-happen by analyzing the types of the objects to merge and attempting to find a
-known merger for that type. I will avoid pasting that here, but it can be found
-in the `mergers/__init__.py` file (see `LookupMerger` and `UnknownMerger`).
-
-So following the typical cloud-init way of allowing source code to be downloaded
-and used dynamically, it is possible for users to inject their own merging files
-to handle specific types of merging as they choose (the basic ones included will
-handle lists, dicts, and strings). Note how each merge can have options associated
-with it which affect how the merging is performed; for example, a dictionary merger
-can be told to overwrite instead of attempt to merge, or a string merger can be
-told to append strings instead of discarding other strings to merge with.
-
-How to activate
----------------
-
-There are a few ways to activate the merging algorithms, and to customize them
-for your own usage.
-
-1. The first way involves the usage of MIME messages in cloud-init to specify
- multipart documents (this is one way in which multiple cloud-config is joined
- together into a single cloud-config). Two new headers are looked for, both
- of which can define the way merging is done (the first header to exist wins).
- These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. The value
-   should be a string which will satisfy the new merging format definition (see
- below for this format).
-2. The second way is actually specifying the merge-type in the body of the
- cloud-config dictionary. There are 2 ways to specify this, either as a string
- or as a dictionary (see format below). The keys that are looked up for this
- definition are the following (in order), 'merge_how', 'merge_type'.
-
-String format
-*************
-
-The string format that is expected is the following.
-
-::
-
- classname1(option1,option2)+classname2(option3,option4)....
-
-Each class name will be matched against the class names used when looking up
-the class that can perform the merge, and the options provided will be given
-to that class on construction.
-
-For example, the default string that is used when none is provided is the following:
-
-::
-
- list()+dict()+str()
-
-Dictionary format
-*****************
-
-A dictionary can also be used to specify the same information as the string
-format (i.e. option #2 above), for example:
-
-.. code-block:: python
-
- {'merge_how': [{'name': 'list', 'settings': ['extend']},
- {'name': 'dict', 'settings': []},
- {'name': 'str', 'settings': ['append']}]}
-
-This is the equivalent of the default string format above, but in dictionary
-form instead of string form.
-
-Specifying multiple types and their effect
-------------------------------------------
-
-Now you may be asking yourself, if I specify a merge-type header or dictionary
-for every cloud-config that I provide, what exactly happens?
-
-The answer is that when merging, a stack of 'merging classes' is kept. The
-first entry on that stack is the default set of merging classes; this set of
-mergers is used when the first cloud-config is merged with the initial empty
-cloud-config dictionary. If the cloud-config that was just merged provided a
-set of merging classes (via the above formats) then those merging classes
-will be pushed onto the stack. If there is a second cloud-config to be
-merged, the merging classes provided by the previous cloud-config will be
-used (not the default ones), and so on. This way a cloud-config can decide
-how it will be merged with the cloud-config dictionaries coming after it.
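-
-A minimal sketch of that stack behavior (illustrative only, not cloud-init's
-actual code; 'default_mergers' and 'build_mergers' are stand-ins for the real
-construction logic):
-
-.. code-block:: python
-
-    def merge_all(cloud_configs, default_mergers, build_mergers):
-        merged = {}
-        mergers = [default_mergers]  # the stack starts with the defaults
-        for cfg in cloud_configs:
-            # merge using the mergers active *before* this config
-            merged = mergers[-1].merge(merged, cfg)
-            # a config's own declaration only affects later configs
-            if 'merge_how' in cfg or 'merge_type' in cfg:
-                mergers.append(build_mergers(cfg))
-        return merged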
-
-Other uses
-----------
-
-In addition to being used for merging user-data sections, the default merging
-algorithm for merging 'conf.d' yaml files (which form an initial yaml config
-for cloud-init) was also changed to use this mechanism, so its full benefits
-(and customization) can be used there as well. Other places that used the
-previous merging are now, similarly, extensible (metadata merging, for
-example).
-
-Note, however, that merge algorithms are not used *across* types of
-configuration. As was the case before merging was implemented,
-user-data will overwrite conf.d configuration without merging.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
deleted file mode 100644
index 8a391f21..00000000
--- a/doc/rtd/conf.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import os
-import sys
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../'))
-sys.path.insert(0, os.path.abspath('./'))
-sys.path.insert(0, os.path.abspath('.'))
-
-from cloudinit import version
-
-# Suppress warnings for docs that aren't used yet
-# unused_docs = [
-# ]
-
-# General information about the project.
-project = 'Cloud-Init'
-
-# -- General configuration ----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.viewcode',
-]
-
-intersphinx_mapping = {
- 'sphinx': ('http://sphinx.pocoo.org', None)
-}
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-version = version.version_string()
-release = version
-
-# Set the default Pygments syntax
-highlight_language = 'python'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-show_authors = False
-
-# -- Options for HTML output --------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-html_theme_options = {
- "bodyfont": "Ubuntu, Arial, sans-serif",
- "headfont": "Ubuntu, Arial, sans-serif"
-}
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-html_logo = 'static/logo.png'
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
deleted file mode 100644
index fe04b1a9..00000000
--- a/doc/rtd/index.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. _index:
-
-=====================
-Documentation
-=====================
-
-.. rubric:: Everything about cloud-init, a set of **python** scripts and utilities to make your cloud images be all they can be!
-
-Summary
------------------
-
-`Cloud-init`_ is the *de facto* multi-distribution package that handles early initialization of a cloud instance.
-
-
-----
-
-.. toctree::
- :maxdepth: 2
-
- topics/capabilities
- topics/availability
- topics/format
- topics/dir_layout
- topics/examples
- topics/datasources
- topics/modules
- topics/merging
- topics/moreinfo
- topics/hacking
-
-.. _Cloud-init: https://launchpad.net/cloud-init
diff --git a/doc/rtd/static/logo.png b/doc/rtd/static/logo.png
deleted file mode 100644
index e980fdea..00000000
--- a/doc/rtd/static/logo.png
+++ /dev/null
Binary files differ
diff --git a/doc/rtd/static/logo.svg b/doc/rtd/static/logo.svg
deleted file mode 100644
index 7a2ae21b..00000000
--- a/doc/rtd/static/logo.svg
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Generator: Adobe Illustrator 16.0.4, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
-<svg version="1.1" id="artwork" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
- width="1190.551px" height="841.89px" viewBox="0 0 1190.551 841.89" enable-background="new 0 0 1190.551 841.89"
- xml:space="preserve">
-<g>
- <g>
- <path fill="#000020" d="M219.086,722.686c-9.061,0-17.009-1.439-23.838-4.314c-6.833-2.875-12.586-6.902-17.258-12.082
- c-4.675-5.176-8.164-11.324-10.463-18.443c-2.302-7.119-3.452-14.992-3.452-23.623c0-8.629,1.257-16.537,3.775-23.73
- c2.515-7.189,6.074-13.408,10.679-18.658c4.601-5.25,10.247-9.348,16.935-12.299c6.688-2.945,14.13-4.422,22.328-4.422
- c5.033,0,10.065,0.432,15.101,1.295c5.033,0.863,9.85,2.232,14.454,4.1l-4.53,17.041c-3.021-1.436-6.509-2.588-10.463-3.451
- c-3.957-0.863-8.164-1.295-12.62-1.295c-11.218,0-19.813,3.527-25.779,10.572c-5.97,7.049-8.953,17.332-8.953,30.848
- c0,6.041,0.681,11.58,2.05,16.611c1.365,5.037,3.522,9.35,6.472,12.945c2.946,3.596,6.721,6.361,11.326,8.305
- c4.601,1.941,10.21,2.912,16.827,2.912c5.319,0,10.139-0.502,14.454-1.51c4.314-1.006,7.692-2.084,10.139-3.236l2.804,16.826
- c-1.153,0.723-2.804,1.402-4.962,2.051c-2.157,0.646-4.604,1.219-7.334,1.725c-2.734,0.502-5.646,0.934-8.737,1.295
- C224.944,722.504,221.961,722.686,219.086,722.686z"/>
- <path fill="#000020" d="M298.343,722.254c-12.371-0.289-21.141-2.945-26.319-7.982c-5.178-5.031-7.766-12.869-7.766-23.514
- V556.143l20.062-3.451v134.83c0,3.311,0.287,6.041,0.863,8.197c0.573,2.158,1.51,3.885,2.805,5.178
- c1.294,1.295,3.02,2.266,5.177,2.914c2.158,0.646,4.817,1.186,7.982,1.617L298.343,722.254z"/>
- <path fill="#000020" d="M415.288,664.008c0,8.92-1.294,16.971-3.883,24.162c-2.589,7.193-6.223,13.375-10.895,18.553
- c-4.675,5.176-10.247,9.168-16.72,11.971c-6.471,2.807-13.52,4.207-21.141,4.207c-7.624,0-14.669-1.4-21.141-4.207
- c-6.472-2.803-12.047-6.795-16.72-11.971c-4.675-5.178-8.305-11.359-10.894-18.553c-2.588-7.191-3.883-15.242-3.883-24.162
- c0-8.771,1.294-16.789,3.883-24.055c2.589-7.26,6.219-13.482,10.894-18.66c4.673-5.178,10.248-9.168,16.72-11.973
- c6.472-2.805,13.517-4.207,21.141-4.207c7.621,0,14.67,1.402,21.141,4.207c6.473,2.805,12.045,6.795,16.72,11.973
- c4.672,5.178,8.306,11.4,10.895,18.66C413.994,647.219,415.288,655.236,415.288,664.008z M394.362,664.008
- c0-12.654-2.841-22.686-8.521-30.094c-5.684-7.406-13.412-11.111-23.191-11.111c-9.781,0-17.512,3.705-23.19,11.111
- c-5.683,7.408-8.521,17.439-8.521,30.094c0,12.656,2.838,22.688,8.521,30.094c5.679,7.41,13.409,11.109,23.19,11.109
- c9.779,0,17.508-3.699,23.191-11.109C391.521,686.695,394.362,676.664,394.362,664.008z"/>
- <path fill="#000020" d="M527.186,716.861c-4.604,1.152-10.678,2.373-18.229,3.666c-7.551,1.295-16.288,1.943-26.211,1.943
- c-8.629,0-15.893-1.262-21.789-3.777c-5.898-2.514-10.645-6.072-14.238-10.678c-3.596-4.602-6.184-10.031-7.766-16.287
- c-1.584-6.256-2.373-13.193-2.373-20.818v-62.992h20.062v58.678c0,13.666,2.158,23.443,6.472,29.34
- c4.315,5.898,11.575,8.844,21.789,8.844c2.157,0,4.386-0.07,6.688-0.215c2.299-0.141,4.455-0.324,6.471-0.539
- c2.012-0.215,3.846-0.432,5.501-0.648c1.652-0.215,2.839-0.465,3.56-0.754v-94.705h20.062V716.861z"/>
- <path fill="#000020" d="M628.963,556.143l20.064-3.451v164.17c-4.605,1.293-10.502,2.588-17.691,3.883
- c-7.193,1.295-15.461,1.941-24.809,1.941c-8.629,0-16.396-1.369-23.298-4.098c-6.903-2.732-12.802-6.615-17.69-11.65
- c-4.891-5.033-8.666-11.182-11.326-18.445c-2.662-7.26-3.99-15.424-3.99-24.484c0-8.629,1.113-16.537,3.344-23.73
- c2.229-7.189,5.502-13.375,9.816-18.553c4.313-5.178,9.6-9.201,15.855-12.08c6.256-2.875,13.409-4.314,21.465-4.314
- c6.471,0,12.188,0.863,17.15,2.588c4.961,1.727,8.662,3.381,11.109,4.963V556.143z M628.963,631.648
- c-2.447-2.014-5.969-3.953-10.57-5.824c-4.604-1.867-9.637-2.805-15.102-2.805c-5.754,0-10.678,1.045-14.776,3.127
- c-4.1,2.088-7.443,4.963-10.033,8.631c-2.588,3.666-4.459,8.018-5.607,13.051c-1.153,5.035-1.727,10.43-1.727,16.18
- c0,13.088,3.236,23.189,9.708,30.311c6.472,7.119,15.101,10.678,25.888,10.678c5.463,0,10.031-0.25,13.697-0.756
- c3.668-0.502,6.506-1.041,8.521-1.617V631.648z"/>
- <path fill="#000020" d="M671.375,646.102h53.283v18.77h-53.283V646.102z"/>
- <path fill="#000020" d="M755.745,587.641c-3.598,0-6.654-1.188-9.168-3.561c-2.52-2.373-3.777-5.57-3.777-9.6
- c0-4.025,1.258-7.227,3.777-9.6c2.514-2.373,5.57-3.559,9.168-3.559c3.592,0,6.65,1.186,9.168,3.559
- c2.516,2.373,3.775,5.574,3.775,9.6c0,4.029-1.26,7.227-3.775,9.6C762.395,586.453,759.336,587.641,755.745,587.641z
- M765.883,720.098h-20.062v-112.18h20.062V720.098z"/>
- <path fill="#000020" d="M794.401,611.154c4.602-1.148,10.713-2.373,18.336-3.668c7.621-1.293,16.396-1.941,26.32-1.941
- c8.914,0,16.32,1.262,22.219,3.775c5.896,2.518,10.605,6.041,14.131,10.57c3.523,4.531,6.002,9.961,7.441,16.287
- c1.438,6.332,2.158,13.305,2.158,20.926v62.994h-20.062v-58.68c0-6.902-0.469-12.797-1.402-17.689
- c-0.938-4.887-2.48-8.844-4.639-11.863c-2.156-3.021-5.035-5.213-8.629-6.58c-3.596-1.365-8.055-2.051-13.375-2.051
- c-2.156,0-4.389,0.074-6.688,0.217c-2.303,0.145-4.496,0.322-6.58,0.539c-2.086,0.215-3.957,0.469-5.609,0.756
- c-1.654,0.289-2.84,0.504-3.559,0.646v94.705h-20.062V611.154z"/>
- <path fill="#000020" d="M922.088,587.641c-3.598,0-6.654-1.188-9.168-3.561c-2.52-2.373-3.777-5.57-3.777-9.6
- c0-4.025,1.258-7.227,3.777-9.6c2.514-2.373,5.57-3.559,9.168-3.559c3.592,0,6.65,1.186,9.168,3.559
- c2.516,2.373,3.775,5.574,3.775,9.6c0,4.029-1.26,7.227-3.775,9.6C928.739,586.453,925.68,587.641,922.088,587.641z
- M932.227,720.098h-20.062v-112.18h20.062V720.098z"/>
- <path fill="#000020" d="M979.663,607.918h42.5v16.828h-42.5v51.773c0,5.609,0.432,10.248,1.295,13.914
- c0.863,3.668,2.158,6.547,3.883,8.631c1.727,2.086,3.883,3.559,6.473,4.422c2.588,0.863,5.607,1.293,9.061,1.293
- c6.041,0,10.895-0.68,14.561-2.049c3.668-1.365,6.221-2.336,7.658-2.912l3.885,16.611c-2.016,1.008-5.539,2.264-10.57,3.775
- c-5.037,1.51-10.787,2.266-17.26,2.266c-7.625,0-13.914-0.971-18.877-2.914c-4.961-1.941-8.951-4.854-11.971-8.736
- c-3.021-3.883-5.145-8.662-6.365-14.346c-1.223-5.68-1.834-12.26-1.834-19.738V576.639l20.062-3.453V607.918z"/>
- </g>
- <g>
- <path fill="#E95420" d="M595.275,150.171c-93.932,0-170.078,76.146-170.078,170.079c0,53.984,25.157,102.088,64.381,133.245
- v-37.423c0-8.807,7.137-15.943,15.943-15.943s15.947,7.137,15.947,15.943v57.45c3.478,1.678,7.024,3.233,10.629,4.677v-62.127
- c0-8.807,7.139-15.943,15.943-15.943c8.807,0,15.945,7.137,15.945,15.943v71.374c3.508,0.652,7.053,1.198,10.633,1.631v-73.005
- c0-8.807,7.139-15.943,15.945-15.943c8.805,0,15.944,7.137,15.944,15.943v73.878c3.572-0.232,7.119-0.565,10.629-1.016V352.287
- c0-8.801,7.137-15.943,15.943-15.943s15.943,7.143,15.943,15.943v129.365c67.59-22.497,116.33-86.255,116.33-161.402
- C765.354,226.317,689.208,150.171,595.275,150.171z"/>
- <path fill="#FFFFFF" d="M696.856,320.25H569.303c-21.133,0-38.27-17.125-38.27-38.27c0-20.339,15.871-36.965,35.898-38.192
- c-8.953-17.953-2.489-40.012,15.128-50.188c17.611-10.165,39.949-4.739,51.019,11.997c11.076-16.736,33.416-22.162,51.025-11.994
- c17.621,10.173,24.08,32.226,15.119,50.185c20.037,1.228,35.906,17.854,35.906,38.192
- C735.129,303.125,717.993,320.25,696.856,320.25z"/>
- <g>
- <path fill="#E95420" d="M633.014,271.05c4.074,0,7.375-3.302,7.375-7.371v-34.547c0-4.07-3.301-7.37-7.375-7.37
- c-4.068,0-7.369,3.3-7.369,7.37v34.547C625.645,267.748,628.946,271.05,633.014,271.05z"/>
- <path fill="#E95420" d="M650.254,238.483c-3.043,2.7-3.316,7.36-0.615,10.405c7.746,8.728,7.348,22.036-0.916,30.302
- c-4.174,4.176-9.729,6.476-15.639,6.476c-5.9,0-11.457-2.303-15.637-6.479c-8.291-8.291-8.67-21.625-0.863-30.356
- c2.715-3.034,2.455-7.695-0.578-10.409c-3.035-2.712-7.699-2.453-10.41,0.582c-13.02,14.556-12.391,36.787,1.43,50.607
- c6.961,6.963,16.211,10.797,26.059,10.797c0,0,0,0,0.002,0c9.846,0,19.098-3.831,26.064-10.793
- c13.77-13.776,14.432-35.96,1.512-50.514C657.961,236.055,653.299,235.777,650.254,238.483z"/>
- <path fill="#E95420" d="M632.788,344.26c-4.4,0-7.969,3.568-7.969,7.969c0,4.406,3.568,7.975,7.969,7.975
- c4.406,0,7.975-3.568,7.975-7.975C640.762,347.828,637.194,344.26,632.788,344.26z"/>
- </g>
- </g>
-</g>
-</svg>
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
deleted file mode 100644
index 2d58f808..00000000
--- a/doc/rtd/topics/availability.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-============
-Availability
-============
-
-Cloud-init is currently installed in the `Ubuntu Cloud Images`_ and also in
-the official `Ubuntu`_ images available on EC2.
-
-Versions for other systems can be (or have been) created for the following distributions:
-
-- Ubuntu
-- Fedora
-- Debian
-- RHEL
-- CentOS
-- *and more...*
-
-So ask your distribution provider where you can obtain an image with it built-in if one is not already available ☺
-
-
-.. _Ubuntu Cloud Images: http://cloud-images.ubuntu.com/
-.. _Ubuntu: http://www.ubuntu.com/
diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst
deleted file mode 100644
index 63b34270..00000000
--- a/doc/rtd/topics/capabilities.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-=====================
-Capabilities
-=====================
-
-- Setting a default locale
-- Setting an instance hostname
-- Generating instance ssh private keys
-- Adding ssh keys to a user's ``.ssh/authorized_keys`` so they can log in
-- Setting up ephemeral mount points
-
-User configurability
---------------------
-
-`Cloud-init`_ 's behavior can be configured via user-data.
-
- User-data can be given by the user at instance launch time.
-
-This is done via the ``--user-data`` or ``--user-data-file`` argument to
-ec2-run-instances, for example.
-
-* Check your local client's documentation for how to provide a `user-data`
-  string or `user-data` file for usage by cloud-init on instance creation.
-
-
-.. _Cloud-init: https://launchpad.net/cloud-init
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
deleted file mode 100644
index 0d7d4aca..00000000
--- a/doc/rtd/topics/datasources.rst
+++ /dev/null
@@ -1,200 +0,0 @@
-.. _datasources:
-
-===========
-Datasources
-===========
-
----------------------
-What is a datasource?
----------------------
-
-Datasources are sources of configuration data for cloud-init that typically
-come from the user (aka userdata) or from the stack that created the
-configuration drive (aka metadata). Typical userdata includes files, yaml,
-and shell scripts, while typical metadata includes the server name, instance
-id, display name, and other cloud specific details. Since there are multiple
-ways to provide this data (each cloud solution seems to prefer its own way),
-a datasource abstract class was created internally to provide a single way to
-access the different cloud systems' data through the typical usage of
-subclasses.
-
-The current interface that a datasource object must provide is the following:
-
-.. sourcecode:: python
-
- # returns a mime multipart message that contains
- # all the various fully-expanded components that
- # were found from processing the raw userdata string
- # - when filtering only the mime messages targeting
- # this instance id will be returned (or messages with
- # no instance id)
- def get_userdata(self, apply_filter=False)
-
-    # returns the raw userdata string (or None)
- def get_userdata_raw(self)
-
-    # returns an integer (or None) which can be used to identify
-    # this instance in a group of instances which are typically
-    # created from a single command, thus allowing programmatic
- # filtering on this launch index (or other selective actions)
- @property
- def launch_index(self)
-
-    # the datasource's config_obj is a cloud-config formatted
-    # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self)
-
-    # returns a list of public ssh keys
- def get_public_ssh_keys(self)
-
- # translates a device 'short' name into the actual physical device
-    # fully qualified name (or None if said physical device is not attached
- # or does not exist)
- def device_name_to_device(self, name)
-
-    # gets the locale string this instance should be applying,
-    # which is typically used to adjust the instance's locale settings files
- def get_locale(self)
-
- @property
- def availability_zone(self)
-
- # gets the instance id that was assigned to this instance by the
-    # cloud provider; when said instance id does not exist in the backing
-    # metadata this will return 'iid-datasource'
- def get_instance_id(self)
-
- # gets the fully qualified domain name that this host should be using
-    # when configuring network or hostname related settings, typically
- # assigned either by the cloud provider or the user creating the vm
- def get_hostname(self, fqdn=False)
-
- def get_package_mirror_info(self)
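-
-As a minimal illustration, a new datasource might subclass the base class
-roughly as follows (a sketch only; the import path and attribute names are
-assumptions for illustration, not a complete implementation):
-
-.. sourcecode:: python
-
-    from cloudinit.sources import DataSource  # assumed location
-
-    class DataSourceExample(DataSource):
-        def get_data(self):
-            # a real datasource would contact its platform here
-            self.metadata = {'instance-id': 'iid-example01',
-                             'local-hostname': 'example'}
-            self.userdata_raw = "#cloud-config\n{}"
-            return True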
-
----------------------------
-EC2
----------------------------
-
-The EC2 datasource is the oldest and most widely used datasource that
-cloud-init supports. This datasource interacts with a *magic* ip that is
-provided to the instance by the cloud provider. Typically this ip is
-``169.254.169.254``; at this ip an http server is provided to the instance
-so that the instance can make calls to get instance userdata and instance
-metadata.
-
-Metadata is accessible via the following URL:
-
-::
-
- GET http://169.254.169.254/2009-04-04/meta-data/
- ami-id
- ami-launch-index
- ami-manifest-path
- block-device-mapping/
- hostname
- instance-id
- instance-type
- local-hostname
- local-ipv4
- placement/
- public-hostname
- public-ipv4
- public-keys/
- reservation-id
- security-groups
-
-Userdata is accessible via the following URL:
-
-::
-
- GET http://169.254.169.254/2009-04-04/user-data
- 1234,fred,reboot,true | 4512,jimbo, | 173,,,
-
-Note that there are multiple versions of this data provided; cloud-init
-by default uses **2009-04-04**, but newer versions can be supported with
-relative ease (newer versions have more data exposed, while maintaining
-backward compatibility with the previous versions).
-
-To see which versions are supported by your cloud provider, use the following URL:
-
-::
-
- GET http://169.254.169.254/
- 1.0
- 2007-01-19
- 2007-03-01
- 2007-08-29
- 2007-10-10
- 2007-12-15
- 2008-02-01
- 2008-09-01
- 2009-04-04
- ...
- latest
-
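-
-As a quick illustration, this data can be inspected from inside an instance
-with nothing but the standard library (a sketch only; a real consumer should
-add error handling and timeouts):
-
-.. sourcecode:: python
-
-    try:
-        from urllib.request import urlopen  # python 3
-    except ImportError:
-        from urllib2 import urlopen  # python 2
-
-    base_url = 'http://169.254.169.254/2009-04-04/'
-    print(urlopen(base_url + 'meta-data/instance-id').read())
-    print(urlopen(base_url + 'user-data').read())
-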
----------------------------
-Config Drive
----------------------------
-
-.. include:: ../../sources/configdrive/README.rst
-
----------------------------
-OpenNebula
----------------------------
-
-.. include:: ../../sources/opennebula/README.rst
-
----------------------------
-Alt cloud
----------------------------
-
-.. include:: ../../sources/altcloud/README.rst
-
----------------------------
-No cloud
----------------------------
-
-.. include:: ../../sources/nocloud/README.rst
-
----------------------------
-MAAS
----------------------------
-
-*TODO*
-
-For now see: http://maas.ubuntu.com/
-
----------------------------
-CloudStack
----------------------------
-
-.. include:: ../../sources/cloudstack/README.rst
-
----------------------------
-OVF
----------------------------
-
-*TODO*
-
-For now see: https://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/files/head:/doc/sources/ovf/
-
----------------------------
-OpenStack
----------------------------
-
-.. include:: ../../sources/openstack/README.rst
-
----------------------------
-Fallback/None
----------------------------
-
-This is the fallback datasource when no other datasource can be selected. It
-is the equivalent of an *empty* datasource in that it provides an empty
-string as userdata and an empty dictionary as metadata. It is useful for
-testing as well as for when you do not need an actual datasource to meet
-your instance requirements (i.e. you just want to run modules that are not
-concerned with any external data). It is typically put at the end of the
-datasource search list so that, if no other datasource is matched, this one
-will be, and the user is not left with an inaccessible instance.
-
-**Note:** the instance id that this datasource provides is ``iid-datasource-none``.
-
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
deleted file mode 100644
index 8815d33d..00000000
--- a/doc/rtd/topics/dir_layout.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-================
-Directory layout
-================
-
-Cloud-init's directory structure is somewhat different from a regular application::
-
- /var/lib/cloud/
- - data/
- - instance-id
- - previous-instance-id
- - datasource
- - previous-datasource
- - previous-hostname
- - handlers/
- - instance
- - instances/
- i-00000XYZ/
- - boot-finished
- - cloud-config.txt
- - datasource
- - handlers/
- - obj.pkl
- - scripts/
- - sem/
- - user-data.txt
- - user-data.txt.i
- - scripts/
- - per-boot/
- - per-instance/
- - per-once/
- - seed/
- - sem/
-
-``/var/lib/cloud``
-
- The main directory containing the cloud-init specific subdirectories.
- It is typically located at ``/var/lib`` but there are certain configuration
- scenarios where this can be altered.
-
- TBD, describe this overriding more.
-
-``data/``
-
-  Contains information related to instance ids, datasources and hostnames of
-  the previous and current instance if they are different. These can be
-  examined as needed to determine any information related to a previous boot
-  (if applicable).
-
-``handlers/``
-
-  Custom ``part-handlers`` code is written out here. Files that end up here
-  are written out using the scheme ``part-handler-XYZ`` where ``XYZ`` is the
-  handler number (the first handler found starts at 0).
-
-
-``instance``
-
-  A symlink to the current ``instances/`` subdirectory that points to the
-  currently active instance (which instance is active depends on the
-  datasource loaded).
-
-``instances/``
-
-  All instances that were created using this image end up with instance
-  identifier subdirectories (and corresponding data for each instance). The
-  currently active instance will be pointed to by the ``instance`` symlink
-  defined previously.
-
-``scripts/``
-
- Scripts that are downloaded/created by the corresponding ``part-handler`` will end up
- in one of these subdirectories.
-
-``seed/``
-
- TBD
-
-``sem/``
-
-  Cloud-init has a concept of a module semaphore, which basically consists
-  of the module name and its frequency. These files are used to ensure a
-  module is only run `per-once`, `per-instance`, or `per-always`. This folder
-  contains semaphore `files` which are only supposed to run `per-once` (not
-  tied to the instance id).
-
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
deleted file mode 100644
index 36508bde..00000000
--- a/doc/rtd/topics/examples.rst
+++ /dev/null
@@ -1,133 +0,0 @@
-.. _yaml_examples:
-
-=====================
-Cloud config examples
-=====================
-
-Including users and groups
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-user-groups.txt
- :language: yaml
- :linenos:
-
-
-Writing out arbitrary files
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-write-files.txt
- :language: yaml
- :linenos:
-
-
-Adding a yum repository
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-yum-repo.txt
- :language: yaml
- :linenos:
-
-Configure an instance's trusted CA certificates
-------------------------------------------------------
-
-.. literalinclude:: ../../examples/cloud-config-ca-certs.txt
- :language: yaml
- :linenos:
-
-Configure an instance's resolv.conf
-------------------------------------------------------
-
-*Note:* when using a config drive on a RHEL-like system, resolv.conf will
-also be managed 'automatically' due to the available information provided
-for dns servers in the config drive network format. Those who wish to have
-different settings can use this module.
-
-.. literalinclude:: ../../examples/cloud-config-resolv-conf.txt
- :language: yaml
- :linenos:
-
-Install and run `chef`_ recipes
-------------------------------------------------------
-
-.. literalinclude:: ../../examples/cloud-config-chef.txt
- :language: yaml
- :linenos:
-
-Setup and run `puppet`_
-------------------------------------------------------
-
-.. literalinclude:: ../../examples/cloud-config-puppet.txt
- :language: yaml
- :linenos:
-
-Add apt repositories
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-add-apt-repos.txt
- :language: yaml
- :linenos:
-
-Run commands on first boot
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-boot-cmds.txt
- :language: yaml
- :linenos:
-
-.. literalinclude:: ../../examples/cloud-config-run-cmds.txt
- :language: yaml
- :linenos:
-
-
-Alter the completion message
-----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-final-message.txt
- :language: yaml
- :linenos:
-
-Install arbitrary packages
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-install-packages.txt
- :language: yaml
- :linenos:
-
-Run apt or yum upgrade
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-update-packages.txt
- :language: yaml
- :linenos:
-
-Adjust mount points mounted
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-mount-points.txt
- :language: yaml
- :linenos:
-
-Call a url when finished
----------------------------
-
-.. literalinclude:: ../../examples/cloud-config-phone-home.txt
- :language: yaml
- :linenos:
-
-Reboot/poweroff when finished
------------------------------
-
-.. literalinclude:: ../../examples/cloud-config-power-state.txt
- :language: yaml
- :linenos:
-
-Configure an instance's ssh keys
---------------------------------
-
-.. literalinclude:: ../../examples/cloud-config-ssh-keys.txt
- :language: yaml
- :linenos:
-
-
-.. _chef: http://www.opscode.com/chef/
-.. _puppet: http://puppetlabs.com/
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
deleted file mode 100644
index eba9533f..00000000
--- a/doc/rtd/topics/format.rst
+++ /dev/null
@@ -1,159 +0,0 @@
-=========
-Formats
-=========
-
-User data that will be acted upon by cloud-init must be in one of the following types.
-
-Gzip Compressed Content
-------------------------
-
-Content found to be gzip compressed will be uncompressed.
-The uncompressed data will then be used as if it were not compressed.
-This is typically useful because user-data is limited to ~16384 [#]_ bytes.
-
-Mime Multi Part Archive
-------------------------
-
-Using a mime multi-part file, the user can specify more than one type of
-data; the set of rules described in this document is applied to each part
-of the multi-part file.
-
-For example, both a user data script and a cloud-config type could be specified.
-
-Supported content-types:
-
-- text/x-include-once-url
-- text/x-include-url
-- text/cloud-config-archive
-- text/upstart-job
-- text/cloud-config
-- text/part-handler
-- text/x-shellscript
-- text/cloud-boothook
-
-Helper script to generate mime messages
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: python
-
- #!/usr/bin/python
-
- import sys
-
- from email.mime.multipart import MIMEMultipart
- from email.mime.text import MIMEText
-
- if len(sys.argv) == 1:
- print("%s input-file:type ..." % (sys.argv[0]))
- sys.exit(1)
-
- combined_message = MIMEMultipart()
- for i in sys.argv[1:]:
- (filename, format_type) = i.split(":", 1)
- with open(filename) as fh:
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition', 'attachment; filename="%s"' % (filename))
- combined_message.attach(sub_message)
-
- print(combined_message)
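-
-For example, if the script above is saved as ``make-mime.py`` (a hypothetical
-name), it could be invoked like this to produce a combined user-data file:
-
-::
-
-    % python make-mime.py config.txt:cloud-config boot.sh:x-shellscript > user-data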
-
-
-User-Data Script
-------------------------
-
-Typically used by those who just want to execute a shell script.
-
-Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive.
-
-Example
-~~~~~~~
-
-::
-
- $ cat myscript.sh
-
- #!/bin/sh
- echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt
-
- $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
-
-Include File
-------------
-
-This content is an ``include`` file.
-
-The file contains a list of urls, one per line.
-Each of the URLs will be read, and their content will be passed through this
-same set of rules; i.e., the content read from the URL can be gzipped,
-mime-multi-part, or plain text.
-
-Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using a MIME archive.
-
-Cloud Config Data
------------------
-
-Cloud-config is the simplest way to accomplish some things
-via user-data. Using cloud-config syntax, the user can specify certain things in a human friendly format.
-
-These things include:
-
-- apt upgrade should be run on first boot
-- a different apt mirror should be used
-- additional apt sources should be added
-- certain ssh keys should be imported
-- *and many more...*
-
-**Note:** The file must be valid yaml syntax.
-
-See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats.
-
-Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive.
-
-Upstart Job
------------
-
-Content is placed into a file in ``/etc/init``, and will be consumed by upstart as any other upstart job.
-
-Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using a MIME archive.
-
-Cloud Boothook
---------------
-
-This content is ``boothook`` data. It is stored in a file under
-``/var/lib/cloud`` and then executed immediately. This is the earliest
-``hook`` available. Note that there is no mechanism provided for running it
-only once; the boothook must take care of this itself. It is provided with
-the instance id in the environment variable ``INSTANCE_ID``. This could be
-made use of to provide a 'once-per-instance' type of functionality.
-
-Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive.
-
-Part Handler
-------------
-
-This is a ``part-handler``. It will be written to a file in
-``/var/lib/cloud/data`` based on its filename (which is generated).
-This must be python code that contains a ``list_types`` method and a
-``handle_part`` method. Once the section is read, the ``list_types`` method
-will be called. It must return a list of mime-types that this part-handler
-handles.
-
-The ``handle_part`` method must be like:
-
-.. code-block:: python
-
- def handle_part(data, ctype, filename, payload):
-        # data = the cloudinit object
-        # ctype = "__begin__", "__end__", or the mime-type of the part
-        #         that is being handled
-        # filename = the filename of the part (or a generated filename
-        #            if none is present in mime data)
-        # payload = the part's content
-
-Cloud-init will then call the ``handle_part`` method once at begin, once per
-part received, and once at end. The ``begin`` and ``end`` calls are to allow
-the part handler to do initialization or teardown.
-
-Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive.
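-
-For illustration, a complete minimal part-handler might look like the
-following sketch (the mime-type and output path are hypothetical; the bundled
-example below is the authoritative reference):
-
-.. code-block:: python
-
-    #part-handler
-
-    def list_types():
-        # return a list of mime-types that this part-handler handles
-        return ["text/x-my-part"]
-
-    def handle_part(data, ctype, filename, payload):
-        if ctype in ("__begin__", "__end__"):
-            return  # nothing to initialize or tear down
-        with open("/tmp/%s" % filename, "w") as fh:
-            fh.write(payload)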
-
-Example
-~~~~~~~
-
-.. literalinclude:: ../../examples/part-handler.txt
- :language: python
- :linenos:
-
-Also, this `blog`_ post offers another example of more advanced usage.
-
-.. [#] See your cloud provider for applicable user-data size limitations...
-.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
diff --git a/doc/rtd/topics/hacking.rst b/doc/rtd/topics/hacking.rst
deleted file mode 100644
index 96ab88ef..00000000
--- a/doc/rtd/topics/hacking.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../../../HACKING.rst
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
deleted file mode 100644
index 2bd87b16..00000000
--- a/doc/rtd/topics/merging.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-==========================
-Merging User-Data Sections
-==========================
-
-.. include:: ../../merging.rst
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
deleted file mode 100644
index 4202338b..00000000
--- a/doc/rtd/topics/modules.rst
+++ /dev/null
@@ -1,342 +0,0 @@
-=======
-Modules
-=======
-
-Apt Configure
--------------
-
-**Internal name:** ``cc_apt_configure``
-
-.. automodule:: cloudinit.config.cc_apt_configure
-
-Apt Pipelining
---------------
-
-**Internal name:** ``cc_apt_pipelining``
-
-.. automodule:: cloudinit.config.cc_apt_pipelining
-
-Bootcmd
--------
-
-**Internal name:** ``cc_bootcmd``
-
-.. automodule:: cloudinit.config.cc_bootcmd
-
-Byobu
------
-
-**Internal name:** ``cc_byobu``
-
-.. automodule:: cloudinit.config.cc_byobu
-
-Ca Certs
---------
-
-**Internal name:** ``cc_ca_certs``
-
-.. automodule:: cloudinit.config.cc_ca_certs
-
-Chef
-----
-
-**Internal name:** ``cc_chef``
-
-.. automodule:: cloudinit.config.cc_chef
- :members:
-
-Debug
------
-
-**Internal name:** ``cc_debug``
-
-.. automodule:: cloudinit.config.cc_debug
- :members:
-
-Disable Ec2 Metadata
---------------------
-
-**Internal name:** ``cc_disable_ec2_metadata``
-
-.. automodule:: cloudinit.config.cc_disable_ec2_metadata
-
-Disk Setup
-----------
-
-**Internal name:** ``cc_disk_setup``
-
-.. automodule:: cloudinit.config.cc_disk_setup
-
-Emit Upstart
-------------
-
-**Internal name:** ``cc_emit_upstart``
-
-.. automodule:: cloudinit.config.cc_emit_upstart
-
-Final Message
--------------
-
-**Internal name:** ``cc_final_message``
-
-.. automodule:: cloudinit.config.cc_final_message
-
-Foo
----
-
-**Internal name:** ``cc_foo``
-
-.. automodule:: cloudinit.config.cc_foo
-
-Growpart
---------
-
-**Internal name:** ``cc_growpart``
-
-.. automodule:: cloudinit.config.cc_growpart
-
-Grub Dpkg
----------
-
-**Internal name:** ``cc_grub_dpkg``
-
-.. automodule:: cloudinit.config.cc_grub_dpkg
-
-Keys To Console
----------------
-
-**Internal name:** ``cc_keys_to_console``
-
-.. automodule:: cloudinit.config.cc_keys_to_console
-
-Landscape
----------
-
-**Internal name:** ``cc_landscape``
-
-.. automodule:: cloudinit.config.cc_landscape
-
-Locale
-------
-
-**Internal name:** ``cc_locale``
-
-.. automodule:: cloudinit.config.cc_locale
-
-Mcollective
------------
-
-**Internal name:** ``cc_mcollective``
-
-.. automodule:: cloudinit.config.cc_mcollective
-
-Migrator
---------
-
-**Internal name:** ``cc_migrator``
-
-.. automodule:: cloudinit.config.cc_migrator
-
-Mounts
-------
-
-**Internal name:** ``cc_mounts``
-
-.. automodule:: cloudinit.config.cc_mounts
-
-Package Update Upgrade Install
-------------------------------
-
-**Internal name:** ``cc_package_update_upgrade_install``
-
-.. automodule:: cloudinit.config.cc_package_update_upgrade_install
-
-Phone Home
-----------
-
-**Internal name:** ``cc_phone_home``
-
-.. automodule:: cloudinit.config.cc_phone_home
-
-Power State Change
-------------------
-
-**Internal name:** ``cc_power_state_change``
-
-.. automodule:: cloudinit.config.cc_power_state_change
-
-Puppet
-------
-
-**Internal name:** ``cc_puppet``
-
-.. automodule:: cloudinit.config.cc_puppet
-
-Resizefs
---------
-
-**Internal name:** ``cc_resizefs``
-
-.. automodule:: cloudinit.config.cc_resizefs
-
-Resolv Conf
------------
-
-**Internal name:** ``cc_resolv_conf``
-
-.. automodule:: cloudinit.config.cc_resolv_conf
-
-Rightscale Userdata
--------------------
-
-**Internal name:** ``cc_rightscale_userdata``
-
-.. automodule:: cloudinit.config.cc_rightscale_userdata
-
-Rsyslog
--------
-
-**Internal name:** ``cc_rsyslog``
-
-.. automodule:: cloudinit.config.cc_rsyslog
-
-Runcmd
-------
-
-**Internal name:** ``cc_runcmd``
-
-.. automodule:: cloudinit.config.cc_runcmd
-
-Salt Minion
------------
-
-**Internal name:** ``cc_salt_minion``
-
-.. automodule:: cloudinit.config.cc_salt_minion
-
-Scripts Per Boot
-----------------
-
-**Internal name:** ``cc_scripts_per_boot``
-
-.. automodule:: cloudinit.config.cc_scripts_per_boot
-
-Scripts Per Instance
---------------------
-
-**Internal name:** ``cc_scripts_per_instance``
-
-.. automodule:: cloudinit.config.cc_scripts_per_instance
-
-Scripts Per Once
-----------------
-
-**Internal name:** ``cc_scripts_per_once``
-
-.. automodule:: cloudinit.config.cc_scripts_per_once
-
-Scripts User
-------------
-
-**Internal name:** ``cc_scripts_user``
-
-.. automodule:: cloudinit.config.cc_scripts_user
-
-Scripts Vendor
---------------
-
-**Internal name:** ``cc_scripts_vendor``
-
-.. automodule:: cloudinit.config.cc_scripts_vendor
-
-Seed Random
------------
-
-**Internal name:** ``cc_seed_random``
-
-.. automodule:: cloudinit.config.cc_seed_random
-
-Set Hostname
-------------
-
-**Internal name:** ``cc_set_hostname``
-
-.. automodule:: cloudinit.config.cc_set_hostname
-
-Set Passwords
--------------
-
-**Internal name:** ``cc_set_passwords``
-
-.. automodule:: cloudinit.config.cc_set_passwords
-
-Ssh
----
-
-**Internal name:** ``cc_ssh``
-
-.. automodule:: cloudinit.config.cc_ssh
-
-Ssh Authkey Fingerprints
-------------------------
-
-**Internal name:** ``cc_ssh_authkey_fingerprints``
-
-.. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints
-
-Ssh Import Id
--------------
-
-**Internal name:** ``cc_ssh_import_id``
-
-.. automodule:: cloudinit.config.cc_ssh_import_id
-
-Timezone
---------
-
-**Internal name:** ``cc_timezone``
-
-.. automodule:: cloudinit.config.cc_timezone
-
-Ubuntu Init Switch
-------------------
-
-**Internal name:** ``cc_ubuntu_init_switch``
-
-.. automodule:: cloudinit.config.cc_ubuntu_init_switch
- :members:
-
-Update Etc Hosts
-----------------
-
-**Internal name:** ``cc_update_etc_hosts``
-
-.. automodule:: cloudinit.config.cc_update_etc_hosts
-
-Update Hostname
----------------
-
-**Internal name:** ``cc_update_hostname``
-
-.. automodule:: cloudinit.config.cc_update_hostname
-
-Users Groups
-------------
-
-**Internal name:** ``cc_users_groups``
-
-.. automodule:: cloudinit.config.cc_users_groups
-
-Write Files
------------
-
-**Internal name:** ``cc_write_files``
-
-.. automodule:: cloudinit.config.cc_write_files
-
-Yum Add Repo
-------------
-
-**Internal name:** ``cc_yum_add_repo``
-
-.. automodule:: cloudinit.config.cc_yum_add_repo
diff --git a/doc/rtd/topics/moreinfo.rst b/doc/rtd/topics/moreinfo.rst
deleted file mode 100644
index 19e96af0..00000000
--- a/doc/rtd/topics/moreinfo.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-================
-More information
-================
-
-Useful external references
---------------------------
-
-- `The beauty of cloudinit`_
-- `Introduction to cloud-init`_ (video)
-
-.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
-.. _The beauty of cloudinit: http://brandon.fuller.name/archives/2011/05/02/06.40.57/
diff --git a/doc/sources/altcloud/README.rst b/doc/sources/altcloud/README.rst
deleted file mode 100644
index b5d72ebb..00000000
--- a/doc/sources/altcloud/README.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-The datasource altcloud will be used to pick up user data on `RHEVm`_ and `vSphere`_.
-
-RHEVm
-~~~~~~
-
-For `RHEVm`_ v3.0 the userdata is injected into the VM using floppy
-injection via the `RHEVm`_ dashboard "Custom Properties".
-
-The format of the Custom Properties entry must be:
-
-::
-
- floppyinject=user-data.txt:<base64 encoded data>
-
-For example to pass a simple bash script:
-
-::
-
- % cat simple_script.bash
- #!/bin/bash
- echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt
-
- % base64 < simple_script.bash
- IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
-
-To pass this example script to cloud-init running in a `RHEVm`_ v3.0 VM
-set the "Custom Properties" when creating the RHEVm v3.0 VM to:
-
-::
-
- floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
-
-**NOTE:** The prefix with file name must be: ``floppyinject=user-data.txt:``
-
-It is also possible to launch a `RHEVm`_ v3.0 VM and pass optional user
-data to it using Deltacloud.
-
-For more information on Deltacloud see: http://deltacloud.apache.org
-
-vSphere
-~~~~~~~~
-
-For VMWare's `vSphere`_ the userdata is injected into the VM as an ISO
-via the cdrom. This can be done using the `vSphere`_ dashboard
-by connecting an ISO image to the CD/DVD drive.
-
-To pass this example script to cloud-init running in a `vSphere`_ VM
-set the CD/DVD drive when creating the vSphere VM to point to an
-ISO on the data store.
-
-**Note:** The ISO must contain the user data.
-
-For example, to pass the same ``simple_script.bash`` to vSphere:
-
-Create the ISO
------------------
-
-::
-
- % mkdir my-iso
-
-NOTE: The file name on the ISO must be: ``user-data.txt``
-
-::
-
-    % cp simple_script.bash my-iso/user-data.txt
- % genisoimage -o user-data.iso -r my-iso
-
-Verify the ISO
------------------
-
-::
-
- % sudo mkdir /media/vsphere_iso
-    % sudo mount -o loop user-data.iso /media/vsphere_iso
- % cat /media/vsphere_iso/user-data.txt
- % sudo umount /media/vsphere_iso
-
-Then, launch the `vSphere`_ VM with the ISO user-data.iso attached as a CDROM.
-
-It is also possible to launch a `vSphere`_ VM and pass optional user
-data to it using Deltacloud.
-
-For more information on Deltacloud see: http://deltacloud.apache.org
-
-.. _RHEVm: https://www.redhat.com/virtualization/rhev/desktop/rhevm/
-.. _vSphere: https://www.vmware.com/products/datacenter-virtualization/vsphere/overview.html
diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst
deleted file mode 100644
index 8239d1fa..00000000
--- a/doc/sources/azure/README.rst
+++ /dev/null
@@ -1,134 +0,0 @@
-================
-Azure Datasource
-================
-
-This datasource finds metadata and user-data from the Azure cloud platform.
-
-Azure Platform
---------------
-The azure cloud-platform provides initial data to an instance via an attached
-CD formatted in UDF. That CD contains an 'ovf-env.xml' file that provides
-some information. Additional information is obtained via interaction with the
-"endpoint". The ip address of the endpoint is advertised to the instance
-inside of dhcp option 245. On ubuntu, that can be seen in
-/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example:
-``option unknown-245 64:41:60:82;`` is 100.65.96.130).
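-
-As an illustrative sketch, that colon-delimited hex value can be converted
-to a dotted-quad address like this:
-
-.. code:: python
-
-    def option_245_to_ip(hexval):
-        # "64:41:60:82" -> "100.65.96.130"
-        return ".".join(str(int(part, 16)) for part in hexval.split(":"))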
-
-walinuxagent
-------------
-In order to operate correctly, cloud-init needs walinuxagent to provide much
-of the interaction with azure. In addition to "provisioning" code,
-walinuxagent is a long running daemon that handles the following things:
-
-- generate an x509 certificate and send that to the endpoint
-
-waagent.conf config
-~~~~~~~~~~~~~~~~~~~
-In order to use waagent.conf with cloud-init, the following settings are
-recommended. Other values can be changed or set to the defaults.
-
- ::
-
- # disabling provisioning turns off all 'Provisioning.*' function
- Provisioning.Enabled=n
- # this is currently not handled by cloud-init, so let walinuxagent do it.
- ResourceDisk.Format=y
- ResourceDisk.MountPoint=/mnt
-
-
-Userdata
---------
-Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init
-expects that user-data will be provided as a base64 encoded value inside the
-text child of an element named ``UserData`` or ``CustomData`` which is a
-direct child of the ``LinuxProvisioningConfigurationSet`` (a sibling to
-``UserName``). If both ``UserData`` and ``CustomData`` are provided, it is
-undefined which of them will be selected.
-
-In the example below, the user-data provided is 'this is my userdata', and
-the datasource config provided is
-``{"agent_command": ["start", "walinuxagent"]}``. That agent command will
-take effect as if it were specified in system config.
-
-Example:
-
-.. code::
-
- <wa:ProvisioningSection>
- <wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
- xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
- <HostName>myHost</HostName>
- <UserName>myuser</UserName>
- <UserPassword/>
-      <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ==</CustomData>
- <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg>
- <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
- <SSH>
- <PublicKeys>
- <PublicKey>
- <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint>
- <Path>this-value-unused</Path>
- </PublicKey>
- </PublicKeys>
- </SSH>
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
-
-Configuration
--------------
-Configuration for the datasource can be read from the system config or set
-via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in
-the dscfg node is expected to be base64 encoded yaml content, and it will be
-merged into the 'datasource: Azure' entry.
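-
-For illustration, such a base64 value could be produced like this (a sketch;
-this example reproduces the dscfg value shown above):
-
-.. code:: python
-
-    import base64
-
-    dscfg = '{"agent_command": ["start", "walinuxagent"]}'
-    print(base64.b64encode(dscfg.encode('utf-8')).decode('ascii'))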
-
-The '``hostname_bounce: command``' entry can be either the literal string
-'builtin' or a command to execute. The command will be invoked after the
-hostname is set, and will have the 'interface' in its environment. If
-``set_hostname`` is not true, then ``hostname_bounce`` will be ignored.
-
-An example might be::
-
-  command: ["sh", "-c", "killall dhclient; dhclient $interface"]
-
-.. code::
-
-    datasource:
-      Azure:
-        agent_command: [service, walinuxagent, start]
-        set_hostname: True
-        hostname_bounce:
-          # the name of the interface to bounce
-          interface: eth0
-          # policy can be 'on', 'off' or 'force'
-          policy: on
-          # the method 'bounce' command.
-          command: "builtin"
-          hostname_command: "hostname"
-
-hostname
---------
-When the user launches an instance, they provide a hostname for that instance.
-The hostname is provided to the instance in the ovf-env.xml file as
-``HostName``.
-
-Whatever value the instance provides in its dhcp request will resolve in the
-domain returned in the 'search' request.
-
-The interesting issue is that a generic image will already have a hostname
-configured. The ubuntu cloud images have 'ubuntu' as the hostname of the
-system, and the initial dhcp request on eth0 is not guaranteed to occur after
-the datasource code has been run. So, on first boot, that initial value will
-be sent in the dhcp request and *that* value will resolve.
-
-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
-dhcp request must be made with the new value. Walinuxagent (in its current
-version) handles this by polling the state of the hostname and bouncing
-('``ifdown eth0; ifup eth0``') the network interface if it sees that a
-change has been made.
-
-cloud-init handles this by setting the hostname in the DataSource's 'get_data'
-method via '``hostname $HostName``', and then bouncing the interface. This
-behavior can be configured or disabled in the datasource config. See
-'Configuration' above.
diff --git a/doc/sources/cloudsigma/README.rst b/doc/sources/cloudsigma/README.rst
deleted file mode 100644
index 6509b585..00000000
--- a/doc/sources/cloudsigma/README.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-=====================
-CloudSigma Datasource
-=====================
-
-This datasource finds metadata and user-data from the `CloudSigma`_ cloud
-platform. Data transfer occurs through a virtual serial port of the
-`CloudSigma`_ VM, and the presence of a network adapter is **NOT** a
-requirement.
-
- See `server context`_ in the public documentation for more information.
-
-
-Setting a hostname
-~~~~~~~~~~~~~~~~~~
-
-By default the name of the server will be applied as a hostname on the first boot.
-
-
-Providing user-data
-~~~~~~~~~~~~~~~~~~~
-
-You can provide user-data to the VM using the dedicated `meta field`_ in the
-`server context`_ ``cloudinit-user-data``. By default the *cloud-config*
-format is expected there and the ``#cloud-config`` header can be omitted.
-However, since this is a raw-text field, you could provide any of the valid
-`config formats`_.
-
-You have the option to encode your user-data using Base64. In order to do
-that you have to add the ``cloudinit-user-data`` field to the
-``base64_fields``. The latter is a comma-separated field listing all the
-meta fields with base64 encoded values.
-
-If your user-data does not need an internet connection you can create a
-`meta field`_ in the `server context`_ ``cloudinit-dsmode`` and set "local"
-as its value. If this field does not exist the default value is "net".
-
-
-.. _CloudSigma: http://cloudsigma.com/
-.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
-.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
-.. _config formats: http://cloudinit.readthedocs.org/en/latest/topics/format.html
diff --git a/doc/sources/cloudstack/README.rst b/doc/sources/cloudstack/README.rst
deleted file mode 100644
index eba1cd7e..00000000
--- a/doc/sources/cloudstack/README.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-`Apache CloudStack`_ exposes user-data, meta-data, user password and account
-sshkey through the Virtual-Router. For more details on meta-data and
-user-data, refer to the `CloudStack Administrator Guide`_.
-
-The following URLs provide access to user-data and meta-data from the
-Virtual Machine, where 10.1.1.1 is the Virtual Router IP:
-
-.. code:: bash
-
- http://10.1.1.1/latest/user-data
- http://10.1.1.1/latest/meta-data
- http://10.1.1.1/latest/meta-data/{metadata type}
-
-Configuration
-~~~~~~~~~~~~~
-
-Apache CloudStack datasource can be configured as follows:
-
-.. code:: yaml
-
- datasource:
- CloudStack: {}
- None: {}
- datasource_list:
- - CloudStack
-
-
-.. _Apache CloudStack: http://cloudstack.apache.org/
-.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data \ No newline at end of file
diff --git a/doc/sources/configdrive/README.rst b/doc/sources/configdrive/README.rst
deleted file mode 100644
index 48ff579d..00000000
--- a/doc/sources/configdrive/README.rst
+++ /dev/null
@@ -1,123 +0,0 @@
-The configuration drive datasource supports the `OpenStack`_ configuration drive disk.
-
- See `the config drive extension`_ and `introduction`_ in the public
- documentation for more information.
-
-By default, cloud-init does *not* consider this source to be a full-fledged
-datasource. Instead, the typical behavior is to assume it is really only
-present to provide networking information. Cloud-init will copy off the
-network information, apply it to the system, and then continue on. The
-"full" datasource could then be found in the EC2 metadata service. If this is
-not the case then the files contained on the located drive must provide
-equivalents to what the EC2 metadata service would provide (which is typical
-of the version 2 support listed below).
-
-Version 1
-~~~~~~~~~
-
-The following criteria are required for a drive to be considered a config drive:
-
-1. Must be formatted with the `vfat`_ filesystem
-2. Must be an un-partitioned block device (/dev/vdb, not /dev/vdb1)
-3. Must contain *one* of the following files
-
-::
-
- /etc/network/interfaces
- /root/.ssh/authorized_keys
- /meta.js
-
-``/etc/network/interfaces``
-
- This file is laid down by nova in order to pass static networking
- information to the guest. Cloud-init will copy it off of the config-drive
- and into /etc/network/interfaces (or convert it to RH format) as soon as it can,
- and then attempt to bring up all network interfaces.
-
-``/root/.ssh/authorized_keys``
-
-  This file is laid down by nova, and contains the ssh keys that were
-  provided to nova on instance creation (nova-boot --key ....)
-
-``/meta.js``
-
- meta.js is populated on the config-drive in response to the user passing
- "meta flags" (nova boot --meta key=value ...). It is expected to be json
- formatted.
-
-Version 2
-~~~~~~~~~~~
-
-The following criteria are required for a drive to be considered a config drive:
-
-1. Must be formatted with the `vfat`_ or `iso9660`_ filesystem,
-   or have a *filesystem* label of **config-2**
-2. Must be an un-partitioned block device (/dev/vdb, not /dev/vdb1)
-3. The files that will typically be present in the config drive are:
-
-::
-
- openstack/
- - 2012-08-10/ or latest/
- - meta_data.json
- - user_data (not mandatory)
- - content/
- - 0000 (referenced content files)
- - 0001
- - ....
- ec2
- - latest/
- - meta-data.json (not mandatory)
-
-Keys and values
-~~~~~~~~~~~~~~~
-
-Cloud-init's behavior can be modified by keys found in the meta.js (version 1 only) file in the following ways.
-
-::
-
- dsmode:
- values: local, net, pass
- default: pass
-
-
-This is what indicates if configdrive is a final data source or not.
-By default it is 'pass', meaning this datasource should not be read.
-Set it to 'local' or 'net' to stop cloud-init from continuing on to
-search for other data sources after network config.
-
-The difference between 'local' and 'net' is that local will not require
-networking to be up before user-data actions (or boothooks) are run.
-
-::
-
- instance-id:
- default: iid-dsconfigdrive
-
-This is utilized as the metadata's instance-id. It should generally
-be unique, as it is what is used to determine "is this a new instance".
-
-::
-
- public-keys:
- default: None
-
-If present, these keys will be used as the public keys for the
-instance. This value overrides the content in authorized_keys.
-
-Note: it is likely preferable to provide keys via user-data.
-
-::
-
- user-data:
- default: None
-
-This provides cloud-init user-data. See :ref:`examples <yaml_examples>` for
-what can be present here.
-
-.. _OpenStack: http://www.openstack.org/
-.. _introduction: http://docs.openstack.org/trunk/openstack-compute/admin/content/config-drive.html
-.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. _the config drive extension: http://docs.openstack.org/user-guide/content/config-drive.html
diff --git a/doc/sources/digitalocean/README.rst b/doc/sources/digitalocean/README.rst
deleted file mode 100644
index 1bb89fe1..00000000
--- a/doc/sources/digitalocean/README.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-The `DigitalOcean`_ datasource consumes the content served from
-DigitalOcean's `metadata service`_. This metadata service serves information
-about the running droplet via HTTP over the link local address
-169.254.169.254. The metadata API endpoints are fully described at
-`https://developers.digitalocean.com/metadata/ <https://developers.digitalocean.com/metadata/>`_.
-
-Configuration
-~~~~~~~~~~~~~
-
-DigitalOcean's datasource can be configured as follows:
-
-::
-
-  datasource:
-    DigitalOcean:
-      retries: 3
-      timeout: 2
-
-- *retries*: Determines the number of times to attempt to connect to the metadata service
-- *timeout*: Determines the timeout in seconds to wait for a response from the metadata service
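-
-As a quick illustrative check from inside a droplet, the service can be
-queried directly (the ``hostname`` endpoint below is one of the documented
-metadata endpoints):
-
-::
-
-  $ curl http://169.254.169.254/metadata/v1/hostname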
-
-.. _DigitalOcean: http://digitalocean.com/
-.. _metadata service: https://developers.digitalocean.com/metadata/
diff --git a/doc/sources/kernel-cmdline.txt b/doc/sources/kernel-cmdline.txt
deleted file mode 100644
index 0b77a9af..00000000
--- a/doc/sources/kernel-cmdline.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-In order to allow an ephemeral, or otherwise pristine image to
-receive some configuration, cloud-init will read a url directed by
-the kernel command line and proceed as if its data had previously existed.
-
-This allows for configuring a meta-data service, or some other data.
-
-Note that usage of the kernel command line is somewhat of a last resort,
-as it requires knowing in advance the correct command line or modifying
-the boot loader to append data.
-
-For example, when 'cloud-init start' runs, it will check whether
-one of 'cloud-config-url' or 'url' appears in key/value fashion
-in the kernel command line as in:
- root=/dev/sda ro url=http://foo.bar.zee/abcde
-
-Cloud-init will then read the contents of the given url.
-If the content starts with '#cloud-config', it will store
-that data to the local filesystem in a static filename
-'/etc/cloud/cloud.cfg.d/91_kernel_cmdline_url.cfg', and consider it as
-part of the config from that point forward.
-
-If that file exists already, it will not be overwritten, and the url
-parameters will be completely ignored.
-
-Then, when the DataSource runs, it will find that config already available.
-
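-As a rough sketch of that flow (this is an illustration, not the
-actual implementation; error handling is omitted):
-
-  import os
-  from urllib.request import urlopen
-
-  KEYS = ("cloud-config-url", "url")
-  CFG = "/etc/cloud/cloud.cfg.d/91_kernel_cmdline_url.cfg"
-
-  def maybe_fetch_cmdline_url():
-      with open("/proc/cmdline") as f:
-          tokens = f.read().split()
-      for tok in tokens:
-          key, _, val = tok.partition("=")
-          if key in KEYS and val:
-              data = urlopen(val).read()
-              # only '#cloud-config' content is considered, and an
-              # existing file is never overwritten
-              if (data.startswith(b"#cloud-config")
-                      and not os.path.exists(CFG)):
-                  with open(CFG, "wb") as out:
-                      out.write(data)
-              return
-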
-So, to configure the MAAS DataSource by controlling the kernel
-command line from outside the image, you can append:
- url=http://your.url.here/abcdefg
-or
- cloud-config-url=http://your.url.here/abcdefg
-
-Then, have the following content at that url:
- #cloud-config
- datasource:
- MAAS:
- metadata_url: http://mass-host.localdomain/source
- consumer_key: Xh234sdkljf
- token_key: kjfhgb3n
- token_secret: 24uysdfx1w4
-
-Notes:
- * Because 'url=' is so very generic, in order to avoid false positives,
- cloud-init requires the content to start with '#cloud-config' in order
- for it to be considered.
- * The url= is an un-authed http GET, and contains credentials.
-   It could be set up to be randomly generated and also check the source
-   address in order to be more secure.
diff --git a/doc/sources/nocloud/README.rst b/doc/sources/nocloud/README.rst
deleted file mode 100644
index 08a39377..00000000
--- a/doc/sources/nocloud/README.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-The data sources ``NoCloud`` and ``NoCloudNet`` allow the user to provide user-data
-and meta-data to the instance without running a network service (or even without
-having a network at all).
-
-You can provide meta-data and user-data to a local vm boot via files on a `vfat`_
-or `iso9660`_ filesystem. The filesystem volume label must be ``cidata``.
-
-These user-data and meta-data files are expected to be
-placed at the following paths.
-
-::
-
- /user-data
- /meta-data
-
-Basically, user-data is simply user-data and meta-data is a yaml formatted file
-representing what you'd find in the EC2 metadata service.
-
-Given an ubuntu 12.04 cloud image disk in 'disk.img', you can create a
-sufficient disk by following the example below.
-
-::
-
- ## create user-data and meta-data files that will be used
- ## to modify image on first boot
- $ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data
-
- $ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
-
- ## create a disk to attach with some user-data and meta-data
- $ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
-
- ## alternatively, create a vfat filesystem with same files
- ## $ truncate --size 2M seed.img
- ## $ mkfs.vfat -n cidata seed.img
- ## $ mcopy -oi seed.img user-data meta-data ::
-
- ## create a new qcow image to boot, backed by your original image
- $ qemu-img create -f qcow2 -b disk.img boot-disk.img
-
- ## boot the image and login as 'ubuntu' with password 'passw0rd'
- ## note, passw0rd was set as password through the user-data above,
- ## there is no password set on these images.
- $ kvm -m 256 \
- -net nic -net user,hostfwd=tcp::2222-:22 \
- -drive file=boot-disk.img,if=virtio \
- -drive file=seed.iso,if=virtio
-
-**Note:** the instance-id provided (``iid-local01`` above) is what is used to
-determine if this is "first boot". So if you are making updates to user-data
-you will also have to change that, or start the disk fresh.
-
-Also, you can inject an ``/etc/network/interfaces`` file by providing the content
-for that file in the ``network-interfaces`` field of metadata.
-
-Example metadata:
-
-::
-
- instance-id: iid-abcdefg
- network-interfaces: |
- iface eth0 inet static
- address 192.168.1.10
- network 192.168.1.0
- netmask 255.255.255.0
- broadcast 192.168.1.255
- gateway 192.168.1.254
- hostname: myhost
-
-.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
diff --git a/doc/sources/opennebula/README.rst b/doc/sources/opennebula/README.rst
deleted file mode 100644
index 4d7de27a..00000000
--- a/doc/sources/opennebula/README.rst
+++ /dev/null
@@ -1,142 +0,0 @@
-The `OpenNebula`_ (ON) datasource supports the contextualization disk.
-
-See `contextualization overview`_, `contextualizing VMs`_ and
-`network configuration`_ in the public documentation for
-more information.
-
-OpenNebula's virtual machines are contextualized (parametrized) by
-CD-ROM image, which contains a shell script *context.sh* with
-custom variables defined on virtual machine start. There are no
-fixed contextualization variables, but the datasource accepts many
-variables commonly used and recommended across the documentation.
-
-Datasource configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The datasource accepts the following configuration options.
-
-::
-
- dsmode:
- values: local, net, disabled
- default: net
-
-Tells whether this datasource will be processed in the 'local'
-(pre-networking) or 'net' (post-networking) stage, or completely 'disabled'.
-
-::
-
- parseuser:
- default: nobody
-
-Unprivileged system user used for contextualization script
-processing.
-
-Contextualization disk
-~~~~~~~~~~~~~~~~~~~~~~
-
-The following criteria are required:
-
-1. Must be formatted with `iso9660`_ filesystem
- or have a *filesystem* label of **CONTEXT** or **CDROM**
-2. Must contain file *context.sh* with contextualization variables.
-   The file is generated by OpenNebula; it has a KEY='VALUE' format and
-   can be easily read by bash.
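-
-For illustration, a minimal *context.sh* might look like the following
-(variable names come from the list below; values are made up)::
-
-  HOSTNAME='myhost'
-  SSH_PUBLIC_KEY='ssh-rsa AAAA... user@host'
-  DSMODE='net'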
-
-Contextualization variables
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are no fixed contextualization variables in OpenNebula, no standard.
-The following variables were found in various places and revisions of
-the OpenNebula documentation. Where multiple similar variables are
-specified, only the first one found is taken.
-
-::
-
- DSMODE
-
-Datasource mode configuration override. Values: local, net, disabled.
-
-::
-
- DNS
- ETH<x>_IP
- ETH<x>_NETWORK
- ETH<x>_MASK
- ETH<x>_GATEWAY
- ETH<x>_DOMAIN
- ETH<x>_DNS
-
-Static `network configuration`_.
-
-::
-
- HOSTNAME
-
-Instance hostname.
-
-::
-
- PUBLIC_IP
- IP_PUBLIC
- ETH0_IP
-
-If no hostname has been specified, cloud-init will try to create a hostname
-from the instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init
-tries to resolve one of its IP addresses to get the hostname.
-
-::
-
- SSH_KEY
- SSH_PUBLIC_KEY
-
-One or multiple SSH keys (separated by newlines) can be specified.
-
-::
-
- USER_DATA
- USERDATA
-
-cloud-init user data.
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-This example cloud-init configuration (*cloud.cfg*) enables the
-OpenNebula datasource only in 'net' mode.
-
-::
-
- disable_ec2_metadata: True
- datasource_list: ['OpenNebula']
- datasource:
- OpenNebula:
- dsmode: net
- parseuser: nobody
-
-Example VM's context section
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-::
-
- CONTEXT=[
- PUBLIC_IP="$NIC[IP]",
- SSH_KEY="$USER[SSH_KEY]
- $USER[SSH_KEY1]
- $USER[SSH_KEY2] ",
- USER_DATA="#cloud-config
- # see https://help.ubuntu.com/community/CloudInit
-
- packages: []
-
- mounts:
- - [vdc,none,swap,sw,0,0]
- runcmd:
- - echo 'Instance has been configured by cloud-init.' | wall
- " ]
-
-.. _OpenNebula: http://opennebula.org/
-.. _contextualization overview: http://opennebula.org/documentation:documentation:context_overview
-.. _contextualizing VMs: http://opennebula.org/documentation:documentation:cong
-.. _network configuration: http://opennebula.org/documentation:documentation:cong#network_configuration
-.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
diff --git a/doc/sources/openstack/README.rst b/doc/sources/openstack/README.rst
deleted file mode 100644
index 8102597e..00000000
--- a/doc/sources/openstack/README.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-*TODO*
-
-Vendor Data
-~~~~~~~~~~~
-
-The OpenStack metadata server can be configured to serve up vendor data
-which is available to all instances for consumption. OpenStack vendor
-data is, generally, a JSON object.
-
-cloud-init will look for configuration in the ``cloud-init`` attribute
-of the vendor data JSON object. cloud-init processes this configuration
-using the same handlers as user data, so any formats that work for user
-data should work for vendor data.
-
-For example, configuring the following as vendor data in OpenStack would
-upgrade packages and install ``htop`` on all instances:
-
-.. sourcecode:: json
-
- {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"}
-
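-Because the cloud-config must be embedded as a single JSON string,
-generating the vendor data programmatically avoids escaping mistakes.
-A small illustrative sketch (not an official tool):
-
-.. sourcecode:: python
-
-    import json
-
-    cloud_config = "#cloud-config\npackage_upgrade: True\npackages:\n - htop"
-    # json.dumps escapes the embedded newlines for us
-    print(json.dumps({"cloud-init": cloud_config}))
-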
-For more general information about how cloud-init handles vendor data,
-including how it can be disabled by users on instances, see
-https://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/view/head:/doc/vendordata.txt
diff --git a/doc/sources/ovf/README b/doc/sources/ovf/README
deleted file mode 100644
index e3ef12e0..00000000
--- a/doc/sources/ovf/README
+++ /dev/null
@@ -1,83 +0,0 @@
-This directory contains documentation and a demo of the OVF
-functionality that is present in cloud-init.
-
-The example/ directory contains the following files:
-- example/ovf-env.xml
-  This is an example ovf environment file.
-  To make an iso that qualifies for the ISO transport, do:
-    mkdir my-iso
-    cp example/ovf-env.xml my-iso/ovf-env.xml
-    genisoimage -o transport.iso -r my-iso
-  Then, boot with that ISO attached as a CDrom
-- example/ubuntu-server.ovf
- Example generated by virtualbox "export" of a simple VM.
- It contains a functional ProductSection also. Given answers
- to each of the Properties there, a suitable OVF environment file
- (ovf-env.xml) could be created.
-
-== Demo ==
-In order to easily demonstrate this functionality, a simple demo is
-contained here. To boot a local virtual machine in either kvm or
-VirtualBox, follow the steps below.
-
-- download a suitable Ubuntu image
- Visit http://cloud-images.ubuntu.com/releases and download a disk image
- of Natty, Oneiric or a newer release.
-
- $ burl="http://cloud-images.ubuntu.com/releases/"
- $ disk="ubuntu-11.10-server-cloudimg-i386-disk1"
- $ wget "$burl/11.10/release/$disk.img" -O "$disk.img"
-
-- If you're going to use virtual box, you will need to convert the image
- from qcow2 format into a virtual-box friendly VDI format.
- $ qemu-img convert -O vdi "$disk.img" "ubuntu.vdi"
-
-- If you're using kvm, you should create a qcow delta image to store
- the changes so you keep the original pristine.
- $ qemu-img create -f qcow2 -b "$disk.img" "ubuntu.qcow2"
-
- Optionally, you could decompress the image, which will make it boot faster
- but will take up more local disk space.
- $ qemu-img convert -O qcow2 "$disk.img" "$disk.qcow2"
- $ qemu-img create -f qcow2 -b "$disk.qcow2" ubuntu.qcow2
-
-- Create an ISO file that will provide user-data to the image.
- This will put the contents of 'user-data' into an ovf-env.xml file
- and create an ISO file that can then be attached at boot to provide
- the user data to cloud-init.
-
- $ ./make-iso ovf-env.xml.tmpl user-data --output ovftransport.iso
-
-- Boot your virtual machine
- The cloud-images boot with kernel and boot progress to ttyS0.
- You can change that at the grub prompt if you'd like by editing the
- kernel entry. Otherwise, to see progress you'll need to switch
- to the serial console. In kvm graphic mode, you do that by clicking
- in the window and then pressing pressing 'ctrl-alt-3'. For information
- on how to do that in virtualbox or kvm curses, see the relevant
- documentation.
-
- KVM:
- $ kvm -drive file=ubuntu.qcow2,if=virtio -cdrom ovftransport.iso \
- -m 256 -net nic -net user,hostfwd=tcp::2222-:22
-
- VirtualBox:
- - Launch the GUI and create a new vm with $disk.vdi and ovftransport.iso
- attached.
- - If you use 'NAT' networking, then forward a port (2222) to the
- guests' port 22 to be able to ssh.
-
- Upon successful boot you will be able to log in as the 'ubuntu' user
- with the password 'passw0rd' (which was set in the 'user-data' file).
-
- You will also be able to ssh to the instance with the provided key:
- $ chmod 600 ovfdemo.pem
- $ ssh -i ovfdemo.pem -p 2222 ubuntu@localhost
-
-- Notes:
- * The 'instance-id' that is set in the ovf-env.xml image needs to
- be unique. If you want to run the first-boot code of cloud-init
- again you will either have to remove /var/lib/cloud ('rm -Rf' is fine)
- or create a new cdrom with a different instance-id. To do the
- ladder, simply add the '--instance-id=' flag to the 'make-iso'
- command above and start your vm with the new ISO attached.
diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
deleted file mode 100644
index 13e8f104..00000000
--- a/doc/sources/ovf/example/ovf-env.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
- oe:id="WebTier">
-
- <!-- This example references a local schema file; to validate against the online schema use:
- xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.0.0.xsd"
- -->
-
- <!-- Information about hypervisor platform -->
- <oe:PlatformSection>
- <Kind>ESX Server</Kind>
- <Version>3.0.1</Version>
- <Vendor>VMware, Inc.</Vendor>
- <Locale>en_US</Locale>
- </oe:PlatformSection>
-
- <!--- Properties defined for this virtual machine -->
- <PropertySection>
- <!-- instance-id is required, a unique instance-id -->
- <Property oe:key="instance-id" oe:value="i-abcdefg"/>
- <!--
- seedfrom is optional, but indicates to 'seed' user-data
- and meta-data from the given url. In this example, pull
- http://tinyurl.com/sm-meta-data and http://tinyurl.com/sm-user-data
- -->
- <Property oe:key="seedfrom" oe:value="http://tinyurl.com/sm-"/>
- <!--
- public-keys is a public key to add to users authorized keys
- -->
- <Property oe:key="public-keys" oe:value="ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies"/>
- <!-- hostname: the hostname to set -->
- <Property oe:key="hostname" oe:value="ubuntuhost"/>
- <!--
- The value for user-data is to be base64 encoded.
- It will be decoded, and then processed normally as user-data.
- The following represents '#!/bin/sh\necho "hi world"'
-
- -->
- <Property oe:key="user-data" oe:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="/>
- <Property oe:key="password" oe:value="passw0rd"/>
- </PropertySection>
-
-</Environment>
diff --git a/doc/sources/ovf/example/ubuntu-server.ovf b/doc/sources/ovf/example/ubuntu-server.ovf
deleted file mode 100644
index 846483a1..00000000
--- a/doc/sources/ovf/example/ubuntu-server.ovf
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Envelope xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
- <References>
- <File ovf:href="my.vmdk" ovf:id="file1" ovf:size="2031616"/>
- </References>
- <DiskSection>
- <Info>Virtual disk information</Info>
- <Disk ovf:capacity="52428800" ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#monolithicSparse"/>
- </DiskSection>
- <NetworkSection>
- <Info>The list of logical networks</Info>
- <Network ovf:name="bridged">
- <Description>The bridged network</Description>
- </Network>
- </NetworkSection>
- <VirtualSystem ovf:id="vm">
- <Info>A virtual machine</Info>
- <Name>Ubuntu</Name>
- <OperatingSystemSection ovf:id="93">
- <Info>11.04 (Natty Narwhal) Server</Info>
- </OperatingSystemSection>
- <ProductSection>
- <Info>Cloud-Init customization</Info>
- <Product>11.04 (Natty Narwhal) Server</Product>
- <Property ovf:key="instance-id" ovf:type="string" ovf:userConfigurable="true" ovf:value="id-ovf">
- <Label>A Unique Instance ID for this instance</Label>
- <Description>Specifies the instance id. This is required and used to determine if the machine should take "first boot" actions</Description>
- </Property>
- <Property ovf:key="hostname" ovf:type="string" ovf:userConfigurable="true" ovf:value="ubuntuguest">
- <Description>Specifies the hostname for the appliance</Description>
- </Property>
- <Property ovf:key="seedfrom" ovf:type="string" ovf:userConfigurable="true">
- <Label>Url to seed instance data from</Label>
- <Description>This field is optional, but indicates that the instance should 'seed' user-data and meta-data from the given url. If 'http://tinyurl.com/sm-' is given, meta-data will be pulled from http://tinyurl.com/sm-meta-data and user-data from http://tinyurl.com/sm-user-data. Leave this empty if you do not want to seed from a url.</Description>
- </Property>
- <Property ovf:key="public-keys" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
- <Label>ssh public keys</Label>
- <Description>This field is optional, but indicates that the instance should populate the default user's 'authorized_keys' with this value</Description>
- </Property>
- <Property ovf:key="user-data" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
- <Label>Encoded user-data</Label>
- <Description>In order to fit into an xml attribute, this value is base64 encoded. It will be decoded, and then processed normally as user-data.</Description>
- <!-- The following represents '#!/bin/sh\necho "hi world"'
- ovf:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="
- -->
- </Property>
- <Property ovf:key="password" ovf:type="string" ovf:userConfigurable="true" ovf:value="">
- <Label>Default User's password</Label>
- <Description>If set, the default user's password will be set to this value to allow password based login. The password will be good for only a single login. If set to the string 'RANDOM' then a random password will be generated, and written to the console.</Description>
- </Property>
- </ProductSection>
- <VirtualHardwareSection>
- <Info>Virtual hardware requirements</Info>
- <System>
- <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
- <vssd:InstanceID>0</vssd:InstanceID>
- <vssd:VirtualSystemIdentifier>Ubuntu 11.04 (Natty Narwhal) Server</vssd:VirtualSystemIdentifier>
- <vssd:VirtualSystemType>vmx-07 qemu-pc qemu-pc-0.13 virtualbox-2.2</vssd:VirtualSystemType>
- </System>
- <Item>
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
- </Item>
- <Item>
- <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
- <rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>256MB of memory</rasd:ElementName>
- <rasd:InstanceID>2</rasd:InstanceID>
- <rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>256</rasd:VirtualQuantity>
- </Item>
- <Item ovf:required="false">
- <rasd:Address>0</rasd:Address>
- <rasd:Description>USB Controller</rasd:Description>
- <rasd:ElementName>usb</rasd:ElementName>
- <rasd:InstanceID>3</rasd:InstanceID>
- <rasd:ResourceType>23</rasd:ResourceType>
- </Item>
- <Item>
- <rasd:Address>0</rasd:Address>
- <rasd:Description>SCSI Controller</rasd:Description>
- <rasd:ElementName>scsiController0</rasd:ElementName>
- <rasd:InstanceID>4</rasd:InstanceID>
- <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
- <rasd:ResourceType>6</rasd:ResourceType>
- </Item>
- <Item>
- <rasd:Address>1</rasd:Address>
- <rasd:Description>IDE Controller</rasd:Description>
- <rasd:ElementName>ideController1</rasd:ElementName>
- <rasd:InstanceID>5</rasd:InstanceID>
- <rasd:ResourceType>5</rasd:ResourceType>
- </Item>
- <Item ovf:required="false">
- <rasd:AddressOnParent>0</rasd:AddressOnParent>
- <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
- <rasd:ElementName>cdrom1</rasd:ElementName>
- <rasd:InstanceID>6</rasd:InstanceID>
- <rasd:Parent>5</rasd:Parent>
- <rasd:ResourceType>15</rasd:ResourceType>
- </Item>
- <Item>
- <rasd:AddressOnParent>0</rasd:AddressOnParent>
- <rasd:ElementName>disk1</rasd:ElementName>
- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>
- <rasd:InstanceID>7</rasd:InstanceID>
- <rasd:Parent>4</rasd:Parent>
- <rasd:ResourceType>17</rasd:ResourceType>
- </Item>
- <Item>
- <rasd:AddressOnParent>2</rasd:AddressOnParent>
- <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>bridged</rasd:Connection>
- <rasd:Description>ethernet adapter on &quot;bridged&quot;</rasd:Description>
- <rasd:ElementName>ethernet0</rasd:ElementName>
- <rasd:InstanceID>8</rasd:InstanceID>
- <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
- <rasd:ResourceType>10</rasd:ResourceType>
- </Item>
- </VirtualHardwareSection>
- <AnnotationSection ovf:required="false">
- <Info>For more information, see http://ubuntu.com</Info>
- <Annotation>This is Ubuntu Server.</Annotation>
- </AnnotationSection>
- </VirtualSystem>
-</Envelope>
diff --git a/doc/sources/ovf/make-iso b/doc/sources/ovf/make-iso
deleted file mode 100755
index 91d0e2e5..00000000
--- a/doc/sources/ovf/make-iso
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/bin/bash
-
-VERBOSITY=0
-PROPERTIES=( instance-id hostname user-data seedfrom )
-DEFAULTS=( "i-ovfdemo00" "ovfdemo.localdomain" "" "" )
-
-DEF_OUTPUT="ovftransport.iso"
-TEMP_D=""
-
-error() { echo "$@" 1>&2; }
-fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
-
-# propvalue(name, value)
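-# with one argument: set _RET to the current default for the named property.
-# with two arguments: first update that default to $2, then set _RET.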
-propvalue() {
- local prop="" val="$2" i=0
- for prop in "${PROPERTIES[@]}"; do
- if [ "$prop" = "$1" ]; then
- [ $# -eq 1 ] || DEFAULTS[$i]="$2"
- _RET=${DEFAULTS[$i]}
- return
- fi
- i=$(($i+1))
- done
- return
-}
-
-Usage() {
- cat <<EOF
-Usage: ${0##*/} ovf-env.xml.tmpl [user-data-file]
-
- create an ovf transport iso with ovf-env.xml.tmpl
- as ovf-env.xml on the iso.
-
- if user-data-file is given, the file's contents will be base64 encoded
- and stuffed inside ovf-env.xml. This will override the '--user-data'
- argument.
-
- options:
- -o | --output OUTPUT write output to OUTPUT [default: $DEF_OUTPUT]
- -v | --verbose increase verbosity
-
-EOF
- local i=""
- for i in "${PROPERTIES[@]}"; do
- propvalue "$i"
- printf "%10s--%-17s%s\n" "" "$i" "set $i. [default: '$_RET']"
- done
- cat <<EOF
-
- Example:
- $ ${0##*/} --hostname "foobar.mydomain" ovf-env.xml.tmpl user-data
-
-EOF
-}
-
-bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
-cleanup() {
- [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
-}
-
-debug() {
- local level=${1}; shift;
- [ "${level}" -ge "${VERBOSITY}" ] && return
- error "${@}"
-}
-
-short_opts="ho:v"
-long_opts="help,output:,verbose"
-for i in "${PROPERTIES[@]}"; do
- long_opts="$long_opts,$i:"
-done
-getopt_out=$(getopt --name "${0##*/}" \
- --options "${short_opts}" --long "${long_opts}" -- "$@") &&
- eval set -- "${getopt_out}" ||
- bad_Usage
-
-## <<insert default variables here>>
-output="${DEF_OUTPUT}"
-user_data=""
-
-while [ $# -ne 0 ]; do
- cur=${1}; next=${2};
- case "$cur" in
- -h|--help) Usage ; exit 0;;
- -o|--output) output=${2}; shift;;
- -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
- --) shift; break;;
- --*)
- for i in "${PROPERTIES[@]}" _none_; do
- [ "${cur#--}" == "$i" ] || continue
- [ "$i" != "user-data" ] ||
- next=$(echo "$next" | base64 --wrap=0) ||
- fail "failed to base64 encode userdata"
- propvalue "$i" "$next"
- break
- done
- [ "$i" = "_none_" ] && bad_Usage "confused by $cur"
- ;;
- esac
- shift;
-done
-
-[ $# -eq 1 -o $# -eq 2 ] ||
- bad_Usage "wrong number of arguments"
-
-env_tmpl="$1"
-ud_file="$2"
-
-[ -f "$env_tmpl" ] || bad_Usage "$env_tmpl: not a file"
-[ -z "$ud_file" -o -f "$ud_file" ] ||
- bad_Usage "$ud_file: not a file"
-
-TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
- fail "failed to make tempdir"
-trap cleanup EXIT
-
-mkdir "$TEMP_D/iso" && iso_d="$TEMP_D/iso" ||
- fail "failed to make a tempdir?"
-ovf_env="$TEMP_D/iso/ovf-env.xml"
-
-if [ -n "$ud_file" ]; then
- user_data=$(base64 --wrap=0 "$ud_file") ||
- fail "failed to base64 encode $ud_file. Do you have base64 installed?"
- propvalue user-data "$user_data"
-fi
-
-changes=( )
-for i in "${PROPERTIES[@]}"; do
- changes[${#changes[@]}]="-e"
- propvalue "$i"
- changes[${#changes[@]}]="s|@@$i@@|$_RET|g"
-done
-
-sed "${changes[@]}" "$env_tmpl" > "$ovf_env" ||
- fail "failed to replace string in $env_tmpl"
-
-if [ "${#changes[@]}" -ne 0 ]; then
- cmp "$ovf_env" "$env_tmpl" >/dev/null &&
- fail "nothing replaced in $ovf_env. template is identical to output"
-fi
-
-debug 1 "creating iso with: genisoimage -o tmp.iso -r iso"
-( cd "$TEMP_D" &&
- genisoimage -V OVF-TRANSPORT -o tmp.iso -r iso 2>/dev/null ) ||
- fail "failed to create iso. do you have genisoimage?"
-
-if [ "$output" = "-" ]; then
- cat "$TEMP_D/tmp.iso"
-else
- cp "$TEMP_D/tmp.iso" "$output" ||
- fail "failed to write to $output"
-fi
-
-error "wrote iso to $output"
-exit 0
-# vi: ts=4 noexpandtab
diff --git a/doc/sources/ovf/ovf-env.xml.tmpl b/doc/sources/ovf/ovf-env.xml.tmpl
deleted file mode 100644
index 8e255d43..00000000
--- a/doc/sources/ovf/ovf-env.xml.tmpl
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
- oe:id="WebTier">
-
- <!-- This example references a local schema file; to validate against the online schema use:
- xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.0.0.xsd"
- -->
-
- <!-- Information about hypervisor platform -->
- <oe:PlatformSection>
- <Kind>ESX Server</Kind>
- <Version>3.0.1</Version>
- <Vendor>VMware, Inc.</Vendor>
- <Locale>en_US</Locale>
- </oe:PlatformSection>
-
- <!--- Properties defined for this virtual machine -->
- <PropertySection>
- <Property oe:key="instance-id" oe:value="@@instance-id@@"/>
- <Property oe:key="hostname" oe:value="@@hostname@@"/>
- <Property oe:key="user-data" oe:value="@@user-data@@"/>
- <Property oe:key="seedfrom" oe:value="@@seedfrom@@"/>
- </PropertySection>
-
-</Environment>
diff --git a/doc/sources/ovf/ovfdemo.pem b/doc/sources/ovf/ovfdemo.pem
deleted file mode 100644
index 5bc629c8..00000000
--- a/doc/sources/ovf/ovfdemo.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA1Zq/11Rky/uHdbKJewmEtDABGoSjIFyjoY04T5dFYUNwi0B6
-Km7b85Ylqmi/1KmR4Zvi++dj10XnusoWr/Zruv85hHilMZ9GozL2RD6jU/CaI+rB
-QkKSaR/CdmEHBbRimq6T2E9chMhJY0jNzeexJSKVR3QeLdbRZ64H7QGTHp7Ulodu
-vS9VwAWcpYbGgcM541fboFAiJOLICM1UPH4x5WDkTq/6yeElSmeiE2lHtESHhyMJ
-OSDB3YZ5hw1+4bY3sR+0vZ3VQWzpn1Lwg1X3AZA8yf+ZsmMZHhTFeCglsd8jlLHk
-Wudh5mJBkCuwPvRQk1gE5gSnTGti0TUqLIrNRwIDAQABAoIBAGZMrdIXxgp3VWHF
-9tfpMBgH4Y9stJ98HpXxh2V+4ih53v2iDKAj5c1cPH/HmQ/lgktVmDjikct43El2
-HbV6RBATyd0q1prUWEUy1ATNJvW9hmTrOlFchrg4EK8XOwC9angAYig3oeyp65PU
-O1SAwTMyw+GruARmHHYWQA9/MJF5yexrjBw00w7hnCsqjezU5YIYsXwgcz0Zw+Ix
-fDJcZFXF9X3Al7H3ZILW3PpfhcVl7WzkL47TIX4oB/ab2kltaTE90SZMXKVcLvTI
-6To2xJAnMUyasRfcGmvE8m0SqWqp66POAUDF2I8qu78inKH2u0rNtLQjyx5btF5K
-A39bPnkCgYEA8Joba3QFrbd0zPTP/DawRtTXzdIQcNjj4XEefxBN3Cw7MlCsfgDc
-xiAR703zqQ/IDkF00XrU5w7rmDga3Pv66JRzFDwvRVtGb6QV+lg7Ypd/6NI1G5AS
-0Qzneer2JytEpHoTqGH/vWcXzJRH2BfaPK/vEF4qhAXBqouz2DXn3EUCgYEA40ZU
-eDc4MmHOSuqoggSEDJ5NITgPbdkwOta0BmnBZ36M5vgqN8EfAZISKocLNlERDrRG
-MpBlQCulq3rpU7WYkx8hGE21f1YBo+vKkffI56ptO2lAp5iLflkSOypdiVN6OELW
-5SzkViohDnxKc6eshVycnNoxh6MqE6ugWSd6ahsCgYEA6t0kQwIgwPDCfYfEt2kT
-LjF675lNHzs5R8pKgLKDrpcmufjySJXC7UxE9ZrcbX3QRcozpIEI7vwrko3B+1Gm
-Hf87TtdpNYTh/vznz1btsVI+NCFuYheDprm4A9UOsDGWchAQvF/dayAFpVhhwVmX
-WYJMFWg2jGWqJTb2Oep1CRkCgYEAqzdkk1wmPe5o1w+I+sokIM1xFcGB/iNMrkbp
-QJuTVECGLcpvI6mdjjVY8ijiTX0s+ILfD2CwpnM7T8A83w9DbjJZYFHKla9ZdQBB
-j024UK6Xs9ZLGvdUv06i6We1J6t3u8K+2c/EBRWf6aXBAPgkhCOM6K2H+sL1A/Sb
-zA5trlkCgYArqJCk999mXQuMjNv6UTwzB0iYDjAFNgJdFmPMXlogD51r0HlGeCgD
-OEyup4FdIvX1ZYOCkKyieSngmPmY/P4lZBgQbM23FMp+oUkA+FlVW+WNVoXagUrh
-abatKtbZ+WZHHmgSoC8sAo5KnxM9O0R6fWlpoIhJTVoihkZYdmnpMg==
------END RSA PRIVATE KEY-----
diff --git a/doc/sources/ovf/user-data b/doc/sources/ovf/user-data
deleted file mode 100644
index bfac51fd..00000000
--- a/doc/sources/ovf/user-data
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-password: passw0rd
-chpasswd: { expire: False }
-ssh_pwauth: True
-
-ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVmr/XVGTL+4d1sol7CYS0MAEahKMgXKOhjThPl0VhQ3CLQHoqbtvzliWqaL/UqZHhm+L752PXRee6yhav9mu6/zmEeKUxn0ajMvZEPqNT8Joj6sFCQpJpH8J2YQcFtGKarpPYT1yEyEljSM3N57ElIpVHdB4t1tFnrgftAZMentSWh269L1XABZylhsaBwznjV9ugUCIk4sgIzVQ8fjHlYOROr/rJ4SVKZ6ITaUe0RIeHIwk5IMHdhnmHDX7htjexH7S9ndVBbOmfUvCDVfcBkDzJ/5myYxkeFMV4KCWx3yOUseRa52HmYkGQK7A+9FCTWATmBKdMa2LRNSosis1H ubuntu@ovfdemo
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
deleted file mode 100644
index e63f311f..00000000
--- a/doc/sources/smartos/README.rst
+++ /dev/null
@@ -1,149 +0,0 @@
-==================
-SmartOS Datasource
-==================
-
-This datasource finds metadata and user-data from the SmartOS virtualization
-platform (i.e. Joyent).
-
-Please see http://smartos.org/ for information about SmartOS.
-
-SmartOS Platform
-----------------
-The SmartOS virtualization platform provides meta-data to the instance via
-the second serial console. On Linux, this is /dev/ttyS1. The data is
-provided via a simple protocol: something queries for the data, the console
-responds with the status and, if "SUCCESS", returns data until a single ".\n".
-
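-As a rough sketch of the client side of that protocol (illustrative
-only; the exact command names and framing here are assumptions, not
-the actual implementation)::
-
-  import serial  # pyserial
-
-  def query(key, dev="/dev/ttyS1"):
-      ser = serial.Serial(dev, timeout=10)
-      try:
-          ser.write(("GET %s\n" % key).encode())
-          status = ser.readline().decode().strip()
-          if status != "SUCCESS":
-              return None
-          lines = []
-          while True:
-              line = ser.readline().decode()
-              if line == ".\n":
-                  break  # a lone '.' terminates the payload
-              lines.append(line)
-          return "".join(lines)
-      finally:
-          ser.close()
-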
-New versions of the SmartOS tooling will include support for base64 encoded data.
-
-Meta-data channels
-------------------
-
-Cloud-init supports three modes of delivering user/meta-data via the flexible
-channels of SmartOS.
-
-* user-data is written to /var/db/user-data
- - per the spec, user-data is for consumption by the end-user, not provisioning
- tools
- - cloud-init entirely ignores this channel other than writing it to disk
- - removal of the meta-data key means that /var/db/user-data gets removed
- - a backup of previous meta-data is maintained as /var/db/user-data.<timestamp>
- - <timestamp> is the epoch time when cloud-init ran
-
-* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_data
- - this is executed each boot
- - a link is created to /var/db/user-script
- - previous versions of the user-script are written to
-   /var/lib/cloud/scripts/per-boot.backup/99_user_script.<timestamp>.
-   - <timestamp> is the epoch time when cloud-init ran.
- - when the 'user-script' meta-data key goes missing, the user-script is
-   removed from the file system, although a backup is maintained.
- - if the script does not start with a shebang (i.e. #!<executable>),
-   cloud-init will add a shebang of "#!/bin/bash"
-
-* cloud-init:user-data is treated like on other Clouds.
- - this channel is used for delivering _all_ cloud-init instructions
- - scripts delivered over this channel must be well formed (i.e. must have
- a shebang)
-
-Cloud-init supports reading the traditional meta-data fields supported by the
-SmartOS tools. These are:
- * root_authorized_keys
- * hostname
- * enable_motd_sys_info
- * iptables_disable
-
-Note: At this time iptables_disable and enable_motd_sys_info are read but
- are not actioned.
-
-disabling user-script
----------------------
-
-Cloud-init uses the per-boot script functionality to handle the execution
-of the user-script. If you want to prevent this, use a cloud-config of::
-
-  #cloud-config
-  cloud_final_modules:
-   - scripts-per-once
-   - scripts-per-instance
-   - scripts-user
-   - ssh-authkey-fingerprints
-   - keys-to-console
-   - phone-home
-   - final-message
-   - power-state-change
-
-Alternatively you can use the json patch method::
-
-  #cloud-config-jsonp
-  [
-   { "op": "replace",
-     "path": "/cloud_final_modules",
-     "value": ["scripts-per-once",
-               "scripts-per-instance",
-               "scripts-user",
-               "ssh-authkey-fingerprints",
-               "keys-to-console",
-               "phone-home",
-               "final-message",
-               "power-state-change"]
-   }
-  ]
-
-The default cloud-config includes "scripts-per-boot". When you disable
-the per-boot script handling, cloud-init will still ingest and write
-the user-data but will not execute it.
-
-Note: Unless you have an explicit use-case, it is recommended that you not
- disable the per-boot script execution, especially if you are using
- any of the life-cycle management features of SmartOS.
-
-The cloud-config needs to be delivered over the cloud-init:user-data channel
-in order for cloud-init to ingest it.
-
-base64
-------
-
-The following are exempt from base64 encoding, owing to the fact that they
-are provided by SmartOS:
- * root_authorized_keys
- * enable_motd_sys_info
- * iptables_disable
- * user-data
- * user-script
-
-This list can be changed through the system config variable 'no_base64_decode'.
-
-This means that user-script and user-data as well as other values can be
-base64 encoded. Since cloud-init can only guess whether or not something
-is truly base64 encoded, the following meta-data keys are hints as to
-whether or not to base64 decode something (see the sketch below):
- * base64_all: Except for excluded keys, attempt to base64 decode
-   the values. If a value fails to decode properly, it will be
-   returned in its text form.
- * base64_keys: A comma delimited list of which keys are base64 encoded.
- * b64-<key>:
-   for any key, if there exists an entry in the metadata for 'b64-<key>',
-   then 'b64-<key>' is expected to be a plaintext boolean indicating
-   whether or not its value is encoded.
- * no_base64_decode: This is a configuration setting
-   (i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
-   base64 decoded.
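-
-A sketch of that decode decision in python (illustrative only; the
-real logic lives in the SmartOS datasource, and the exact plaintext
-boolean form for 'b64-<key>' is assumed here to be "true")::
-
-  import base64
-
-  def maybe_decode(key, val, md, no_decode):
-      # values exempt from decoding are returned untouched
-      if key in no_decode:
-          return val
-      b64_keys = md.get("base64_keys", "").split(",")
-      hinted = (md.get("b64-%s" % key) == "true" or
-                md.get("base64_all") == "true" or
-                key in b64_keys)
-      if not hinted:
-          return val
-      try:
-          return base64.b64decode(val).decode()
-      except ValueError:
-          return val  # failed to decode; return the text form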
-
-disk_aliases and ephemeral disk
--------------------------------
-By default, SmartOS only supports a single ephemeral disk. That disk is
-completely empty (un-partitioned with no filesystem).
-
-The SmartOS datasource has built-in cloud-config which instructs the
-'disk_setup' module to partition and format the ephemeral disk.
-
-You can then control the disk_setup in two ways:
- 1. through the datasource config, you can change the 'alias' of
-    ephemeral0 to reference another device. The default is:
-      'disk_aliases': {'ephemeral0': '/dev/vdb'},
-    which means anywhere disk_setup sees a device named 'ephemeral0',
-    /dev/vdb will be substituted.
- 2. you can provide disk_setup or fs_setup data in user-data to overwrite
-    the datasource's built-in values.
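-
-For example, a datasource config pointing 'ephemeral0' at a different
-device might look like this (a sketch; the nesting mirrors the other
-datasource configs in these docs)::
-
-  datasource:
-    SmartOS:
-      disk_aliases: {'ephemeral0': '/dev/vdc'}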
-
-See doc/examples/cloud-config-disk-setup.txt for information on disk_setup.
diff --git a/doc/status.txt b/doc/status.txt
deleted file mode 100644
index 60993216..00000000
--- a/doc/status.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-cloud-init will keep a 'status' file up to date for other applications
-wishing to use it to determine cloud-init status.
-
-It will manage 2 files:
- status.json
- result.json
-
-The files will be written to /var/lib/cloud/data/.
-A symlink will be created in /run/cloud-init. The link from /run is to ensure
-that if the file exists, it is not stale for this boot.
-
-status.json's format is:
-  {
-   'v1': {
-    'init': {
-     'errors': [],   # list of strings for each error that occurred
-     'start': float, # time.time() that this stage started or None
-     'end': float,   # time.time() that this stage finished or None
-    },
-    'init-local': {
-     'errors': [], 'start': <float>, 'end': <float> # (same as 'init' above)
-    },
-    'modules-config': {
-     'errors': [], 'start': <float>, 'end': <float> # (same as 'init' above)
-    },
-    'modules-final': {
-     'errors': [], 'start': <float>, 'end': <float> # (same as 'init' above)
-    },
-    'datasource': string describing datasource found or None,
-    'stage': string representing the stage that is currently running
-     ('init', 'init-local', 'modules-final', 'modules-config', None);
-     if None, then no stage is running. The reader must look at the
-     start/end of each of the above stages to determine the state.
-   }
-  }
-
-result.json's format is:
-  {
-   'v1': {
-    'datasource': string describing the datasource found,
-    'errors': []  # list of errors reported
-   }
-  }
-
-Thus, to determine if cloud-init is finished:
-  import json
-  import os
-
-  fin = "/run/cloud-init/result.json"
-  if os.path.exists(fin):
-      with open(fin) as fp:
-          ret = json.load(fp)
-      if len(ret['v1']['errors']):
-          print("Finished with errors:\n" + "\n".join(ret['v1']['errors']))
-      else:
-          print("Finished, no errors")
-  else:
-      print("Not finished")
diff --git a/doc/userdata.txt b/doc/userdata.txt
deleted file mode 100644
index cc691ae6..00000000
--- a/doc/userdata.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-=== Overview ===
-Userdata is data provided by the entity that launches an instance.
-The cloud provider makes this data available to the instance in one
-way or another.
-
-In EC2, the data is provided by the user via the '--user-data' or
-'user-data-file' argument to ec2-run-instances. The EC2 cloud makes the
-data available to the instance via its meta-data service at
-http://169.254.169.254/latest/user-data
-
-cloud-init can read this input and act on it in different ways.
-
-=== Input Formats ===
-cloud-init will download and cache to filesystem any user-data that it
-finds. However, certain types of user-data are handled specially.
-
- * Gzip Compressed Content
- content found to be gzip compressed will be uncompressed, and
- these rules applied to the uncompressed data
-
- * Mime Multi Part archive
- This list of rules is applied to each part of this multi-part file.
- Using a mime-multi part file, the user can specify more than one
- type of data. For example, both a user data script and a
- cloud-config type could be specified (see the sketch at the end of
- this section).
-
- * User-Data Script
- begins with: #! or Content-Type: text/x-shellscript
- script will be executed at "rc.local-like" level during first boot.
- rc.local-like means "very late in the boot sequence"
-
- * Include File
- begins with #include or Content-Type: text/x-include-url
- This content is a "include" file. The file contains a list of
- urls, one per line. Each of the URLs will be read, and their content
- will be passed through this same set of rules. Ie, the content
- read from the URL can be gzipped, mime-multi-part, or plain text
-
- * Include File Once
- begins with #include-once or Content-Type: text/x-include-once-url
- This content is a "include" file. The file contains a list of
- urls, one per line. Each of the URLs will be read, and their content
- will be passed through this same set of rules. Ie, the content
- read from the URL can be gzipped, mime-multi-part, or plain text
- This file will be downloaded only once per instance, and its
- contents cached for subsequent boots. This allows you to pass in
- one-time-use or expiring URLs.
-
- * Cloud Config Data
- begins with #cloud-config or Content-Type: text/cloud-config
-
- This content is "cloud-config" data. See the examples for a
- commented example of supported config formats.
-
- * Upstart Job
- begins with #upstart-job or Content-Type: text/upstart-job
-
- Content is placed into a file in /etc/init, and will be consumed
- by upstart as any other upstart job.
-
- * Cloud Boothook
- begins with #cloud-boothook or Content-Type: text/cloud-boothook
-
- This content is "boothook" data. It is stored in a file under
- /var/lib/cloud and then executed immediately.
-
- This is the earliest "hook" available. Note that there is no
- mechanism provided for running only once; the boothook must take
- care of this itself. It is provided with the instance id in the
- environment variable "INSTANCE_ID", which can be used to implement
- 'once-per-instance' behavior.
-
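-As a sketch, such a mime multi-part archive can also be assembled with
-the python standard library (the 'tools/write-mime-multipart' helper
-mentioned below does this for you; the filenames and content here are
-made up):
-
-  from email.mime.multipart import MIMEMultipart
-  from email.mime.text import MIMEText
-
-  msg = MIMEMultipart()
-  # a user-data script part (Content-Type: text/x-shellscript)
-  script = MIMEText('#!/bin/sh\necho "hi world"\n', "x-shellscript")
-  script.add_header("Content-Disposition", "attachment",
-                    filename="hello.sh")
-  msg.attach(script)
-  # a cloud-config part (Content-Type: text/cloud-config)
-  msg.attach(MIMEText("#cloud-config\npackages: [htop]\n", "cloud-config"))
-  print(msg.as_string())
-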
-=== Examples ===
-There are examples in the examples subdirectory.
-Additionally, the 'tools' directory contains 'write-mime-multipart',
-which can be used to easily generate mime-multi-part files from a list
-of input files. That data can then be given to an instance.
-
-See 'write-mime-multipart --help' for usage.
diff --git a/doc/var-lib-cloud.txt b/doc/var-lib-cloud.txt
deleted file mode 100644
index 7776d772..00000000
--- a/doc/var-lib-cloud.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-/var/lib/cloud has the following structure:
- - scripts/
- per-instance/
- per-boot/
- per-once/
-
- files in these directories will be run by 'run-parts' once per
- instance, once per boot, and once per *ever*.
-
- - seed/
- <datasource>/
- sys-user-data
- user-data
- meta-data
-
- The 'seed/' directory allows you to seed a specific datasource
- For example, to seed the 'nocloud' datasource you would need to
- populate
- seed/nocloud/user-data
- seed/nocloud/meta-data
-
- - instance -> instances/i-abcde
- This is a symlink to the current instance/<instance-id> directory
- created/updated on boot
- - instances/
- i-abcdefgh/
- scripts/ # all scripts in scripts are per-instance
- sem/
- config-puppet
- config-ssh
- set-hostname
- cloud-config.txt
- user-data.txt
- user-data.txt.i
- obj.pkl
- handlers/
- data/ # just a per-instance data location to be used
- boot-finished
- # this file indicates when "boot" is finished
- # it is created by the 'final_message' cloud-config
- datasource # a file containing the class and string of datasource
-
- - sem/
- scripts.once
- These are the cloud-specific semaphores. The only thing that
- would go here are files to mark that a "per-once" script
- has run.
-
- - handlers/
- "persistent" handlers (not per-instance). Same as handlers
- from user-data, just will be cross-instance id
-
- - data/
- this is a persistent data location. cloud-init won't really
- use it, but something else (a handler or script) could
- previous-datasource
- previous-instance-id
- previous-hostname
-
-to clear out the current instance's data as if to force a "new run" on reboot
-do:
- ( cd /var/lib/cloud/instance && sudo rm -Rf * )
-
diff --git a/doc/vendordata.txt b/doc/vendordata.txt
deleted file mode 100644
index 9acbe41c..00000000
--- a/doc/vendordata.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-=== Overview ===
-Vendordata is data provided by the entity that launches an instance
-(for example, the cloud provider). This data can be used to
-customize the image to fit into the particular environment it is
-being run in.
-
-Vendordata follows the same rules as user-data, with the following
-caveats:
- 1. Users have ultimate control over vendordata. They can disable its
- execution or disable handling of specific parts of multipart input.
- 2. By default it only runs on first boot
- 3. Vendordata can be disabled by the user. If the use of vendordata is
- required for the instance to run, then vendordata should not be
- used.
- 4. user supplied cloud-config is merged over cloud-config from
- vendordata.
-
-Users providing cloud-config data can use the '#cloud-config-jsonp' method
-to more finely control their modifications to the vendor supplied
-cloud-config. For example, if both vendor and user have provided
-'runcmd' then the default merge handler will cause the user's runcmd to
-override the one provided by the vendor. To append to 'runcmd', the user
-could better provide multipart input with a cloud-config-jsonp part like:
- #cloud-config-jsonp
- [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
-
-Further, we strongly advise vendors to not 'be evil'. By evil, we
-mean any action that could compromise a system. Since users trust
-you, please take care to make sure that any vendordata is safe,
-atomic, idempotent and does not put your users at risk.
-
-=== Input Formats ===
-cloud-init will download and cache to filesystem any vendor-data that it
-finds. Vendordata is handled exactly like user-data. That means that
-the vendor can supply multipart input and have those parts acted on
-in the same way as user-data.
-
-The only differences are:
- * scripts from vendor-data are stored in a different location than
- user-scripts (to avoid namespace collision)
- * user can disable part handlers by cloud-config settings.
- For example, to disable handling of 'part-handlers' in vendor-data,
- the user could provide user-data like this:
- #cloud-config
- vendordata: {excluded: 'text/part-handler'}
-
-=== Examples ===
-There are examples in the examples subdirectory.
-Additionally, the 'tools' directory contains 'write-mime-multipart',
-which can be used to easily generate mime-multi-part files from a list
-of input files. That data can then be given to an instance.
-
-See 'write-mime-multipart --help' for usage.
diff --git a/packages/bddeb b/packages/bddeb
deleted file mode 100755
index 3c77ce1d..00000000
--- a/packages/bddeb
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/env python3
-
-import glob
-import os
-import shutil
-import sys
-
-
-def find_root():
- # expected path is in <top_dir>/packages/
- top_dir = os.environ.get("CLOUD_INIT_TOP_D", None)
- if top_dir is None:
- top_dir = os.path.dirname(
- os.path.dirname(os.path.abspath(sys.argv[0])))
- if os.path.isfile(os.path.join(top_dir, 'setup.py')):
- return os.path.abspath(top_dir)
- raise OSError(("Unable to determine where your cloud-init topdir is."
- " set CLOUD_INIT_TOP_D?"))
-
-# Use the util functions from cloudinit
-sys.path.insert(0, find_root())
-
-from cloudinit import templater
-from cloudinit import util
-
-import argparse
-
-# Map package names that show up in 'requires' to what we can actually
-# use in our debian 'control' file; this is a translation of the pypi
-# package name (from the 'requires' file) to a debian/ubuntu package name.
-STD_NAMED_PACKAGES = [
- 'configobj',
- 'jinja2',
- 'jsonpatch',
- 'oauthlib',
- 'prettytable',
- 'requests',
- 'six',
- 'httpretty',
- 'mock',
- 'nose',
- 'setuptools',
- 'flake8',
- 'hacking',
- 'unittest2',
-]
-NONSTD_NAMED_PACKAGES = {
- 'argparse': ('python-argparse', None),
- 'contextlib2': ('python-contextlib2', None),
- 'cheetah': ('python-cheetah', None),
- 'pyserial': ('python-serial', 'python3-serial'),
- 'pyyaml': ('python-yaml', 'python3-yaml'),
- 'six': ('python-six', 'python3-six'),
- 'pep8': ('pep8', 'python3-pep8'),
- 'pyflakes': ('pyflakes', 'pyflakes'),
-}
-
-DEBUILD_ARGS = ["-S", "-d"]
-
-
-def write_debian_folder(root, version, revno, pkgmap,
- pyver="3", append_requires=[]):
- deb_dir = util.abs_join(root, 'debian')
- os.makedirs(deb_dir)
-
- # Fill in the change log template
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'changelog.in'),
- util.abs_join(deb_dir, 'changelog'),
- params={
- 'version': version,
- 'revision': revno,
- })
-
- # Write out the control file template
- cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')]
- (stdout, _stderr) = util.subp(cmd)
- pypi_pkgs = [p.lower().strip() for p in stdout.splitlines()]
-
- (stdout, _stderr) = util.subp(cmd + ['test-requirements.txt'])
- pypi_test_pkgs = [p.lower().strip() for p in stdout.splitlines()]
-
- # Map to known packages
-    # copy, so we do not mutate the (mutable) default argument
-    requires = list(append_requires)
- test_requires = []
- lists = ((pypi_pkgs, requires), (pypi_test_pkgs, test_requires))
- for pypilist, target in lists:
- for p in pypilist:
- if p not in pkgmap:
- raise RuntimeError(("Do not know how to translate pypi "
- "dependency %r to a known package") % (p))
- elif pkgmap[p]:
- target.append(pkgmap[p])
-
- if pyver == "3":
- python = "python3"
- else:
- python = "python"
-
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'control.in'),
- util.abs_join(deb_dir, 'control'),
- params={'requires': ','.join(requires),
- 'test_requires': ','.join(test_requires),
- 'python': python})
-
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'rules.in'),
- util.abs_join(deb_dir, 'rules'),
- params={'python': python, 'pyver': pyver})
-
- # Just copy any other files directly (including .in)
- pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
- for f in [os.path.join(pdeb_d, f) for f in os.listdir(pdeb_d)]:
- if os.path.isfile(f):
- shutil.copy(f, util.abs_join(deb_dir, os.path.basename(f)))
-
-
-def main():
-
- parser = argparse.ArgumentParser()
- parser.add_argument("-v", "--verbose", dest="verbose",
- help=("run verbosely"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
- parser.add_argument("--cloud-utils", dest="cloud_utils",
- help=("depend on cloud-utils package"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
-
- parser.add_argument("--python2", dest="python2",
- help=("build debs for python2 rather than python3"),
- default=False, action='store_true')
-
- parser.add_argument("--init-system", dest="init_system",
- help=("build deb with INIT_SYSTEM=xxx"
- " (default: %(default)s"),
- default=os.environ.get("INIT_SYSTEM",
- "upstart,systemd"))
-
-
- for ent in DEBUILD_ARGS:
- parser.add_argument(ent, dest="debuild_args", action='append_const',
- const=ent, help=("pass through '%s' to debuild" % ent),
- default=[])
-
- parser.add_argument("--sign", default=False, action='store_true',
- help="sign result. do not pass -us -uc to debuild")
-
- parser.add_argument("--signuser", default=False, action='store',
- help="user to sign, see man dpkg-genchanges")
-
- args = parser.parse_args()
-
- if not args.sign:
- args.debuild_args.extend(['-us', '-uc'])
-
- if args.signuser:
- args.debuild_args.extend(['-e%s' % args.signuser])
-
- os.environ['INIT_SYSTEM'] = args.init_system
-
- capture = True
- if args.verbose:
- capture = False
-
- pkgmap = {}
- for p in NONSTD_NAMED_PACKAGES:
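-        # each value is a (python2_name, python3_name) tuple;
-        # int(not args.python2) picks index 0 for python2, 1 for python3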
- pkgmap[p] = NONSTD_NAMED_PACKAGES[p][int(not args.python2)]
-
- for p in STD_NAMED_PACKAGES:
- if args.python2:
- pkgmap[p] = "python-" + p
- pyver = "2"
- else:
- pkgmap[p] = "python3-" + p
- pyver = "3"
-
- with util.tempdir() as tdir:
-
- cmd = [util.abs_join(find_root(), 'tools', 'read-version')]
- (sysout, _stderr) = util.subp(cmd)
- version = sysout.strip()
-
- cmd = ['bzr', 'revno']
- (sysout, _stderr) = util.subp(cmd)
- revno = sysout.strip()
-
- # This is really only a temporary archive
- # since we will extract it then add in the debian
- # folder, then re-archive it for debian happiness
- print("Creating a temporary tarball using the 'make-tarball' helper")
- cmd = [util.abs_join(find_root(), 'tools', 'make-tarball')]
- (sysout, _stderr) = util.subp(cmd)
- arch_fn = sysout.strip()
- tmp_arch_fn = util.abs_join(tdir, os.path.basename(arch_fn))
- shutil.move(arch_fn, tmp_arch_fn)
-
- print("Extracting temporary tarball %r" % (tmp_arch_fn))
- cmd = ['tar', '-xvzf', tmp_arch_fn, '-C', tdir]
- util.subp(cmd, capture=capture)
- extracted_name = tmp_arch_fn[:-len('.tar.gz')]
- os.remove(tmp_arch_fn)
-
- xdir = util.abs_join(tdir, 'cloud-init')
- shutil.move(extracted_name, xdir)
-
- print("Creating a debian/ folder in %r" % (xdir))
- if args.cloud_utils:
- append_requires=['cloud-utils | cloud-guest-utils']
- else:
- append_requires=[]
- write_debian_folder(xdir, version, revno, pkgmap,
- pyver=pyver, append_requires=append_requires)
-
- # The naming here seems to follow some debian standard
- # so it will whine if it is changed...
- tar_fn = "cloud-init_%s~bzr%s.orig.tar.gz" % (version, revno)
- print("Archiving the adjusted source into %r" %
- (util.abs_join(tdir, tar_fn)))
- cmd = ['tar', '-czvf',
- util.abs_join(tdir, tar_fn),
- '-C', xdir]
- cmd.extend(os.listdir(xdir))
- util.subp(cmd, capture=capture)
-
- # Copy it locally for reference
- shutil.copy(util.abs_join(tdir, tar_fn),
- util.abs_join(os.getcwd(), tar_fn))
- print("Copied that archive to %r for local usage (if desired)." %
- (util.abs_join(os.getcwd(), tar_fn)))
-
- print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args),
- xdir))
- with util.chdir(xdir):
- cmd = ['debuild', '--preserve-envvar', 'INIT_SYSTEM']
- if args.debuild_args:
- cmd.extend(args.debuild_args)
- util.subp(cmd, capture=capture)
-
- link_fn = os.path.join(os.getcwd(), 'cloud-init_all.deb')
- link_dsc = os.path.join(os.getcwd(), 'cloud-init.dsc')
- for base_fn in os.listdir(os.path.join(tdir)):
- full_fn = os.path.join(tdir, base_fn)
- if not os.path.isfile(full_fn):
- continue
- shutil.move(full_fn, base_fn)
- print("Wrote %r" % (base_fn))
- if base_fn.endswith('_all.deb'):
- # Add in the local link
- util.del_file(link_fn)
- os.symlink(base_fn, link_fn)
- print("Linked %r to %r" % (base_fn,
- os.path.basename(link_fn)))
- if base_fn.endswith('.dsc'):
- util.del_file(link_dsc)
- os.symlink(base_fn, link_dsc)
- print("Linked %r to %r" % (base_fn,
- os.path.basename(link_dsc)))
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/packages/brpm b/packages/brpm
deleted file mode 100755
index 45e47610..00000000
--- a/packages/brpm
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/python
-
-import argparse
-import contextlib
-import glob
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import re
-
-from datetime import datetime
-
-
-def find_root():
- # expected path is in <top_dir>/packages/
- top_dir = os.environ.get("CLOUD_INIT_TOP_D", None)
- if top_dir is None:
- top_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
- if os.path.isfile(os.path.join(top_dir, 'setup.py')):
- return os.path.abspath(top_dir)
- raise OSError(("Unable to determine where your cloud-init topdir is."
- " set CLOUD_INIT_TOP_D?"))
-
-
-# Use the util functions from cloudinit
-sys.path.insert(0, find_root())
-
-from cloudinit import templater
-from cloudinit import util
-
-# Mapping of expected packages to their full names...
-# this is a translation of the pypi package name (from the 'requires'
-# file) to a redhat/fedora package name.
-PKG_MP = {
- 'redhat': {
- 'argparse': 'python-argparse',
- 'cheetah': 'python-cheetah',
- 'jinja2': 'python-jinja2',
- 'configobj': 'python-configobj',
- 'jsonpatch': 'python-jsonpatch',
- 'oauthlib': 'python-oauthlib',
- 'prettytable': 'python-prettytable',
- 'pyserial': 'pyserial',
- 'pyyaml': 'PyYAML',
- 'requests': 'python-requests',
- 'six': 'python-six',
- },
- 'suse': {
- 'argparse': 'python-argparse',
- 'cheetah': 'python-cheetah',
- 'configobj': 'python-configobj',
- 'jsonpatch': 'python-jsonpatch',
- 'oauthlib': 'python-oauthlib',
- 'prettytable': 'python-prettytable',
- 'pyserial': 'python-pyserial',
- 'pyyaml': 'python-yaml',
- 'requests': 'python-requests',
- 'six': 'python-six',
- }
-}
-
-# Subdirectories of the ~/rpmbuild dir
-RPM_BUILD_SUBDIRS = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
-
-
-def get_log_header(version):
- # Try to find the version in the tags output
- cmd = ['bzr', 'tags']
- (stdout, _stderr) = util.subp(cmd)
- a_rev = None
- for t in stdout.splitlines():
- ver, rev = t.split(None)
- if ver == version:
- a_rev = rev
- break
- if not a_rev:
- return None
-
- # Extract who made that tag as the header
- cmd = ['bzr', 'log', '-r%s' % (a_rev), '--timezone=utc']
- (stdout, _stderr) = util.subp(cmd)
- kvs = {
- 'comment': version,
- }
-
- for line in stdout.splitlines():
- if line.startswith('committer:'):
- kvs['who'] = line[len('committer:'):].strip()
- if line.startswith('timestamp:'):
- ts = line[len('timestamp:'):]
- ts = ts.strip()
- # http://bugs.python.org/issue6641
- ts = ts.replace("+0000", '').strip()
- ds = datetime.strptime(ts, '%a %Y-%m-%d %H:%M:%S')
- kvs['ds'] = ds
-
- return format_change_line(**kvs)
-
-
-def format_change_line(ds, who, comment=None):
- # Rpmbuild seems to be pretty strict about the date format
- d = ds.strftime("%a %b %d %Y")
- d += " - %s" % (who)
- if comment:
- d += " - %s" % (comment)
- return "* %s" % (d)
-
-
-def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
-
- # Figure out the version and revno
- cmd = [util.abs_join(find_root(), 'tools', 'read-version')]
- (stdout, _stderr) = util.subp(cmd)
- version = stdout.strip()
-
- cmd = ['bzr', 'revno']
- (stdout, _stderr) = util.subp(cmd)
- revno = stdout.strip()
-
- # Tmpl params
- subs = {}
- subs['version'] = version
- subs['revno'] = revno
- subs['release'] = "bzr%s" % (revno)
- if args.sub_release is not None:
- subs['subrelease'] = "." + str(args.sub_release)
- else:
- subs['subrelease'] = ''
- subs['archive_name'] = arc_fn
-
- cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')]
- (stdout, _stderr) = util.subp(cmd)
- pkgs = [p.lower().strip() for p in stdout.splitlines()]
-
- # Map to known packages
- requires = []
- for p in pkgs:
- if p == 'argparse' and sys.version_info[0:2] >= (2, 7):
- # Not needed on anything but 2.6 or older.
- continue
- tgt_pkg = PKG_MP[args.distro].get(p)
- if not tgt_pkg:
- raise RuntimeError(("Do not know how to translate pypi dependency"
- " %r to a known package") % (p))
- else:
- requires.append(tgt_pkg)
- subs['requires'] = requires
-
- # Format a nice changelog (as best as we can)
- changelog = util.load_file(util.abs_join(find_root(), 'ChangeLog'))
- changelog_lines = []
- missing_versions = 0
- for line in changelog.splitlines():
- if not line.strip():
- continue
- if re.match(r"^\s*[\d][.][\d][.][\d]:\s*", line):
- line = line.strip(":")
- header = get_log_header(line)
- if not header:
- missing_versions += 1
- if missing_versions == 1:
- # Must be using a new 'dev'/'trunk' release
- changelog_lines.append(format_change_line(datetime.now(),
- '??'))
- else:
- sys.stderr.write(("Changelog version line %s does not "
- "have a corresponding tag!\n") % (line))
- else:
- changelog_lines.append(header)
- else:
- changelog_lines.append(line)
- subs['changelog'] = "\n".join(changelog_lines)
-
- if args.boot == 'sysvinit':
- subs['sysvinit'] = True
- else:
- subs['sysvinit'] = False
-
- if args.boot == 'systemd':
- subs['systemd'] = True
- else:
- subs['systemd'] = False
-
- subs['defines'] = ["_topdir %s" % (top_dir)]
- subs['init_sys'] = args.boot
- subs['patches'] = [os.path.basename(p) for p in args.patches]
- return templater.render_from_file(tmpl_fn, params=subs)
-
-
-def main():
-
- parser = argparse.ArgumentParser()
- parser.add_argument("-d", "--distro", dest="distro",
- help="select distro (default: %(default)s)",
- metavar="DISTRO", default='redhat',
- choices=('redhat', 'suse'))
- parser.add_argument("-b", "--boot", dest="boot",
- help="select boot type (default: %(default)s)",
- metavar="TYPE", default='sysvinit',
- choices=('sysvinit', 'systemd'))
- parser.add_argument("-v", "--verbose", dest="verbose",
- help=("run verbosely"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
- parser.add_argument('-s', "--sub-release", dest="sub_release",
- metavar="RELEASE",
- help=("a 'internal' release number to concat"
- " with the bzr version number to form"
- " the final version number"),
- type=int,
- default=None)
- parser.add_argument("-p", "--patch", dest="patches",
- help=("include the following patch when building"),
- default=[],
- action='append')
- args = parser.parse_args()
- capture = True
- if args.verbose:
- capture = False
-
- # Clean out the root dir and make sure the dirs we want are in place
- root_dir = os.path.expanduser("~/rpmbuild")
- if os.path.isdir(root_dir):
- shutil.rmtree(root_dir)
-
- arc_dir = util.abs_join(root_dir, 'SOURCES')
- build_dirs = [root_dir, arc_dir]
- for dname in RPM_BUILD_SUBDIRS:
- build_dirs.append(util.abs_join(root_dir, dname))
- build_dirs.sort()
- util.ensure_dirs(build_dirs)
-
- # Archive the code
- cmd = [util.abs_join(find_root(), 'tools', 'make-tarball')]
- (stdout, _stderr) = util.subp(cmd)
- archive_fn = stdout.strip()
- real_archive_fn = os.path.join(arc_dir, os.path.basename(archive_fn))
- shutil.move(archive_fn, real_archive_fn)
- print("Archived the code in %r" % (real_archive_fn))
-
- # Form the spec file to be used
- tmpl_fn = util.abs_join(find_root(), 'packages',
- args.distro, 'cloud-init.spec.in')
- contents = generate_spec_contents(args, tmpl_fn, root_dir,
- os.path.basename(archive_fn))
- spec_fn = util.abs_join(root_dir, 'cloud-init.spec')
- util.write_file(spec_fn, contents)
- print("Created spec file at %r" % (spec_fn))
- print(contents)
- for p in args.patches:
- util.copy(p, util.abs_join(arc_dir, os.path.basename(p)))
-
- # Now build it!
- print("Running 'rpmbuild' in %r" % (root_dir))
- cmd = ['rpmbuild', '-ba', spec_fn]
- util.subp(cmd, capture=capture)
-
- # Copy the items built to our local dir
- globs = []
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'RPMS', 'noarch'))))
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'RPMS', 'x86_64'))))
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'RPMS'))))
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'SRPMS'))))
- for rpm_fn in globs:
- tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn))
- shutil.move(rpm_fn, tgt_fn)
- print("Wrote out %s package %r" % (args.distro, tgt_fn))
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
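
For reference, the dependency-translation step that brpm performs above can be restated as a small standalone function. PKG_MP and the argparse special case are taken from the script; the function name itself is illustrative.

    import sys

    def translate_requires(pypi_pkgs, distro, pkg_map):
        # Map lowercase pypi names to distro package names via pkg_map
        # (brpm's PKG_MP); unknown names raise, as in the script itself.
        requires = []
        for p in pypi_pkgs:
            if p == 'argparse' and sys.version_info[0:2] >= (2, 7):
                continue  # stdlib since python 2.7; only 2.6 needs the backport
            tgt = pkg_map[distro].get(p)
            if not tgt:
                raise RuntimeError("Do not know how to translate pypi dependency"
                                   " %r to a known package" % p)
            requires.append(tgt)
        return requires
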
diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in
deleted file mode 100644
index c9affe47..00000000
--- a/packages/debian/changelog.in
+++ /dev/null
@@ -1,6 +0,0 @@
-## template:basic
-cloud-init (${version}~bzr${revision}-1) UNRELEASED; urgency=low
-
- * build
-
- -- Scott Moser <smoser@ubuntu.com> Fri, 16 Dec 2011 11:50:25 -0500
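
The '## template:basic' marker means the ${...} placeholders above are plain string substitutions. A minimal sketch of the rendering using Python's string.Template, which handles the same ${...} syntax (the packaging tooling uses its own templater; the values here are made up):

    from string import Template

    entry = Template(
        "cloud-init (${version}~bzr${revision}-1) UNRELEASED; urgency=low")
    print(entry.substitute(version="0.7.7", revision="1245"))
    # cloud-init (0.7.7~bzr1245-1) UNRELEASED; urgency=low
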
diff --git a/packages/debian/cloud-init.postinst b/packages/debian/cloud-init.postinst
deleted file mode 100644
index cdd0466d..00000000
--- a/packages/debian/cloud-init.postinst
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-cleanup_lp1552999() {
- local oldver="$1" last_bad_ver="0.7.7~bzr1178"
- dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0
- local edir="/etc/systemd/system/multi-user.target.wants"
- rm -f "$edir/cloud-config.service" "$edir/cloud-final.service" \
- "$edir/cloud-init-local.service" "$edir/cloud-init.service"
-}
-
-
-#DEBHELPER#
-
-if [ "$1" = "configure" ]; then
- oldver="$2"
- cleanup_lp1552999 "$oldver"
-fi
diff --git a/packages/debian/cloud-init.preinst b/packages/debian/cloud-init.preinst
deleted file mode 100644
index 3c2af06d..00000000
--- a/packages/debian/cloud-init.preinst
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-# vi: ts=4 expandtab
-
-cleanup_lp1552999() {
- local oldver="$1" last_bad_ver="0.7.7~bzr1178"
- dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0
- local hdir="/var/lib/systemd/deb-systemd-helper-enabled"
- hdir="$hdir/multi-user.target.wants"
- local edir="/etc/systemd/system/multi-user.target.wants"
- rm -f "$hdir/cloud-config.service" "$hdir/cloud-final.service" \
- "$hdir/cloud-init-local.service" "$hdir/cloud-init.service"
-}
-
-
-if [ "$1" = "upgrade" ]; then
- oldver="$2"
- cleanup_lp1552999 "$oldver"
-fi
-
-#DEBHELPER#
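
Both maintainer scripts gate the LP#1552999 cleanup on dpkg's version comparison. A sketch of the same gate, shelling out to dpkg as the scripts do (requires dpkg on PATH; the helper name is illustrative):

    import subprocess

    LAST_BAD_VER = "0.7.7~bzr1178"

    def needs_lp1552999_cleanup(oldver):
        # dpkg --compare-versions exits 0 when the relation holds, so the
        # cleanup runs only when upgrading from <= the last bad version.
        rc = subprocess.call(
            ["dpkg", "--compare-versions", oldver, "le", LAST_BAD_VER])
        return rc == 0
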
diff --git a/packages/debian/compat b/packages/debian/compat
deleted file mode 100644
index ec635144..00000000
--- a/packages/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/packages/debian/control.in b/packages/debian/control.in
deleted file mode 100644
index b58561e7..00000000
--- a/packages/debian/control.in
+++ /dev/null
@@ -1,29 +0,0 @@
-## template:basic
-Source: cloud-init
-Section: admin
-Priority: optional
-Maintainer: Scott Moser <smoser@ubuntu.com>
-Build-Depends: debhelper (>= 9),
- dh-python,
- dh-systemd,
- iproute2,
- pep8,
- pyflakes,
- python3-pyflakes | pyflakes (<< 1.1.0-2),
- ${python},
- ${test_requires},
- ${requires}
-XS-Python-Version: all
-Standards-Version: 3.9.6
-
-Package: cloud-init
-Architecture: all
-Depends: procps,
- ${python},
- ${misc:Depends},
- ${${python}:Depends}
-Recommends: eatmydata, sudo, software-properties-common, gdisk
-XB-Python-Version: ${python:Versions}
-Description: Init scripts for cloud instances
- Cloud instances need special scripts to run during initialisation
- to retrieve and install ssh keys and to let the user run various scripts.
diff --git a/packages/debian/copyright b/packages/debian/copyright
deleted file mode 100644
index c694f30d..00000000
--- a/packages/debian/copyright
+++ /dev/null
@@ -1,29 +0,0 @@
-Format-Specification: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?op=file&rev=135
-Name: cloud-init
-Maintainer: Scott Moser <scott.moser@canonical.com>
-Source: https://launchpad.net/cloud-init
-
-This package was debianized by Soren Hansen <soren@ubuntu.com> on
-Thu, 04 Sep 2008 12:49:15 +0200 as ec2-init. It was later renamed to
-cloud-init by Scott Moser <scott.moser@canonical.com>
-
-Upstream Author: Scott Moser <smoser@canonical.com>
- Soren Hansen <soren@canonical.com>
- Chuck Short <chuck.short@canonical.com>
-
-Copyright: 2010, Canonical Ltd.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 3, as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
- The complete text of the GPL version 3 can be seen in
- /usr/share/common-licenses/GPL-3.
diff --git a/packages/debian/dirs b/packages/debian/dirs
deleted file mode 100644
index 9a633c60..00000000
--- a/packages/debian/dirs
+++ /dev/null
@@ -1,6 +0,0 @@
-var/lib/cloud
-usr/bin
-etc/init
-usr/share/doc/cloud
-etc/cloud
-lib/udev/rules.d
diff --git a/packages/debian/rules.in b/packages/debian/rules.in
deleted file mode 100755
index cf2dd405..00000000
--- a/packages/debian/rules.in
+++ /dev/null
@@ -1,23 +0,0 @@
-## template:basic
-#!/usr/bin/make -f
-INIT_SYSTEM ?= upstart,systemd
-export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
-PYVER ?= python${pyver}
-
-%:
- dh $@ --with $(PYVER),systemd --buildsystem pybuild
-
-override_dh_install:
- dh_install
- install -d debian/cloud-init/etc/rsyslog.d
- cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
-
-override_dh_auto_test:
-ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
- http_proxy= make PYVER=${pyver} check
-else
- @echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
-endif
-
-override_dh_systemd_start:
- dh_systemd_start --no-restart-on-upgrade --no-start
diff --git a/packages/debian/watch b/packages/debian/watch
deleted file mode 100644
index 0f7a600b..00000000
--- a/packages/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=3
-https://launchpad.net/cloud-init/+download .*/\+download/cloud-init-(.+)\.tar.gz
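
The second line is a uscan pattern whose capture group is the upstream version. A quick check of what it matches, against an illustrative download URL:

    import re

    pat = re.compile(r".*/\+download/cloud-init-(.+)\.tar.gz")
    url = ("https://launchpad.net/cloud-init/trunk/0.7.7"
           "/+download/cloud-init-0.7.7.tar.gz")
    m = pat.match(url)
    assert m and m.group(1) == "0.7.7"
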
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
deleted file mode 100644
index 254d209b..00000000
--- a/packages/redhat/cloud-init.spec.in
+++ /dev/null
@@ -1,204 +0,0 @@
-## This is a cheetah template
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-
-# See: http://www.zarb.org/~jasonc/macros.php
-# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
-# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
-
-#for $d in $defines
-%define ${d}
-#end for
-
-Name: cloud-init
-Version: ${version}
-Release: ${release}${subrelease}%{?dist}
-Summary: Cloud instance init scripts
-
-Group: System Environment/Base
-License: GPLv3
-URL: http://launchpad.net/cloud-init
-
-Source0: ${archive_name}
-BuildArch: noarch
-BuildRoot: %{_tmppath}
-
-BuildRequires: python-devel
-BuildRequires: python-setuptools
-BuildRequires: python-cheetah
-
-# System util packages needed
-Requires: shadow-utils
-Requires: rsyslog
-Requires: iproute
-Requires: e2fsprogs
-Requires: net-tools
-Requires: procps
-Requires: sudo >= 1.7.2p2-3
-
-# Install pypi 'dynamic' requirements
-#for $r in $requires
-Requires: ${r}
-#end for
-
-# Custom patches
-#set $size = 0
-#for $p in $patches
-Patch${size}: $p
-#set $size += 1
-#end for
-
-#if $sysvinit
-Requires(post): chkconfig
-Requires(postun): initscripts
-Requires(preun): chkconfig
-Requires(preun): initscripts
-#end if
-
-#if $systemd
-BuildRequires: systemd-units
-Requires(post): systemd-units
-Requires(postun): systemd-units
-Requires(preun): systemd-units
-#end if
-
-%description
-Cloud-init is a set of init scripts for cloud instances. Cloud instances
-need special scripts to run during initialization to retrieve and install
-ssh keys and to let the user run various scripts.
-
-%prep
-%setup -q -n %{name}-%{version}~${release}
-
-# Custom patches activation
-#set $size = 0
-#for $p in $patches
-%patch${size} -p1
-#set $size += 1
-#end for
-
-%build
-%{__python} setup.py build
-
-%install
-
-%{__python} setup.py install -O1 \
- --skip-build --root \$RPM_BUILD_ROOT \
- --init-system=${init_sys}
-
-# Note that /etc/rsyslog.d didn't exist by default until F15.
-# el6 request: https://bugzilla.redhat.com/show_bug.cgi?id=740420
-mkdir -p \$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d
-cp -p tools/21-cloudinit.conf \
- \$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
-
-# Remove the tests
-rm -rf \$RPM_BUILD_ROOT%{python_sitelib}/tests
-
-# Required dirs...
-mkdir -p \$RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
-mkdir -p \$RPM_BUILD_ROOT/%{_libexecdir}/%{name}
-
-#if $systemd
-mkdir -p \$RPM_BUILD_ROOT/%{_unitdir}
-cp -p systemd/* \$RPM_BUILD_ROOT/%{_unitdir}
-#end if
-
-%clean
-rm -rf \$RPM_BUILD_ROOT
-
-%post
-
-#if $systemd
-if [ \$1 -eq 1 ]
-then
- /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || :
- /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || :
- /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || :
- /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || :
-fi
-#end if
-
-#if $sysvinit
-/sbin/chkconfig --add %{_initrddir}/cloud-init-local
-/sbin/chkconfig --add %{_initrddir}/cloud-init
-/sbin/chkconfig --add %{_initrddir}/cloud-config
-/sbin/chkconfig --add %{_initrddir}/cloud-final
-#end if
-
-%preun
-
-#if $sysvinit
-if [ \$1 -eq 0 ]
-then
- /sbin/service cloud-init stop >/dev/null 2>&1 || :
- /sbin/chkconfig --del cloud-init || :
- /sbin/service cloud-init-local stop >/dev/null 2>&1 || :
- /sbin/chkconfig --del cloud-init-local || :
- /sbin/service cloud-config stop >/dev/null 2>&1 || :
- /sbin/chkconfig --del cloud-config || :
- /sbin/service cloud-final stop >/dev/null 2>&1 || :
- /sbin/chkconfig --del cloud-final || :
-fi
-#end if
-
-#if $systemd
-if [ \$1 -eq 0 ]
-then
- /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || :
- /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || :
- /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || :
- /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || :
-fi
-#end if
-
-%postun
-
-#if $systemd
-/bin/systemctl daemon-reload >/dev/null 2>&1 || :
-#end if
-
-%files
-
-/lib/udev/rules.d/66-azure-ephemeral.rules
-
-#if $sysvinit
-%attr(0755, root, root) %{_initddir}/cloud-config
-%attr(0755, root, root) %{_initddir}/cloud-final
-%attr(0755, root, root) %{_initddir}/cloud-init-local
-%attr(0755, root, root) %{_initddir}/cloud-init
-#end if
-
-#if $systemd
-/usr/lib/systemd/system-generators/cloud-init-generator
-%{_unitdir}/cloud-*
-#end if
-
-# Program binaries
-%{_bindir}/cloud-init*
-%{_libexecdir}/%{name}/uncloud-init
-%{_libexecdir}/%{name}/write-ssh-key-fingerprints
-
-# Docs
-%doc LICENSE ChangeLog TODO.rst requirements.txt
-%doc %{_defaultdocdir}/cloud-init/*
-
-# Configs
-%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg
-%dir %{_sysconfdir}/cloud/cloud.cfg.d
-%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg
-%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README
-%dir %{_sysconfdir}/cloud/templates
-%config(noreplace) %{_sysconfdir}/cloud/templates/*
-%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
-
-%{_libexecdir}/%{name}
-%dir %{_sharedstatedir}/cloud
-
-# Python code is here...
-%{python_sitelib}/*
-
-%changelog
-
-${changelog}
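
The spec above is a Cheetah template (note the #for/#if directives), rendered by brpm through cloudinit.templater with the 'subs' dict built in generate_spec_contents. A minimal direct rendering with Cheetah itself, using made-up values for the substitutions:

    from Cheetah.Template import Template

    subs = {
        'version': '0.7.7', 'release': 'bzr1245', 'subrelease': '',
        'archive_name': 'cloud-init-0.7.7~bzr1245.tar.gz',
        'defines': ['_topdir /home/build/rpmbuild'],
        'requires': ['python-six', 'PyYAML'],
        'patches': [], 'sysvinit': False, 'systemd': True,
        'init_sys': 'systemd', 'changelog': '',
    }
    contents = str(Template(file='cloud-init.spec.in', searchList=[subs]))
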
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
deleted file mode 100644
index 53e6ad13..00000000
--- a/packages/suse/cloud-init.spec.in
+++ /dev/null
@@ -1,163 +0,0 @@
-## This is a cheetah template
-
-# See: http://www.zarb.org/~jasonc/macros.php
-# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
-# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
-
-#for $d in $defines
-%define ${d}
-#end for
-
-Name: cloud-init
-Version: ${version}
-Release: ${release}${subrelease}%{?dist}
-Summary: Cloud instance init scripts
-
-Group: System/Management
-License: GPLv3
-URL: http://launchpad.net/cloud-init
-
-Source0: ${archive_name}
-BuildRoot: %{_tmppath}/%{name}-%{version}-build
-
-%if 0%{?suse_version} && 0%{?suse_version} <= 1110
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-%else
-BuildArch: noarch
-%endif
-
-BuildRequires: fdupes
-BuildRequires: filesystem
-BuildRequires: python-devel
-BuildRequires: python-setuptools
-BuildRequires: python-cheetah
-
-%if 0%{?suse_version} && 0%{?suse_version} <= 1210
- %define initsys sysvinit
-%else
- %define initsys systemd
-%endif
-
-# System util packages needed
-Requires: iproute2
-Requires: e2fsprogs
-Requires: net-tools
-Requires: procps
-Requires: sudo
-
-# Install pypi 'dynamic' requirements
-#for $r in $requires
-Requires: ${r}
-#end for
-
-# Custom patches
-#set $size = 0
-#for $p in $patches
-Patch${size}: $p
-#set $size += 1
-#end for
-
-%description
-Cloud-init is a set of init scripts for cloud instances. Cloud instances
-need special scripts to run during initialization to retrieve and install
-ssh keys and to let the user run various scripts.
-
-%prep
-%setup -q -n %{name}-%{version}~${release}
-
-# Custom patches activation
-#set $size = 0
-#for $p in $patches
-%patch${size} -p1
-#set $size += 1
-#end for
-
-%build
-%{__python} setup.py build
-
-%install
-%{__python} setup.py install \
- --skip-build --root=%{buildroot} --prefix=%{_prefix} \
- --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \
- --init-system=%{initsys}
-
-# Remove non-SUSE templates
-rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.*
-rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.*
-rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.*
-
-# Remove cloud-init tests
-rm -r %{buildroot}/%{python_sitelib}/tests
-
-# Move sysvinit scripts to the correct place and create symbolic links
-%if %{initsys} == sysvinit
- mkdir -p %{buildroot}/%{_initddir}
- mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/
- rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d
- rmdir %{buildroot}%{_sysconfdir}/rc.d
-
- mkdir -p %{buildroot}/%{_sbindir}
- pushd %{buildroot}/%{_initddir}
- for file in * ; do
- ln -s %{_initddir}/\${file} %{buildroot}/%{_sbindir}/rc\${file}
- done
- popd
-%endif
-
-# Move documentation
-mkdir -p %{buildroot}/%{_defaultdocdir}
-mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
-for doc in TODO LICENSE ChangeLog requirements.txt; do
- cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
-done
-
-# Remove duplicate files
-%if 0%{?suse_version}
- %fdupes %{buildroot}/%{python_sitelib}
-%endif
-
-mkdir -p %{buildroot}/var/lib/cloud
-
-%postun
-%insserv_cleanup
-
-%files
-
-# Sysvinit scripts
-%if %{initsys} == sysvinit
- %attr(0755, root, root) %{_initddir}/cloud-config
- %attr(0755, root, root) %{_initddir}/cloud-final
- %attr(0755, root, root) %{_initddir}/cloud-init-local
- %attr(0755, root, root) %{_initddir}/cloud-init
-
- %{_sbindir}/rccloud-*
-%endif
-
-# Program binaries
-%{_bindir}/cloud-init*
-
-# There doesn't seem to be an agreed-upon place for these;
-# the standard appears to say /usr/lib, but rpmbuild
-# may try /usr/lib64 instead.
-/usr/lib/%{name}/uncloud-init
-/usr/lib/%{name}/write-ssh-key-fingerprints
-
-# Docs
-%doc %{_defaultdocdir}/cloud-init/*
-
-# Configs
-%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg
-%dir %{_sysconfdir}/cloud/cloud.cfg.d
-%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg
-%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README
-%dir %{_sysconfdir}/cloud/templates
-%config(noreplace) %{_sysconfdir}/cloud/templates/*
-
-# Python code is here...
-%{python_sitelib}/*
-
-/var/lib/cloud
-
-%changelog
-
-${changelog}
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index cc1dc05f..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-# Pypi requirements for cloud-init to work
-
-# Used for templating (rendering) any files or strings with parameters.
-jinja2
-
-# This is used for any pretty printing of tabular data.
-PrettyTable
-
-# This one is currently only used by the MAAS datasource. If that
-# datasource is removed, this is no longer needed.
-oauthlib
-
-# This one is currently used only by the CloudSigma and SmartOS datasources.
-# If these datasources are removed, this is no longer needed.
-#
-# This will not work in py2.6 so it is only optionally installed on
-# python 2.7 and later.
-#
-# pyserial
-
-# This is only needed in places where we must support configs in a manner
-# that the built-in config parser cannot handle (i.e. when we need to
-# preserve comments, or do not have a top-level section)...
-configobj
-
-# All new style configurations are in the yaml format
-pyyaml
-
-# The new main entrypoint uses argparse instead of optparse
-argparse
-
-# Requests handles ssl correctly!
-requests
-
-# For patching pieces of cloud-config together
-jsonpatch
-
-# For Python 2/3 compatibility
-six
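
setup.py and the packaging scripts read this file through tools/read-dependencies; the effective parse is presumably just dropping comments and blank lines, e.g. (an assumed equivalent, not the actual tool):

    def read_requires(path="requirements.txt"):
        # Assumed equivalent of tools/read-dependencies: return the pypi
        # names, skipping blank lines and '#' comments.
        with open(path) as fh:
            return [ln.strip() for ln in fh
                    if ln.strip() and not ln.strip().startswith("#")]
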
diff --git a/setup.py b/setup.py
deleted file mode 100755
index 0af576a9..00000000
--- a/setup.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Distutils magic for ec2-init
-#
-# Copyright (C) 2009 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Soren Hansen <soren@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from glob import glob
-
-import os
-import sys
-
-import setuptools
-from setuptools.command.install import install
-
-from distutils.errors import DistutilsArgError
-
-import subprocess
-
-
-def is_f(p):
- return os.path.isfile(p)
-
-
-def tiny_p(cmd, capture=True):
- # Darn python 2.6 doesn't have check_output (argggg)
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if not capture:
- stdout = None
- stderr = None
- sp = subprocess.Popen(cmd, stdout=stdout,
- stderr=stderr, stdin=None,
- universal_newlines=True)
- (out, err) = sp.communicate()
- ret = sp.returncode
- if ret not in [0]:
- raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" %
- (cmd, ret, out, err))
- return (out, err)
-
-
-def pkg_config_read(library, var):
- fallbacks = {
- 'systemd': {
- 'systemdsystemunitdir': '/lib/systemd/system',
- 'systemdsystemgeneratordir': '/lib/systemd/system-generators',
- }
- }
- cmd = ['pkg-config', '--variable=%s' % var, library]
- try:
- (path, err) = tiny_p(cmd)
- except Exception:
- return fallbacks[library][var]
- return str(path).strip()
-
-
-INITSYS_FILES = {
- 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
- 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
- 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
- 'systemd': [f for f in (glob('systemd/*.service') +
- glob('systemd/*.target')) if is_f(f)],
- 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)],
- 'upstart': [f for f in glob('upstart/*') if is_f(f)],
-}
-INITSYS_ROOTS = {
- 'sysvinit': '/etc/rc.d/init.d',
- 'sysvinit_freebsd': '/usr/local/etc/rc.d',
- 'sysvinit_deb': '/etc/init.d',
- 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'),
- 'systemd.generators': pkg_config_read('systemd',
- 'systemdsystemgeneratordir'),
- 'upstart': '/etc/init/',
-}
-INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
-
-# Install everything in the right location and take care of Linux (default) and
-# FreeBSD systems.
-USR = "/usr"
-ETC = "/etc"
-USR_LIB_EXEC = "/usr/lib"
-LIB = "/lib"
-if os.uname()[0] == 'FreeBSD':
- USR = "/usr/local"
- USR_LIB_EXEC = "/usr/local/lib"
- ETC = "/usr/local/etc"
-elif os.path.isfile('/etc/redhat-release'):
- USR_LIB_EXEC = "/usr/libexec"
-
-
-# Avoid having datafiles installed in a virtualenv...
-def in_virtualenv():
- try:
- if sys.real_prefix == sys.prefix:
- return False
- else:
- return True
- except AttributeError:
- return False
-
-
-def get_version():
- cmd = ['tools/read-version']
- (ver, _e) = tiny_p(cmd)
- return str(ver).strip()
-
-
-def read_requires():
- cmd = ['tools/read-dependencies']
- (deps, _e) = tiny_p(cmd)
- return str(deps).splitlines()
-
-
-# TODO: Is there a better way to do this??
-class InitsysInstallData(install):
- init_system = None
- user_options = install.user_options + [
- # This will magically show up in member variable 'init_system'
- ('init-system=', None,
- ('init system(s) to configure (%s) [default: None]' %
- (", ".join(INITSYS_TYPES)))),
- ]
-
- def initialize_options(self):
- install.initialize_options(self)
- self.init_system = ""
-
- def finalize_options(self):
- install.finalize_options(self)
-
- if self.init_system and isinstance(self.init_system, str):
- self.init_system = self.init_system.split(",")
-
- if len(self.init_system) == 0:
- raise DistutilsArgError(
- ("You must specify one of (%s) when"
- " specifying init system(s)!") % (", ".join(INITSYS_TYPES)))
-
- bad = [f for f in self.init_system if f not in INITSYS_TYPES]
- if len(bad) != 0:
- raise DistutilsArgError(
- "Invalid --init-system: %s" % (','.join(bad)))
-
- for system in self.init_system:
- # add data files for anything that starts with '<system>.'
- datakeys = [k for k in INITSYS_ROOTS
- if k.partition(".")[0] == system]
- for k in datakeys:
- self.distribution.data_files.append(
- (INITSYS_ROOTS[k], INITSYS_FILES[k]))
- # Force that command to reinitialize (with the new file list)
- self.distribution.reinitialize_command('install_data', True)
-
-
-if in_virtualenv():
- data_files = []
- cmdclass = {}
-else:
- data_files = [
- (ETC + '/cloud', glob('config/*.cfg')),
- (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
- (ETC + '/cloud/templates', glob('templates/*')),
- (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init',
- 'tools/write-ssh-key-fingerprints']),
- (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
- (USR + '/share/doc/cloud-init/examples',
- [f for f in glob('doc/examples/*') if is_f(f)]),
- (USR + '/share/doc/cloud-init/examples/seed',
- [f for f in glob('doc/examples/seed/*') if is_f(f)]),
- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]),
- ]
- # Use a subclass for install that handles
- # adding on the right init system configuration files
- cmdclass = {
- 'install': InitsysInstallData,
- }
-
-
-requirements = read_requires()
-if sys.version_info < (3,):
- requirements.append('cheetah')
-
-setuptools.setup(
- name='cloud-init',
- version=get_version(),
- description='EC2 initialisation magic',
- author='Scott Moser',
- author_email='scott.moser@canonical.com',
- url='http://launchpad.net/cloud-init/',
- packages=setuptools.find_packages(exclude=['tests']),
- scripts=['tools/cloud-init-per'],
- license='GPLv3',
- data_files=data_files,
- install_requires=requirements,
- cmdclass=cmdclass,
- entry_points={
- 'console_scripts': [
- 'cloud-init = cloudinit.cmd.main:main'
- ],
- }
-)
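
The --init-system handling above boils down to a key-prefix lookup: every INITSYS_ROOTS entry whose prefix (the part before any '.') matches a requested system contributes one (directory, files) pair to data_files. As a standalone sketch of that expansion:

    def initsys_data_files(system, roots, files):
        # e.g. 'systemd' picks up both 'systemd' and 'systemd.generators',
        # mapping unit files and the generator to their pkg-config dirs.
        keys = [k for k in roots if k.partition(".")[0] == system]
        return [(roots[k], files[k]) for k in keys]

    # usage: initsys_data_files('systemd', INITSYS_ROOTS, INITSYS_FILES)
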
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service
deleted file mode 100644
index 3309e08a..00000000
--- a/systemd/cloud-config.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=Apply the settings specified in cloud-config
-After=network-online.target cloud-config.target
-Wants=network-online.target cloud-config.target
-
-[Service]
-Type=oneshot
-ExecStart=/usr/bin/cloud-init modules --mode=config
-RemainAfterExit=yes
-TimeoutSec=0
-
-# Output needs to appear in instance console output
-StandardOutput=journal+console
-
-[Install]
-WantedBy=cloud-init.target
diff --git a/systemd/cloud-config.target b/systemd/cloud-config.target
deleted file mode 100644
index ae9b7d02..00000000
--- a/systemd/cloud-config.target
+++ /dev/null
@@ -1,11 +0,0 @@
-# cloud-init normally emits a "cloud-config" upstart event to inform third
-# parties that cloud-config is available, which does us no good when we're
-# using systemd. cloud-config.target serves as this synchronization point
-# instead. Services that would "start on cloud-config" with upstart can
-# instead use "After=cloud-config.target" and "Wants=cloud-config.target"
-# as appropriate.
-
-[Unit]
-Description=Cloud-config availability
-Wants=cloud-init-local.service cloud-init.service
-After=cloud-init-local.service cloud-init.service
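
Per the comments above, a third-party service that used to "start on cloud-config" under upstart would carry ordering like the following. This is a hypothetical unit, shown here as the text a package would install; the ExecStart path is made up.

    EXAMPLE_UNIT = """\
    [Unit]
    Description=Example job that must run after cloud-config
    After=cloud-config.target
    Wants=cloud-config.target

    [Service]
    Type=oneshot
    ExecStart=/usr/local/bin/example-post-cloud-config
    """

    # A packager would install this as e.g.
    # /etc/systemd/system/example.service and enable it.
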
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
deleted file mode 100644
index 3927710f..00000000
--- a/systemd/cloud-final.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=Execute cloud user/final scripts
-After=network-online.target cloud-config.service rc-local.service
-Wants=network-online.target cloud-config.service
-
-[Service]
-Type=oneshot
-ExecStart=/usr/bin/cloud-init modules --mode=final
-RemainAfterExit=yes
-TimeoutSec=0
-KillMode=process
-
-# Output needs to appear in instance console output
-StandardOutput=journal+console
-
-[Install]
-WantedBy=cloud-init.target
diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator
deleted file mode 100755
index 2d319695..00000000
--- a/systemd/cloud-init-generator
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/bin/sh
-set -f
-
-LOG=""
-DEBUG_LEVEL=1
-LOG_D="/run/cloud-init"
-ENABLE="enabled"
-DISABLE="disabled"
-CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
-CLOUD_TARGET_NAME="cloud-init.target"
-# lxc sets 'container', but let's make that explicitly a global
-CONTAINER="${container}"
-
-debug() {
- local lvl="$1"
- shift
- [ "$lvl" -gt "$DEBUG_LEVEL" ] && return
- if [ -z "$LOG" ]; then
- local log="$LOG_D/${0##*/}.log"
- { [ -d "$LOG_D" ] || mkdir -p "$LOG_D"; } &&
- { : > "$log"; } >/dev/null 2>&1 && LOG="$log" ||
- LOG="/dev/kmsg"
- fi
- echo "$@" >> "$LOG"
-}
-
-etc_file() {
- local pprefix="${1:-/etc/cloud/cloud-init.}"
- _RET="unset"
- [ -f "${pprefix}$ENABLE" ] && _RET="$ENABLE" && return 0
- [ -f "${pprefix}$DISABLE" ] && _RET="$DISABLE" && return 0
- return 0
-}
-
-read_proc_cmdline() {
- # return /proc/cmdline for non-container, and /proc/1/cmdline for container
- local ctname="systemd"
- if [ -n "$CONTAINER" ] && ctname=$CONTAINER ||
- systemd-detect-virt --container --quiet; then
- if { _RET=$(tr '\0' ' ' < /proc/1/cmdline); } 2>/dev/null; then
- _RET_MSG="container[$ctname]: pid 1 cmdline"
- return
- fi
- _RET=""
- _RET_MSG="container[$ctname]: pid 1 cmdline not available"
- return 0
- fi
-
- _RET_MSG="/proc/cmdline"
- read _RET < /proc/cmdline
-}
-
-kernel_cmdline() {
- local cmdline="" tok=""
- if [ -n "${KERNEL_CMDLINE+x}" ]; then
- # use KERNEL_CMDLINE if present in environment even if empty
- cmdline=${KERNEL_CMDLINE}
- debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline"
- elif read_proc_cmdline; then
- read_proc_cmdline && cmdline="$_RET"
- debug 1 "kernel command line ($_RET_MSG): $cmdline"
- fi
- _RET="unset"
- cmdline=" $cmdline "
- tok=${cmdline##* cloud-init=}
- [ "$tok" = "$cmdline" ] && _RET="unset"
- tok=${tok%% *}
- [ "$tok" = "$ENABLE" -o "$tok" = "$DISABLE" ] && _RET="$tok"
- return 0
-}
-
-default() {
- _RET="$ENABLE"
-}
-
-main() {
- local normal_d="$1" early_d="$2" late_d="$3"
- local target_name="multi-user.target" gen_d="$early_d"
- local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}"
-
- debug 1 "$0 normal=$normal_d early=$early_d late=$late_d"
- debug 2 "$0 $*"
-
- local search result="error" ret=""
- for search in kernel_cmdline etc_file default; do
- if $search; then
- debug 1 "$search found $_RET"
- [ "$_RET" = "$ENABLE" -o "$_RET" = "$DISABLE" ] &&
- result=$_RET && break
- else
- ret=$?
- debug 0 "search $search returned $ret"
- fi
- done
-
- if [ "$result" = "$ENABLE" ]; then
- if [ -e "$link_path" ]; then
- debug 1 "already enabled: no change needed"
- else
- [ -d "${link_path%/*}" ] || mkdir -p "${link_path%/*}" ||
- debug 0 "failed to make dir $link_path"
- if ln -snf "$CLOUD_SYSTEM_TARGET" "$link_path"; then
- debug 1 "enabled via $link_path -> $CLOUD_SYSTEM_TARGET"
- else
- ret=$?
- debug 0 "[$ret] enable failed:" \
- "ln $CLOUD_SYSTEM_TARGET $link_path"
- fi
- fi
- elif [ "$result" = "$DISABLE" ]; then
- if [ -f "$link_path" ]; then
- if rm -f "$link_path"; then
- debug 1 "disabled. removed existing $link_path"
- else
- ret=$?
- debug 0 "[$ret] disable failed, remove $link_path"
- fi
- else
- debug 1 "already disabled: no change needed [no $link_path]"
- fi
- else
- debug 0 "unexpected result '$result'"
- ret=3
- fi
- return $ret
-}
-
-main "$@"
-
-# vi: ts=4 expandtab
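
kernel_cmdline() above reduces to: take the text after the last ' cloud-init=' on the (space-padded) command line, cut it at the next space, and accept only 'enabled' or 'disabled'. A Python restatement of that logic, for clarity only:

    def cloud_init_cmdline_state(cmdline):
        padded = " %s " % cmdline
        if " cloud-init=" not in padded:
            return "unset"
        # keep the value of the last occurrence, up to the next space
        tok = padded.rsplit(" cloud-init=", 1)[1].split(" ", 1)[0]
        return tok if tok in ("enabled", "disabled") else "unset"

    assert cloud_init_cmdline_state("ro quiet cloud-init=disabled") == "disabled"
    assert cloud_init_cmdline_state("ro quiet") == "unset"
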
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service
deleted file mode 100644
index b19eeaee..00000000
--- a/systemd/cloud-init-local.service
+++ /dev/null
@@ -1,22 +0,0 @@
-[Unit]
-Description=Initial cloud-init job (pre-networking)
-DefaultDependencies=no
-Wants=local-fs.target
-Wants=network-pre.target
-After=local-fs.target
-Conflicts=shutdown.target
-Before=network-pre.target
-Before=shutdown.target
-
-[Service]
-Type=oneshot
-ExecStart=/usr/bin/cloud-init init --local
-ExecStart=/bin/touch /run/cloud-init/network-config-ready
-RemainAfterExit=yes
-TimeoutSec=0
-
-# Output needs to appear in instance console output
-StandardOutput=journal+console
-
-[Install]
-WantedBy=cloud-init.target
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
deleted file mode 100644
index 6fb655e6..00000000
--- a/systemd/cloud-init.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=Initial cloud-init job (metadata service crawler)
-After=cloud-init-local.service networking.service
-Before=network-online.target sshd.service sshd-keygen.service systemd-user-sessions.service
-Requires=networking.service
-Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service
-
-[Service]
-Type=oneshot
-ExecStart=/usr/bin/cloud-init init
-RemainAfterExit=yes
-TimeoutSec=0
-
-# Output needs to appear in instance console output
-StandardOutput=journal+console
-
-[Install]
-WantedBy=cloud-init.target
diff --git a/systemd/cloud-init.target b/systemd/cloud-init.target
deleted file mode 100644
index a63babb0..00000000
--- a/systemd/cloud-init.target
+++ /dev/null
@@ -1,6 +0,0 @@
-# cloud-init target is enabled by cloud-init-generator
-# To disable it you can either:
-# a.) boot with kernel cmdline of 'cloud-init=disabled'
-# b.) touch a file /etc/cloud/cloud-init.disabled
-[Unit]
-Description=Cloud-init target
diff --git a/sysvinit/debian/cloud-config b/sysvinit/debian/cloud-config
deleted file mode 100644
index 53322748..00000000
--- a/sysvinit/debian/cloud-config
+++ /dev/null
@@ -1,64 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: cloud-config
-# Required-Start: cloud-init cloud-init-local
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Cloud init modules --mode config
-# Description: Cloud configuration initialization
-### END INIT INFO
-
-# Authors: Julien Danjou <acid@debian.org>
-# Juerg Haefliger <juerg.haefliger@hp.com>
-# Thomas Goirand <zigo@debian.org>
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Cloud service"
-NAME=cloud-init
-DAEMON=/usr/bin/$NAME
-DAEMON_ARGS="modules --mode config"
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-if init_is_upstart; then
- case "$1" in
- stop)
- exit 0
- ;;
- *)
- exit 1
- ;;
- esac
-fi
-
-case "$1" in
-start)
- log_daemon_msg "Starting $DESC" "$NAME"
- $DAEMON ${DAEMON_ARGS}
- case "$?" in
- 0|1) log_end_msg 0 ;;
- 2) log_end_msg 1 ;;
- esac
-;;
-stop|restart|force-reload)
- echo "Error: argument '$1' not supported" >&2
- exit 3
-;;
-*)
- echo "Usage: $SCRIPTNAME {start}" >&2
- exit 3
-;;
-esac
-
-:
diff --git a/sysvinit/debian/cloud-final b/sysvinit/debian/cloud-final
deleted file mode 100644
index 55afc8b0..00000000
--- a/sysvinit/debian/cloud-final
+++ /dev/null
@@ -1,66 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: cloud-final
-# Required-Start: $all cloud-config
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Cloud init modules final jobs
-# Description: This runs the cloud configuration initialization "final" jobs
-# and can be seen as the traditional "rc.local" time for the cloud.
-# It runs after all cloud-config jobs are run
-### END INIT INFO
-
-# Authors: Julien Danjou <acid@debian.org>
-# Juerg Haefliger <juerg.haefliger@hp.com>
-# Thomas Goirand <zigo@debian.org>
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Cloud service"
-NAME=cloud-init
-DAEMON=/usr/bin/$NAME
-DAEMON_ARGS="modules --mode final"
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-if init_is_upstart; then
- case "$1" in
- stop)
- exit 0
- ;;
- *)
- exit 1
- ;;
- esac
-fi
-
-case "$1" in
-start)
- log_daemon_msg "Starting $DESC" "$NAME"
- $DAEMON ${DAEMON_ARGS}
- case "$?" in
- 0|1) log_end_msg 0 ;;
- 2) log_end_msg 1 ;;
- esac
-;;
-stop|restart|force-reload)
- echo "Error: argument '$1' not supported" >&2
- exit 3
-;;
-*)
- echo "Usage: $SCRIPTNAME {start}" >&2
- exit 3
-;;
-esac
-
-:
diff --git a/sysvinit/debian/cloud-init b/sysvinit/debian/cloud-init
deleted file mode 100755
index 48fa0423..00000000
--- a/sysvinit/debian/cloud-init
+++ /dev/null
@@ -1,64 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: cloud-init
-# Required-Start: $local_fs $remote_fs $syslog $network cloud-init-local
-# Required-Stop: $remote_fs
-# X-Start-Before: sshd
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Cloud init
-# Description: Cloud configuration initialization
-### END INIT INFO
-
-# Authors: Julien Danjou <acid@debian.org>
-# Thomas Goirand <zigo@debian.org>
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Cloud service"
-NAME=cloud-init
-DAEMON=/usr/bin/$NAME
-DAEMON_ARGS="init"
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-if init_is_upstart; then
- case "$1" in
- stop)
- exit 0
- ;;
- *)
- exit 1
- ;;
- esac
-fi
-
-case "$1" in
- start)
- log_daemon_msg "Starting $DESC" "$NAME"
- $DAEMON ${DAEMON_ARGS}
- case "$?" in
- 0|1) log_end_msg 0 ;;
- 2) log_end_msg 1 ;;
- esac
- ;;
- stop|restart|force-reload)
- echo "Error: argument '$1' not supported" >&2
- exit 3
- ;;
- *)
- echo "Usage: $SCRIPTNAME {start}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/sysvinit/debian/cloud-init-local b/sysvinit/debian/cloud-init-local
deleted file mode 100644
index 802ee8e9..00000000
--- a/sysvinit/debian/cloud-init-local
+++ /dev/null
@@ -1,63 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: cloud-init-local
-# Required-Start: $local_fs $remote_fs
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Cloud init local
-# Description: Cloud configuration initialization
-### END INIT INFO
-
-# Authors: Julien Danjou <acid@debian.org>
-# Juerg Haefliger <juerg.haefliger@hp.com>
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Cloud service"
-NAME=cloud-init
-DAEMON=/usr/bin/$NAME
-DAEMON_ARGS="init --local"
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-if init_is_upstart; then
- case "$1" in
- stop)
- exit 0
- ;;
- *)
- exit 1
- ;;
- esac
-fi
-
-case "$1" in
-start)
- log_daemon_msg "Starting $DESC" "$NAME"
- $DAEMON ${DAEMON_ARGS}
- case "$?" in
- 0|1) log_end_msg 0 ;;
- 2) log_end_msg 1 ;;
- esac
-;;
-stop|restart|force-reload)
- echo "Error: argument '$1' not supported" >&2
- exit 3
-;;
-*)
- echo "Usage: $SCRIPTNAME {start}" >&2
- exit 3
-;;
-esac
-
-:
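
The four Debian sysvinit scripts above are identical apart from DAEMON_ARGS; the boot stages map to cloud-init invocations as follows (the same mapping recurs in the freebsd, gentoo, and redhat scripts below):

    STAGES = {
        "cloud-init-local": "init --local",      # pre-networking
        "cloud-init": "init",                    # metadata crawl, post-network
        "cloud-config": "modules --mode config",
        "cloud-final": "modules --mode final",   # rc.local-like, runs last
    }
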
diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig
deleted file mode 100755
index 01bc061e..00000000
--- a/sysvinit/freebsd/cloudconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-
-# PROVIDE: cloudconfig
-# REQUIRE: cloudinit cloudinitlocal
-# BEFORE: cloudfinal
-
-. /etc/rc.subr
-
-PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg
-
-name="cloudconfig"
-command="/usr/local/bin/cloud-init"
-start_cmd="cloudconfig_start"
-stop_cmd=":"
-rcvar="cloudinit_enable"
-start_precmd="cloudinit_override"
-start_cmd="cloudconfig_start"
-
-cloudinit_override()
-{
- # If a sysconfig/defaults variable override file exists, use it...
- if [ -f /etc/defaults/cloud-init ]; then
- . /etc/defaults/cloud-init
- fi
-}
-
-cloudconfig_start()
-{
- echo "${command} starting"
- ${command} modules --mode config
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal
deleted file mode 100755
index 1b487aa0..00000000
--- a/sysvinit/freebsd/cloudfinal
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-
-# PROVIDE: cloudfinal
-# REQUIRE: LOGIN cloudinit cloudconfig cloudinitlocal
-# REQUIRE: cron mail sshd swaplate
-
-. /etc/rc.subr
-
-PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg
-
-name="cloudfinal"
-command="/usr/local/bin/cloud-init"
-start_cmd="cloudfinal_start"
-stop_cmd=":"
-rcvar="cloudinit_enable"
-start_precmd="cloudinit_override"
-start_cmd="cloudfinal_start"
-
-cloudinit_override()
-{
- # If a sysconfig/defaults variable override file exists, use it...
- if [ -f /etc/defaults/cloud-init ]; then
- . /etc/defaults/cloud-init
- fi
-}
-
-cloudfinal_start()
-{
- echo -n "${command} starting"
- ${command} modules --mode final
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
deleted file mode 100755
index 862eeab4..00000000
--- a/sysvinit/freebsd/cloudinit
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-
-# PROVIDE: cloudinit
-# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal
-# BEFORE: cloudconfig cloudfinal
-
-. /etc/rc.subr
-
-PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg
-
-name="cloudinit"
-command="/usr/local/bin/cloud-init"
-start_cmd="cloudinit_start"
-stop_cmd=":"
-rcvar="cloudinit_enable"
-start_precmd="cloudinit_override"
-start_cmd="cloudinit_start"
-
-cloudinit_override()
-{
- # If a sysconfig/defaults variable override file exists, use it...
- if [ -f /etc/defaults/cloud-init ]; then
- . /etc/defaults/cloud-init
- fi
-}
-
-cloudinit_start()
-{
- echo -n "${command} starting"
- ${command} init
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal
deleted file mode 100755
index fb342a0f..00000000
--- a/sysvinit/freebsd/cloudinitlocal
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-
-# PROVIDE: cloudinitlocal
-# REQUIRE: mountcritlocal
-# BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal
-
-. /etc/rc.subr
-
-PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg
-
-name="cloudinitlocal"
-command="/usr/local/bin/cloud-init"
-start_cmd="cloudlocal_start"
-stop_cmd=":"
-rcvar="cloudinit_enable"
-start_precmd="cloudinit_override"
-start_cmd="cloudlocal_start"
-
-cloudinit_override()
-{
- # If a sysconfig/defaults variable override file exists, use it...
- if [ -f /etc/defaults/cloud-init ]; then
- . /etc/defaults/cloud-init
- fi
-}
-
-cloudlocal_start()
-{
- echo -n "${command} starting"
- ${command} init --local
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/sysvinit/gentoo/cloud-config b/sysvinit/gentoo/cloud-config
deleted file mode 100644
index b0fa786d..00000000
--- a/sysvinit/gentoo/cloud-config
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/sbin/runscript
-
-depend() {
- after cloud-init-local
- after cloud-init
- before cloud-final
- provide cloud-config
-}
-
-start() {
- cloud-init modules --mode config
- eend 0
-}
diff --git a/sysvinit/gentoo/cloud-final b/sysvinit/gentoo/cloud-final
deleted file mode 100644
index b457a354..00000000
--- a/sysvinit/gentoo/cloud-final
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/sbin/runscript
-
-depend() {
- after cloud-config
- provide cloud-final
-}
-
-start() {
- cloud-init modules --mode final
- eend 0
-}
diff --git a/sysvinit/gentoo/cloud-init b/sysvinit/gentoo/cloud-init
deleted file mode 100644
index 9ab64ad8..00000000
--- a/sysvinit/gentoo/cloud-init
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/sbin/runscript
-# add depends for network, dns, fs etc
-depend() {
- after cloud-init-local
- before cloud-config
- provide cloud-init
-}
-
-start() {
- cloud-init init
- eend 0
-}
diff --git a/sysvinit/gentoo/cloud-init-local b/sysvinit/gentoo/cloud-init-local
deleted file mode 100644
index 9d47263e..00000000
--- a/sysvinit/gentoo/cloud-init-local
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/sbin/runscript
-
-depend() {
- after localmount
- after netmount
- before cloud-init
- provide cloud-init-local
-}
-
-start() {
- cloud-init init --local
- eend 0
-}
diff --git a/sysvinit/redhat/cloud-config b/sysvinit/redhat/cloud-config
deleted file mode 100755
index ad8ed831..00000000
--- a/sysvinit/redhat/cloud-config
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# See: http://wiki.debian.org/LSBInitScripts
-# See: http://tiny.cc/czvbgw
-# See: http://www.novell.com/coolsolutions/feature/15380.html
-# Also based on dhcpd in RHEL (for comparison)
-
-### BEGIN INIT INFO
-# Provides: cloud-config
-# Required-Start: cloud-init cloud-init-local
-# Should-Start: $time
-# Required-Stop:
-# Should-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: The config cloud-init job
-# Description: Starts cloud-init and runs the config phase
-# and any associated config modules as desired.
-### END INIT INFO
-
-# Return values acc. to LSB for all commands but status:
-# 0 - success
-# 1 - generic or unspecified error
-# 2 - invalid or excess argument(s)
-# 3 - unimplemented feature (e.g. "reload")
-# 4 - user had insufficient privileges
-# 5 - program is not installed
-# 6 - program is not configured
-# 7 - program is not running
-# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
-#
-# Note that starting an already running service, stopping
-# or restarting a not-running service as well as the restart
-# with force-reload (in case signaling is not supported) are
-# considered a success.
-
-RETVAL=0
-
-prog="cloud-init"
-cloud_init="/usr/bin/cloud-init"
-conf="/etc/cloud/cloud.cfg"
-
-# If sysconfig/default variable override files exist, use them...
-[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
-[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
-
-start() {
- [ -x $cloud_init ] || return 5
- [ -f $conf ] || return 6
-
- echo -n $"Starting $prog: "
- $cloud_init $CLOUDINITARGS modules --mode config
- RETVAL=$?
- return $RETVAL
-}
-
-stop() {
- echo -n $"Shutting down $prog: "
- # No-op
- RETVAL=7
- return $RETVAL
-}
-
-case "$1" in
- start)
- start
- RETVAL=$?
- ;;
- stop)
- stop
- RETVAL=$?
- ;;
- restart|try-restart|condrestart)
- ## Stop the service and regardless of whether it was
- ## running or not, start it again.
- #
- ## Note: try-restart is now part of LSB (as of 1.9).
- ## RH has a similar command named condrestart.
- start
- RETVAL=$?
- ;;
- reload|force-reload)
- # It does not support reload
- RETVAL=3
- ;;
- status)
- echo -n $"Checking for service $prog:"
- # Return value is slightly different for the status command:
- # 0 - service up and running
- # 1 - service dead, but /var/run/ pid file exists
- # 2 - service dead, but /var/lock/ lock file exists
- # 3 - service not running (unused)
- # 4 - service status unknown :-(
- # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
- RETVAL=3
- ;;
- *)
- echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
- RETVAL=3
- ;;
-esac
-
-exit $RETVAL
diff --git a/sysvinit/redhat/cloud-final b/sysvinit/redhat/cloud-final
deleted file mode 100755
index aeae8903..00000000
--- a/sysvinit/redhat/cloud-final
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# See: http://wiki.debian.org/LSBInitScripts
-# See: http://tiny.cc/czvbgw
-# See: http://www.novell.com/coolsolutions/feature/15380.html
-# Also based on dhcpd in RHEL (for comparison)
-
-### BEGIN INIT INFO
-# Provides: cloud-final
-# Required-Start: $all cloud-config
-# Should-Start: $time
-# Required-Stop:
-# Should-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: The final cloud-init job
-# Description: Starts cloud-init and runs the final phase
-# and any associated final modules as desired.
-### END INIT INFO
-
-# Return values acc. to LSB for all commands but status:
-# 0 - success
-# 1 - generic or unspecified error
-# 2 - invalid or excess argument(s)
-# 3 - unimplemented feature (e.g. "reload")
-# 4 - user had insufficient privileges
-# 5 - program is not installed
-# 6 - program is not configured
-# 7 - program is not running
-# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
-#
-# Note that starting an already running service, stopping
-# or restarting a not-running service as well as the restart
-# with force-reload (in case signaling is not supported) are
-# considered a success.
-
-RETVAL=0
-
-prog="cloud-init"
-cloud_init="/usr/bin/cloud-init"
-conf="/etc/cloud/cloud.cfg"
-
-# If sysconfig/default variable override files exist, use them...
-[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
-[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
-
-start() {
- [ -x $cloud_init ] || return 5
- [ -f $conf ] || return 6
-
- echo -n $"Starting $prog: "
- $cloud_init $CLOUDINITARGS modules --mode final
- RETVAL=$?
- return $RETVAL
-}
-
-stop() {
- echo -n $"Shutting down $prog: "
- # No-op
- RETVAL=7
- return $RETVAL
-}
-
-case "$1" in
- start)
- start
- RETVAL=$?
- ;;
- stop)
- stop
- RETVAL=$?
- ;;
- restart|try-restart|condrestart)
- ## Stop the service and regardless of whether it was
- ## running or not, start it again.
- #
- ## Note: try-restart is now part of LSB (as of 1.9).
- ## RH has a similar command named condrestart.
- start
- RETVAL=$?
- ;;
- reload|force-reload)
- # It does not support reload
- RETVAL=3
- ;;
- status)
- echo -n $"Checking for service $prog:"
- # Return value is slightly different for the status command:
- # 0 - service up and running
- # 1 - service dead, but /var/run/ pid file exists
- # 2 - service dead, but /var/lock/ lock file exists
- # 3 - service not running (unused)
- # 4 - service status unknown :-(
- # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
- RETVAL=3
- ;;
- *)
- echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
- RETVAL=3
- ;;
-esac
-
-exit $RETVAL
diff --git a/sysvinit/redhat/cloud-init b/sysvinit/redhat/cloud-init
deleted file mode 100755
index c1c92ad0..00000000
--- a/sysvinit/redhat/cloud-init
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# See: http://wiki.debian.org/LSBInitScripts
-# See: http://tiny.cc/czvbgw
-# See: http://www.novell.com/coolsolutions/feature/15380.html
-# Also based on dhcpd in RHEL (for comparison)
-
-### BEGIN INIT INFO
-# Provides: cloud-init
-# Required-Start: $local_fs $network $named $remote_fs cloud-init-local
-# Should-Start: $time
-# Required-Stop:
-# Should-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: The initial cloud-init job (net and fs contingent)
-# Description: Starts cloud-init and runs the initialization phase
-# and any associated initial modules as desired.
-### END INIT INFO
-
-# Return values acc. to LSB for all commands but status:
-# 0 - success
-# 1 - generic or unspecified error
-# 2 - invalid or excess argument(s)
-# 3 - unimplemented feature (e.g. "reload")
-# 4 - user had insufficient privileges
-# 5 - program is not installed
-# 6 - program is not configured
-# 7 - program is not running
-# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
-#
-# Note that starting an already running service, stopping
-# or restarting a not-running service as well as the restart
-# with force-reload (in case signaling is not supported) are
-# considered a success.
-
-RETVAL=0
-
-prog="cloud-init"
-cloud_init="/usr/bin/cloud-init"
-conf="/etc/cloud/cloud.cfg"
-
-# If sysconfig/default variable override files exist, use them...
-[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
-[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
-
-start() {
- [ -x $cloud_init ] || return 5
- [ -f $conf ] || return 6
-
- echo -n $"Starting $prog: "
- $cloud_init $CLOUDINITARGS init
- RETVAL=$?
- return $RETVAL
-}
-
-stop() {
- echo -n $"Shutting down $prog: "
- # No-op
- RETVAL=7
- return $RETVAL
-}
-
-case "$1" in
- start)
- start
- RETVAL=$?
- ;;
- stop)
- stop
- RETVAL=$?
- ;;
- restart|try-restart|condrestart)
- ## Stop the service and regardless of whether it was
- ## running or not, start it again.
- #
- ## Note: try-restart is now part of LSB (as of 1.9).
- ## RH has a similar command named condrestart.
- start
- RETVAL=$?
- ;;
- reload|force-reload)
- # It does not support reload
- RETVAL=3
- ;;
- status)
- echo -n $"Checking for service $prog:"
- # Return value is slightly different for the status command:
- # 0 - service up and running
- # 1 - service dead, but /var/run/ pid file exists
- # 2 - service dead, but /var/lock/ lock file exists
- # 3 - service not running (unused)
- # 4 - service status unknown :-(
- # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
- RETVAL=3
- ;;
- *)
- echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
- RETVAL=3
- ;;
-esac
-
-exit $RETVAL
diff --git a/sysvinit/redhat/cloud-init-local b/sysvinit/redhat/cloud-init-local
deleted file mode 100755
index b9caedbd..00000000
--- a/sysvinit/redhat/cloud-init-local
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/sh
-
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# See: http://wiki.debian.org/LSBInitScripts
-# See: http://tiny.cc/czvbgw
-# See: http://www.novell.com/coolsolutions/feature/15380.html
-# Also based on dhcpd in RHEL (for comparison)
-
-# Bring this up before network, S10
-#chkconfig: 2345 09 91
-
-### BEGIN INIT INFO
-# Provides: cloud-init-local
-# Required-Start: $local_fs
-# Should-Start: $time
-# Required-Stop:
-# Should-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: The initial cloud-init job (local fs contingent)
-# Description:       Starts cloud-init and runs the initialization phases
-# and any associated initial modules as desired.
-### END INIT INFO
-
-# Return values acc. to LSB for all commands but status:
-# 0 - success
-# 1 - generic or unspecified error
-# 2 - invalid or excess argument(s)
-# 3 - unimplemented feature (e.g. "reload")
-# 4 - user had insufficient privileges
-# 5 - program is not installed
-# 6 - program is not configured
-# 7 - program is not running
-# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
-#
-# Note that starting an already running service, stopping
-# or restarting a not-running service as well as the restart
-# with force-reload (in case signaling is not supported) are
-# considered a success.
-
-RETVAL=0
-
-prog="cloud-init"
-cloud_init="/usr/bin/cloud-init"
-conf="/etc/cloud/cloud.cfg"
-
-# If sysconfig/default variable override files exist, source them...
-[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
-[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init
-
-start() {
- [ -x $cloud_init ] || return 5
- [ -f $conf ] || return 6
-
- echo -n $"Starting $prog: "
- $cloud_init $CLOUDINITARGS init --local
- RETVAL=$?
- return $RETVAL
-}
-
-stop() {
- echo -n $"Shutting down $prog: "
- # No-op
- RETVAL=7
- return $RETVAL
-}
-
-case "$1" in
- start)
- start
- RETVAL=$?
- ;;
- stop)
- stop
- RETVAL=$?
- ;;
- restart|try-restart|condrestart)
- ## Stop the service and regardless of whether it was
- ## running or not, start it again.
- #
- ## Note: try-restart is now part of LSB (as of 1.9).
- ## RH has a similar command named condrestart.
- start
- RETVAL=$?
- ;;
- reload|force-reload)
- # It does not support reload
- RETVAL=3
- ;;
- status)
- echo -n $"Checking for service $prog:"
- # Return value is slightly different for the status command:
- # 0 - service up and running
- # 1 - service dead, but /var/run/ pid file exists
- # 2 - service dead, but /var/lock/ lock file exists
- # 3 - service not running (unused)
- # 4 - service status unknown :-(
- # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
- RETVAL=3
- ;;
- *)
- echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
- RETVAL=3
- ;;
-esac
-
-exit $RETVAL
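
Both Red Hat sysvinit scripts above are thin wrappers: they run /usr/bin/cloud-init init (network stage) or /usr/bin/cloud-init init --local (local stage) and report failures through the LSB exit codes documented in their headers. Below is a minimal sketch of a caller interpreting those codes; the init-script path and the use of Python here are illustrative assumptions, not how distributions actually invoke the service:

    import subprocess

    # LSB exit codes, copied from the comment block in the scripts above.
    LSB_CODES = {
        0: "success",
        1: "generic or unspecified error",
        2: "invalid or excess argument(s)",
        3: "unimplemented feature (e.g. reload)",
        4: "user had insufficient privileges",
        5: "program is not installed",
        6: "program is not configured",
        7: "program is not running",
    }

    def describe(action):
        # Hypothetical path; raises OSError if the script is not installed.
        rc = subprocess.call(["/etc/init.d/cloud-init", action])
        return rc, LSB_CODES.get(rc, "reserved or unknown code %d" % rc)

    print(describe("status"))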
diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl
deleted file mode 100644
index cbb6b15f..00000000
--- a/templates/chef_client.rb.tmpl
+++ /dev/null
@@ -1,58 +0,0 @@
-## template:jinja
-{#
-This file is only utilized if the module 'cc_chef' is enabled in
-cloud-config. Specifically, in order to enable it
-you need to add the following to config:
- chef:
- validation_key: XYZ
- validation_cert: XYZ
- validation_name: XYZ
- server_url: XYZ
--#}
-{{generated_by}}
-{#
-The reason these are not in quotes is that they are ruby
-symbols that will be placed in here, not actual strings...
-#}
-{% if log_level %}
-log_level {{log_level}}
-{% endif %}
-{% if ssl_verify_mode %}
-ssl_verify_mode {{ssl_verify_mode}}
-{% endif %}
-{% if log_location %}
-log_location "{{log_location}}"
-{% endif %}
-{% if validation_name %}
-validation_client_name "{{validation_name}}"
-{% endif %}
-{% if validation_cert %}
-validation_key "{{validation_key}}"
-{% endif %}
-{% if client_key %}
-client_key "{{client_key}}"
-{% endif %}
-{% if server_url %}
-chef_server_url "{{server_url}}"
-{% endif %}
-{% if environment %}
-environment "{{environment}}"
-{% endif %}
-{% if node_name %}
-node_name "{{node_name}}"
-{% endif %}
-{% if json_attribs %}
-json_attribs "{{json_attribs}}"
-{% endif %}
-{% if file_cache_path %}
-file_cache_path "{{file_cache_path}}"
-{% endif %}
-{% if file_backup_path %}
-file_backup_path "{{file_backup_path}}"
-{% endif %}
-{% if pid_file %}
-pid_file "{{pid_file}}"
-{% endif %}
-{% if show_time %}
-Chef::Log::Formatter.show_time = true
-{% endif %}
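
Every directive in the chef template above is guarded by a Jinja conditional, so only keys actually present in the chef: cloud-config section produce a line in the rendered client.rb. A minimal sketch of that pattern, assuming the jinja2 package is available (the two-directive template here is a trimmed stand-in, not the full file):

    from jinja2 import Template

    # Trimmed stand-in for the conditional style used in chef_client.rb.tmpl.
    tmpl = Template(
        '{% if server_url %}chef_server_url "{{server_url}}"\n{% endif %}'
        '{% if node_name %}node_name "{{node_name}}"\n{% endif %}')

    # Only server_url is set, so only one line is emitted.
    print(tmpl.render(server_url="https://chef.example.com"))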
diff --git a/templates/hosts.debian.tmpl b/templates/hosts.debian.tmpl
deleted file mode 100644
index a1d97212..00000000
--- a/templates/hosts.debian.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-## template:jinja
-{#
-This file (/etc/cloud/templates/hosts.debian.tmpl) is only utilized
-if enabled in cloud-config. Specifically, in order to enable it
-you need to add the following to config:
- manage_etc_hosts: True
--#}
-# Your system has been configured with 'manage_etc_hosts' as True.
-# As a result, if you wish for changes to this file to persist
-# then you will need to either
-# a.) make changes to the master file in /etc/cloud/templates/hosts.debian.tmpl
-# b.) change or remove the value of 'manage_etc_hosts' in
-# /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-{# The value '{{hostname}}' will be replaced with the local-hostname -#}
-127.0.1.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost
-
-# The following lines are desirable for IPv6 capable hosts
-::1 ip6-localhost ip6-loopback
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
-ff02::1 ip6-allnodes
-ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
-
diff --git a/templates/hosts.freebsd.tmpl b/templates/hosts.freebsd.tmpl
deleted file mode 100644
index 7ded762f..00000000
--- a/templates/hosts.freebsd.tmpl
+++ /dev/null
@@ -1,24 +0,0 @@
-## template:jinja
-{#
-This file /etc/cloud/templates/hosts.freebsd.tmpl is only utilized
-if enabled in cloud-config. Specifically, in order to enable it
-you need to add the following to config:
- manage_etc_hosts: True
--#}
-# Your system has been configured with 'manage_etc_hosts' as True.
-# As a result, if you wish for changes to this file to persist
-# then you will need to either
-# a.) make changes to the master file in /etc/cloud/templates/hosts.freebsd.tmpl
-# b.) change or remove the value of 'manage_etc_hosts' in
-# /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost.localdomain localhost
-127.0.0.1 localhost4.localdomain4 localhost4
-
-# The following lines are desirable for IPv6 capable hosts
-::1 {{fqdn}} {{hostname}}
-::1 localhost.localdomain localhost
-::1 localhost6.localdomain6 localhost6
-
diff --git a/templates/hosts.redhat.tmpl b/templates/hosts.redhat.tmpl
deleted file mode 100644
index bc5da32c..00000000
--- a/templates/hosts.redhat.tmpl
+++ /dev/null
@@ -1,24 +0,0 @@
-## template:jinja
-{#
-This file /etc/cloud/templates/hosts.redhat.tmpl is only utilized
-if enabled in cloud-config. Specifically, in order to enable it
-you need to add the following to config:
- manage_etc_hosts: True
--#}
-# Your system has been configured with 'manage_etc_hosts' as True.
-# As a result, if you wish for changes to this file to persist
-# then you will need to either
-# a.) make changes to the master file in /etc/cloud/templates/hosts.redhat.tmpl
-# b.) change or remove the value of 'manage_etc_hosts' in
-# /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost.localdomain localhost
-127.0.0.1 localhost4.localdomain4 localhost4
-
-# The following lines are desirable for IPv6 capable hosts
-::1 {{fqdn}} {{hostname}}
-::1 localhost.localdomain localhost
-::1 localhost6.localdomain6 localhost6
-
diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl
deleted file mode 100644
index b6082692..00000000
--- a/templates/hosts.suse.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-## template:jinja
-{#
-This file /etc/cloud/templates/hosts.suse.tmpl is only utilized
-if enabled in cloud-config. Specifically, in order to enable it
-you need to add the following to config:
- manage_etc_hosts: True
--#}
-# Your system has been configured with 'manage_etc_hosts' as True.
-# As a result, if you wish for changes to this file to persist
-# then you will need to either
-# a.) make changes to the master file in /etc/cloud/templates/hosts.suse.tmpl
-# b.) change or remove the value of 'manage_etc_hosts' in
-# /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 localhost
-
-# The following lines are desirable for IPv6 capable hosts
-::1 localhost ipv6-localhost ipv6-loopback
-fe00::0 ipv6-localnet
-
-ff00::0 ipv6-mcastprefix
-ff02::1 ipv6-allnodes
-ff02::2 ipv6-allrouters
-ff02::3 ipv6-allhosts
-
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
deleted file mode 100644
index bfae80db..00000000
--- a/templates/resolv.conf.tmpl
+++ /dev/null
@@ -1,30 +0,0 @@
-## template:jinja
-# Your system has been configured with 'manage-resolv-conf' set to true.
-# As a result, cloud-init has written this file with configuration data
-# that it has been provided. Cloud-init, by default, will write this file
-# a single time (PER_ONCE).
-#
-{% if nameservers is defined %}
-{% for server in nameservers %}
-nameserver {{server}}
-{% endfor %}
-
-{% endif -%}
-{% if searchdomains is defined %}
-search {% for search in searchdomains %}{{search}} {% endfor %}
-
-{% endif %}
-{% if domain is defined %}
-domain {{domain}}
-{% endif %}
-{% if sortlist is defined %}
-
-sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
-{% endif %}
-{% if options or flags %}
-
-options {% for flag in flags %}{{flag}} {% endfor %}
-{% for key, value in options.items() -%}
- {{key}}:{{value}}
-{% endfor %}
-{% endif %}
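
The resolv.conf template loops over a nameserver list and a search-domain list, then renders an options mapping as key:value pairs. A plain-Python sketch of the same rendering logic, with made-up values for illustration:

    def render_resolv_conf(nameservers=(), searchdomains=(), domain=None,
                           options=None):
        # Mirrors the template: one 'nameserver' line per server, a single
        # 'search' line, an optional 'domain' line, then flattened options.
        lines = ["nameserver %s" % ns for ns in nameservers]
        if searchdomains:
            lines.append("search " + " ".join(searchdomains))
        if domain:
            lines.append("domain %s" % domain)
        if options:
            lines.append("options " + " ".join(
                "%s:%s" % kv for kv in sorted(options.items())))
        return "\n".join(lines) + "\n"

    print(render_resolv_conf(nameservers=["10.0.0.2", "10.0.0.3"],
                             searchdomains=["example.internal"],
                             options={"timeout": 2}))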
diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl
deleted file mode 100644
index c8043f76..00000000
--- a/templates/sources.list.debian.tmpl
+++ /dev/null
@@ -1,32 +0,0 @@
-## template:jinja
-## Note: this file is written by cloud-init on first boot of an instance;
-## modifications made here will not survive a re-bundle.
-## If you wish to make changes you can:
-## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
-## or do the same in user-data
-## b.) add sources in /etc/apt/sources.list.d
-## c.) make changes to template file /etc/cloud/templates/sources.list.debian.tmpl
-###
-
-# See http://www.debian.org/releases/stable/i386/release-notes/ch-upgrading.html
-# for how to upgrade to newer versions of the distribution.
-deb {{mirror}} {{codename}} main contrib non-free
-deb-src {{mirror}} {{codename}} main contrib non-free
-
-## Major bug fix updates produced after the final release of the
-## distribution.
-deb {{security}} {{codename}}/updates main contrib non-free
-deb-src {{security}} {{codename}}/updates main contrib non-free
-deb {{mirror}} {{codename}}-updates main contrib non-free
-deb-src {{mirror}} {{codename}}-updates main contrib non-free
-
-## Uncomment the following two lines to add software from the 'backports'
-## repository.
-##
-## N.B. software from this repository may not have been tested as
-## extensively as that contained in the main release, although it includes
-## newer versions of some applications which may provide useful features.
-{#
-deb http://backports.debian.org/debian-backports {{codename}}-backports main contrib non-free
-deb-src http://backports.debian.org/debian-backports {{codename}}-backports main contrib non-free
--#}
diff --git a/templates/sources.list.ubuntu.tmpl b/templates/sources.list.ubuntu.tmpl
deleted file mode 100644
index a0b350df..00000000
--- a/templates/sources.list.ubuntu.tmpl
+++ /dev/null
@@ -1,113 +0,0 @@
-## template:jinja
-## Note: this file is written by cloud-init on first boot of an instance;
-## modifications made here will not survive a re-bundle.
-## If you wish to make changes you can:
-## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
-## or do the same in user-data
-## b.) add sources in /etc/apt/sources.list.d
-## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
-
-# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-# newer versions of the distribution.
-deb {{mirror}} {{codename}} main restricted
-deb-src {{mirror}} {{codename}} main restricted
-
-## Major bug fix updates produced after the final release of the
-## distribution.
-deb {{mirror}} {{codename}}-updates main restricted
-deb-src {{mirror}} {{codename}}-updates main restricted
-
-## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-## team. Also, please note that software in universe WILL NOT receive any
-## review or updates from the Ubuntu security team.
-deb {{mirror}} {{codename}} universe
-deb-src {{mirror}} {{codename}} universe
-deb {{mirror}} {{codename}}-updates universe
-deb-src {{mirror}} {{codename}}-updates universe
-
-## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-## team, and may not be under a free licence. Please satisfy yourself as to
-## your rights to use the software. Also, please note that software in
-## multiverse WILL NOT receive any review or updates from the Ubuntu
-## security team.
-deb {{mirror}} {{codename}} multiverse
-deb-src {{mirror}} {{codename}} multiverse
-deb {{mirror}} {{codename}}-updates multiverse
-deb-src {{mirror}} {{codename}}-updates multiverse
-
-## N.B. software from this repository may not have been tested as
-## extensively as that contained in the main release, although it includes
-## newer versions of some applications which may provide useful features.
-## Also, please note that software in backports WILL NOT receive any review
-## or updates from the Ubuntu security team.
-deb {{mirror}} {{codename}}-backports main restricted universe multiverse
-deb-src {{mirror}} {{codename}}-backports main restricted universe multiverse
-
-##
-## "entries" are meta definitions that can be rendered into sources.list format.
-## pocket_aliases allows for 'release' or 'updates' rather than
-## {{codename}} and {{codename}}-updates
-##
-##
-## one thing not done here is that for 'entry' rendering
-## the components cannot vary per pocket.
-## i.e., each pocket (release or updates) will have the same
-## list of components
-apt:
- pocket_aliases:
- release: {{codename}}
- updates: {{codename}}-updates
- proposed: {{codename}}-proposed
- backports: {{codename}}-backports
- security: {{codename}}-security
- sources:
- main:
- path: "/etc/apt/sources.list"
- # if entries is present, then we render each of the entries defined.
- entries: [primary, security]
- key: yourkey
- my-repo2.list:
- # use source as it would before (supporting templates there)
- # and entries can be in their rendering namespace
- source: deb $primary.mirror $RELEASE multiverse
-
- entries:
- primary:
- mirrors:
- - arches: [i386, amd64]
- mirror: http://archive.ubuntu.com/ubuntu
- search:
- - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu
- - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu
- - http://%(region)s.clouds.archive.ubuntu.com/ubuntu
- - arches: [default]
- mirror: http://ports.ubuntu.com/ubuntu-ports
- search: []
- pockets: [release, updates, proposed, backports]
- components: [main, restricted, universe, multiverse]
- types: [deb, deb-src]
-
- security:
- mirrors:
- - arches: [default]
- mirror: http://archive.ubuntu.com/ubuntu
-     pockets: [security, updates, proposed, backports]
- components: [main, restricted, universe]
- types: [deb, deb-src]
-
- primary:
- mirror
-
-deb {{security}} {{codename}}-security main restricted
-deb-src {{security}} {{codename}}-security main restricted
-deb {{security}} {{codename}}-security universe
-deb-src {{security}} {{codename}}-security universe
-deb {{security}} {{codename}}-security multiverse
-deb-src {{security}} {{codename}}-security multiverse
-
-## Uncomment the following two lines to add software from Canonical's
-## 'partner' repository.
-## This software is not part of Ubuntu, but is offered by Canonical and the
-## respective vendors as a service to Ubuntu users.
-# deb http://archive.canonical.com/ubuntu {{codename}} partner
-# deb-src http://archive.canonical.com/ubuntu {{codename}} partner
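
The YAML notes embedded in the template above sketch an "entries" mechanism: one entry expands into the cross product of types (deb/deb-src) and pockets against a chosen mirror, with a fixed component list per entry. A toy expansion showing the shape of that idea; this is not cloud-init's apt-config code, and the mirror and codename values are made up:

    def expand_entry(mirror, codename, pockets, components,
                     types=("deb", "deb-src")):
        # 'release' maps to the bare codename; other pockets get a suffix,
        # matching the pocket_aliases table in the notes above.
        suites = {p: codename if p == "release" else "%s-%s" % (codename, p)
                  for p in pockets}
        return ["%s %s %s %s" % (t, mirror, suites[p], " ".join(components))
                for t in types for p in pockets]

    for line in expand_entry("http://archive.ubuntu.com/ubuntu", "xenial",
                             ["release", "updates"], ["main", "restricted"]):
        print(line)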
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 6bf38940..00000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-# Needed generally in tests
-httpretty>=0.7.1
-mock
-nose
-unittest2
-
-# Only needed if you want to know the test times
-# nose-timer
-
-# Only really needed on older versions of python
-contextlib2
-setuptools
-
-# Used for style checking
-pep8==1.7.0
-pyflakes==1.1.0
-flake8==2.5.4
-hacking==0.10.2
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/__init__.py
+++ /dev/null
diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml
deleted file mode 100644
index 6231f293..00000000
--- a/tests/configs/sample1.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-#cloud-config
-#apt_update: false
-#apt_upgrade: true
-packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ]
-
-#apt_sources:
-# - source: ppa:smoser/ppa
-
-#disable_root: False
-
-# mounts:
-# - [ ephemeral0, /mnt ]
-# - [ swap, none, swap, sw, 0, 0 ]
-
-ssh_import_id: [smoser ]
-
-#!/bin/sh
-
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-sm_misc:
- - &user_setup |
- set -x; exec > ~/user_setup.log 2>&1
- echo "starting at $(date -R)"
- echo "set -o vi" >> ~/.bashrc
- cat >> ~/.profile <<"EOF"
- export EDITOR=vi
- export DEB_BUILD_OPTIONS=parallel=4
- export PATH=/usr/lib/ccache:$PATH
- EOF
-
- mkdir ~/bin
- chmod 755 ~/bin
- cat > ~/bin/mdebuild <<"EOF"
- #!/bin/sh
- exec debuild --prepend-path /usr/lib/ccache "$@"
- EOF
- chmod 755 ~/bin/*
-
- #byobu-launcher-install
- byobu-ctrl-a screen 2>&1 || :
-
- echo "pinging 8.8.8.8"
- ping -c 4 8.8.8.8
-
-runcmd:
- - [ sudo, -Hu, ubuntu, sh, -c, '[ -e /var/log/cloud-init.log ] || exit 0; grep "cloud-init.*running" /var/log/cloud-init.log > ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ]
-
-
-byobu_by_default: user
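
sample1.yaml leans on a YAML anchor: the &user_setup block scalar defined under sm_misc is reused by the *user_setup alias in the last runcmd entry, so the shell script is written once and executed via sudo. A small sketch of the anchor resolving on load, assuming PyYAML is available:

    import yaml

    doc = """
    sm_misc:
      - &user_setup |
        echo hello
    runcmd:
      - [sh, -c, *user_setup]
    """
    data = yaml.safe_load(doc)
    # The alias expands to the same scalar the anchor named.
    assert data["runcmd"][0][2] == data["sm_misc"][0]
    print(data["runcmd"][0])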
diff --git a/tests/data/filter_cloud_multipart.yaml b/tests/data/filter_cloud_multipart.yaml
deleted file mode 100644
index 7acc2b9d..00000000
--- a/tests/data/filter_cloud_multipart.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-#cloud-config-archive
----
-- content: "\n blah: true\n launch-index: 3\n"
- type: text/cloud-config
-- content: "\n blah: true\n launch-index: 4\n"
- type: text/cloud-config
-- content: The quick brown fox jumps over the lazy dog
- filename: b0.txt
- launch-index: 0
- type: plain/text
-- content: The quick brown fox jumps over the lazy dog
- filename: b3.txt
- launch-index: 3
- type: plain/text
-- content: The quick brown fox jumps over the lazy dog
- filename: b2.txt
- launch-index: 2
- type: plain/text
-- content: '#!/bin/bash \n echo "stuff"'
- filename: b2.txt
- launch-index: 2
-- content: '#!/bin/bash \n echo "stuff"'
- filename: b2.txt
- launch-index: 1
-- content: '#!/bin/bash \n echo "stuff"'
- filename: b2.txt
- # Use a string to see if conversion works
- launch-index: "1"
-...
-
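
The archive above mixes parts whose launch-index is an integer with one given as the string "1", precisely to exercise type coercion when filtering parts for a given launch index. A minimal sketch of the filtering idea, assuming the parts are already parsed into dicts (this is not cloud-init's actual launch-index filter):

    def filter_by_launch_index(parts, desired):
        kept = []
        for part in parts:
            idx = part.get("launch-index")
            # Parts without an index apply to every launch; string values
            # such as "1" coerce to int before comparison.
            if idx is None or int(idx) == desired:
                kept.append(part)
        return kept

    parts = [{"filename": "b0.txt", "launch-index": 0},
             {"filename": "b2.txt", "launch-index": "1"}]
    print(filter_by_launch_index(parts, 1))  # keeps only b2.txt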
diff --git a/tests/data/filter_cloud_multipart_1.email b/tests/data/filter_cloud_multipart_1.email
deleted file mode 100644
index 6d93b1f1..00000000
--- a/tests/data/filter_cloud_multipart_1.email
+++ /dev/null
@@ -1,11 +0,0 @@
-From nobody Fri Aug 31 17:17:00 2012
-Content-Type: text/plain; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-
-
-#cloud-config
-b: c
-launch-index: 2
-
-
diff --git a/tests/data/filter_cloud_multipart_2.email b/tests/data/filter_cloud_multipart_2.email
deleted file mode 100644
index b04068c5..00000000
--- a/tests/data/filter_cloud_multipart_2.email
+++ /dev/null
@@ -1,39 +0,0 @@
-From nobody Fri Aug 31 17:43:04 2012
-Content-Type: multipart/mixed; boundary="===============1668325974=="
-MIME-Version: 1.0
-
---===============1668325974==
-Content-Type: text/cloud-config; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-
-
-#cloud-config
-b: c
-launch-index: 2
-
-
---===============1668325974==
-Content-Type: text/plain; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-
-
-#cloud-config-archive
-- content: The quick brown fox jumps over the lazy dog
- filename: b3.txt
- launch-index: 3
- type: plain/text
-
---===============1668325974==
-Content-Type: text/plain; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-
-
-#cloud-config
-b: c
-launch-index: 2
-
-
---===============1668325974==--
diff --git a/tests/data/filter_cloud_multipart_header.email b/tests/data/filter_cloud_multipart_header.email
deleted file mode 100644
index 770f7ef1..00000000
--- a/tests/data/filter_cloud_multipart_header.email
+++ /dev/null
@@ -1,11 +0,0 @@
-From nobody Fri Aug 31 17:17:00 2012
-Content-Type: text/plain; charset="us-ascii"
-MIME-Version: 1.0
-Launch-Index: 5
-Content-Transfer-Encoding: 7bit
-
-
-#cloud-config
-b: c
-
-
diff --git a/tests/data/merge_sources/expected1.yaml b/tests/data/merge_sources/expected1.yaml
deleted file mode 100644
index 640d282b..00000000
--- a/tests/data/merge_sources/expected1.yaml
+++ /dev/null
@@ -1 +0,0 @@
-Blah: ['blah2', 'b']
diff --git a/tests/data/merge_sources/expected10.yaml b/tests/data/merge_sources/expected10.yaml
deleted file mode 100644
index b865db16..00000000
--- a/tests/data/merge_sources/expected10.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-
-power_state:
- delay: 30
- mode: poweroff
- message: [Bye, Bye, Pew, Pew]
-
diff --git a/tests/data/merge_sources/expected11.yaml b/tests/data/merge_sources/expected11.yaml
deleted file mode 100644
index c0530dc3..00000000
--- a/tests/data/merge_sources/expected11.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-a: 22
-b: 4
-c: 3
diff --git a/tests/data/merge_sources/expected12.yaml b/tests/data/merge_sources/expected12.yaml
deleted file mode 100644
index 0421d2c8..00000000
--- a/tests/data/merge_sources/expected12.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-a:
- e:
- y: 2
diff --git a/tests/data/merge_sources/expected2.yaml b/tests/data/merge_sources/expected2.yaml
deleted file mode 100644
index 6eccc2cf..00000000
--- a/tests/data/merge_sources/expected2.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-Blah: 3
-Blah2: 2
-Blah3: [1]
diff --git a/tests/data/merge_sources/expected3.yaml b/tests/data/merge_sources/expected3.yaml
deleted file mode 100644
index 32d9ad48..00000000
--- a/tests/data/merge_sources/expected3.yaml
+++ /dev/null
@@ -1 +0,0 @@
-Blah: [blah2, 'blah1']
diff --git a/tests/data/merge_sources/expected4.yaml b/tests/data/merge_sources/expected4.yaml
deleted file mode 100644
index d88d8f73..00000000
--- a/tests/data/merge_sources/expected4.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-#cloud-config
-Blah: {}
diff --git a/tests/data/merge_sources/expected5.yaml b/tests/data/merge_sources/expected5.yaml
deleted file mode 100644
index 628f5878..00000000
--- a/tests/data/merge_sources/expected5.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-
-Blah: 3
-Blah2: 2
-Blah3: [1]
-
-
diff --git a/tests/data/merge_sources/expected6.yaml b/tests/data/merge_sources/expected6.yaml
deleted file mode 100644
index 7afe1d7c..00000000
--- a/tests/data/merge_sources/expected6.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#cloud-config
-
-run_cmds:
- - bash
- - top
- - ps
- - vi
- - emacs
-
diff --git a/tests/data/merge_sources/expected7.yaml b/tests/data/merge_sources/expected7.yaml
deleted file mode 100644
index 25284f04..00000000
--- a/tests/data/merge_sources/expected7.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-#cloud-config
-
-users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary-group: foobar
- groups: users
- selinux-user: staff_u
- expiredate: 2012-09-01
- ssh-import-id: foobar
- lock-passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: users, admin
- ssh-import-id: None
- lock-passwd: true
- ssh-authorized-keys:
- - <ssh pub key 1>
- - <ssh pub key 2>
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: true
- system: true
- - bob
- - joe
- - sue
- - name: foobar_jr
- gecos: Foo B. Bar Jr
- primary-group: foobar
- groups: users
- selinux-user: staff_u
- expiredate: 2012-09-01
- ssh-import-id: foobar
- lock-passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
diff --git a/tests/data/merge_sources/expected8.yaml b/tests/data/merge_sources/expected8.yaml
deleted file mode 100644
index 69ca562d..00000000
--- a/tests/data/merge_sources/expected8.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-
-mounts:
- - [ ephemeral22, /mnt, auto, "defaults,noexec" ]
- - [ sdc, /opt/data ]
- - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
- - [ dd, /dev/zero ]
diff --git a/tests/data/merge_sources/expected9.yaml b/tests/data/merge_sources/expected9.yaml
deleted file mode 100644
index 00f91ca0..00000000
--- a/tests/data/merge_sources/expected9.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-phone_home:
- url: http://my.example.com/$INSTANCE_ID/$BLAH_BLAH
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/tests/data/merge_sources/source1-1.yaml b/tests/data/merge_sources/source1-1.yaml
deleted file mode 100644
index 38e4e5e0..00000000
--- a/tests/data/merge_sources/source1-1.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-#cloud-config
-Blah: ['blah2']
-
diff --git a/tests/data/merge_sources/source1-2.yaml b/tests/data/merge_sources/source1-2.yaml
deleted file mode 100644
index 2cd0e0e5..00000000
--- a/tests/data/merge_sources/source1-2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-Blah: ['b']
-
-merge_how: 'dict(recurse_array,no_replace)+list(append)'
diff --git a/tests/data/merge_sources/source10-1.yaml b/tests/data/merge_sources/source10-1.yaml
deleted file mode 100644
index 6ae72a13..00000000
--- a/tests/data/merge_sources/source10-1.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-
-power_state:
- delay: 30
- mode: poweroff
- message: [Bye, Bye]
diff --git a/tests/data/merge_sources/source10-2.yaml b/tests/data/merge_sources/source10-2.yaml
deleted file mode 100644
index a38cf1c5..00000000
--- a/tests/data/merge_sources/source10-2.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-
-power_state:
- message: [Pew, Pew]
-
-merge_how: 'dict(recurse_list)+list(append)'
diff --git a/tests/data/merge_sources/source11-1.yaml b/tests/data/merge_sources/source11-1.yaml
deleted file mode 100644
index ee29d681..00000000
--- a/tests/data/merge_sources/source11-1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-a: 1
-b: 2
-c: 3
diff --git a/tests/data/merge_sources/source11-2.yaml b/tests/data/merge_sources/source11-2.yaml
deleted file mode 100644
index a9914c34..00000000
--- a/tests/data/merge_sources/source11-2.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-#cloud-config
-
-b: 4
diff --git a/tests/data/merge_sources/source11-3.yaml b/tests/data/merge_sources/source11-3.yaml
deleted file mode 100644
index 8f2b8944..00000000
--- a/tests/data/merge_sources/source11-3.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-#cloud-config
-
-a: 22
diff --git a/tests/data/merge_sources/source12-1.yaml b/tests/data/merge_sources/source12-1.yaml
deleted file mode 100644
index 09e7c899..00000000
--- a/tests/data/merge_sources/source12-1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-#cloud-config
-
-a:
- c: 1
- d: 2
- e:
- z: a
- y: b
diff --git a/tests/data/merge_sources/source12-2.yaml b/tests/data/merge_sources/source12-2.yaml
deleted file mode 100644
index 0421d2c8..00000000
--- a/tests/data/merge_sources/source12-2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-a:
- e:
- y: 2
diff --git a/tests/data/merge_sources/source2-1.yaml b/tests/data/merge_sources/source2-1.yaml
deleted file mode 100644
index c7a33aaa..00000000
--- a/tests/data/merge_sources/source2-1.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-
-
-Blah: 1
-Blah2: 2
-Blah3: 3
diff --git a/tests/data/merge_sources/source2-2.yaml b/tests/data/merge_sources/source2-2.yaml
deleted file mode 100644
index 8f2fdc1a..00000000
--- a/tests/data/merge_sources/source2-2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-Blah: 3
-Blah2: 2
-Blah3: [1]
diff --git a/tests/data/merge_sources/source3-1.yaml b/tests/data/merge_sources/source3-1.yaml
deleted file mode 100644
index 2303e906..00000000
--- a/tests/data/merge_sources/source3-1.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-#cloud-config
-Blah: ['blah1']
-
-
diff --git a/tests/data/merge_sources/source3-2.yaml b/tests/data/merge_sources/source3-2.yaml
deleted file mode 100644
index dca2ad10..00000000
--- a/tests/data/merge_sources/source3-2.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-#cloud-config
-Blah: ['blah2']
-
-merge_how: 'dict(recurse_array,no_replace)+list(prepend)'
diff --git a/tests/data/merge_sources/source4-1.yaml b/tests/data/merge_sources/source4-1.yaml
deleted file mode 100644
index e5b16872..00000000
--- a/tests/data/merge_sources/source4-1.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-#cloud-config
-Blah:
- b: 1
diff --git a/tests/data/merge_sources/source4-2.yaml b/tests/data/merge_sources/source4-2.yaml
deleted file mode 100644
index 1844e0f8..00000000
--- a/tests/data/merge_sources/source4-2.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-Blah:
- b: null
-
-
-merge_how: 'dict(allow_delete,no_replace)+list()'
diff --git a/tests/data/merge_sources/source5-1.yaml b/tests/data/merge_sources/source5-1.yaml
deleted file mode 100644
index c7a33aaa..00000000
--- a/tests/data/merge_sources/source5-1.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-
-
-Blah: 1
-Blah2: 2
-Blah3: 3
diff --git a/tests/data/merge_sources/source5-2.yaml b/tests/data/merge_sources/source5-2.yaml
deleted file mode 100644
index f61c96a2..00000000
--- a/tests/data/merge_sources/source5-2.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-#cloud-config
-
-Blah: 3
-Blah2: 2
-Blah3: [1]
-
-
-merge_how: 'dict(replace)+list(append)'
diff --git a/tests/data/merge_sources/source6-1.yaml b/tests/data/merge_sources/source6-1.yaml
deleted file mode 100644
index 519f7309..00000000
--- a/tests/data/merge_sources/source6-1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-run_cmds:
- - bash
- - top
diff --git a/tests/data/merge_sources/source6-2.yaml b/tests/data/merge_sources/source6-2.yaml
deleted file mode 100644
index d8fac446..00000000
--- a/tests/data/merge_sources/source6-2.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-#cloud-config
-
-run_cmds:
- - ps
- - vi
- - emacs
-
-merge_type: 'list(append)+dict(recurse_array)+str()'
diff --git a/tests/data/merge_sources/source7-1.yaml b/tests/data/merge_sources/source7-1.yaml
deleted file mode 100644
index 8fb9b32a..00000000
--- a/tests/data/merge_sources/source7-1.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-#cloud-config
-
-users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary-group: foobar
- groups: users
- selinux-user: staff_u
- expiredate: 2012-09-01
- ssh-import-id: foobar
- lock-passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: users, admin
- ssh-import-id: None
- lock-passwd: true
- ssh-authorized-keys:
- - <ssh pub key 1>
- - <ssh pub key 2>
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: true
- system: true
-
diff --git a/tests/data/merge_sources/source7-2.yaml b/tests/data/merge_sources/source7-2.yaml
deleted file mode 100644
index 1e26201b..00000000
--- a/tests/data/merge_sources/source7-2.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#cloud-config
-
-users:
- - bob
- - joe
- - sue
- - name: foobar_jr
- gecos: Foo B. Bar Jr
- primary-group: foobar
- groups: users
- selinux-user: staff_u
- expiredate: 2012-09-01
- ssh-import-id: foobar
- lock-passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
-
-merge_how: "dict(recurse_array)+list(append)"
diff --git a/tests/data/merge_sources/source8-1.yaml b/tests/data/merge_sources/source8-1.yaml
deleted file mode 100644
index 5ea51c2c..00000000
--- a/tests/data/merge_sources/source8-1.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-
-mounts:
- - [ ephemeral0, /mnt, auto, "defaults,noexec" ]
- - [ sdc, /opt/data ]
- - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
- - [ dd, /dev/zero ]
diff --git a/tests/data/merge_sources/source8-2.yaml b/tests/data/merge_sources/source8-2.yaml
deleted file mode 100644
index 7fa3262b..00000000
--- a/tests/data/merge_sources/source8-2.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-
-mounts:
- - [ ephemeral22, /mnt, auto, "defaults,noexec" ]
-
-merge_how: 'dict(recurse_array)+list(recurse_list,recurse_str)+str()'
diff --git a/tests/data/merge_sources/source9-1.yaml b/tests/data/merge_sources/source9-1.yaml
deleted file mode 100644
index 0b102ba6..00000000
--- a/tests/data/merge_sources/source9-1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-#cloud-config
-
-phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/tests/data/merge_sources/source9-2.yaml b/tests/data/merge_sources/source9-2.yaml
deleted file mode 100644
index ac85afc6..00000000
--- a/tests/data/merge_sources/source9-2.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-
-phone_home:
- url: $BLAH_BLAH
-
-merge_how: 'dict(recurse_str)+str(append)'
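
The source*/expected* pairs above pin down merger semantics selected by merge_how: for example, dict(recurse_array)+list(append) recurses into nested dicts and appends lists, which is how source6-1 plus source6-2 yields expected6. A toy merger reproducing that one combination (not the real cloudinit.mergers machinery):

    def merge(base, new):
        # dict(recurse_array): recurse into dicts; list(append): concatenate
        # lists; for any other pair the later source wins.
        if isinstance(base, dict) and isinstance(new, dict):
            out = dict(base)
            for k, v in new.items():
                out[k] = merge(out[k], v) if k in out else v
            return out
        if isinstance(base, list) and isinstance(new, list):
            return base + new
        return new

    s1 = {"run_cmds": ["bash", "top"]}
    s2 = {"run_cmds": ["ps", "vi", "emacs"]}
    assert merge(s1, s2) == {"run_cmds": ["bash", "top", "ps", "vi", "emacs"]}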
diff --git a/tests/data/mountinfo_precise_ext4.txt b/tests/data/mountinfo_precise_ext4.txt
deleted file mode 100644
index a7a1db67..00000000
--- a/tests/data/mountinfo_precise_ext4.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
-16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
-17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755
-18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
-19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755
-20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered
-21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755
-22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
-23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
-25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
-26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
-27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
-28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
-24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset
-29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
-30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
-31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
-32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
-33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
-34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
-35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
-36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered
-37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
-39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
diff --git a/tests/data/mountinfo_raring_btrfs.txt b/tests/data/mountinfo_raring_btrfs.txt
deleted file mode 100644
index c5795636..00000000
--- a/tests/data/mountinfo_raring_btrfs.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
-16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
-17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755
-18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
-19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755
-20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache
-21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
-22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
-23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
-24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
-25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
-26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
-27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache
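
Both mountinfo fixtures follow the /proc/<pid>/mountinfo layout: mount id, parent id, major:minor, root, mount point and options, then a lone "-" separator before the filesystem type, source, and super-block options. A minimal parser sketch for lines like those above (it assumes no optional field is itself a bare "-"):

    def parse_mountinfo_line(line):
        left, _, right = line.partition(" - ")
        lf = left.split()
        fstype, source, super_opts = right.split()[:3]
        return {"mount_id": lf[0], "parent_id": lf[1], "dev": lf[2],
                "root": lf[3], "mount_point": lf[4], "options": lf[5],
                "fstype": fstype, "source": source,
                "super_options": super_opts}

    sample = ("20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 "
              "rw,compress=lzo,space_cache")
    info = parse_mountinfo_line(sample)
    print(info["mount_point"], info["fstype"], info["source"])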
diff --git a/tests/data/roots/simple_ubuntu/etc/networks/interfaces b/tests/data/roots/simple_ubuntu/etc/networks/interfaces
deleted file mode 100644
index 77efa67d..00000000
--- a/tests/data/roots/simple_ubuntu/etc/networks/interfaces
+++ /dev/null
@@ -1,3 +0,0 @@
-auto lo
-iface lo inet loopback
-
diff --git a/tests/data/user_data.1.txt b/tests/data/user_data.1.txt
deleted file mode 100644
index 4c4543de..00000000
--- a/tests/data/user_data.1.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-#cloud-config
-write_files:
-- content: blah
- path: /etc/blah.ini
- permissions: 493
-
-system_info:
- package_mirrors:
- - arches: [i386, amd64, blah]
- failsafe:
- primary: http://my.archive.mydomain.com/ubuntu
- security: http://my.security.mydomain.com/ubuntu
- search:
- primary: []
- security: []
diff --git a/tests/data/vmware/cust-dhcp-2nic.cfg b/tests/data/vmware/cust-dhcp-2nic.cfg
deleted file mode 100644
index f687311a..00000000
--- a/tests/data/vmware/cust-dhcp-2nic.cfg
+++ /dev/null
@@ -1,34 +0,0 @@
-[NETWORK]
-NETWORKING = yes
-BOOTPROTO = dhcp
-HOSTNAME = myhost1
-DOMAINNAME = eng.vmware.com
-
-[NIC-CONFIG]
-NICS = NIC1,NIC2
-
-[NIC1]
-MACADDR = 00:50:56:a6:8c:08
-ONBOOT = yes
-IPv4_MODE = BACKWARDS_COMPATIBLE
-BOOTPROTO = dhcp
-
-[NIC2]
-MACADDR = 00:50:56:a6:5a:de
-ONBOOT = yes
-IPv4_MODE = BACKWARDS_COMPATIBLE
-BOOTPROTO = dhcp
-
-# some random comment
-
-[PASSWORD]
-# secret
--PASS = c2VjcmV0Cg==
-
-[DNS]
-DNSFROMDHCP=yes
-SUFFIX|1 = eng.vmware.com
-
-[DATETIME]
-TIMEZONE = Africa/Abidjan
-UTC = yes
diff --git a/tests/data/vmware/cust-static-2nic.cfg b/tests/data/vmware/cust-static-2nic.cfg
deleted file mode 100644
index 0d80c2c4..00000000
--- a/tests/data/vmware/cust-static-2nic.cfg
+++ /dev/null
@@ -1,39 +0,0 @@
-[NETWORK]
-NETWORKING = yes
-BOOTPROTO = dhcp
-HOSTNAME = myhost1
-DOMAINNAME = eng.vmware.com
-
-[NIC-CONFIG]
-NICS = NIC1,NIC2
-
-[NIC1]
-MACADDR = 00:50:56:a6:8c:08
-ONBOOT = yes
-IPv4_MODE = BACKWARDS_COMPATIBLE
-BOOTPROTO = static
-IPADDR = 10.20.87.154
-NETMASK = 255.255.252.0
-GATEWAY = 10.20.87.253, 10.20.87.105
-IPv6ADDR|1 = fc00:10:20:87::154
-IPv6NETMASK|1 = 64
-IPv6GATEWAY|1 = fc00:10:20:87::253
-[NIC2]
-MACADDR = 00:50:56:a6:ef:7d
-ONBOOT = yes
-IPv4_MODE = BACKWARDS_COMPATIBLE
-BOOTPROTO = static
-IPADDR = 192.168.6.102
-NETMASK = 255.255.0.0
-GATEWAY = 192.168.0.10
-
-[DNS]
-DNSFROMDHCP=no
-SUFFIX|1 = eng.vmware.com
-SUFFIX|2 = proxy.vmware.com
-NAMESERVER|1 = 10.20.145.1
-NAMESERVER|2 = 10.20.145.2
-
-[DATETIME]
-TIMEZONE = Africa/Abidjan
-UTC = yes
diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/__init__.py
+++ /dev/null
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
deleted file mode 100644
index 972245df..00000000
--- a/tests/unittests/helpers.py
+++ /dev/null
@@ -1,291 +0,0 @@
-from __future__ import print_function
-
-import functools
-import os
-import shutil
-import sys
-import tempfile
-import unittest
-
-import mock
-import six
-import unittest2
-
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
-from cloudinit import helpers as ch
-from cloudinit import util
-
-# Used for skipping tests
-SkipTest = unittest2.SkipTest
-
-# Used for detecting different python versions
-PY2 = False
-PY26 = False
-PY27 = False
-PY3 = False
-FIX_HTTPRETTY = False
-
-_PY_VER = sys.version_info
-_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3]
-if (_PY_MAJOR, _PY_MINOR) <= (2, 6):
- if (_PY_MAJOR, _PY_MINOR) == (2, 6):
- PY26 = True
- if (_PY_MAJOR, _PY_MINOR) >= (2, 0):
- PY2 = True
-else:
- if (_PY_MAJOR, _PY_MINOR) == (2, 7):
- PY27 = True
- PY2 = True
- if (_PY_MAJOR, _PY_MINOR) >= (3, 0):
- PY3 = True
- if _PY_MINOR == 4 and _PY_MICRO < 3:
- FIX_HTTPRETTY = True
-
-
-# Makes the old path start
-# with new base instead of whatever
-# it previously had
-def rebase_path(old_path, new_base):
- if old_path.startswith(new_base):
- # Already handled...
- return old_path
- # Retarget the base of that path
- # to the new base instead of the
- # old one...
- path = os.path.join(new_base, old_path.lstrip("/"))
- path = os.path.abspath(path)
- return path
-
-
-# Can work on anything that takes a path as arguments
-def retarget_many_wrapper(new_base, am, old_func):
- def wrapper(*args, **kwds):
- n_args = list(args)
- nam = am
- if am == -1:
- nam = len(n_args)
- for i in range(0, nam):
- path = args[i]
- # patchOS() wraps various os and os.path functions, however in
- # Python 3 some of these now accept file-descriptors (integers).
- # That breaks rebase_path() so in lieu of a better solution, just
- # don't rebase if we get a fd.
- if isinstance(path, six.string_types):
- n_args[i] = rebase_path(path, new_base)
- return old_func(*n_args, **kwds)
- return wrapper
-
-
-class TestCase(unittest2.TestCase):
- pass
-
-
-class ResourceUsingTestCase(TestCase):
- def setUp(self):
- super(ResourceUsingTestCase, self).setUp()
- self.resource_path = None
-
- def resourceLocation(self, subname=None):
- if self.resource_path is None:
- paths = [
- os.path.join('tests', 'data'),
- os.path.join('data'),
- os.path.join(os.pardir, 'tests', 'data'),
- os.path.join(os.pardir, 'data'),
- ]
- for p in paths:
- if os.path.isdir(p):
- self.resource_path = p
- break
- self.assertTrue((self.resource_path and
- os.path.isdir(self.resource_path)),
- msg="Unable to locate test resource data path!")
- if not subname:
- return self.resource_path
- return os.path.join(self.resource_path, subname)
-
- def readResource(self, name):
- where = self.resourceLocation(name)
- with open(where, 'r') as fh:
- return fh.read()
-
- def getCloudPaths(self, ds=None):
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- cp = ch.Paths({'cloud_dir': tmpdir,
- 'templates_dir': self.resourceLocation()},
- ds=ds)
- return cp
-
-
-class FilesystemMockingTestCase(ResourceUsingTestCase):
- def setUp(self):
- super(FilesystemMockingTestCase, self).setUp()
- self.patched_funcs = ExitStack()
-
- def tearDown(self):
- self.patched_funcs.close()
- ResourceUsingTestCase.tearDown(self)
-
- def replicateTestRoot(self, example_root, target_root):
- real_root = self.resourceLocation()
- real_root = os.path.join(real_root, 'roots', example_root)
- for (dir_path, _dirnames, filenames) in os.walk(real_root):
- real_path = dir_path
- make_path = rebase_path(real_path[len(real_root):], target_root)
- util.ensure_dir(make_path)
- for f in filenames:
- real_path = util.abs_join(real_path, f)
- make_path = util.abs_join(make_path, f)
- shutil.copy(real_path, make_path)
-
- def patchUtils(self, new_root):
- patch_funcs = {
- util: [('write_file', 1),
- ('append_file', 1),
- ('load_file', 1),
- ('ensure_dir', 1),
- ('chmod', 1),
- ('delete_dir_contents', 1),
- ('del_file', 1),
- ('sym_link', -1),
- ('copy', -1)],
- }
- for (mod, funcs) in patch_funcs.items():
- for (f, am) in funcs:
- func = getattr(mod, f)
- trap_func = retarget_many_wrapper(new_root, am, func)
- self.patched_funcs.enter_context(
- mock.patch.object(mod, f, trap_func))
-
- # Handle subprocess calls
- func = getattr(util, 'subp')
-
- def nsubp(*_args, **_kwargs):
- return ('', '')
-
- self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', nsubp))
-
- def null_func(*_args, **_kwargs):
- return None
-
- for f in ['chownbyid', 'chownbyname']:
- self.patched_funcs.enter_context(
- mock.patch.object(util, f, null_func))
-
- def patchOS(self, new_root):
- patch_funcs = {
- os.path: [('isfile', 1), ('exists', 1),
- ('islink', 1), ('isdir', 1)],
- os: [('listdir', 1), ('mkdir', 1),
- ('lstat', 1), ('symlink', 2)],
- }
- for (mod, funcs) in patch_funcs.items():
- for f, nargs in funcs:
- func = getattr(mod, f)
- trap_func = retarget_many_wrapper(new_root, nargs, func)
- self.patched_funcs.enter_context(
- mock.patch.object(mod, f, trap_func))
-
- def patchOpen(self, new_root):
- trap_func = retarget_many_wrapper(new_root, 1, open)
- name = 'builtins.open' if PY3 else '__builtin__.open'
- self.patched_funcs.enter_context(mock.patch(name, trap_func))
-
- def patchStdoutAndStderr(self, stdout=None, stderr=None):
- if stdout is not None:
- self.patched_funcs.enter_context(
- mock.patch.object(sys, 'stdout', stdout))
- if stderr is not None:
- self.patched_funcs.enter_context(
- mock.patch.object(sys, 'stderr', stderr))
-
-
-def import_httpretty():
- """Import HTTPretty and monkey patch Python 3.4 issue.
-    See https://github.com/gabrielfalcao/HTTPretty/pull/193 as well
-    as https://github.com/gabrielfalcao/HTTPretty/issues/221.
-
- Lifted from
- https://github.com/inveniosoftware/datacite/blob/master/tests/helpers.py
- """
- if not FIX_HTTPRETTY:
- import httpretty
- else:
- import socket
- old_SocketType = socket.SocketType
-
- import httpretty
- from httpretty import core
-
- def sockettype_patch(f):
- @functools.wraps(f)
- def inner(*args, **kwargs):
- f(*args, **kwargs)
- socket.SocketType = old_SocketType
- socket.__dict__['SocketType'] = old_SocketType
- return inner
-
- core.httpretty.disable = sockettype_patch(
- httpretty.httpretty.disable
- )
- return httpretty
-
-
-class HttprettyTestCase(TestCase):
- # necessary as http_proxy gets in the way of httpretty
- # https://github.com/gabrielfalcao/HTTPretty/issues/122
- def setUp(self):
- self.restore_proxy = os.environ.get('http_proxy')
- if self.restore_proxy is not None:
- del os.environ['http_proxy']
- super(HttprettyTestCase, self).setUp()
-
- def tearDown(self):
- if self.restore_proxy:
- os.environ['http_proxy'] = self.restore_proxy
- super(HttprettyTestCase, self).tearDown()
-
-
-def populate_dir(path, files):
- if not os.path.exists(path):
- os.makedirs(path)
- for (name, content) in files.items():
- with open(os.path.join(path, name), "wb") as fp:
- if isinstance(content, six.binary_type):
- fp.write(content)
- else:
- fp.write(content.encode('utf-8'))
- fp.close()
-
-
-def dir2dict(startdir, prefix=None):
- flist = {}
- if prefix is None:
- prefix = startdir
- for root, dirs, files in os.walk(startdir):
- for fname in files:
- fpath = os.path.join(root, fname)
- key = fpath[len(prefix):]
- flist[key] = util.load_file(fpath)
- return flist
-
-
-try:
- skipIf = unittest.skipIf
-except AttributeError:
- # Python 2.6. Doesn't have to be high fidelity.
- def skipIf(condition, reason):
- def decorator(func):
- def wrapper(*args, **kws):
-                if condition:
-                    print(reason, file=sys.stderr)
-                else:
-                    return func(*args, **kws)
- return wrapper
- return decorator
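
The heart of the filesystem mocking above is rebase_path plus retarget_many_wrapper: patched os/util functions have their path arguments silently re-rooted under a temporary directory. A small usage sketch with the same logic inlined (the paths are illustrative only):

    import os

    # Same logic as rebase_path() above: re-root a path under new_base.
    def rebase_path(old_path, new_base):
        if old_path.startswith(new_base):
            return old_path  # already rebased
        return os.path.abspath(os.path.join(new_base, old_path.lstrip("/")))

    assert rebase_path("/etc/hosts", "/tmp/root") == "/tmp/root/etc/hosts"
    # Idempotent: an already-rebased path comes back unchanged.
    assert rebase_path("/tmp/root/etc/hosts", "/tmp/root") == \
        "/tmp/root/etc/hosts"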
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
deleted file mode 100644
index 0154784a..00000000
--- a/tests/unittests/test__init__.py
+++ /dev/null
@@ -1,211 +0,0 @@
-import os
-import shutil
-import tempfile
-
-from cloudinit import handlers
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import url_helper
-from cloudinit import util
-
-from .helpers import TestCase, ExitStack, mock
-
-
-class FakeModule(handlers.Handler):
- def __init__(self):
- handlers.Handler.__init__(self, settings.PER_ALWAYS)
- self.types = []
-
- def list_types(self):
- return self.types
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- pass
-
-
-class TestWalkerHandleHandler(TestCase):
-
- def setUp(self):
- super(TestWalkerHandleHandler, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
-
- self.data = {
- "handlercount": 0,
- "frequency": "",
- "handlerdir": tmpdir,
- "handlers": helpers.ContentHandlers(),
- "data": None}
-
- self.expected_module_name = "part-handler-%03d" % (
- self.data["handlercount"],)
- expected_file_name = "%s.py" % self.expected_module_name
- self.expected_file_fullname = os.path.join(
- self.data["handlerdir"], expected_file_name)
- self.module_fake = FakeModule()
- self.ctype = None
- self.filename = None
- self.payload = "dummy payload"
-
- # Mock the write_file() function. We'll assert that it got called as
- # expected in each of the individual tests.
- resources = ExitStack()
- self.addCleanup(resources.close)
- self.write_file_mock = resources.enter_context(
- mock.patch('cloudinit.util.write_file'))
-
- def test_no_errors(self):
- """Payload gets written to file and added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- return_value=self.module_fake) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
- mockobj.assert_called_once_with(self.expected_module_name)
- self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 1)
-
- def test_import_error(self):
- """Module import errors are logged. No handler added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- side_effect=ImportError) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
- mockobj.assert_called_once_with(self.expected_module_name)
- self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 0)
-
- def test_attribute_error(self):
- """Attribute errors are logged. No handler added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- side_effect=AttributeError,
- return_value=self.module_fake) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
- mockobj.assert_called_once_with(self.expected_module_name)
- self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 0)
-
-
-class TestHandlerHandlePart(TestCase):
-
- def setUp(self):
- super(TestHandlerHandlePart, self).setUp()
- self.data = "fake data"
- self.ctype = "fake ctype"
- self.filename = "fake filename"
- self.payload = "fake payload"
- self.frequency = settings.PER_INSTANCE
- self.headers = {
- 'Content-Type': self.ctype,
- }
-
- def test_normal_version_1(self):
- """
- C{handle_part} is called without C{frequency} for
- C{handler_version} == 1.
- """
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=1)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
- # Assert that the handle_part() method of the mock object got
- # called with the expected arguments.
- mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
-
- def test_normal_version_2(self):
- """
- C{handle_part} is called with C{frequency} for
- C{handler_version} == 2.
- """
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=2)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
- # Assert that the handle_part() method of the mock object got
- # called with the expected arguments.
- mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload,
- settings.PER_INSTANCE)
-
- def test_modfreq_per_always(self):
- """
-        C{handle_part} is called regardless of frequency if the module frequency is always.
- """
- self.frequency = "once"
- mod_mock = mock.Mock(frequency=settings.PER_ALWAYS,
- handler_version=1)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
- # Assert that the handle_part() method of the mock object got
- # called with the expected arguments.
- mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
-
- def test_no_handle_when_modfreq_once(self):
- """C{handle_part} is not called if frequency is once."""
- self.frequency = "once"
- mod_mock = mock.Mock(frequency=settings.PER_ONCE)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
- self.assertEqual(0, mod_mock.handle_part.call_count)
-
- def test_exception_is_caught(self):
- """Exceptions within C{handle_part} are caught and logged."""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=1)
- mod_mock.handle_part.side_effect = Exception
- try:
- handlers.run_part(mod_mock, self.data, self.filename,
- self.payload, self.frequency, self.headers)
- except Exception:
- self.fail("Exception was not caught in handle_part")
-
- mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
-
-
-class TestCmdlineUrl(TestCase):
- def test_invalid_content(self):
- url = "http://example.com/foo"
- key = "mykey"
- payload = b"0"
- cmdline = "ro %s=%s bar=1" % (key, url)
-
- with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse(payload)):
- self.assertEqual(
- util.get_cmdline_url(names=[key], starts="xxxxxx",
- cmdline=cmdline),
- (key, url, None))
-
- def test_valid_content(self):
- url = "http://example.com/foo"
- key = "mykey"
- payload = b"xcloud-config\nmydata: foo\nbar: wark\n"
- cmdline = "ro %s=%s bar=1" % (key, url)
-
- with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse(payload)):
- self.assertEqual(
- util.get_cmdline_url(names=[key], starts=b"xcloud-config",
- cmdline=cmdline),
- (key, url, payload))
-
- def test_no_key_found(self):
- url = "http://example.com/foo"
- key = "mykey"
- cmdline = "ro %s=%s bar=1" % (key, url)
-
- with mock.patch('cloudinit.url_helper.readurl',
- return_value=url_helper.StringResponse(b'')):
- self.assertEqual(
- util.get_cmdline_url(names=["does-not-appear"],
- starts="#cloud-config", cmdline=cmdline),
- (None, None, None))
-
-
-# vi: ts=4 expandtab
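
The two version tests above pin down the handler dispatch contract: version-1 handlers receive (data, ctype, filename, payload), while version-2 handlers also receive the frequency. A toy dispatcher showing that contract (the real implementation is cloudinit.handlers.run_part):

    def dispatch(mod, data, ctype, filename, payload, frequency):
        # Version-2 handlers get the frequency argument appended.
        if getattr(mod, "handler_version", 1) >= 2:
            mod.handle_part(data, ctype, filename, payload, frequency)
        else:
            mod.handle_part(data, ctype, filename, payload)

    class EchoHandler(object):
        handler_version = 2

        def handle_part(self, *args):
            print("handle_part%r" % (args,))

    dispatch(EchoHandler(), {}, "text/x-foo", "part-001", "payload",
             "per-instance")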
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
deleted file mode 100644
index dea908d7..00000000
--- a/tests/unittests/test_builtin_handlers.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Tests of the built-in user data handlers."""
-
-import os
-import shutil
-import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-from . import helpers as test_helpers
-
-from cloudinit import handlers
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.handlers import upstart_job
-
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
-
-
-class TestBuiltins(test_helpers.FilesystemMockingTestCase):
- def test_upstart_frequency_no_out(self):
- c_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, c_root)
- up_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, up_root)
- paths = helpers.Paths({
- 'cloud_dir': c_root,
- 'upstart_dir': up_root,
- })
- freq = PER_ALWAYS
- h = upstart_job.UpstartJobPartHandler(paths)
-        # No files should be written out when
-        # the frequency is not per-instance.
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', freq)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
- self.assertEqual(0, len(os.listdir(up_root)))
-
- def test_upstart_frequency_single(self):
-        # files should be written out when the frequency is per-instance
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- freq = PER_INSTANCE
-
- self.patchOS(new_root)
- self.patchUtils(new_root)
- paths = helpers.Paths({
- 'upstart_dir': "/etc/upstart",
- })
-
- upstart_job.SUITABLE_UPSTART = True
- util.ensure_dir("/run")
- util.ensure_dir("/etc/upstart")
-
- with mock.patch.object(util, 'subp') as mockobj:
- h = upstart_job.UpstartJobPartHandler(paths)
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', freq)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
-
- self.assertEqual(len(os.listdir('/etc/upstart')), 1)
-
- mockobj.assert_called_once_with(
- ['initctl', 'reload-configuration'], capture=False)
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
deleted file mode 100644
index 5fa252f7..00000000
--- a/tests/unittests/test_cli.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import six
-
-from . import helpers as test_helpers
-
-from cloudinit.cmd import main as cli
-
-mock = test_helpers.mock
-
-
-class TestCLI(test_helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestCLI, self).setUp()
- self.stderr = six.StringIO()
- self.patchStdoutAndStderr(stderr=self.stderr)
-
- def _call_main(self, sysv_args=None):
- if not sysv_args:
- sysv_args = ['cloud-init']
- try:
- return cli.main(sysv_args=sysv_args)
- except SystemExit as e:
- return e.code
-
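-    # Editorial note: argparse exits with status 2 on usage errors, which is
-    # why the tests below expect SystemExit with code 2 from a bare
-    # 'cloud-init' invocation.
-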
- def test_no_arguments_shows_usage(self):
- exit_code = self._call_main()
- self.assertIn('usage: cloud-init', self.stderr.getvalue())
- self.assertEqual(2, exit_code)
-
- def test_no_arguments_shows_error_message(self):
- exit_code = self._call_main()
- self.assertIn('cloud-init: error: too few arguments',
- self.stderr.getvalue())
- self.assertEqual(2, exit_code)
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
deleted file mode 100644
index 56c9ce9e..00000000
--- a/tests/unittests/test_cs_util.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import print_function
-
-from . import helpers as test_helpers
-
-from cloudinit.cs_utils import Cepko
-
-
-SERVER_CONTEXT = {
- "cpu": 1000,
- "cpus_instead_of_cores": False,
- "global_context": {"some_global_key": "some_global_val"},
- "mem": 1073741824,
- "meta": {"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"},
- "name": "test_server",
- "requirements": [],
- "smp": 1,
- "tags": ["much server", "very performance"],
- "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889",
- "vnc_password": "9e84d6cb49e46379"
-}
-
-
-class CepkoMock(Cepko):
- def all(self):
- return SERVER_CONTEXT
-
- def get(self, key="", request_pattern=None):
- return SERVER_CONTEXT['tags']
-
-
-# 2015-01-22 BAW: This test is completely useless because it only ever tests
-# the CepkoMock object. Even in its original form, I don't think it ever
-# touched the underlying Cepko class methods.
-class CepkoResultTests(test_helpers.TestCase):
- def setUp(self):
- raise test_helpers.SkipTest('This test is completely useless')
-
- def test_getitem(self):
- result = self.c.all()
- self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result['uuid'])
- self.assertEqual([], result['requirements'])
- self.assertEqual("much server", result['tags'][0])
- self.assertEqual(1, result['smp'])
-
- def test_len(self):
- self.assertEqual(len(SERVER_CONTEXT), len(self.c.all()))
-
- def test_contains(self):
- result = self.c.all()
-        self.assertIn('uuid', result)
-        self.assertNotIn('uid', result)
-        self.assertIn('meta', result)
-        self.assertNotIn('ssh_public_key', result)
-
- def test_iter(self):
- self.assertEqual(sorted(SERVER_CONTEXT.keys()),
- sorted([key for key in self.c.all()]))
-
- def test_with_list_as_result(self):
- result = self.c.get('tags')
- self.assertEqual('much server', result[0])
-        self.assertIn('very performance', result)
- self.assertEqual(2, len(result))
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
deleted file mode 100644
index 13db8a4c..00000000
--- a/tests/unittests/test_data.py
+++ /dev/null
@@ -1,576 +0,0 @@
-"""Tests for handling of userdata within cloud init."""
-
-import gzip
-import logging
-import os
-import shutil
-import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-from six import BytesIO, StringIO
-
-from email import encoders
-from email.mime.application import MIMEApplication
-from email.mime.base import MIMEBase
-from email.mime.multipart import MIMEMultipart
-
-from cloudinit import handlers
-from cloudinit import helpers as c_helpers
-from cloudinit import log
-from cloudinit.settings import (PER_INSTANCE)
-from cloudinit import sources
-from cloudinit import stages
-from cloudinit import user_data as ud
-from cloudinit import util
-
-from . import helpers
-
-
-INSTANCE_ID = "i-testing"
-
-
-class FakeDataSource(sources.DataSource):
-
- def __init__(self, userdata=None, vendordata=None):
- sources.DataSource.__init__(self, {}, None, None)
- self.metadata = {'instance-id': INSTANCE_ID}
- self.userdata_raw = userdata
- self.vendordata_raw = vendordata
-
-
-def count_messages(root):
- am = 0
- for m in root.walk():
- if ud.is_skippable(m):
- continue
- am += 1
- return am
-
-
-def gzip_text(text):
- contents = BytesIO()
- f = gzip.GzipFile(fileobj=contents, mode='wb')
- f.write(util.encode_text(text))
- f.flush()
- f.close()
- return contents.getvalue()
-
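-# Illustrative sketch, not part of the original suite: gzip_text() output
-# round-trips through gzip, mirroring cloud-init's transparent handling of
-# gzipped user-data, e.g.:
-#
-#     import gzip, io
-#     blob = gzip_text('#cloud-config\napt_update: true\n')
-#     gzip.GzipFile(fileobj=io.BytesIO(blob)).read()
-#     # -> b'#cloud-config\napt_update: true\n'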
-
-# FIXME: should these tests really be checking log output? Asserting on
-# behavior directly would be less fragile.
-class TestConsumeUserData(helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestConsumeUserData, self).setUp()
- self._log = None
- self._log_file = None
- self._log_handler = None
-
- def tearDown(self):
- if self._log_handler and self._log:
- self._log.removeHandler(self._log_handler)
- helpers.FilesystemMockingTestCase.tearDown(self)
-
- def _patchIn(self, root):
- self.patchOS(root)
- self.patchUtils(root)
-
- def capture_log(self, lvl=logging.DEBUG):
- log_file = StringIO()
- self._log_handler = logging.StreamHandler(log_file)
- self._log_handler.setLevel(lvl)
- self._log = log.getLogger()
- self._log.addHandler(self._log_handler)
- return log_file
-
- def test_simple_jsonp(self):
- blob = '''
-#cloud-config-jsonp
-[
- { "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" }
-]
-'''
-
- ci = stages.Init()
- ci.datasource = FakeDataSource(blob)
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
- ci.fetch()
- ci.consume_data()
- cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
- cc = util.load_yaml(cc_contents)
- self.assertEqual(2, len(cc))
- self.assertEqual('qux', cc['baz'])
- self.assertEqual('qux2', cc['bar'])
-
- def test_simple_jsonp_vendor_and_user(self):
- # test that user-data wins over vendor
- user_blob = '''
-#cloud-config-jsonp
-[
- { "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" }
-]
-'''
- vendor_blob = '''
-#cloud-config-jsonp
-[
- { "op": "add", "path": "/baz", "value": "quxA" },
- { "op": "add", "path": "/bar", "value": "quxB" },
- { "op": "add", "path": "/foo", "value": "quxC" }
-]
-'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
- initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
- cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('qux', cfg['baz'])
- self.assertEqual('qux2', cfg['bar'])
- self.assertEqual('quxC', cfg['foo'])
-
- def test_simple_jsonp_no_vendor_consumed(self):
- # make sure that vendor data is not consumed
- user_blob = '''
-#cloud-config-jsonp
-[
- { "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" },
- { "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
-]
-'''
- vendor_blob = '''
-#cloud-config-jsonp
-[
- { "op": "add", "path": "/baz", "value": "quxA" },
- { "op": "add", "path": "/bar", "value": "quxB" },
- { "op": "add", "path": "/foo", "value": "quxC" }
-]
-'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
- initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
- cfg = mods.cfg
- self.assertEqual('qux', cfg['baz'])
- self.assertEqual('qux2', cfg['bar'])
- self.assertNotIn('foo', cfg)
-
- def test_mixed_cloud_config(self):
- blob_cc = '''
-#cloud-config
-a: b
-c: d
-'''
- message_cc = MIMEBase("text", "cloud-config")
- message_cc.set_payload(blob_cc)
-
- blob_jp = '''
-#cloud-config-jsonp
-[
- { "op": "replace", "path": "/a", "value": "c" },
- { "op": "remove", "path": "/c" }
-]
-'''
-
- message_jp = MIMEBase('text', "cloud-config-jsonp")
- message_jp.set_payload(blob_jp)
-
- message = MIMEMultipart()
- message.attach(message_cc)
- message.attach(message_jp)
-
- ci = stages.Init()
- ci.datasource = FakeDataSource(str(message))
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
- ci.fetch()
- ci.consume_data()
- cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
- cc = util.load_yaml(cc_contents)
- self.assertEqual(1, len(cc))
- self.assertEqual('c', cc['a'])
-
- def test_vendor_user_yaml_cloud_config(self):
- vendor_blob = '''
-#cloud-config
-a: b
-name: vendor
-run:
- - x
- - y
-'''
-
- user_blob = '''
-#cloud-config
-a: c
-vendor_data:
- enabled: True
- prefix: /bin/true
-name: user
-run:
- - z
-'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
- initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
- cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('c', cfg['a'])
- self.assertEqual('user', cfg['name'])
- self.assertNotIn('x', cfg['run'])
- self.assertNotIn('y', cfg['run'])
- self.assertIn('z', cfg['run'])
-
- def test_vendordata_script(self):
- vendor_blob = '''
-#!/bin/bash
-echo "test"
-'''
-
- user_blob = '''
-#cloud-config
-vendor_data:
- enabled: True
- prefix: /bin/true
-'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
- initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
- vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
- vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
- self.assertTrue(os.path.exists(vendor_script_fns))
-
- def test_merging_cloud_config(self):
- blob = '''
-#cloud-config
-a: b
-e: f
-run:
- - b
- - c
-'''
- message1 = MIMEBase("text", "cloud-config")
- message1.set_payload(blob)
-
- blob2 = '''
-#cloud-config
-a: e
-e: g
-run:
- - stuff
- - morestuff
-'''
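-        # Editorial note, not in the original: the X-Merge-Type header set
-        # below selects cloud-init's mergers -- recurse into dicts, append
-        # lists, and append strings -- which is why the final assertions
-        # expect 'a' to merge to 'be' and the 'run' lists to concatenate.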
- message2 = MIMEBase("text", "cloud-config")
- message2['X-Merge-Type'] = ('dict(recurse_array,'
- 'recurse_str)+list(append)+str(append)')
- message2.set_payload(blob2)
-
- blob3 = '''
-#cloud-config
-e:
- - 1
- - 2
- - 3
-p: 1
-'''
- message3 = MIMEBase("text", "cloud-config")
- message3.set_payload(blob3)
-
- messages = [message1, message2, message3]
-
- paths = c_helpers.Paths({}, ds=FakeDataSource(''))
- cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
-
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
- cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
- None)
- for i, m in enumerate(messages):
- headers = dict(m)
- fn = "part-%s" % (i + 1)
- payload = m.get_payload(decode=True)
- cloud_cfg.handle_part(None, headers['Content-Type'],
- fn, payload, None, headers)
- cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None,
- None)
- contents = util.load_file(paths.get_ipath('cloud_config'))
- contents = util.load_yaml(contents)
- self.assertEqual(contents['run'], ['b', 'c', 'stuff', 'morestuff'])
- self.assertEqual(contents['a'], 'be')
- self.assertEqual(contents['e'], [1, 2, 3])
- self.assertEqual(contents['p'], 1)
-
- def test_unhandled_type_warning(self):
- """Raw text without magic is ignored but shows warning."""
- ci = stages.Init()
- data = "arbitrary text\n"
- ci.datasource = FakeDataSource(data)
-
- with mock.patch('cloudinit.util.write_file') as mockobj:
- log_file = self.capture_log(logging.WARNING)
- ci.fetch()
- ci.consume_data()
- self.assertIn(
- "Unhandled non-multipart (text/x-not-multipart) userdata:",
- log_file.getvalue())
-
- mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
-
- def test_mime_gzip_compressed(self):
- """Tests that individual message gzip encoding works."""
-
- def gzip_part(text):
- return MIMEApplication(gzip_text(text), 'gzip')
-
- base_content1 = '''
-#cloud-config
-a: 2
-'''
-
- base_content2 = '''
-#cloud-config
-b: 3
-c: 4
-'''
-
- message = MIMEMultipart('test')
- message.attach(gzip_part(base_content1))
- message.attach(gzip_part(base_content2))
- ci = stages.Init()
- ci.datasource = FakeDataSource(str(message))
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
- ci.fetch()
- ci.consume_data()
- contents = util.load_file(ci.paths.get_ipath("cloud_config"))
- contents = util.load_yaml(contents)
-        self.assertIsInstance(contents, dict)
- self.assertEqual(3, len(contents))
- self.assertEqual(2, contents['a'])
- self.assertEqual(3, contents['b'])
- self.assertEqual(4, contents['c'])
-
- def test_mime_text_plain(self):
- """Mime message of type text/plain is ignored but shows warning."""
- ci = stages.Init()
- message = MIMEBase("text", "plain")
- message.set_payload("Just text")
- ci.datasource = FakeDataSource(message.as_string().encode())
-
- with mock.patch('cloudinit.util.write_file') as mockobj:
- log_file = self.capture_log(logging.WARNING)
- ci.fetch()
- ci.consume_data()
- self.assertIn(
- "Unhandled unknown content-type (text/plain)",
- log_file.getvalue())
- mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
-
- def test_shellscript(self):
- """Raw text starting #!/bin/sh is treated as script."""
- ci = stages.Init()
- script = "#!/bin/sh\necho hello\n"
- ci.datasource = FakeDataSource(script)
-
- outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
-
- with mock.patch('cloudinit.util.write_file') as mockobj:
- log_file = self.capture_log(logging.WARNING)
- ci.fetch()
- ci.consume_data()
- self.assertEqual("", log_file.getvalue())
-
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
-
- def test_mime_text_x_shellscript(self):
- """Mime message of type text/x-shellscript is treated as script."""
- ci = stages.Init()
- script = "#!/bin/sh\necho hello\n"
- message = MIMEBase("text", "x-shellscript")
- message.set_payload(script)
- ci.datasource = FakeDataSource(message.as_string())
-
- outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
-
- with mock.patch('cloudinit.util.write_file') as mockobj:
- log_file = self.capture_log(logging.WARNING)
- ci.fetch()
- ci.consume_data()
- self.assertEqual("", log_file.getvalue())
-
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
-
- def test_mime_text_plain_shell(self):
- """Mime type text/plain starting #!/bin/sh is treated as script."""
- ci = stages.Init()
- script = "#!/bin/sh\necho hello\n"
- message = MIMEBase("text", "plain")
- message.set_payload(script)
- ci.datasource = FakeDataSource(message.as_string())
-
- outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
-
- with mock.patch('cloudinit.util.write_file') as mockobj:
- log_file = self.capture_log(logging.WARNING)
- ci.fetch()
- ci.consume_data()
- self.assertEqual("", log_file.getvalue())
-
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
-
- def test_mime_application_octet_stream(self):
- """Mime type application/octet-stream is ignored but shows warning."""
- ci = stages.Init()
- message = MIMEBase("application", "octet-stream")
- message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
- encoders.encode_base64(message)
- ci.datasource = FakeDataSource(message.as_string().encode())
-
- with mock.patch('cloudinit.util.write_file') as mockobj:
- log_file = self.capture_log(logging.WARNING)
- ci.fetch()
- ci.consume_data()
- self.assertIn(
- "Unhandled unknown content-type (application/octet-stream)",
- log_file.getvalue())
- mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
-
- def test_cloud_config_archive(self):
- non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
- data = [{'content': '#cloud-config\npassword: gocubs\n'},
- {'content': '#cloud-config\nlocale: chicago\n'},
- {'content': non_decodable}]
- message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()
-
- ci = stages.Init()
- ci.datasource = FakeDataSource(message)
-
- fs = {}
-
- def fsstore(filename, content, mode=0o0644, omode="wb"):
- fs[filename] = content
-
-        # Consuming the provided user-data should write the 'cloud_config'
-        # file, which will contain our YAML.
- with mock.patch('cloudinit.util.write_file') as mockobj:
- mockobj.side_effect = fsstore
- ci.fetch()
- ci.consume_data()
-
- cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")])
- self.assertEqual(cfg.get('password'), 'gocubs')
- self.assertEqual(cfg.get('locale'), 'chicago')
-
-
-class TestUDProcess(helpers.ResourceUsingTestCase):
-
- def test_bytes_in_userdata(self):
- msg = b'#cloud-config\napt_update: True\n'
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(msg)
-        self.assertEqual(1, count_messages(message))
-
- def test_string_in_userdata(self):
- msg = '#cloud-config\napt_update: True\n'
-
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(msg)
-        self.assertEqual(1, count_messages(message))
-
- def test_compressed_in_userdata(self):
- msg = gzip_text('#cloud-config\napt_update: True\n')
-
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(msg)
-        self.assertEqual(1, count_messages(message))
-
-
-class TestConvertString(helpers.TestCase):
- def test_handles_binary_non_utf8_decodable(self):
- blob = b'\x32\x99'
- msg = ud.convert_string(blob)
- self.assertEqual(blob, msg.get_payload(decode=True))
-
- def test_handles_binary_utf8_decodable(self):
- blob = b'\x32\x32'
- msg = ud.convert_string(blob)
- self.assertEqual(blob, msg.get_payload(decode=True))
-
- def test_handle_headers(self):
- text = "hi mom"
- msg = ud.convert_string(text)
- self.assertEqual(text, msg.get_payload(decode=False))
diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_datasource/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
deleted file mode 100644
index 12966563..00000000
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ /dev/null
@@ -1,452 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joe VLcek <JVLcek@RedHat.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-'''
-This test file exercises the code in cloudinit/sources/DataSourceAltCloud.py.
-'''
-
-import os
-import shutil
-import tempfile
-
-from cloudinit import helpers
-from cloudinit import util
-from unittest import TestCase
-
-# Import the items needed from cloudinit.sources.DataSourceAltCloud.
-import cloudinit.sources.DataSourceAltCloud
-from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud
-from cloudinit.sources.DataSourceAltCloud import read_user_data_callback
-
-OS_UNAME_ORIG = getattr(os, 'uname')
-
-
-def _write_cloud_info_file(value):
- '''
-    Populate the CLOUD_INFO_FILE, which ImageFactory fills with a
-    cloud backend identifier when building an image.
- '''
- cifile = open(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 'w')
- cifile.write(value)
- cifile.close()
- os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0o664)
-
-
-def _remove_cloud_info_file():
- '''
- Remove the test CLOUD_INFO_FILE
- '''
- os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE)
-
-
-def _write_user_data_files(mount_dir, value):
- '''
-    Populate the deltacloud_user_data_file and the user_data_file
-    with user data.
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
-
- udfile = open(deltacloud_user_data_file, 'w')
- udfile.write(value)
- udfile.close()
- os.chmod(deltacloud_user_data_file, 0o664)
-
- udfile = open(user_data_file, 'w')
- udfile.write(value)
- udfile.close()
- os.chmod(user_data_file, 0o664)
-
-
-def _remove_user_data_files(mount_dir,
- dc_file=True,
- non_dc_file=True):
- '''
- Remove the test files: deltacloud_user_data_file and
- user_data_file
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
-
-    # Ignore any failures removing files that are already gone.
- if dc_file:
- try:
- os.remove(deltacloud_user_data_file)
- except OSError:
- pass
-
- if non_dc_file:
- try:
- os.remove(user_data_file)
- except OSError:
- pass
-
-
-def _dmi_data(expected):
- '''
- Spoof the data received over DMI
- '''
- def _data(key):
- return expected
-
- return _data
-
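-# Illustrative note: after 'util.read_dmi_data = _dmi_data(value)', any DMI
-# lookup (e.g. util.read_dmi_data('system-product-name')) returns that fixed
-# value, letting the tests below force a particular platform.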
-
-class TestGetCloudType(TestCase):
- '''
- Test to exercise method: DataSourceAltCloud.get_cloud_type()
- '''
-
- def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.dmi_data = util.read_dmi_data
- # We have a different code path for arm to deal with LP1243287
- # We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
-
- def tearDown(self):
- # Reset
- util.read_dmi_data = self.dmi_data
- force_arch()
-
- def test_rhev(self):
- '''
- Test method get_cloud_type() for RHEVm systems.
-        Forcing read_dmi_data to return a RHEVm identifier ('RHEV').
- '''
- util.read_dmi_data = _dmi_data('RHEV')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('RHEV', dsrc.get_cloud_type())
-
- def test_vsphere(self):
- '''
- Test method get_cloud_type() for vSphere systems.
-        Forcing read_dmi_data to return the vSphere identifier:
-        VMware Virtual Platform.
- '''
- util.read_dmi_data = _dmi_data('VMware Virtual Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('VSPHERE', dsrc.get_cloud_type())
-
- def test_unknown(self):
- '''
- Test method get_cloud_type() for unknown systems.
-        Forcing read_dmi_data to return an unrecognized platform string.
- '''
- util.read_dmi_data = _dmi_data('Unrecognized Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
-
-
-class TestGetDataCloudInfoFile(TestCase):
- '''
- Test to exercise method: DataSourceAltCloud.get_data()
- With a contrived CLOUD_INFO_FILE
- '''
- def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.cloud_info_file = tempfile.mkstemp()[1]
- self.dmi_data = util.read_dmi_data
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- self.cloud_info_file
-
- def tearDown(self):
- # Reset
-
- # Attempt to remove the temp file ignoring errors
- try:
- os.remove(self.cloud_info_file)
- except OSError:
- pass
-
- util.read_dmi_data = self.dmi_data
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
-
- def test_rhev(self):
- '''Success Test module get_data() forcing RHEV.'''
-
- _write_cloud_info_file('RHEV')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- dsrc.user_data_rhevm = lambda: True
- self.assertEqual(True, dsrc.get_data())
-
- def test_vsphere(self):
- '''Success Test module get_data() forcing VSPHERE.'''
-
- _write_cloud_info_file('VSPHERE')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- dsrc.user_data_vsphere = lambda: True
- self.assertEqual(True, dsrc.get_data())
-
- def test_fail_rhev(self):
- '''Failure Test module get_data() forcing RHEV.'''
-
- _write_cloud_info_file('RHEV')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- dsrc.user_data_rhevm = lambda: False
- self.assertEqual(False, dsrc.get_data())
-
- def test_fail_vsphere(self):
- '''Failure Test module get_data() forcing VSPHERE.'''
-
- _write_cloud_info_file('VSPHERE')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- dsrc.user_data_vsphere = lambda: False
- self.assertEqual(False, dsrc.get_data())
-
- def test_unrecognized(self):
- '''Failure Test module get_data() forcing unrecognized.'''
-
- _write_cloud_info_file('unrecognized')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEqual(False, dsrc.get_data())
-
-
-class TestGetDataNoCloudInfoFile(TestCase):
- '''
- Test to exercise method: DataSourceAltCloud.get_data()
- Without a CLOUD_INFO_FILE
- '''
- def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.dmi_data = util.read_dmi_data
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- 'no such file'
- # We have a different code path for arm to deal with LP1243287
- # We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
-
- def tearDown(self):
- # Reset
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
- util.read_dmi_data = self.dmi_data
- # Return back to original arch
- force_arch()
-
- def test_rhev_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing RHEV.'''
-
- util.read_dmi_data = _dmi_data('RHEV Hypervisor')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- dsrc.user_data_rhevm = lambda: True
- self.assertEqual(True, dsrc.get_data())
-
- def test_vsphere_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing VSPHERE.'''
-
- util.read_dmi_data = _dmi_data('VMware Virtual Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- dsrc.user_data_vsphere = lambda: True
- self.assertEqual(True, dsrc.get_data())
-
- def test_failure_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing unrecognized.'''
-
- util.read_dmi_data = _dmi_data('Unrecognized Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEqual(False, dsrc.get_data())
-
-
-class TestUserDataRhevm(TestCase):
- '''
- Test to exercise method: DataSourceAltCloud.user_data_rhevm()
- '''
- def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.mount_dir = tempfile.mkdtemp()
-
- _write_user_data_files(self.mount_dir, 'test user data')
-
- def tearDown(self):
- # Reset
-
- _remove_user_data_files(self.mount_dir)
-
- # Attempt to remove the temp dir ignoring errors
- try:
- shutil.rmtree(self.mount_dir)
- except OSError:
- pass
-
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['/sbin/modprobe', 'floppy']
- cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
- ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
-
- def test_mount_cb_fails(self):
- '''Test user_data_rhevm() where mount_cb fails.'''
-
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['echo', 'modprobe floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
- self.assertEqual(False, dsrc.user_data_rhevm())
-
- def test_modprobe_fails(self):
- '''Test user_data_rhevm() where modprobe fails.'''
-
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['ls', 'modprobe floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
- self.assertEqual(False, dsrc.user_data_rhevm())
-
- def test_no_modprobe_cmd(self):
- '''Test user_data_rhevm() with no modprobe command.'''
-
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['bad command', 'modprobe floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
- self.assertEqual(False, dsrc.user_data_rhevm())
-
- def test_udevadm_fails(self):
- '''Test user_data_rhevm() where udevadm fails.'''
-
- cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
- ['ls', 'udevadm floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
- self.assertEqual(False, dsrc.user_data_rhevm())
-
- def test_no_udevadm_cmd(self):
- '''Test user_data_rhevm() with no udevadm command.'''
-
- cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
- ['bad command', 'udevadm floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
- self.assertEqual(False, dsrc.user_data_rhevm())
-
-
-class TestUserDataVsphere(TestCase):
- '''
- Test to exercise method: DataSourceAltCloud.user_data_vsphere()
- '''
- def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.mount_dir = tempfile.mkdtemp()
-
- _write_user_data_files(self.mount_dir, 'test user data')
-
- def tearDown(self):
- # Reset
-
- _remove_user_data_files(self.mount_dir)
-
- # Attempt to remove the temp dir ignoring errors
- try:
- shutil.rmtree(self.mount_dir)
- except OSError:
- pass
-
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
-
- def test_user_data_vsphere(self):
- '''Test user_data_vsphere() where mount_cb fails.'''
-
- cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
- self.assertEqual(False, dsrc.user_data_vsphere())
-
-
-class TestReadUserDataCallback(TestCase):
- '''
-    Test to exercise the module-level function read_user_data_callback().
- '''
- def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.mount_dir = tempfile.mkdtemp()
-
- _write_user_data_files(self.mount_dir, 'test user data')
-
- def tearDown(self):
- # Reset
-
- _remove_user_data_files(self.mount_dir)
-
- # Attempt to remove the temp dir ignoring errors
- try:
- shutil.rmtree(self.mount_dir)
- except OSError:
- pass
-
- def test_callback_both(self):
- '''Test read_user_data_callback() with both files.'''
-
- self.assertEqual('test user data',
- read_user_data_callback(self.mount_dir))
-
- def test_callback_dc(self):
- '''Test read_user_data_callback() with only DC file.'''
-
- _remove_user_data_files(self.mount_dir,
- dc_file=False,
- non_dc_file=True)
-
- self.assertEqual('test user data',
- read_user_data_callback(self.mount_dir))
-
- def test_callback_non_dc(self):
- '''Test read_user_data_callback() with only non-DC file.'''
-
- _remove_user_data_files(self.mount_dir,
- dc_file=True,
- non_dc_file=False)
-
- self.assertEqual('test user data',
- read_user_data_callback(self.mount_dir))
-
- def test_callback_none(self):
-        '''Test read_user_data_callback() when no files are found.'''
-
- _remove_user_data_files(self.mount_dir)
-        self.assertIsNone(read_user_data_callback(self.mount_dir))
-
-
-def force_arch(arch=None):
-
- def _os_uname():
- return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch)
-
- if arch:
- setattr(os, 'uname', _os_uname)
- elif arch is None:
- setattr(os, 'uname', OS_UNAME_ORIG)
-
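-# Editorial note: force_arch('x86_64') monkey-patches os.uname so its fifth
-# element (the machine field) reports 'x86_64'; calling force_arch() with no
-# argument restores the saved original, OS_UNAME_ORIG.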
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
deleted file mode 100644
index e90e903c..00000000
--- a/tests/unittests/test_datasource/test_azure.py
+++ /dev/null
@@ -1,640 +0,0 @@
-from cloudinit import helpers
-from cloudinit.util import b64e, decode_binary, load_file
-from cloudinit.sources import DataSourceAzure
-
-from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest
-
-import crypt
-import os
-import shutil
-import stat
-import tempfile
-import xml.etree.ElementTree as ET
-import yaml
-
-
-def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
- if data is None:
- data = {'HostName': 'FOOHOST'}
- if pubkeys is None:
- pubkeys = {}
-
- content = """<?xml version="1.0" encoding="utf-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:wa="http://schemas.microsoft.com/windowsazure"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-
- <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
- xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
- """
- for key, dval in data.items():
- if isinstance(dval, dict):
- val = dval.get('text')
- attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items()
- if k != 'text'])
- else:
- val = dval
- attrs = ""
- content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
-
- if userdata:
- content += "<UserData>%s</UserData>\n" % (b64e(userdata))
-
- if pubkeys:
- content += "<SSH><PublicKeys>\n"
- for fp, path, value in pubkeys:
- content += " <PublicKey>"
- if fp and path:
- content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
- (fp, path))
- if value:
- content += "<Value>%s</Value>" % value
- content += "</PublicKey>\n"
- content += "</PublicKeys></SSH>"
- content += """
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
- <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
- <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
- <ProvisionGuestAgent>false</ProvisionGuestAgent>
- <GuestAgentPackageName i:nil="true" />
- </PlatformSettings></wa:PlatformSettingsSection>
-</Environment>
- """
-
- return content
-
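-# Illustrative usage, not part of the original file:
-#
-#     construct_valid_ovf_env(data={'HostName': 'myhost'},
-#                             userdata='#cloud-config\n')
-#
-# returns ovf-env.xml content whose <UserData> element carries the
-# b64e()-encoded user-data, which the tests below rely on.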
-
-class TestAzureDataSource(TestCase):
-
- def setUp(self):
- super(TestAzureDataSource, self).setUp()
- if PY26:
- raise SkipTest("Does not work on python 2.6")
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- # patch cloud_dir, so our 'seed_dir' is guaranteed empty
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
-
- self.patches = ExitStack()
- self.addCleanup(self.patches.close)
-
- def apply_patches(self, patches):
- for module, name, new in patches:
- self.patches.enter_context(mock.patch.object(module, name, new))
-
- def _get_ds(self, data):
-
- def dsdevs():
- return data.get('dsdevs', [])
-
- def _invoke_agent(cmd):
- data['agent_invoked'] = cmd
-
- def _wait_for_files(flist, _maxwait=None, _naplen=None):
- data['waited'] = flist
- return []
-
- def _pubkeys_from_crt_files(flist):
- data['pubkey_files'] = flist
- return ["pubkey_from: %s" % f for f in flist]
-
- if data.get('ovfcontent') is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
- {'ovf-env.xml': data['ovfcontent']})
-
- mod = DataSourceAzure
- mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- self.get_metadata_from_fabric = mock.MagicMock(return_value={
- 'public-keys': [],
- })
-
- self.instance_id = 'test-instance-id'
-
- self.apply_patches([
- (mod, 'list_possible_azure_ds_devs', dsdevs),
- (mod, 'invoke_agent', _invoke_agent),
- (mod, 'wait_for_files', _wait_for_files),
- (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
- (mod, 'perform_hostname_bounce', mock.MagicMock()),
- (mod, 'get_hostname', mock.MagicMock()),
- (mod, 'set_hostname', mock.MagicMock()),
- (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
- (mod.util, 'read_dmi_data', mock.MagicMock(
- return_value=self.instance_id)),
- ])
-
- dsrc = mod.DataSourceAzureNet(
- data.get('sys_cfg', {}), distro=None, paths=self.paths)
-
- return dsrc
-
- def xml_equals(self, oxml, nxml):
- """Compare two sets of XML to make sure they are equal"""
-
- def create_tag_index(xml):
- et = ET.fromstring(xml)
- ret = {}
- for x in et.iter():
- ret[x.tag] = x
- return ret
-
- def tags_exists(x, y):
- for tag in x.keys():
- self.assertIn(tag, y)
- for tag in y.keys():
- self.assertIn(tag, x)
-
- def tags_equal(x, y):
- for x_tag, x_val in x.items():
- y_val = y.get(x_val.tag)
- self.assertEqual(x_val.text, y_val.text)
-
- old_cnt = create_tag_index(oxml)
- new_cnt = create_tag_index(nxml)
- tags_exists(old_cnt, new_cnt)
- tags_equal(old_cnt, new_cnt)
-
- def xml_notequals(self, oxml, nxml):
- try:
- self.xml_equals(oxml, nxml)
- except AssertionError:
- return
- raise AssertionError("XML is the same")
-
- def test_basic_seed_dir(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, "")
- self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
- self.assertTrue(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
-
- def test_waagent_d_has_0700_perms(self):
- # we expect /var/lib/waagent to be created 0700
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertTrue(os.path.isdir(self.waagent_d))
- self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
-
- def test_user_cfg_set_agent_command_plain(self):
- # set dscfg in via plaintext
-        # yaml_cfg must hold XML-friendly plaintext;
-        # not all plaintext is expected to work.
- yaml_cfg = "{agent_command: my_command}\n"
- cfg = yaml.safe_load(yaml_cfg)
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- def test_user_cfg_set_agent_command(self):
- # set dscfg in via base64 encoded yaml
- cfg = {'agent_command': "my_command"}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- def test_sys_cfg_set_agent_command(self):
- sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], '_COMMAND')
-
- def test_username_used(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
- "myuser")
-
- def test_password_given(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-        self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
-
-        # The default user should take the provided username and not be locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertFalse(defuser['lock_passwd'])
-        # passwd is a crypt-formatted string: $id$salt$encrypted.
-        # Re-encrypting the plaintext with everything up to and including
-        # the final '$' as the salt should reproduce the stored value.
- pos = defuser['passwd'].rfind("$") + 1
- self.assertEqual(defuser['passwd'],
- crypt.crypt(odata['UserPassword'],
- defuser['passwd'][0:pos]))
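-        # Illustrative example, not in the original:
-        #   crypt.crypt('mypass', '$6$salt$') -> '$6$salt$<hash>'
-        # so re-encrypting with the stored prefix reproduces the full value.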
-
- def test_userdata_plain(self):
- mydata = "FOOBAR"
- odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
-
- def test_userdata_found(self):
- mydata = "FOOBAR"
- odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
-
- def test_no_datasource_expected(self):
- # no source should be found if no seed_dir and no devs
- data = {}
-        dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertFalse(ret)
-        self.assertNotIn('agent_invoked', data)
-
- def test_cfg_has_pubkeys_fingerprint(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
-
- def test_cfg_has_pubkeys_value(self):
- # make sure that provided key is used over fingerprint
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_cfg_has_no_fingerprint_has_value(self):
- # test value is used when fingerprint not provided
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_default_ephemeral(self):
- # make sure the ephemeral device works
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
-
- self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- "/dev/sdb")
-        self.assertIn('disk_setup', cfg)
-        self.assertIn('fs_setup', cfg)
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
-
- def test_provide_disk_aliases(self):
- # Make sure that user can affect disk aliases
- dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(dscfg)),
- 'encoding': 'base64'}}
- usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
- 'ephemeral0': False}}
- userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
-
- ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
- data = {'ovfcontent': ovfcontent, 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
- self.assertTrue(cfg)
-
- def test_userdata_arrives(self):
- userdata = "This is my user-data"
- xml = construct_valid_ovf_env(data={}, userdata=userdata)
- data = {'ovfcontent': xml}
- dsrc = self._get_ds(data)
- dsrc.get_data()
-
- self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
-
- def test_password_redacted_in_ovf(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
-
- self.assertTrue(ret)
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
-
-        # The XML should not be the same since the user password is redacted
- on_disk_ovf = load_file(ovf_env_path)
- self.xml_notequals(data['ovfcontent'], on_disk_ovf)
-
-        # Make sure that the redacted password on disk is not used by cloud-init
- self.assertNotEqual(dsrc.cfg.get('password'),
- DataSourceAzure.DEF_PASSWD_REDACTION)
-
- # Make sure that the password was really encrypted
- et = ET.fromstring(on_disk_ovf)
- for elem in et.iter():
- if 'UserPassword' in elem.tag:
- self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION,
- elem.text)
-
- def test_ovf_env_arrives_in_waagent_dir(self):
- xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
- dsrc = self._get_ds({'ovfcontent': xml})
- dsrc.get_data()
-
- # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
- # we expect that the ovf-env.xml file is copied there.
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
- self.assertTrue(os.path.exists(ovf_env_path))
- self.xml_equals(xml, load_file(ovf_env_path))
-
- def test_ovf_can_include_unicode(self):
- xml = construct_valid_ovf_env(data={})
- xml = u'\ufeff{0}'.format(xml)
- dsrc = self._get_ds({'ovfcontent': xml})
- dsrc.get_data()
-
- def test_exception_fetching_fabric_data_doesnt_propagate(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
- self.get_metadata_from_fabric.side_effect = Exception
- self.assertFalse(ds.get_data())
-
- def test_fabric_data_included_in_metadata(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
- self.get_metadata_from_fabric.return_value = {'test': 'value'}
- ret = ds.get_data()
- self.assertTrue(ret)
- self.assertEqual('value', ds.metadata['test'])
-
- def test_instance_id_from_dmidecode_used(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
- def test_instance_id_from_dmidecode_used_for_builtin(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
- ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
-
-class TestAzureBounce(TestCase):
-
- def mock_out_azure_moving_parts(self):
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'invoke_agent'))
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'wait_for_files'))
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
- mock.MagicMock(return_value=[])))
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure,
- 'find_fabric_formatted_ephemeral_disk',
- mock.MagicMock(return_value=None)))
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure,
- 'find_fabric_formatted_ephemeral_part',
- mock.MagicMock(return_value=None)))
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
- mock.MagicMock(return_value={})))
- self.patches.enter_context(
- mock.patch.object(DataSourceAzure.util, 'read_dmi_data',
- mock.MagicMock(return_value='test-instance-id')))
-
- def setUp(self):
- super(TestAzureBounce, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- self.addCleanup(shutil.rmtree, self.tmp)
- DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.patches = ExitStack()
- self.mock_out_azure_moving_parts()
- self.get_hostname = self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'get_hostname'))
- self.set_hostname = self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'set_hostname'))
- self.subp = self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
-
- def tearDown(self):
- self.patches.close()
-
- def _get_ds(self, ovfcontent=None):
- if ovfcontent is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
- {'ovf-env.xml': ovfcontent})
- return DataSourceAzure.DataSourceAzureNet(
- {}, distro=None, paths=self.paths)
-
- def get_ovf_env_with_dscfg(self, hostname, cfg):
- odata = {
- 'HostName': hostname,
- 'dscfg': {
- 'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'
- }
- }
- return construct_valid_ovf_env(data=odata)
-
- def test_disabled_bounce_does_not_change_hostname(self):
- cfg = {'hostname_bounce': {'policy': 'off'}}
- self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
- def test_disabled_bounce_does_not_perform_bounce(
- self, perform_hostname_bounce):
- cfg = {'hostname_bounce': {'policy': 'off'}}
- self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- def test_same_hostname_does_not_change_hostname(self):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'yes'}}
- self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
- def test_unchanged_hostname_does_not_perform_bounce(
- self, perform_hostname_bounce):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'yes'}}
- self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
- def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'force'}}
- self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
- self.assertEqual(1, perform_hostname_bounce.call_count)
-
- def test_different_hostnames_sets_hostname(self):
- expected_hostname = 'azure-expected-host-name'
- self.get_hostname.return_value = 'default-host-name'
- self._get_ds(
- self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
- self.assertEqual(expected_hostname,
- self.set_hostname.call_args_list[0][0][0])
-
- @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
- def test_different_hostnames_performs_bounce(
- self, perform_hostname_bounce):
- expected_hostname = 'azure-expected-host-name'
- self.get_hostname.return_value = 'default-host-name'
- self._get_ds(
- self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
- self.assertEqual(1, perform_hostname_bounce.call_count)
-
- def test_different_hostnames_sets_hostname_back(self):
- initial_host_name = 'default-host-name'
- self.get_hostname.return_value = initial_host_name
- self._get_ds(
- self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
- self.assertEqual(initial_host_name,
- self.set_hostname.call_args_list[-1][0][0])
-
- @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
- def test_failure_in_bounce_still_resets_host_name(
- self, perform_hostname_bounce):
- perform_hostname_bounce.side_effect = Exception
- initial_host_name = 'default-host-name'
- self.get_hostname.return_value = initial_host_name
- self._get_ds(
- self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
- self.assertEqual(initial_host_name,
- self.set_hostname.call_args_list[-1][0][0])
-
- def test_environment_correct_for_bounce_command(self):
- interface = 'int0'
- hostname = 'my-new-host'
- old_hostname = 'my-old-host'
- self.get_hostname.return_value = old_hostname
- cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg(hostname, cfg)
- self._get_ds(data).get_data()
- self.assertEqual(1, self.subp.call_count)
- bounce_env = self.subp.call_args[1]['env']
- self.assertEqual(interface, bounce_env['interface'])
- self.assertEqual(hostname, bounce_env['hostname'])
- self.assertEqual(old_hostname, bounce_env['old_hostname'])
-
- def test_default_bounce_command_used_by_default(self):
- cmd = 'default-bounce-command'
- DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
- cfg = {'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
- self.assertEqual(1, self.subp.call_count)
- bounce_args = self.subp.call_args[1]['args']
- self.assertEqual(cmd, bounce_args)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
- def test_set_hostname_option_can_disable_bounce(
- self, perform_hostname_bounce):
- cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- def test_set_hostname_option_can_disable_hostname_set(self):
- cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, self.set_hostname.call_count)
-
-
-class TestReadAzureOvf(TestCase):
- def test_invalid_xml_raises_non_azure_ds(self):
- invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
- DataSourceAzure.read_azure_ovf, invalid_xml)
-
- def test_load_with_pubkeys(self):
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- content = construct_valid_ovf_env(pubkeys=pubkeys)
- (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
- for mypk in mypklist:
- self.assertIn(mypk, cfg['_pubkeys'])
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
deleted file mode 100644
index 65202ff0..00000000
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ /dev/null
@@ -1,412 +0,0 @@
-import os
-
-from cloudinit.sources.helpers import azure as azure_helper
-
-from ..helpers import ExitStack, mock, TestCase
-
-
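-# A stand-in for the goal state XML that the Azure fabric (WireServer)
-# endpoint serves; the {incarnation}, {container_id}, {instance_id} and
-# {certificates_url} placeholders are filled per-test via str.format().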
-GOAL_STATE_TEMPLATE = """\
-<?xml version="1.0" encoding="utf-8"?>
-<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:noNamespaceSchemaLocation="goalstate10.xsd">
- <Version>2012-11-30</Version>
- <Incarnation>{incarnation}</Incarnation>
- <Machine>
- <ExpectedState>Started</ExpectedState>
- <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
- <LBProbePorts>
- <Port>16001</Port>
- </LBProbePorts>
- <ExpectHealthReport>FALSE</ExpectHealthReport>
- </Machine>
- <Container>
- <ContainerId>{container_id}</ContainerId>
- <RoleInstanceList>
- <RoleInstance>
- <InstanceId>{instance_id}</InstanceId>
- <State>Started</State>
- <Configuration>
- <HostingEnvironmentConfig>
- http://100.86.192.70:80/...hostingEnvironmentConfig...
- </HostingEnvironmentConfig>
- <SharedConfig>http://100.86.192.70:80/..SharedConfig..</SharedConfig>
- <ExtensionsConfig>
- http://100.86.192.70:80/...extensionsConfig...
- </ExtensionsConfig>
- <FullConfig>http://100.86.192.70:80/...fullConfig...</FullConfig>
- <Certificates>{certificates_url}</Certificates>
- <ConfigName>68ce47.0.68ce47.0.utl-trusty--292258.1.xml</ConfigName>
- </Configuration>
- </RoleInstance>
- </RoleInstanceList>
- </Container>
-</GoalState>
-"""
-
-
-class TestFindEndpoint(TestCase):
-
- def setUp(self):
- super(TestFindEndpoint, self).setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.load_file = patches.enter_context(
- mock.patch.object(azure_helper.util, 'load_file'))
-
- def test_missing_file(self):
- self.load_file.side_effect = IOError
- self.assertRaises(IOError,
- azure_helper.WALinuxAgentShim.find_endpoint)
-
- def test_missing_special_azure_line(self):
- self.load_file.return_value = ''
- self.assertRaises(ValueError,
- azure_helper.WALinuxAgentShim.find_endpoint)
-
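-    # Builds a dhclient-style lease block; the Azure endpoint address is
-    # carried in DHCP option 245, which dhclient records as
-    # 'option unknown-245'.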
- @staticmethod
- def _build_lease_content(encoded_address):
- return '\n'.join([
- 'lease {',
- ' interface "eth0";',
- ' option unknown-245 {0};'.format(encoded_address),
- '}'])
-
- def test_latest_lease_used(self):
- encoded_addresses = ['5:4:3:2', '4:3:2:1']
- file_content = '\n'.join([self._build_lease_content(encoded_address)
- for encoded_address in encoded_addresses])
- self.load_file.return_value = file_content
- self.assertEqual(encoded_addresses[-1].replace(':', '.'),
- azure_helper.WALinuxAgentShim.find_endpoint())
-
-
-class TestExtractIpAddressFromLeaseValue(TestCase):
-
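-    # The lease value may be colon-separated hex octets or a packed ASCII
-    # string; e.g. bytes([0x62, 0x4c, 0x36, 0x20]) == b'bL6 ', so both
-    # '62:4c:36:20' and 'bL6 ' decode to the dotted quad '98.76.54.32'.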
- def test_hex_string(self):
- ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
- self.assertEqual(
- ip_address,
- azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
- encoded_address
- ))
-
- def test_hex_string_with_single_character_part(self):
- ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
- self.assertEqual(
- ip_address,
- azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
- encoded_address
- ))
-
- def test_packed_string(self):
- ip_address, encoded_address = '98.76.54.32', 'bL6 '
- self.assertEqual(
- ip_address,
- azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
- encoded_address
- ))
-
- def test_packed_string_with_escaped_quote(self):
- ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
- self.assertEqual(
- ip_address,
- azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
- encoded_address
- ))
-
- def test_packed_string_containing_a_colon(self):
- ip_address, encoded_address = '100.72.58.108', 'dH:l'
- self.assertEqual(
- ip_address,
- azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
- encoded_address
- ))
-
-
-class TestGoalStateParsing(TestCase):
-
- default_parameters = {
- 'incarnation': 1,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId',
- 'certificates_url': 'MyCertificatesUrl',
- }
-
- def _get_goal_state(self, http_client=None, **kwargs):
- if http_client is None:
- http_client = mock.MagicMock()
- parameters = self.default_parameters.copy()
- parameters.update(kwargs)
- xml = GOAL_STATE_TEMPLATE.format(**parameters)
- if parameters['certificates_url'] is None:
- new_xml_lines = []
- for line in xml.splitlines():
- if 'Certificates' in line:
- continue
- new_xml_lines.append(line)
- xml = '\n'.join(new_xml_lines)
- return azure_helper.GoalState(xml, http_client)
-
- def test_incarnation_parsed_correctly(self):
- incarnation = '123'
- goal_state = self._get_goal_state(incarnation=incarnation)
- self.assertEqual(incarnation, goal_state.incarnation)
-
- def test_container_id_parsed_correctly(self):
- container_id = 'TestContainerId'
- goal_state = self._get_goal_state(container_id=container_id)
- self.assertEqual(container_id, goal_state.container_id)
-
- def test_instance_id_parsed_correctly(self):
- instance_id = 'TestInstanceId'
- goal_state = self._get_goal_state(instance_id=instance_id)
- self.assertEqual(instance_id, goal_state.instance_id)
-
- def test_certificates_xml_parsed_and_fetched_correctly(self):
- http_client = mock.MagicMock()
- certificates_url = 'TestCertificatesUrl'
- goal_state = self._get_goal_state(
- http_client=http_client, certificates_url=certificates_url)
- certificates_xml = goal_state.certificates_xml
- self.assertEqual(1, http_client.get.call_count)
- self.assertEqual(certificates_url, http_client.get.call_args[0][0])
- self.assertTrue(http_client.get.call_args[1].get('secure', False))
- self.assertEqual(http_client.get.return_value.contents,
- certificates_xml)
-
- def test_missing_certificates_skips_http_get(self):
- http_client = mock.MagicMock()
- goal_state = self._get_goal_state(
- http_client=http_client, certificates_url=None)
- certificates_xml = goal_state.certificates_xml
- self.assertEqual(0, http_client.get.call_count)
- self.assertIsNone(certificates_xml)
-
-
-class TestAzureEndpointHttpClient(TestCase):
-
- regular_headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
- }
-
- def setUp(self):
- super(TestAzureEndpointHttpClient, self).setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.read_file_or_url = patches.enter_context(
- mock.patch.object(azure_helper.util, 'read_file_or_url'))
-
- def test_non_secure_get(self):
- client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
- response = client.get(url, secure=False)
- self.assertEqual(1, self.read_file_or_url.call_count)
- self.assertEqual(self.read_file_or_url.return_value, response)
- self.assertEqual(mock.call(url, headers=self.regular_headers),
- self.read_file_or_url.call_args)
-
- def test_secure_get(self):
- url = 'MyTestUrl'
- certificate = mock.MagicMock()
- expected_headers = self.regular_headers.copy()
- expected_headers.update({
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": certificate,
- })
- client = azure_helper.AzureEndpointHttpClient(certificate)
- response = client.get(url, secure=True)
- self.assertEqual(1, self.read_file_or_url.call_count)
- self.assertEqual(self.read_file_or_url.return_value, response)
- self.assertEqual(mock.call(url, headers=expected_headers),
- self.read_file_or_url.call_args)
-
- def test_post(self):
- data = mock.MagicMock()
- url = 'MyTestUrl'
- client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- response = client.post(url, data=data)
- self.assertEqual(1, self.read_file_or_url.call_count)
- self.assertEqual(self.read_file_or_url.return_value, response)
- self.assertEqual(
- mock.call(url, data=data, headers=self.regular_headers),
- self.read_file_or_url.call_args)
-
- def test_post_with_extra_headers(self):
- url = 'MyTestUrl'
- client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- extra_headers = {'test': 'header'}
- client.post(url, extra_headers=extra_headers)
- self.assertEqual(1, self.read_file_or_url.call_count)
- expected_headers = self.regular_headers.copy()
- expected_headers.update(extra_headers)
- self.assertEqual(
- mock.call(mock.ANY, data=mock.ANY, headers=expected_headers),
- self.read_file_or_url.call_args)
-
-
-class TestOpenSSLManager(TestCase):
-
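-    # OpenSSLManager shells out to openssl (via util.subp) inside a
-    # private temporary directory; subp and open are patched in setUp so
-    # no real keys or files are created.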
- def setUp(self):
- super(TestOpenSSLManager, self).setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.subp = patches.enter_context(
- mock.patch.object(azure_helper.util, 'subp'))
- try:
- self.open = patches.enter_context(
- mock.patch('__builtin__.open'))
- except ImportError:
- self.open = patches.enter_context(
- mock.patch('builtins.open'))
-
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.tempfile, 'mkdtemp')
- def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
- manager = azure_helper.OpenSSLManager()
- self.assertEqual(mkdtemp.return_value, manager.tmpdir)
-
- def test_generate_certificate_uses_tmpdir(self):
- subp_directory = {}
-
- def capture_directory(*args, **kwargs):
- subp_directory['path'] = os.getcwd()
-
- self.subp.side_effect = capture_directory
- manager = azure_helper.OpenSSLManager()
- self.assertEqual(manager.tmpdir, subp_directory['path'])
- manager.clean_up()
-
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.tempfile, 'mkdtemp', mock.MagicMock())
- @mock.patch.object(azure_helper.util, 'del_dir')
- def test_clean_up(self, del_dir):
- manager = azure_helper.OpenSSLManager()
- manager.clean_up()
- self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list)
-
-
-class TestWALinuxAgentShim(TestCase):
-
- def setUp(self):
- super(TestWALinuxAgentShim, self).setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.AzureEndpointHttpClient = patches.enter_context(
- mock.patch.object(azure_helper, 'AzureEndpointHttpClient'))
- self.find_endpoint = patches.enter_context(
- mock.patch.object(
- azure_helper.WALinuxAgentShim, 'find_endpoint'))
- self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
- self.OpenSSLManager = patches.enter_context(
- mock.patch.object(azure_helper, 'OpenSSLManager'))
- patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
-
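-    # register_with_azure_and_fetch_data() is expected to locate the
-    # fabric endpoint, GET the goal state, turn any certificates into
-    # public keys, and POST a health report; the tests below exercise
-    # each step in isolation.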
- def test_http_client_uses_certificate(self):
- shim = azure_helper.WALinuxAgentShim()
- shim.register_with_azure_and_fetch_data()
- self.assertEqual(
- [mock.call(self.OpenSSLManager.return_value.certificate)],
- self.AzureEndpointHttpClient.call_args_list)
-
- def test_correct_url_used_for_goalstate(self):
- self.find_endpoint.return_value = 'test_endpoint'
- shim = azure_helper.WALinuxAgentShim()
- shim.register_with_azure_and_fetch_data()
- get = self.AzureEndpointHttpClient.return_value.get
- self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- get.call_args_list)
- self.assertEqual(
- [mock.call(get.return_value.contents,
- self.AzureEndpointHttpClient.return_value)],
- self.GoalState.call_args_list)
-
- def test_certificates_used_to_determine_public_keys(self):
- shim = azure_helper.WALinuxAgentShim()
- data = shim.register_with_azure_and_fetch_data()
- self.assertEqual(
- [mock.call(self.GoalState.return_value.certificates_xml)],
- self.OpenSSLManager.return_value.parse_certificates.call_args_list)
- self.assertEqual(
- self.OpenSSLManager.return_value.parse_certificates.return_value,
- data['public-keys'])
-
- def test_absent_certificates_produces_empty_public_keys(self):
- self.GoalState.return_value.certificates_xml = None
- shim = azure_helper.WALinuxAgentShim()
- data = shim.register_with_azure_and_fetch_data()
- self.assertEqual([], data['public-keys'])
-
- def test_correct_url_used_for_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
- shim = azure_helper.WALinuxAgentShim()
- shim.register_with_azure_and_fetch_data()
- expected_url = 'http://test_endpoint/machine?comp=health'
- self.assertEqual(
- [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post.call_args_list)
-
- def test_goal_state_values_used_for_report_ready(self):
- self.GoalState.return_value.incarnation = 'TestIncarnation'
- self.GoalState.return_value.container_id = 'TestContainerId'
- self.GoalState.return_value.instance_id = 'TestInstanceId'
- shim = azure_helper.WALinuxAgentShim()
- shim.register_with_azure_and_fetch_data()
- posted_document = (
- self.AzureEndpointHttpClient.return_value.post.call_args[1]['data']
- )
- self.assertIn('TestIncarnation', posted_document)
- self.assertIn('TestContainerId', posted_document)
- self.assertIn('TestInstanceId', posted_document)
-
- def test_clean_up_can_be_called_at_any_time(self):
- shim = azure_helper.WALinuxAgentShim()
- shim.clean_up()
-
- def test_clean_up_will_clean_up_openssl_manager_if_instantiated(self):
- shim = azure_helper.WALinuxAgentShim()
- shim.register_with_azure_and_fetch_data()
- shim.clean_up()
- self.assertEqual(
- 1, self.OpenSSLManager.return_value.clean_up.call_count)
-
- def test_failure_to_fetch_goalstate_bubbles_up(self):
- class SentinelException(Exception):
- pass
- self.AzureEndpointHttpClient.return_value.get.side_effect = (
- SentinelException)
- shim = azure_helper.WALinuxAgentShim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
-
-
-class TestGetMetadataFromFabric(TestCase):
-
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_data_from_shim_returned(self, shim):
- ret = azure_helper.get_metadata_from_fabric()
- self.assertEqual(
- shim.return_value.register_with_azure_and_fetch_data.return_value,
- ret)
-
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_success_calls_clean_up(self, shim):
- azure_helper.get_metadata_from_fabric()
- self.assertEqual(1, shim.return_value.clean_up.call_count)
-
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_failure_in_registration_calls_clean_up(self, shim):
- class SentinelException(Exception):
- pass
- shim.return_value.register_with_azure_and_fetch_data.side_effect = (
- SentinelException)
- self.assertRaises(SentinelException,
- azure_helper.get_metadata_from_fabric)
- self.assertEqual(1, shim.return_value.clean_up.call_count)
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
deleted file mode 100644
index 2a42ce0c..00000000
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# coding: utf-8
-
-import copy
-
-from cloudinit.cs_utils import Cepko
-from cloudinit.sources import DataSourceCloudSigma
-
-from .. import helpers as test_helpers
-
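-# Server context as CloudSigma presents it to a guest; in these tests it
-# is returned by CepkoMock rather than fetched over the real Cepko
-# transport.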
-SERVER_CONTEXT = {
- "cpu": 1000,
- "cpus_instead_of_cores": False,
- "global_context": {"some_global_key": "some_global_val"},
- "mem": 1073741824,
- "meta": {
- "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe",
- "cloudinit-user-data": "#cloud-config\n\n...",
- },
- "name": "test_server",
- "requirements": [],
- "smp": 1,
- "tags": ["much server", "very performance"],
- "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890",
- "vnc_password": "9e84d6cb49e46379",
- "vendor_data": {
- "location": "zrh",
- "cloudinit": "#cloud-config\n\n...",
- }
-}
-
-
-class CepkoMock(Cepko):
- def __init__(self, mocked_context):
- self.result = mocked_context
-
- def all(self):
- return self
-
-
-class DataSourceCloudSigmaTest(test_helpers.TestCase):
- def setUp(self):
- super(DataSourceCloudSigmaTest, self).setUp()
- self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
- self.datasource.is_running_in_cloudsigma = lambda: True
- self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
- self.datasource.get_data()
-
- def test_get_hostname(self):
- self.assertEqual("test_server", self.datasource.get_hostname())
- self.datasource.metadata['name'] = ''
- self.assertEqual("65b2fb23", self.datasource.get_hostname())
- self.datasource.metadata['name'] = u'тест'
- self.assertEqual("65b2fb23", self.datasource.get_hostname())
-
- def test_get_public_ssh_keys(self):
- self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']],
- self.datasource.get_public_ssh_keys())
-
- def test_get_instance_id(self):
- self.assertEqual(SERVER_CONTEXT['uuid'],
- self.datasource.get_instance_id())
-
- def test_metadata(self):
- self.assertEqual(self.datasource.metadata, SERVER_CONTEXT)
-
- def test_user_data(self):
- self.assertEqual(self.datasource.userdata_raw,
- SERVER_CONTEXT['meta']['cloudinit-user-data'])
-
- def test_encoded_user_data(self):
- encoded_context = copy.deepcopy(SERVER_CONTEXT)
- encoded_context['meta']['base64_fields'] = 'cloudinit-user-data'
- encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK'
- self.datasource.cepko = CepkoMock(encoded_context)
- self.datasource.get_data()
-
- self.assertEqual(self.datasource.userdata_raw, b'hi world\n')
-
- def test_vendor_data(self):
- self.assertEqual(self.datasource.vendordata_raw,
- SERVER_CONTEXT['vendor_data']['cloudinit'])
-
- def test_lack_of_vendor_data(self):
- stripped_context = copy.deepcopy(SERVER_CONTEXT)
- del stripped_context["vendor_data"]
- self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
- self.datasource.cepko = CepkoMock(stripped_context)
- self.datasource.get_data()
-
- self.assertIsNone(self.datasource.vendordata_raw)
-
- def test_lack_of_cloudinit_key_in_vendor_data(self):
- stripped_context = copy.deepcopy(SERVER_CONTEXT)
- del stripped_context["vendor_data"]["cloudinit"]
- self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
- self.datasource.cepko = CepkoMock(stripped_context)
- self.datasource.get_data()
-
- self.assertIsNone(self.datasource.vendordata_raw)
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py
deleted file mode 100644
index b1aab17b..00000000
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from cloudinit import helpers
-from cloudinit.sources.DataSourceCloudStack import DataSourceCloudStack
-
-from ..helpers import TestCase, mock, ExitStack
-
-
-class TestCloudStackPasswordFetching(TestCase):
-
- def setUp(self):
- super(TestCloudStackPasswordFetching, self).setUp()
- self.patches = ExitStack()
- self.addCleanup(self.patches.close)
- mod_name = 'cloudinit.sources.DataSourceCloudStack'
- self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name)))
- self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name)))
-
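-    # The datasource fetches the CloudStack password through util.subp;
-    # patching subp's return value lets each test simulate the password
-    # server's reply ('', 'saved_password', 'bad_request' or a password).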
- def _set_password_server_response(self, response_string):
- subp = mock.MagicMock(return_value=(response_string, ''))
- self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceCloudStack.util.subp',
- subp))
- return subp
-
- def test_empty_password_doesnt_create_config(self):
- self._set_password_server_response('')
- ds = DataSourceCloudStack({}, None, helpers.Paths({}))
- ds.get_data()
- self.assertEqual({}, ds.get_config_obj())
-
- def test_saved_password_doesnt_create_config(self):
- self._set_password_server_response('saved_password')
- ds = DataSourceCloudStack({}, None, helpers.Paths({}))
- ds.get_data()
- self.assertEqual({}, ds.get_config_obj())
-
- def test_password_sets_password(self):
- password = 'SekritSquirrel'
- self._set_password_server_response(password)
- ds = DataSourceCloudStack({}, None, helpers.Paths({}))
- ds.get_data()
- self.assertEqual(password, ds.get_config_obj()['password'])
-
- def test_bad_request_doesnt_stop_ds_from_working(self):
- self._set_password_server_response('bad_request')
- ds = DataSourceCloudStack({}, None, helpers.Paths({}))
- self.assertTrue(ds.get_data())
-
- def assertRequestTypesSent(self, subp, expected_request_types):
- request_types = []
- for call in subp.call_args_list:
- args = call[0][0]
- for arg in args:
- if arg.startswith('DomU_Request'):
- request_types.append(arg.split()[1])
- self.assertEqual(expected_request_types, request_types)
-
- def test_valid_response_means_password_marked_as_saved(self):
- password = 'SekritSquirrel'
- subp = self._set_password_server_response(password)
- ds = DataSourceCloudStack({}, None, helpers.Paths({}))
- ds.get_data()
- self.assertRequestTypesSent(subp,
- ['send_my_password', 'saved_password'])
-
- def _check_password_not_saved_for(self, response_string):
- subp = self._set_password_server_response(response_string)
- ds = DataSourceCloudStack({}, None, helpers.Paths({}))
- ds.get_data()
- self.assertRequestTypesSent(subp, ['send_my_password'])
-
- def test_password_not_saved_if_empty(self):
- self._check_password_not_saved_for('')
-
- def test_password_not_saved_if_already_saved(self):
- self._check_password_not_saved_for('saved_password')
-
- def test_password_not_saved_if_bad_request(self):
- self._check_password_not_saved_for('bad_request')
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
deleted file mode 100644
index 18551b92..00000000
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ /dev/null
@@ -1,597 +0,0 @@
-from copy import copy
-import json
-import os
-import shutil
-import six
-import tempfile
-
-from cloudinit import helpers
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit import settings
-from cloudinit.sources import DataSourceConfigDrive as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-from ..helpers import TestCase, ExitStack, mock
-
-
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': 0,
- 'ami-manifest-path': 'FIXME',
- 'block-device-mapping': {
- 'ami': 'sda1',
- 'ephemeral0': 'sda2',
- 'root': '/dev/sda1',
- 'swap': 'sda3'},
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': None,
- 'placement': {'availability-zone': 'nova'},
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '',
- 'public-keys': {'0': {'openssh-key': PUBKEY}},
- 'reservation-id': 'r-iru5qm4m',
- 'security-groups': ['default']
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-NETWORK_DATA = {
- 'services': [
- {'type': 'dns', 'address': '199.204.44.24'},
- {'type': 'dns', 'address': '199.204.47.54'}
- ],
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
- {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
- 'ethernet_mac_address': 'fa:16:3e:05:30:fe',
- 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
- ],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp',
- 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5',
- 'id': 'network2'}
- ]
-}
-
-NETWORK_DATA_2 = {
- "services": [
- {"type": "dns", "address": "1.1.1.191"},
- {"type": "dns", "address": "1.1.1.4"}],
- "networks": [
- {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4",
- "netmask": "255.255.255.248", "link": "eth0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}],
- "ip_address": "2.2.2.10", "id": "network0-ipv4"},
- {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4",
- "netmask": "255.255.255.224", "link": "eth1",
- "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500,
- "type": "vif", "id": "eth0", "vif_id": "vif-foo1"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500,
- "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
-}
-
-
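-# Maps the MACs used in the network_data fixtures to interface names,
-# standing in for what get_interfaces_by_mac() would report on a real
-# system.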
-KNOWN_MACS = {
- 'fa:16:3e:69:b0:58': 'enp0s1',
- 'fa:16:3e:d4:57:ad': 'enp0s2',
- 'fa:16:3e:dd:50:9a': 'foo1',
- 'fa:16:3e:a8:14:69': 'foo2',
- 'fa:16:3e:ed:9a:59': 'foo3',
-}
-
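-# Layout of an OpenStack config-drive v2: parallel ec2/ and openstack/
-# trees, each with dated version directories plus 'latest'.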
-CFG_DRIVE_FILES_V2 = {
- 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
- 'ec2/2009-04-04/user-data': USER_DATA,
- 'ec2/latest/meta-data.json': json.dumps(EC2_META),
- 'ec2/latest/user-data': USER_DATA,
- 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2012-08-10/user_data': USER_DATA,
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA),
- 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2015-10-15/user_data': USER_DATA,
- 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)}
-
-
-class TestConfigDriveDataSource(TestCase):
-
- def setUp(self):
- super(TestConfigDriveDataSource, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_ec2_metadata(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- found = ds.read_config_drive(self.tmp)
- self.assertTrue('ec2-metadata' in found)
- ec2_md = found['ec2-metadata']
- self.assertEqual(EC2_META, ec2_md)
-
- def test_dev_os_remap(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- cfg_ds.metadata = found['metadata']
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- }
- for name, dev_name in name_tests.items():
- with ExitStack() as mocks:
-                # hand find_devs_with an 'sdX'-style name ('sda1') so
-                # the remap back to the expected '/dev/vdX' device path
-                # is exercised
-                provided_name = dev_name[len('/dev/'):]
-                provided_name = "s" + provided_name[1:]
- find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[provided_name]))
- # We want os.path.exists() to return False on its first call,
- # and True on its second call. We use a handy generator as
- # the mock side effect for this. The mocked function returns
- # what the side effect returns.
-
- def exists_side_effect():
- yield False
- yield True
- exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()))
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
-
- find_mock.assert_called_once_with(mock.ANY)
- self.assertEqual(exists_mock.call_count, 2)
-
- def test_dev_os_map(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- os_md = found['metadata']
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- }
- for name, dev_name in name_tests.items():
- with ExitStack() as mocks:
- find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[dev_name]))
- exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- return_value=True))
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
-
- find_mock.assert_called_once_with(mock.ANY)
- exists_mock.assert_called_once_with(mock.ANY)
-
- def test_dev_ec2_remap(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
- cfg_ds.ec2_metadata = ec2_md
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- None: None,
- 'bob': None,
- 'root2k': None,
- }
- for name, dev_name in name_tests.items():
- # We want os.path.exists() to return False on its first call,
- # and True on its second call. We use a handy generator as
- # the mock side effect for this. The mocked function returns
- # what the side effect returns.
- def exists_side_effect():
- yield False
- yield True
- with mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()):
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
- # We don't assert the call count for os.path.exists() because
- # not all of the entries in name_tests results in two calls to
- # that function. Specifically, 'root2k' doesn't seem to call
- # it at all.
-
- def test_dev_ec2_map(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
- cfg_ds.ec2_metadata = ec2_md
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/sda1',
- 'root': '/dev/sda1',
- 'ephemeral0': '/dev/sda2',
- 'swap': '/dev/sda3',
- None: None,
- 'bob': None,
- 'root2k': None,
- }
- for name, dev_name in name_tests.items():
- with mock.patch.object(os.path, 'exists', return_value=True):
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
-
- def test_dir_valid(self):
- """Verify a dir is read as such."""
-
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
-
- found = ds.read_config_drive(self.tmp)
-
- expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
-
- self.assertEqual(USER_DATA, found['userdata'])
- self.assertEqual(expected_md, found['metadata'])
- self.assertEqual(NETWORK_DATA, found['networkdata'])
- self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
- self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
-
- def test_seed_dir_valid_extra(self):
- """Verify extra files do not affect datasource validity."""
-
- data = copy(CFG_DRIVE_FILES_V2)
- data["myfoofile.txt"] = "myfoocontent"
- data["openstack/latest/random-file.txt"] = "random-content"
-
- populate_dir(self.tmp, data)
-
- found = ds.read_config_drive(self.tmp)
-
- expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
-
- self.assertEqual(expected_md, found['metadata'])
-
- def test_seed_dir_bad_json_metadata(self):
- """Verify that bad json in metadata raises BrokenConfigDriveDir."""
- data = copy(CFG_DRIVE_FILES_V2)
-
- data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
- data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
- data["openstack/latest/meta_data.json"] = "non-json garbage {}"
-
- populate_dir(self.tmp, data)
-
- self.assertRaises(openstack.BrokenMetadata,
- ds.read_config_drive, self.tmp)
-
-    def test_seed_dir_no_configdrive(self):
-        """Verify that a never-created seed dir raises NonReadable."""
-
-        # The directory is deliberately never populated, so
-        # read_config_drive has nothing it can read.
-        my_d = os.path.join(self.tmp, "non-configdrive")
-
-        self.assertRaises(openstack.NonReadable,
-                          ds.read_config_drive, my_d)
-
- def test_seed_dir_missing(self):
- """Verify that missing seed_dir raises NonConfigDriveDir."""
- my_d = os.path.join(self.tmp, "nonexistantdirectory")
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
-
- def test_find_candidates(self):
- devs_with_answers = {}
-
- def my_devs_with(*args, **kwargs):
- criteria = args[0] if len(args) else kwargs.pop('criteria', None)
- return devs_with_answers.get(criteria, [])
-
- def my_is_partition(dev):
- return dev[-1] in "0123456789" and not dev.startswith("sr")
-
- try:
- orig_find_devs_with = util.find_devs_with
- util.find_devs_with = my_devs_with
-
- orig_is_partition = util.is_partition
- util.is_partition = my_is_partition
-
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=config-2": ["/dev/vdb"]}
- self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
-
- # add a vfat item
- # zdd reverse sorts after vdb, but config-2 label is preferred
- devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
- self.assertEqual(["/dev/vdb", "/dev/zdd"],
- ds.find_candidate_devs())
-
- # verify that partitions are considered, that have correct label.
- devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
- "TYPE=iso9660": [],
- "LABEL=config-2": ["/dev/vdb3"]}
- self.assertEqual(["/dev/vdb3"],
- ds.find_candidate_devs())
-
- finally:
- util.find_devs_with = orig_find_devs_with
- util.is_partition = orig_is_partition
-
- @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
- def test_pubkeys_v2(self, on_first_boot):
- """Verify that public-keys work in config-drive-v2."""
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- myds = cfg_ds_from_dir(self.tmp)
- self.assertEqual(myds.get_public_ssh_keys(),
- [OSTACK_META['public_keys']['mykey']])
-
-
-class TestNetJson(TestCase):
- def setUp(self):
- super(TestNetJson, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.maxDiff = None
-
- @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
- def test_network_data_is_found(self, on_first_boot):
- """Verify that network_data is present in ds in config-drive-v2."""
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- myds = cfg_ds_from_dir(self.tmp)
- self.assertIsNotNone(myds.network_json)
-
- @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
- def test_network_config_is_converted(self, on_first_boot):
- """Verify that network_data is converted and present on ds object."""
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- myds = cfg_ds_from_dir(self.tmp)
- network_config = openstack.convert_net_json(NETWORK_DATA,
- known_macs=KNOWN_MACS)
- self.assertEqual(myds.network_config, network_config)
-
- def test_network_config_conversions(self):
- """Tests a bunch of input network json and checks the
- expected conversions."""
- in_datas = [
- NETWORK_DATA,
- {
- 'services': [{'type': 'dns', 'address': '172.19.0.12'}],
- 'networks': [{
- 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4',
- 'type': 'ipv4',
- 'netmask': '255.255.252.0',
- 'link': 'tap1a81968a-79',
- 'routes': [{
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- 'gateway': '172.19.3.254',
- }],
- 'ip_address': '172.19.1.34',
- 'id': 'network0',
- }],
- 'links': [{
- 'type': 'bridge',
- 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f',
- 'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
- 'id': 'tap1a81968a-79',
- 'mtu': None,
- }],
- },
- ]
- out_datas = [
- {
- 'version': 1,
- 'config': [
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:69:b0:58',
- 'name': 'enp0s1',
- 'mtu': None,
- },
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:d4:57:ad',
- 'name': 'enp0s2',
- 'mtu': None,
- },
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:05:30:fe',
- 'name': 'nic0',
- 'mtu': None,
- },
- {
- 'type': 'nameserver',
- 'address': '199.204.44.24',
- },
- {
- 'type': 'nameserver',
- 'address': '199.204.47.54',
- }
- ],
-
- },
- {
- 'version': 1,
- 'config': [
- {
- 'name': 'foo3',
- 'mac_address': 'fa:16:3e:ed:9a:59',
- 'mtu': None,
- 'type': 'physical',
- 'subnets': [
- {
- 'address': '172.19.1.34',
- 'netmask': '255.255.252.0',
- 'type': 'static',
- 'ipv4': True,
- 'routes': [{
- 'gateway': '172.19.3.254',
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- }],
- }
- ]
- },
- {
- 'type': 'nameserver',
- 'address': '172.19.0.12',
- }
- ],
- },
- ]
- for in_data, out_data in zip(in_datas, out_datas):
- conv_data = openstack.convert_net_json(in_data,
- known_macs=KNOWN_MACS)
- self.assertEqual(out_data, conv_data)
-
-
-class TestConvertNetworkData(TestCase):
- def setUp(self):
- super(TestConvertNetworkData, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _getnames_in_config(self, ncfg):
- return set([n['name'] for n in ncfg['config']
- if n['type'] == 'physical'])
-
- def test_conversion_fills_names(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
- expected = set(['nic0', 'enp0s1', 'enp0s2'])
- found = self._getnames_in_config(ncfg)
- self.assertEqual(found, expected)
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
- macs = KNOWN_MACS.copy()
- macs.update({'fa:16:3e:05:30:fe': 'foonic1',
- 'fa:16:3e:69:b0:58': 'ens1'})
- get_interfaces_by_mac.return_value = macs
-
- ncfg = openstack.convert_net_json(NETWORK_DATA)
- expected = set(['nic0', 'ens1', 'enp0s2'])
- found = self._getnames_in_config(ncfg)
- self.assertEqual(found, expected)
-
- def test_convert_raises_value_error_on_missing_name(self):
- macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
- self.assertRaises(ValueError, openstack.convert_net_json,
- NETWORK_DATA, known_macs=macs)
-
- def test_conversion_with_route(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_2,
- known_macs=KNOWN_MACS)
- # not the best test, but see that we get a route in the
- # network config and that it gets rendered to an ENI file
- routes = []
- for n in ncfg['config']:
- for s in n.get('subnets', []):
- routes.extend(s.get('routes', []))
- self.assertIn(
- {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
- routes)
- eni_renderer = eni.Renderer()
- eni_renderer.render_network_state(
- self.tmp, network_state.parse_net_config_data(ncfg))
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
- self.assertIn("route add default gw 2.2.2.9", eni_rendering)
-
-
-def cfg_ds_from_dir(seed_d):
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
- helpers.Paths({}))
- cfg_ds.seed_dir = seed_d
- cfg_ds.known_macs = KNOWN_MACS.copy()
- if not cfg_ds.get_data():
- raise RuntimeError("Data source did not extract itself from"
- " seed directory %s" % seed_d)
- return cfg_ds
-
-
-def populate_ds_from_read_config(cfg_ds, source, results):
- """Patch the DataSourceConfigDrive from the results of
- read_config_drive_dir hopefully in line with what it would have
- if cfg_ds.get_data had been successfully called"""
- cfg_ds.source = source
- cfg_ds.metadata = results.get('metadata')
- cfg_ds.ec2_metadata = results.get('ec2-metadata')
- cfg_ds.userdata_raw = results.get('userdata')
- cfg_ds.version = results.get('version')
- cfg_ds.network_json = results.get('networkdata')
- cfg_ds._network_config = openstack.convert_net_json(
- cfg_ds.network_json, known_macs=KNOWN_MACS)
-
-
-def populate_dir(seed_dir, files):
- for (name, content) in files.items():
- path = os.path.join(seed_dir, name)
- dirname = os.path.dirname(path)
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
- if isinstance(content, six.text_type):
- mode = "w"
- else:
- mode = "wb"
- with open(path, mode) as fp:
- fp.write(content)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
deleted file mode 100644
index 8936a1e3..00000000
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#
-# Copyright (C) 2014 Neal Shrader
-#
-# Author: Neal Shrader <neal@digitalocean.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-from six.moves.urllib_parse import urlparse
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceDigitalOcean
-
-from .. import helpers as test_helpers
-
-httpretty = test_helpers.import_httpretty()
-
-# Abbreviated for the test
-DO_INDEX = """id
- hostname
- user-data
- vendor-data
- public-keys
- region"""
-
-DO_MULTIPLE_KEYS = """ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com
- ssh-rsa AAAAB3NzaC1yc2EAAAA... neal2@digitalocean.com"""
-DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com"
-
-DO_META = {
- '': DO_INDEX,
- 'user-data': '#!/bin/bash\necho "user-data"',
- 'vendor-data': '#!/bin/bash\necho "vendor-data"',
- 'public-keys': DO_SINGLE_KEY,
- 'region': 'nyc3',
- 'id': '2000000',
- 'hostname': 'cloudinit-test',
-}
-
-MD_URL_RE = re.compile(r'http://169.254.169.254/metadata/v1/.*')
-
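-# Emulate the DigitalOcean metadata service: requests under /metadata/v1/
-# are answered from DO_META, anything else gets a 404.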
-
-def _request_callback(method, uri, headers):
- url_path = urlparse(uri).path
- if url_path.startswith('/metadata/v1/'):
-        path = url_path.split('/metadata/v1/')[1]
- else:
- path = None
- if path in DO_META:
- return (200, headers, DO_META.get(path))
- else:
- return (404, headers, '')
-
-
-class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
-
- def setUp(self):
- self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
- settings.CFG_BUILTIN, None,
- helpers.Paths({}))
- super(TestDataSourceDigitalOcean, self).setUp()
-
- @httpretty.activate
- def test_connection(self):
- httpretty.register_uri(
- httpretty.GET, MD_URL_RE,
- body=_request_callback)
-
- success = self.ds.get_data()
- self.assertTrue(success)
-
- @httpretty.activate
- def test_metadata(self):
- httpretty.register_uri(
- httpretty.GET, MD_URL_RE,
- body=_request_callback)
- self.ds.get_data()
-
- self.assertEqual(DO_META.get('user-data'),
- self.ds.get_userdata_raw())
-
- self.assertEqual(DO_META.get('vendor-data'),
- self.ds.get_vendordata_raw())
-
- self.assertEqual(DO_META.get('region'),
- self.ds.availability_zone)
-
- self.assertEqual(DO_META.get('id'),
- self.ds.get_instance_id())
-
- self.assertEqual(DO_META.get('hostname'),
- self.ds.get_hostname())
-
- self.assertEqual('http://mirrors.digitalocean.com/',
- self.ds.get_package_mirror_info())
-
- # Single key
- self.assertEqual([DO_META.get('public-keys')],
- self.ds.get_public_ssh_keys())
-
- self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
-
- @httpretty.activate
- def test_multiple_ssh_keys(self):
-        # 'public-keys' (hyphenated) is the key the mock server serves
-        # and the assertion below reads.
-        DO_META['public-keys'] = DO_MULTIPLE_KEYS
- httpretty.register_uri(
- httpretty.GET, MD_URL_RE,
- body=_request_callback)
- self.ds.get_data()
-
- # Multiple keys
- self.assertEqual(DO_META.get('public-keys').splitlines(),
- self.ds.get_public_ssh_keys())
-
- self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
deleted file mode 100644
index 6e62a4d2..00000000
--- a/tests/unittests/test_datasource/test_gce.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#
-# Copyright (C) 2014 Vaidas Jablonskis
-#
-# Author: Vaidas Jablonskis <jablonskis@gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-from base64 import b64encode, b64decode
-from six.moves.urllib_parse import urlparse
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceGCE
-
-from .. import helpers as test_helpers
-
-httpretty = test_helpers.import_httpretty()
-
-GCE_META = {
- 'instance/id': '123',
- 'instance/zone': 'foo/bar',
- 'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server',
- 'instance/hostname': 'server.project-foo.local',
- 'instance/attributes/user-data': b'/bin/echo foo\n',
-}
-
-GCE_META_PARTIAL = {
- 'instance/id': '1234',
- 'instance/hostname': 'server.project-bar.local',
- 'instance/zone': 'bar/baz',
-}
-
-GCE_META_ENCODING = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes/user-data': b64encode(b'/bin/echo baz\n'),
- 'instance/attributes/user-data-encoding': 'base64',
-}
-
-HEADERS = {'X-Google-Metadata-Request': 'True'}
-MD_URL_RE = re.compile(
- r'http://metadata.google.internal/computeMetadata/v1/.*')
-
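-# Google's metadata server requires the X-Google-Metadata-Request header
-# on every request; test_connection checks that the datasource sends it.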
-
-def _set_mock_metadata(gce_meta=None):
- if gce_meta is None:
- gce_meta = GCE_META
-
- def _request_callback(method, uri, headers):
- url_path = urlparse(uri).path
- if url_path.startswith('/computeMetadata/v1/'):
-            path = url_path.split('/computeMetadata/v1/')[1]
- else:
- path = None
- if path in gce_meta:
- return (200, headers, gce_meta.get(path))
- else:
- return (404, headers, '')
-
- httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
-
-
-@httpretty.activate
-class TestDataSourceGCE(test_helpers.HttprettyTestCase):
-
- def setUp(self):
- self.ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, None,
- helpers.Paths({}))
- super(TestDataSourceGCE, self).setUp()
-
- def test_connection(self):
- _set_mock_metadata()
- success = self.ds.get_data()
- self.assertTrue(success)
-
- req_header = httpretty.last_request().headers
- self.assertDictContainsSubset(HEADERS, req_header)
-
- def test_metadata(self):
- _set_mock_metadata()
- self.ds.get_data()
-
- shostname = GCE_META.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname,
- self.ds.get_hostname())
-
- self.assertEqual(GCE_META.get('instance/id'),
- self.ds.get_instance_id())
-
- self.assertEqual(GCE_META.get('instance/attributes/user-data'),
- self.ds.get_userdata_raw())
-
- # test partial metadata (missing user-data in particular)
- def test_metadata_partial(self):
- _set_mock_metadata(GCE_META_PARTIAL)
- self.ds.get_data()
-
- self.assertEqual(GCE_META_PARTIAL.get('instance/id'),
- self.ds.get_instance_id())
-
- shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname, self.ds.get_hostname())
-
- def test_metadata_encoding(self):
- _set_mock_metadata(GCE_META_ENCODING)
- self.ds.get_data()
-
- decoded = b64decode(
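-    # CloudSigma server names may be empty or non-ASCII; the datasource
-    # is expected to fall back to the first 8 characters of the uuid.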
- GCE_META_ENCODING.get('instance/attributes/user-data'))
- self.assertEqual(decoded, self.ds.get_userdata_raw())
-
- def test_missing_required_keys_return_false(self):
- for required_key in ['instance/id', 'instance/zone',
- 'instance/hostname']:
- meta = GCE_META_PARTIAL.copy()
- del meta[required_key]
- _set_mock_metadata(meta)
- self.assertEqual(False, self.ds.get_data())
- httpretty.reset()
-
- def test_project_level_ssh_keys_are_used(self):
- _set_mock_metadata()
- self.ds.get_data()
-
- # we expect a list of public ssh keys with user names stripped
- self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
- self.ds.get_public_ssh_keys())
-
- def test_instance_level_ssh_keys_are_used(self):
- key_content = 'ssh-rsa JustAUser root@server'
- meta = GCE_META.copy()
- meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- self.assertIn(key_content, self.ds.get_public_ssh_keys())
-
- def test_instance_level_keys_replace_project_level_keys(self):
- key_content = 'ssh-rsa JustAUser root@server'
- meta = GCE_META.copy()
- meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- self.assertEqual([key_content], self.ds.get_public_ssh_keys())
-
- def test_only_last_part_of_zone_used_for_availability_zone(self):
- _set_mock_metadata()
- self.ds.get_data()
- self.assertEqual('bar', self.ds.availability_zone)
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
deleted file mode 100644
index f66f1c6d..00000000
--- a/tests/unittests/test_datasource/test_maas.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from copy import copy
-import os
-import shutil
-import tempfile
-
-from cloudinit.sources import DataSourceMAAS
-from cloudinit import url_helper
-from ..helpers import TestCase, populate_dir
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-
-class TestMAASDataSource(TestCase):
-
- def setUp(self):
- super(TestMAASDataSource, self).setUp()
-        # Make a temp directory for tests to use.
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_seed_dir_valid(self):
- """Verify a valid seeddir is read as such."""
-
- data = {'instance-id': 'i-valid01',
- 'local-hostname': 'valid01-hostname',
- 'user-data': b'valid01-userdata',
- 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
-
- my_d = os.path.join(self.tmp, "valid")
- populate_dir(my_d, data)
-
- (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
-
- self.assertEqual(userdata, data['user-data'])
- for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data[key], metadata[key])
-
-        # verify that 'user-data' is not returned as part of the metadata
- self.assertFalse(('user-data' in metadata))
-
- def test_seed_dir_valid_extra(self):
- """Verify extra files do not affect seed_dir validity."""
-
- data = {'instance-id': 'i-valid-extra',
- 'local-hostname': 'valid-extra-hostname',
- 'user-data': b'valid-extra-userdata', 'foo': 'bar'}
-
- my_d = os.path.join(self.tmp, "valid_extra")
- populate_dir(my_d, data)
-
- (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
-
- self.assertEqual(userdata, data['user-data'])
- for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data[key], metadata[key])
-
- # additional files should not just appear as keys in metadata atm
- self.assertFalse(('foo' in metadata))
-
- def test_seed_dir_invalid(self):
- """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
-
- valid = {'instance-id': 'i-instanceid',
- 'local-hostname': 'test-hostname', 'user-data': ''}
-
- my_based = os.path.join(self.tmp, "valid_extra")
-
-        # missing 'local-hostname'
- my_d = "%s-01" % my_based
- invalid_data = copy(valid)
- del invalid_data['local-hostname']
- populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
-
- # missing 'instance-id'
- my_d = "%s-02" % my_based
- invalid_data = copy(valid)
- del invalid_data['instance-id']
- populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
-
- def test_seed_dir_none(self):
- """Verify that empty seed_dir raises MAASSeedDirNone."""
-
- my_d = os.path.join(self.tmp, "valid_empty")
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir, my_d)
-
- def test_seed_dir_missing(self):
- """Verify that missing seed_dir raises MAASSeedDirNone."""
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir,
- os.path.join(self.tmp, "nonexistantdirectory"))
-
- def test_seed_url_valid(self):
- """Verify that valid seed_url is read as such."""
- valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/public-keys': 'test-hostname',
- 'user-data': b'foodata',
- }
- valid_order = [
- 'meta-data/local-hostname',
- 'meta-data/instance-id',
- 'meta-data/public-keys',
- 'user-data',
- ]
- my_seed = "http://example.com/xmeta"
- my_ver = "1999-99-99"
- my_headers = {'header1': 'value1', 'header2': 'value2'}
-
- def my_headers_cb(url):
- return my_headers
-
- # Each time url_helper.readurl() is called, something different is
- # returned based on the canned data above. We need to build up a list
- # of side effect return values, which the mock will return. At the
- # same time, we'll build up a list of expected call arguments for
- # asserting after the code under test is run.
- calls = []
-
- def side_effect():
- for key in valid_order:
- resp = valid.get(key)
- url = "%s/%s/%s" % (my_seed, my_ver, key)
- calls.append(
- mock.call(url, headers=None, timeout=mock.ANY,
- data=mock.ANY, sec_between=mock.ANY,
- ssl_details=mock.ANY, retries=mock.ANY,
- headers_cb=my_headers_cb,
- exception_cb=mock.ANY))
- yield url_helper.StringResponse(resp)
-
- # Now do the actual call of the code under test.
- with mock.patch.object(url_helper, 'readurl',
- side_effect=side_effect()) as mockobj:
- userdata, metadata = DataSourceMAAS.read_maas_seed_url(
- my_seed, version=my_ver)
-
- self.assertEqual(b"foodata", userdata)
- self.assertEqual(metadata['instance-id'],
- valid['meta-data/instance-id'])
- self.assertEqual(metadata['local-hostname'],
- valid['meta-data/local-hostname'])
-
-        # assert_has_calls() performs real verification; plain
-        # has_calls() is not a Mock assertion method and silently passes.
-        mockobj.assert_has_calls(calls)
-
-    def test_seed_url_invalid(self):
-        """Verify that invalid seed_url raises MAASSeedDirMalformed."""
-        # TODO: not yet implemented; placeholder only.
-        pass
-
-    def test_seed_url_missing(self):
-        """Verify seed_url with no found entries raises MAASSeedDirNone."""
-        # TODO: not yet implemented; placeholder only.
-        pass
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
deleted file mode 100644
index b0fa1130..00000000
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from cloudinit import helpers
-from cloudinit.sources import DataSourceNoCloud
-from cloudinit import util
-from ..helpers import TestCase, populate_dir, mock, ExitStack
-
-import os
-import shutil
-import tempfile
-
-import yaml
-
-
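-# NoCloud can be seeded from a directory of 'user-data'/'meta-data'
-# (plus optional 'vendor-data') files, from values embedded in system
-# config, or from the kernel command line; all three are covered below.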
-class TestNoCloudDataSource(TestCase):
-
- def setUp(self):
- super(TestNoCloudDataSource, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
-
- self.cmdline = "root=TESTCMDLINE"
-
- self.mocks = ExitStack()
- self.addCleanup(self.mocks.close)
-
- self.mocks.enter_context(
- mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
-
- def test_nocloud_seed_dir(self):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
- ud = b"USER_DATA_HERE"
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
-
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
-
- ds = DataSourceNoCloud.DataSourceNoCloud
-
- dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertEqual(dsrc.userdata_raw, ud)
- self.assertEqual(dsrc.metadata, md)
- self.assertTrue(ret)
-
-    def test_fs_label(self):
-        # find_devs_with should not be called if fs_label is None
-        ds = DataSourceNoCloud.DataSourceNoCloud
-
-        class PseudoException(Exception):
-            pass
-
-        self.mocks.enter_context(
-            mock.patch.object(util, 'find_devs_with',
-                              side_effect=PseudoException))
-
-        # by default, NoCloud should search for filesystems by label
-        sys_cfg = {'datasource': {'NoCloud': {}}}
-        dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
-        self.assertRaises(PseudoException, dsrc.get_data)
-
- # but disabling searching should just end up with None found
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
- dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertFalse(ret)
-
- def test_no_datasource_expected(self):
- # no source should be found if no cmdline, config, and fs_label=None
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
-
- ds = DataSourceNoCloud.DataSourceNoCloud
- dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- self.assertFalse(dsrc.get_data())
-
- def test_seed_in_config(self):
- ds = DataSourceNoCloud.DataSourceNoCloud
-
- data = {
- 'fs_label': None,
- 'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
- 'user-data': b"USER_DATA_RAW",
- }
-
- sys_cfg = {'datasource': {'NoCloud': data}}
- dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
- self.assertTrue(ret)
-
- def test_nocloud_seed_with_vendordata(self):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
- ud = b"USER_DATA_HERE"
- vd = b"THIS IS MY VENDOR_DATA"
-
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': ud, 'meta-data': yaml.safe_dump(md),
- 'vendor-data': vd})
-
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
-
- ds = DataSourceNoCloud.DataSourceNoCloud
-
- dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertEqual(dsrc.userdata_raw, ud)
- self.assertEqual(dsrc.metadata, md)
- self.assertEqual(dsrc.vendordata_raw, vd)
- self.assertTrue(ret)
-
- def test_nocloud_no_vendordata(self):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
-
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
-
- ds = DataSourceNoCloud.DataSourceNoCloud
-
- dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertEqual(dsrc.userdata_raw, b"ud")
- self.assertFalse(dsrc.vendordata)
- self.assertTrue(ret)
-
-
-class TestParseCommandLineData(TestCase):
-
- def test_parse_cmdline_data_valid(self):
- ds_id = "ds=nocloud"
- pairs = (
- ("root=/dev/sda1 %(ds_id)s", {}),
- ("%(ds_id)s; root=/dev/foo", {}),
- ("%(ds_id)s", {}),
- ("%(ds_id)s;", {}),
- ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}),
- ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
- {'seedfrom': 'SEED', 'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost",
- {'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost;i=IID",
- {'local-hostname': 'xhost', 'instance-id': 'IID'}),
- )
-
- for (fmt, expected) in pairs:
- fill = {}
- cmdline = fmt % {'ds_id': ds_id}
- ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill,
- cmdline=cmdline)
- self.assertEqual(expected, fill)
- self.assertTrue(ret)
-
- def test_parse_cmdline_data_none(self):
- ds_id = "ds=foo"
- cmdlines = (
- "root=/dev/sda1 ro",
- "console=/dev/ttyS0 root=/dev/foo",
- "",
- "ds=foocloud",
- "ds=foo-net",
- "ds=nocloud;s=SEED",
- )
-
- for cmdline in cmdlines:
- fill = {}
- ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill,
- cmdline=cmdline)
- self.assertEqual(fill, {})
- self.assertFalse(ret)
-
-
-# vi: ts=4 expandtab
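
The cmdline/expected pairs above define the NoCloud kernel-command-line
grammar precisely: the ds=nocloud token opens a semicolon-separated list of
key=value pairs, and s, h and i are aliases for seedfrom, local-hostname and
instance-id. A standalone re-implementation of that grammar, matching the
test table rather than quoting parse_cmdline_data itself:

    ALIASES = {'s': 'seedfrom', 'h': 'local-hostname', 'i': 'instance-id'}

    def parse_nocloud_cmdline(cmdline, ds_id='ds=nocloud', fill=None):
        if fill is None:
            fill = {}
        # find the token naming the datasource, e.g.
        #   "root=/dev/sda1 ds=nocloud;s=SEED;h=xhost"
        for tok in cmdline.split():
            if tok == ds_id or tok.startswith(ds_id + ';'):
                break
        else:
            return False  # datasource not named on this command line
        for kv in tok.split(';')[1:]:
            if not kv:
                continue
            key, _, val = kv.partition('=')
            fill[ALIASES.get(key, key)] = val
        return True

Run against the table in test_parse_cmdline_data_valid this yields the same
fill dicts, and it returns False for every entry in
test_parse_cmdline_data_none.
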
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
deleted file mode 100644
index d796f030..00000000
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ /dev/null
@@ -1,300 +0,0 @@
-from cloudinit import helpers
-from cloudinit.sources import DataSourceOpenNebula as ds
-from cloudinit import util
-from ..helpers import TestCase, populate_dir
-
-import os
-import pwd
-import shutil
-import tempfile
-import unittest
-
-
-TEST_VARS = {
- 'VAR1': 'single',
- 'VAR2': 'double word',
- 'VAR3': 'multi\nline\n',
- 'VAR4': "'single'",
- 'VAR5': "'double word'",
- 'VAR6': "'multi\nline\n'",
- 'VAR7': 'single\\t',
- 'VAR8': 'double\\tword',
- 'VAR9': 'multi\\t\nline\n',
- 'VAR10': '\\', # expect '\'
- 'VAR11': '\'', # expect '
- 'VAR12': '$', # expect $
-}
-
-INVALID_CONTEXT = ';'
-USER_DATA = '#cloud-config\napt_upgrade: true'
-SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
-HOSTNAME = 'foo.example.com'
-PUBLIC_IP = '10.0.0.3'
-
-CMD_IP_OUT = '''\
-1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
- link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff
-'''
-
-
-class TestOpenNebulaDataSource(TestCase):
- parsed_user = None
-
- def setUp(self):
- super(TestOpenNebulaDataSource, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
-
-        # defaults for a few tests
- self.ds = ds.DataSourceOpenNebula
- self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
- self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}
-
-        # we don't want 'sudo' called in tests, so we patch switch_user_cmd
- def my_switch_user_cmd(user):
- self.parsed_user = user
- return []
-
- self.switch_user_cmd_real = ds.switch_user_cmd
- ds.switch_user_cmd = my_switch_user_cmd
-
- def tearDown(self):
- ds.switch_user_cmd = self.switch_user_cmd_real
- super(TestOpenNebulaDataSource, self).tearDown()
-
- def test_get_data_non_contextdisk(self):
- orig_find_devs_with = util.find_devs_with
- try:
-            # don't try to look up CDs
- util.find_devs_with = lambda n: []
- dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertFalse(ret)
- finally:
- util.find_devs_with = orig_find_devs_with
-
- def test_get_data_broken_contextdisk(self):
- orig_find_devs_with = util.find_devs_with
- try:
-            # don't try to look up CDs
- util.find_devs_with = lambda n: []
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
- dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
- self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
- finally:
- util.find_devs_with = orig_find_devs_with
-
- def test_get_data_invalid_identity(self):
- orig_find_devs_with = util.find_devs_with
- try:
-            # generate a non-existent system user name
- sys_cfg = self.sys_cfg
- invalid_user = 'invalid'
- while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
- try:
- pwd.getpwnam(invalid_user)
- invalid_user += 'X'
- except KeyError:
- sys_cfg['datasource']['OpenNebula']['parseuser'] = \
- invalid_user
-
-            # don't try to look up CDs
- util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
- dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
- finally:
- util.find_devs_with = orig_find_devs_with
-
- def test_get_data(self):
- orig_find_devs_with = util.find_devs_with
- try:
-            # don't try to look up CDs
- util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
- dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- finally:
- util.find_devs_with = orig_find_devs_with
-
- def test_seed_dir_non_contextdisk(self):
- self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
- self.seed_dir)
-
- def test_seed_dir_empty1_context(self):
- populate_dir(self.seed_dir, {'context.sh': ''})
- results = ds.read_context_disk_dir(self.seed_dir)
-
- self.assertEqual(results['userdata'], None)
- self.assertEqual(results['metadata'], {})
-
- def test_seed_dir_empty2_context(self):
- populate_context_dir(self.seed_dir, {})
- results = ds.read_context_disk_dir(self.seed_dir)
-
- self.assertEqual(results['userdata'], None)
- self.assertEqual(results['metadata'], {})
-
- def test_seed_dir_broken_context(self):
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
-
- self.assertRaises(ds.BrokenContextDiskDir,
- ds.read_context_disk_dir,
- self.seed_dir)
-
- def test_context_parser(self):
- populate_context_dir(self.seed_dir, TEST_VARS)
- results = ds.read_context_disk_dir(self.seed_dir)
-
- self.assertTrue('metadata' in results)
- self.assertEqual(TEST_VARS, results['metadata'])
-
- def test_ssh_key(self):
- public_keys = ['first key', 'second key']
- for c in range(4):
- for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
- my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
- populate_context_dir(my_d, {k: '\n'.join(public_keys)})
- results = ds.read_context_disk_dir(my_d)
-
- self.assertTrue('metadata' in results)
- self.assertTrue('public-keys' in results['metadata'])
- self.assertEqual(public_keys,
- results['metadata']['public-keys'])
-
- public_keys.append(SSH_KEY % (c + 1,))
-
- def test_user_data_plain(self):
- for k in ('USER_DATA', 'USERDATA'):
- my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: USER_DATA,
- 'USERDATA_ENCODING': ''})
- results = ds.read_context_disk_dir(my_d)
-
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
-
- def test_user_data_encoding_required_for_decode(self):
- b64userdata = util.b64e(USER_DATA)
- for k in ('USER_DATA', 'USERDATA'):
- my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: b64userdata})
- results = ds.read_context_disk_dir(my_d)
-
- self.assertTrue('userdata' in results)
- self.assertEqual(b64userdata, results['userdata'])
-
- def test_user_data_base64_encoding(self):
- for k in ('USER_DATA', 'USERDATA'):
- my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: util.b64e(USER_DATA),
- 'USERDATA_ENCODING': 'base64'})
- results = ds.read_context_disk_dir(my_d)
-
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
-
- def test_hostname(self):
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
- my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: PUBLIC_IP})
- results = ds.read_context_disk_dir(my_d)
-
- self.assertTrue('metadata' in results)
- self.assertTrue('local-hostname' in results['metadata'])
- self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname'])
-
- def test_network_interfaces(self):
- populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'})
- results = ds.read_context_disk_dir(self.seed_dir)
-
- self.assertTrue('network-interfaces' in results)
-
- def test_find_candidates(self):
- def my_devs_with(criteria):
- return {
- "LABEL=CONTEXT": ["/dev/sdb"],
- "LABEL=CDROM": ["/dev/sr0"],
- "TYPE=iso9660": ["/dev/vdb"],
- }.get(criteria, [])
-
- orig_find_devs_with = util.find_devs_with
- try:
- util.find_devs_with = my_devs_with
- self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"],
- ds.find_candidate_devs())
- finally:
- util.find_devs_with = orig_find_devs_with
-
-
-class TestOpenNebulaNetwork(unittest.TestCase):
-
- def setUp(self):
- super(TestOpenNebulaNetwork, self).setUp()
-
- def test_lo(self):
- net = ds.OpenNebulaNetwork('', {})
- self.assertEqual(net.gen_conf(), u'''\
-auto lo
-iface lo inet loopback
-''')
-
- def test_eth0(self):
- net = ds.OpenNebulaNetwork(CMD_IP_OUT, {})
- self.assertEqual(net.gen_conf(), u'''\
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.18.1.1
- network 10.18.1.0
- netmask 255.255.255.0
-''')
-
- def test_eth0_override(self):
- context = {
- 'DNS': '1.2.3.8',
- 'ETH0_IP': '1.2.3.4',
- 'ETH0_NETWORK': '1.2.3.0',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_DOMAIN': 'example.com',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7'
- }
-
- net = ds.OpenNebulaNetwork(CMD_IP_OUT, context)
- self.assertEqual(net.gen_conf(), u'''\
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 1.2.3.4
- network 1.2.3.0
- netmask 255.255.0.0
- gateway 1.2.3.5
- dns-search example.com
- dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7
-''')
-
-
-class TestParseShellConfig(unittest.TestCase):
- def test_no_seconds(self):
- cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
- # we could test 'sleep 2', but that would make the test run slower.
- ret = ds.parse_shell_config(cfg)
- self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
-
-
-def populate_context_dir(path, variables):
- data = "# Context variables generated by OpenNebula\n"
- for k, v in variables.items():
- data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
- populate_dir(path, {'context.sh': data})
-
-# vi: ts=4 expandtab
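
populate_context_dir above writes the same single-quoted KEY='value' form an
OpenNebula context CD carries, escaping embedded quotes as '\'' so quoted and
multi-line values survive, and the SECONDS case in TestParseShellConfig
documents that shell-managed variables are dropped during parsing. A rough
sketch of the simple case only, assuming one assignment per line (the real
parse_shell_config sources the file through bash, which is what makes the
multi-line VAR3/VAR6 values work):

    def naive_parse_context(text, excluded=('SECONDS', 'RANDOM', 'LINENO')):
        # handles only single-line KEY='value' assignments
        out = {}
        for line in text.splitlines():
            if line.startswith('#') or '=' not in line:
                continue
            key, _, val = line.partition('=')
            if key in excluded:
                continue
            out[key] = val.strip("'")
        return out

    naive_parse_context("foo='bar'\nSECONDS=2\nxx='foo'")
    # -> {'foo': 'bar', 'xx': 'foo'}, the result TestParseShellConfig expects
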
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
deleted file mode 100644
index 5c8592c5..00000000
--- a/tests/unittests/test_datasource/test_openstack.py
+++ /dev/null
@@ -1,347 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import json
-import re
-
-from .. import helpers as test_helpers
-
-from six.moves.urllib.parse import urlparse
-from six import StringIO
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceOpenStack as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-hp = test_helpers.import_httpretty()
-
-BASE_URL = "http://169.254.169.254"
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': '0',
- 'ami-manifest-path': 'FIXME',
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': '0.0.0.0',
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '0.0.0.1',
- 'reservation-id': 'r-iru5qm4m',
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-VENDOR_DATA = {
- 'magic': '',
-}
-OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
-}
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-OS_FILES = {
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
-}
-EC2_FILES = {
- 'latest/user-data': USER_DATA,
-}
-EC2_VERSIONS = [
- 'latest',
-]
-
-
-def _register_uris(version, ec2_files, ec2_meta, os_files):
-    """Register a set of url patterns with httpretty that mimic the
-    data returned by the OpenStack metadata service (and EC2 service)."""
-
- def match_ec2_url(uri, headers):
- path = uri.path.strip("/")
- if len(path) == 0:
- return (200, headers, "\n".join(EC2_VERSIONS))
- path = uri.path.lstrip("/")
- if path in ec2_files:
- return (200, headers, ec2_files.get(path))
- if path == 'latest/meta-data/':
- buf = StringIO()
- for (k, v) in ec2_meta.items():
- if isinstance(v, (list, tuple)):
- buf.write("%s/" % (k))
- else:
- buf.write("%s" % (k))
- buf.write("\n")
- return (200, headers, buf.getvalue())
- if path.startswith('latest/meta-data/'):
- value = None
- pieces = path.split("/")
- if path.endswith("/"):
- pieces = pieces[2:-1]
- value = util.get_cfg_by_path(ec2_meta, pieces)
- else:
- pieces = pieces[2:]
- value = util.get_cfg_by_path(ec2_meta, pieces)
- if value is not None:
- return (200, headers, str(value))
- return (404, headers, '')
-
- def match_os_uri(uri, headers):
- path = uri.path.strip("/")
- if path == 'openstack':
- return (200, headers, "\n".join([openstack.OS_LATEST]))
- path = uri.path.lstrip("/")
- if path in os_files:
- return (200, headers, os_files.get(path))
- return (404, headers, '')
-
- def get_request_callback(method, uri, headers):
- uri = urlparse(uri)
- path = uri.path.lstrip("/").split("/")
- if path[0] == 'openstack':
- return match_os_uri(uri, headers)
- return match_ec2_url(uri, headers)
-
- hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
- body=get_request_callback)
-
-
-def _read_metadata_service():
- return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
-
-
-class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
- VERSION = 'latest'
-
- @hp.activate
- def test_successful(self):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(2, len(f['files']))
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual(EC2_META, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
- metadata = f['metadata']
- self.assertEqual('nova', metadata.get('availability_zone'))
- self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname'))
- self.assertEqual('sm-foo-test.novalocal',
- metadata.get('local-hostname'))
- self.assertEqual('sm-foo-test', metadata.get('name'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('uuid'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('instance-id'))
-
- @hp.activate
- def test_no_ec2(self):
- _register_uris(self.VERSION, {}, {}, OS_FILES)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual({}, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
-
- @hp.activate
- def test_bad_metadata(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.NonReadable, _read_metadata_service)
-
- @hp.activate
- def test_bad_uuid(self):
- os_files = copy.deepcopy(OS_FILES)
- os_meta = copy.deepcopy(OSTACK_META)
- os_meta.pop('uuid')
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
-
- @hp.activate
- def test_userdata_empty(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('user_data'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('userdata'))
-
- @hp.activate
- def test_vendordata_empty(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- f = _read_metadata_service()
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('vendordata'))
-
- @hp.activate
- def test_vendordata_invalid(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
-
- @hp.activate
- def test_metadata_invalid(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
-
- @hp.activate
- def test_datasource(self):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
- self.assertTrue(found)
- self.assertEqual(2, ds_os.version)
- md = dict(ds_os.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(EC2_META, ds_os.ec2_metadata)
- self.assertEqual(USER_DATA, ds_os.userdata_raw)
- self.assertEqual(2, len(ds_os.files))
- self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
- self.assertEqual(ds_os.vendordata_raw, None)
-
- @hp.activate
- def test_bad_datasource_meta(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
- @hp.activate
- def test_no_datasource(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files.pop(k)
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
- }
- self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
- @hp.activate
- def test_disabled_datasource(self):
- os_files = copy.deepcopy(OS_FILES)
- os_meta = copy.deepcopy(OSTACK_META)
- os_meta['meta'] = {
- 'dsmode': 'disabled',
- }
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
- }
- self.assertIsNone(ds_os.version)
- found = ds_os.get_data(timeout=0.1, retries=0)
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
-
-class TestVendorDataLoading(test_helpers.TestCase):
- def cvj(self, data):
- return openstack.convert_vendordata_json(data)
-
- def test_vd_load_none(self):
-        # non-existent vendor-data should return None
- self.assertIsNone(self.cvj(None))
-
- def test_vd_load_string(self):
- self.assertEqual(self.cvj("foobar"), "foobar")
-
- def test_vd_load_list(self):
- data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])]
- self.assertEqual(self.cvj(data), data)
-
- def test_vd_load_dict_no_ci(self):
- self.assertEqual(self.cvj({'foo': 'bar'}), None)
-
- def test_vd_load_dict_ci_dict(self):
- self.assertRaises(ValueError, self.cvj,
- {'foo': 'bar', 'cloud-init': {'x': 1}})
-
- def test_vd_load_dict_ci_string(self):
- data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}
- self.assertEqual(self.cvj(data), data['cloud-init'])
-
- def test_vd_load_dict_ci_list(self):
- data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
- self.assertEqual(self.cvj(data), data['cloud-init'])
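
TestVendorDataLoading above pins down the whole contract of
convert_vendordata_json: None, strings and lists pass through untouched, a
dict contributes only its 'cloud-init' key, and a nested dict under that key
is an error. A compact restatement of that contract, derived from the
assertions rather than copied from the helper (py3 str is used for brevity
where the six-based original would also accept py2 unicode):

    def convert_vendordata(data):
        # None, strings and lists pass straight through
        if data is None or isinstance(data, (str, list)):
            return data
        if isinstance(data, dict):
            ci = data.get('cloud-init')  # the only key consumed
            if ci is None:
                return None
            if isinstance(ci, dict):
                raise ValueError("vendordata['cloud-init'] must not be a dict")
            return ci
        raise ValueError("unhandled vendor-data type: %s" % type(data))

Each branch corresponds to one test case above: test_vd_load_none, the
string/list pass-throughs, test_vd_load_dict_no_ci, test_vd_load_dict_ci_dict
and the two accepted 'cloud-init' payload shapes.
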
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
deleted file mode 100644
index 9c6c8768..00000000
--- a/tests/unittests/test_datasource/test_smartos.py
+++ /dev/null
@@ -1,543 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# This is a testcase for the SmartOS datasource. It replicates a serial
-# console and acts like the SmartOS console does in order to validate
-# return responses.
-#
-
-from __future__ import print_function
-
-from binascii import crc32
-import json
-import os
-import os.path
-import re
-import shutil
-import stat
-import tempfile
-import uuid
-
-from cloudinit import serial
-from cloudinit.sources import DataSourceSmartOS
-
-import six
-
-from cloudinit import helpers as c_helpers
-from cloudinit.util import b64e
-
-from ..helpers import mock, FilesystemMockingTestCase, TestCase
-
-SDC_NICS = json.loads("""
-[
- {
- "nic_tag": "external",
- "primary": true,
- "mtu": 1500,
- "model": "virtio",
- "gateway": "8.12.42.1",
- "netmask": "255.255.255.0",
- "ip": "8.12.42.102",
- "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
- "gateways": [
- "8.12.42.1"
- ],
- "vlan_id": 324,
- "mac": "90:b8:d0:f5:e4:f5",
- "interface": "net0",
- "ips": [
- "8.12.42.102/24"
- ]
- },
- {
- "nic_tag": "sdc_overlay/16187209",
- "gateway": "192.168.128.1",
- "model": "virtio",
- "mac": "90:b8:d0:a5:ff:cd",
- "netmask": "255.255.252.0",
- "ip": "192.168.128.93",
- "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9",
- "gateways": [
- "192.168.128.1"
- ],
- "vlan_id": 2,
- "mtu": 8500,
- "interface": "net1",
- "ips": [
- "192.168.128.93/22"
- ]
- }
-]
-""")
-
-MOCK_RETURNS = {
- 'hostname': 'test-host',
- 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
- 'disable_iptables_flag': None,
- 'enable_motd_sys_info': None,
- 'test-var1': 'some data',
- 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
- 'sdc:datacenter_name': 'somewhere2',
- 'sdc:operator-script': '\n'.join(['bin/true', '']),
- 'sdc:uuid': str(uuid.uuid4()),
- 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
- 'user-data': '\n'.join(['something', '']),
- 'user-script': '\n'.join(['/bin/true', '']),
- 'sdc:nics': json.dumps(SDC_NICS),
-}
-
-DMI_DATA_RETURN = 'smartdc'
-
-
-class PsuedoJoyentClient(object):
- def __init__(self, data=None):
- if data is None:
- data = MOCK_RETURNS.copy()
- self.data = data
- return
-
- def get(self, key, default=None, strip=False):
- if key in self.data:
- r = self.data[key]
- if strip:
- r = r.strip()
- else:
- r = default
- return r
-
- def get_json(self, key, default=None):
- result = self.get(key, default=default)
- if result is None:
- return default
- return json.loads(result)
-
- def exists(self):
- return True
-
-
-class TestSmartOSDataSource(FilesystemMockingTestCase):
- def setUp(self):
- super(TestSmartOSDataSource, self).setUp()
-
- dsmos = 'cloudinit.sources.DataSourceSmartOS'
- patcher = mock.patch(dsmos + ".jmc_client_factory")
- self.jmc_cfact = patcher.start()
- self.addCleanup(patcher.stop)
- patcher = mock.patch(dsmos + ".get_smartos_environ")
- self.get_smartos_environ = patcher.start()
- self.addCleanup(patcher.stop)
-
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.paths = c_helpers.Paths({'cloud_dir': self.tmp})
-
- self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp')
- os.mkdir(self.legacy_user_d)
-
- self.orig_lud = DataSourceSmartOS.LEGACY_USER_D
- DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d
-
- def tearDown(self):
- DataSourceSmartOS.LEGACY_USER_D = self.orig_lud
- super(TestSmartOSDataSource, self).tearDown()
-
- def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
- sys_cfg=None, ds_cfg=None):
- self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata)
- self.get_smartos_environ.return_value = mode
-
- if sys_cfg is None:
- sys_cfg = {}
-
- if ds_cfg is not None:
- sys_cfg['datasource'] = sys_cfg.get('datasource', {})
- sys_cfg['datasource']['SmartOS'] = ds_cfg
-
- return DataSourceSmartOS.DataSourceSmartOS(
- sys_cfg, distro=None, paths=self.paths)
-
- def test_no_base64(self):
- ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
- dsrc = self._get_ds(ds_cfg=ds_cfg)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- def test_uuid(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:uuid'],
- dsrc.metadata['instance-id'])
-
- def test_root_keys(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
- dsrc.metadata['public-keys'])
-
- def test_hostname_b64(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
-
- def test_hostname(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
-
- def test_userdata(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-data'],
- dsrc.metadata['legacy-user-data'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
-
- def test_sdc_nics(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
- dsrc.metadata['network-data'])
-
- def test_sdc_scripts(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
-
- legacy_script_f = "%s/user-script" % self.legacy_user_d
- self.assertTrue(os.path.exists(legacy_script_f))
- self.assertTrue(os.path.islink(legacy_script_f))
- user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
-
- def test_scripts_shebanged(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
-
- legacy_script_f = "%s/user-script" % self.legacy_user_d
- self.assertTrue(os.path.exists(legacy_script_f))
- self.assertTrue(os.path.islink(legacy_script_f))
- shebang = None
- with open(legacy_script_f, 'r') as f:
- shebang = f.readlines()[0].strip()
- self.assertEqual(shebang, "#!/bin/bash")
- user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
-
- def test_scripts_shebang_not_added(self):
-        """
-        SmartOS requires plain-text scripts to be executable, so
-        cloud-init adds a shebang to scripts that lack one. This test
-        makes sure that a script which already starts with a shebang
-        is passed through unmodified.
-        """
-
- my_returns = MOCK_RETURNS.copy()
- my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
- 'print("hi")', ''])
-
- dsrc = self._get_ds(mockdata=my_returns)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(my_returns['user-script'],
- dsrc.metadata['user-script'])
-
- legacy_script_f = "%s/user-script" % self.legacy_user_d
- self.assertTrue(os.path.exists(legacy_script_f))
- self.assertTrue(os.path.islink(legacy_script_f))
- shebang = None
- with open(legacy_script_f, 'r') as f:
- shebang = f.readlines()[0].strip()
- self.assertEqual(shebang, "#!/usr/bin/perl")
-
- def test_userdata_removed(self):
-        """
-        User-data in the SmartOS world is supposed to be written to a file
-        on each and every boot. This test makes sure that when the legacy
-        user-data is removed, the existing user-data is backed up and no
-        /var/db/user-data is left behind.
-        """
-
- user_data_f = "%s/mdata-user-data" % self.legacy_user_d
- with open(user_data_f, 'w') as f:
- f.write("PREVIOUS")
-
- my_returns = MOCK_RETURNS.copy()
- del my_returns['user-data']
-
- dsrc = self._get_ds(mockdata=my_returns)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertFalse(dsrc.metadata.get('legacy-user-data'))
-
- found_new = False
- for root, _dirs, files in os.walk(self.legacy_user_d):
- for name in files:
- name_f = os.path.join(root, name)
- permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
- if re.match(r'.*\/mdata-user-data$', name_f):
- found_new = True
- self.assertEqual(permissions, '400')
-
- self.assertFalse(found_new)
-
- def test_vendor_data_not_default(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:vendor-data'],
- dsrc.metadata['vendor-data'])
-
- def test_default_vendor_data(self):
- my_returns = MOCK_RETURNS.copy()
- def_op_script = my_returns['sdc:vendor-data']
- del my_returns['sdc:vendor-data']
- dsrc = self._get_ds(mockdata=my_returns)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data'])
-
- # we expect default vendor-data is a boothook
-        # we expect the default vendor-data to be a boothook
-
- def test_disable_iptables_flag(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
- dsrc.metadata['iptables_disable'])
-
- def test_motd_sys_info(self):
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
- dsrc.metadata['motd_sys_info'])
-
- def test_default_ephemeral(self):
- # Test to make sure that the builtin config has the ephemeral
- # configuration.
- dsrc = self._get_ds()
- cfg = dsrc.get_config_obj()
-
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
-
- def test_override_disk_aliases(self):
-        # Test to make sure that the built-in DS is overridden
- builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
-
- mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
-
- # expect that these values are in builtin, or this is pointless
- for k in mydscfg:
- self.assertIn(k, builtin)
-
- dsrc = self._get_ds(ds_cfg=mydscfg)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- self.assertEqual(mydscfg['disk_aliases']['FOO'],
- dsrc.ds_cfg['disk_aliases']['FOO'])
-
- self.assertEqual(dsrc.device_name_to_device('FOO'),
- mydscfg['disk_aliases']['FOO'])
-
-
-class TestJoyentMetadataClient(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestJoyentMetadataClient, self).setUp()
-
- self.serial = mock.MagicMock(spec=serial.Serial)
- self.request_id = 0xabcdef12
- self.metadata_value = 'value'
- self.response_parts = {
- 'command': 'SUCCESS',
- 'crc': 'b5a9ff00',
- 'length': 17 + len(b64e(self.metadata_value)),
- 'payload': b64e(self.metadata_value),
- 'request_id': '{0:08x}'.format(self.request_id),
- }
-
- def make_response():
- payloadstr = ''
- if 'payload' in self.response_parts:
- payloadstr = ' {0}'.format(self.response_parts['payload'])
- return ('V2 {length} {crc} {request_id} '
- '{command}{payloadstr}\n'.format(
- payloadstr=payloadstr,
- **self.response_parts).encode('ascii'))
-
- self.metasource_data = None
-
- def read_response(length):
- if not self.metasource_data:
- self.metasource_data = make_response()
- self.metasource_data_len = len(self.metasource_data)
- resp = self.metasource_data[:length]
- self.metasource_data = self.metasource_data[length:]
- return resp
-
- self.serial.read.side_effect = read_response
- self.patched_funcs.enter_context(
- mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
- mock.Mock(return_value=self.request_id)))
-
- def _get_client(self):
- return DataSourceSmartOS.JoyentMetadataClient(
- fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
-
-    def assertEndsWith(self, haystack, suffix):
-        self.assertTrue(haystack.endswith(suffix),
-                        "{0} does not end with '{1}'".format(
-                            repr(haystack), suffix))
-
- def assertStartsWith(self, haystack, prefix):
- self.assertTrue(haystack.startswith(prefix),
- "{0} does not start with '{1}'".format(
- repr(haystack), prefix))
-
- def test_get_metadata_writes_a_single_line(self):
- client = self._get_client()
- client.get('some_key')
- self.assertEqual(1, self.serial.write.call_count)
- written_line = self.serial.write.call_args[0][0]
- self.assertEndsWith(written_line.decode('ascii'),
- b'\n'.decode('ascii'))
- self.assertEqual(1, written_line.count(b'\n'))
-
- def _get_written_line(self, key='some_key'):
- client = self._get_client()
- client.get(key)
- return self.serial.write.call_args[0][0]
-
- def test_get_metadata_writes_bytes(self):
- self.assertIsInstance(self._get_written_line(), six.binary_type)
-
- def test_get_metadata_line_starts_with_v2(self):
- foo = self._get_written_line()
- self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
-
- def test_get_metadata_uses_get_command(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- self.assertEqual('GET', parts[4])
-
- def test_get_metadata_base64_encodes_argument(self):
- key = 'my_key'
- parts = self._get_written_line(key).decode('ascii').strip().split(' ')
- self.assertEqual(b64e(key), parts[5])
-
- def test_get_metadata_calculates_length_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_length = len(' '.join(parts[3:]))
- self.assertEqual(expected_length, int(parts[1]))
-
- def test_get_metadata_uses_appropriate_request_id(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- request_id = parts[3]
- self.assertEqual(8, len(request_id))
- self.assertEqual(request_id, request_id.lower())
-
- def test_get_metadata_uses_random_number_for_request_id(self):
- line = self._get_written_line()
- request_id = line.decode('ascii').strip().split(' ')[3]
- self.assertEqual('{0:08x}'.format(self.request_id), request_id)
-
- def test_get_metadata_checksums_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_checksum = '{0:08x}'.format(
- crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
- checksum = parts[2]
- self.assertEqual(expected_checksum, checksum)
-
- def test_get_metadata_reads_a_line(self):
- client = self._get_client()
- client.get('some_key')
- self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
-
- def test_get_metadata_returns_valid_value(self):
- client = self._get_client()
- value = client.get('some_key')
- self.assertEqual(self.metadata_value, value)
-
- def test_get_metadata_throws_exception_for_incorrect_length(self):
- self.response_parts['length'] = 0
- client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
-
- def test_get_metadata_throws_exception_for_incorrect_crc(self):
- self.response_parts['crc'] = 'deadbeef'
- client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
-
- def test_get_metadata_throws_exception_for_request_id_mismatch(self):
- self.response_parts['request_id'] = 'deadbeef'
- client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
-
- def test_get_metadata_returns_None_if_value_not_found(self):
- self.response_parts['payload'] = ''
- self.response_parts['command'] = 'NOTFOUND'
- self.response_parts['length'] = 17
- client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertIsNone(client.get('some_key'))
-
-
-class TestNetworkConversion(TestCase):
-
- def test_convert_simple(self):
- expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'netmask': '255.255.255.0',
- 'address': '8.12.42.102/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '192.168.128.1',
- 'netmask': '255.255.252.0',
- 'address': '192.168.128.93/22'}],
- 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
- found = DataSourceSmartOS.convert_smartos_network_data(SDC_NICS)
- self.assertEqual(expected, found)
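
The TestJoyentMetadataClient assertions above jointly specify the serial V2
framing: a request line is 'V2 <length> <crc> <request-id> <command>
<b64-payload>\n', where both the length and the CRC32 checksum cover
everything from the request id onward. A small frame builder that satisfies
those assertions (illustrative only; the client class owns the real encoder):

    import base64
    from binascii import crc32

    def build_v2_frame(command, payload, request_id):
        # body = '<request_id> <command> <b64(payload)>'
        b64 = base64.b64encode(payload.encode('utf-8')).decode('ascii')
        body = '{0:08x} {1} {2}'.format(request_id, command, b64)
        # length and crc are computed over the body, exactly as the
        # length/checksum tests verify
        crc = '{0:08x}'.format(crc32(body.encode('utf-8')) & 0xffffffff)
        return 'V2 {0} {1} {2}\n'.format(len(body), crc, body).encode('ascii')

    build_v2_frame('GET', 'some_key', 0xabcdef12)
    # -> b'V2 25 <crc> abcdef12 GET c29tZV9rZXk=\n'
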
diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_distros/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
deleted file mode 100644
index 96fa0811..00000000
--- a/tests/unittests/test_distros/test_generic.py
+++ /dev/null
@@ -1,233 +0,0 @@
-from cloudinit import distros
-from cloudinit import util
-
-from .. import helpers
-
-import os
-import shutil
-import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-unknown_arch_info = {
- 'arches': ['default'],
- 'failsafe': {'primary': 'http://fs-primary-default',
- 'security': 'http://fs-security-default'}
-}
-
-package_mirrors = [
- {'arches': ['i386', 'amd64'],
- 'failsafe': {'primary': 'http://fs-primary-intel',
- 'security': 'http://fs-security-intel'},
- 'search': {
- 'primary': ['http://%(ec2_region)s.ec2/',
- 'http://%(availability_zone)s.clouds/'],
- 'security': ['http://security-mirror1-intel',
- 'http://security-mirror2-intel']}},
- {'arches': ['armhf', 'armel'],
- 'failsafe': {'primary': 'http://fs-primary-arm',
- 'security': 'http://fs-security-arm'}},
- unknown_arch_info
-]
-
-gpmi = distros._get_package_mirror_info
-gapmi = distros._get_arch_package_mirror_info
-
-
-class TestGenericDistro(helpers.FilesystemMockingTestCase):
-
- def return_first(self, mlist):
- if not mlist:
- return None
- return mlist[0]
-
- def return_second(self, mlist):
- if not mlist:
- return None
- return mlist[1]
-
- def return_none(self, _mlist):
- return None
-
- def return_last(self, mlist):
- if not mlist:
- return None
-        return mlist[-1]
-
- def setUp(self):
- super(TestGenericDistro, self).setUp()
-        # Make a temp directory for tests to use.
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _write_load_sudoers(self, _user, rules):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- os.makedirs(os.path.join(self.tmp, "etc"))
- os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d'))
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- d.write_sudo_rules("harlowja", rules)
- contents = util.load_file(d.ci_sudoers_fn)
- return contents
-
- def _count_in(self, lines_look_for, text_content):
- found_amount = 0
- for e in lines_look_for:
- for line in text_content.splitlines():
- line = line.strip()
- if line == e:
- found_amount += 1
- return found_amount
-
- def test_sudoers_ensure_rules(self):
- rules = 'ALL=(ALL:ALL) ALL'
- contents = self._write_load_sudoers('harlowja', rules)
- expected = ['harlowja ALL=(ALL:ALL) ALL']
- self.assertEqual(len(expected), self._count_in(expected, contents))
- not_expected = [
- 'harlowja A',
- 'harlowja L',
- 'harlowja L',
- ]
- self.assertEqual(0, self._count_in(not_expected, contents))
-
- def test_sudoers_ensure_rules_list(self):
- rules = [
- 'ALL=(ALL:ALL) ALL',
- 'B-ALL=(ALL:ALL) ALL',
- 'C-ALL=(ALL:ALL) ALL',
- ]
- contents = self._write_load_sudoers('harlowja', rules)
- expected = [
- 'harlowja ALL=(ALL:ALL) ALL',
- 'harlowja B-ALL=(ALL:ALL) ALL',
- 'harlowja C-ALL=(ALL:ALL) ALL',
- ]
- self.assertEqual(len(expected), self._count_in(expected, contents))
- not_expected = [
- 'harlowja A',
- 'harlowja L',
- 'harlowja L',
- ]
- self.assertEqual(0, self._count_in(not_expected, contents))
-
- def test_sudoers_ensure_new(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- d.ensure_sudo_dir("/b")
- contents = util.load_file("/etc/sudoers")
- self.assertIn("includedir /b", contents)
- self.assertTrue(os.path.isdir("/b"))
-
- def test_sudoers_ensure_append(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- util.write_file("/etc/sudoers", "josh, josh\n")
- d.ensure_sudo_dir("/b")
- contents = util.load_file("/etc/sudoers")
- self.assertIn("includedir /b", contents)
- self.assertTrue(os.path.isdir("/b"))
- self.assertIn("josh", contents)
- self.assertEqual(2, contents.count("josh"))
-
- def test_arch_package_mirror_info_unknown(self):
-        """An unknown arch should return the entry with arch 'default'."""
- arch_mirrors = gapmi(package_mirrors, arch="unknown")
- self.assertEqual(unknown_arch_info, arch_mirrors)
-
- def test_arch_package_mirror_info_known(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- self.assertEqual(package_mirrors[0], arch_mirrors)
-
- def test_get_package_mirror_info_az_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="us-east-1a")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_second)
- self.assertEqual(results,
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_none)
- self.assertEqual(results, package_mirrors[0]['failsafe'])
-
- def test_get_package_mirror_info_az_non_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- self.assertEqual(results,
- {'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror2-intel'})
-
- def test_get_package_mirror_info_none(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone=None)
-
-        # because both search entries here require replacement based on
-        # availability-zone, the filter will be called with an empty list
-        # and the failsafe should be used.
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- self.assertEqual(results,
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'})
-
- def test_systemd_in_use(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- os.makedirs('/run/systemd/system')
- self.assertTrue(d.uses_systemd())
-
- def test_systemd_not_in_use(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- self.assertFalse(d.uses_systemd())
-
- def test_systemd_symlink(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- os.makedirs('/run/systemd')
- os.symlink('/', '/run/systemd/system')
- self.assertFalse(d.uses_systemd())
-
-# def _get_package_mirror_info(mirror_info, availability_zone=None,
-# mirror_filter=util.search_for_mirror):
-
-
-# vi: ts=4 expandtab
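
The mirror-info tests above rely on two rules: search templates are expanded
with %(availability_zone)s and, for EC2-style zones only, %(ec2_region)s (the
zone minus its trailing letter), and a template whose value is unavailable is
simply skipped, which is why the nova.cloudvendor case never produces an
.ec2/ candidate. A sketch of that expansion, with the EC2-zone check reduced
to an assumed regex (the real detection logic lives in the distro/datasource
code):

    import re

    # crude EC2 availability-zone pattern, e.g. 'us-east-1a' (assumption)
    EC2_AZ_RE = re.compile(r'^[a-z]{2}-[a-z]+-[0-9][a-z]$')

    def expand_mirror_candidates(templates, availability_zone):
        subst = {}
        if availability_zone:
            subst['availability_zone'] = availability_zone
            if EC2_AZ_RE.match(availability_zone):
                subst['ec2_region'] = availability_zone[:-1]  # drop zone letter
        out = []
        for tmpl in templates:
            try:
                out.append(tmpl % subst)
            except KeyError:
                continue  # template needs a value we do not have
        return out

    expand_mirror_candidates(
        ['http://%(ec2_region)s.ec2/', 'http://%(availability_zone)s.clouds/'],
        'us-east-1a')
    # -> ['http://us-east-1.ec2/', 'http://us-east-1a.clouds/']

With availability_zone=None both templates are skipped, the filter sees an
empty list, and the failsafe entries win, matching
test_get_package_mirror_info_none.
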
diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/test_distros/test_hostname.py
deleted file mode 100644
index 5f28a868..00000000
--- a/tests/unittests/test_distros/test_hostname.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import unittest
-
-from cloudinit.distros.parsers import hostname
-
-
-BASE_HOSTNAME = '''
-# My super-duper-hostname
-
-blahblah
-
-'''
-BASE_HOSTNAME = BASE_HOSTNAME.strip()
-
-
-class TestHostnameHelper(unittest.TestCase):
- def test_parse_same(self):
- hn = hostname.HostnameConf(BASE_HOSTNAME)
- self.assertEqual(str(hn).strip(), BASE_HOSTNAME)
- self.assertEqual(hn.hostname, 'blahblah')
-
- def test_no_adjust_hostname(self):
- hn = hostname.HostnameConf(BASE_HOSTNAME)
- prev_name = hn.hostname
- hn.set_hostname("")
- self.assertEqual(hn.hostname, prev_name)
-
- def test_adjust_hostname(self):
- hn = hostname.HostnameConf(BASE_HOSTNAME)
- prev_name = hn.hostname
- self.assertEqual(prev_name, 'blahblah')
- hn.set_hostname("bbbbd")
- self.assertEqual(hn.hostname, 'bbbbd')
- expected_out = '''
-# My super-duper-hostname
-
-bbbbd
-'''
- self.assertEqual(str(hn).strip(), expected_out.strip())
diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py
deleted file mode 100644
index ab867c6f..00000000
--- a/tests/unittests/test_distros/test_hosts.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import unittest
-
-from cloudinit.distros.parsers import hosts
-
-
-BASE_ETC = '''
-# Example
-127.0.0.1 localhost
-192.168.1.10 foo.mydomain.org foo
-192.168.1.10 bar.mydomain.org bar
-146.82.138.7 master.debian.org master
-209.237.226.90 www.opensource.org
-'''
-BASE_ETC = BASE_ETC.strip()
-
-
-class TestHostsHelper(unittest.TestCase):
- def test_parse(self):
- eh = hosts.HostsConf(BASE_ETC)
- self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']])
- self.assertEqual(eh.get_entry('192.168.1.10'),
- [['foo.mydomain.org', 'foo'],
- ['bar.mydomain.org', 'bar']])
- eh = str(eh)
- self.assertTrue(eh.startswith('# Example'))
-
- def test_add(self):
- eh = hosts.HostsConf(BASE_ETC)
- eh.add_entry('127.0.0.0', 'blah')
- self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']])
- eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3')
- self.assertEqual(eh.get_entry('127.0.0.3'),
- [['blah', 'blah2', 'blah3']])
-
- def test_del(self):
- eh = hosts.HostsConf(BASE_ETC)
- eh.add_entry('127.0.0.0', 'blah')
- self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']])
-
- eh.del_entries('127.0.0.0')
- self.assertEqual(eh.get_entry('127.0.0.0'), [])
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
deleted file mode 100644
index 36eae2dc..00000000
--- a/tests/unittests/test_distros/test_netconfig.py
+++ /dev/null
@@ -1,381 +0,0 @@
-import os
-from six import StringIO
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
-from ..helpers import TestCase
-
-from cloudinit import distros
-from cloudinit.distros.parsers.sys_conf import SysConf
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import util
-
-
-BASE_NET_CFG = '''
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 192.168.1.5
- netmask 255.255.255.0
- network 192.168.0.0
- broadcast 192.168.1.0
- gateway 192.168.1.254
-
-auto eth1
-iface eth1 inet dhcp
-'''
-
-BASE_NET_CFG_IPV6 = '''
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 192.168.1.5
- netmask 255.255.255.0
- network 192.168.0.0
- broadcast 192.168.1.0
- gateway 192.168.1.254
-
-iface eth0 inet6 static
- address 2607:f0d0:1002:0011::2
- netmask 64
- gateway 2607:f0d0:1002:0011::1
-
-iface eth1 inet static
- address 192.168.1.6
- netmask 255.255.255.0
- network 192.168.0.0
- broadcast 192.168.1.0
- gateway 192.168.1.254
-
-iface eth1 inet6 static
- address 2607:f0d0:1002:0011::3
- netmask 64
- gateway 2607:f0d0:1002:0011::1
-'''
-
-
-class WriteBuffer(object):
- def __init__(self):
- self.buffer = StringIO()
- self.mode = None
- self.omode = None
-
- def write(self, text):
- self.buffer.write(text)
-
- def __str__(self):
- return self.buffer.getvalue()
-
-
-class TestNetCfgDistro(TestCase):
-
- def _get_distro(self, dname):
- cls = distros.fetch(dname)
- cfg = settings.CFG_BUILTIN
- cfg['system_info']['distro'] = dname
- paths = helpers.Paths({})
- return cls(dname, cfg, paths)
-
- def test_simple_write_ub(self):
- ub_distro = self._get_distro('ubuntu')
- with ExitStack() as mocks:
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
-
- ub_distro.apply_network(BASE_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 1)
- eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg'
- self.assertIn(eni_name, write_bufs)
- write_buf = write_bufs[eni_name]
- self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip())
- self.assertEqual(write_buf.mode, 0o644)
-
- def assertCfgEquals(self, blob1, blob2):
- b1 = dict(SysConf(blob1.strip().splitlines()))
- b2 = dict(SysConf(blob2.strip().splitlines()))
- self.assertEqual(b1, b2)
- for (k, v) in b1.items():
- self.assertIn(k, b2)
- for (k, v) in b2.items():
- self.assertIn(k, b1)
- for (k, v) in b1.items():
- self.assertEqual(v, b2[k])
-
- def test_simple_write_rh(self):
- rh_distro = self._get_distro('rhel')
-
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', return_value=''))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
-
- rh_distro.apply_network(BASE_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 4)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo']
- expected_buf = '''
-DEVICE="lo"
-ONBOOT=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
- expected_buf = '''
-DEVICE="eth0"
-BOOTPROTO="static"
-NETMASK="255.255.255.0"
-IPADDR="192.168.1.5"
-ONBOOT=yes
-GATEWAY="192.168.1.254"
-BROADCAST="192.168.1.0"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
- expected_buf = '''
-DEVICE="eth1"
-BOOTPROTO="dhcp"
-ONBOOT=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network', write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network']
- expected_buf = '''
-# Created by cloud-init v. 0.7
-NETWORKING=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_write_ipv6_rhel(self):
- rh_distro = self._get_distro('rhel')
-
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', return_value=''))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
-
- rh_distro.apply_network(BASE_NET_CFG_IPV6, False)
-
- self.assertEqual(len(write_bufs), 4)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo']
- expected_buf = '''
-DEVICE="lo"
-ONBOOT=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
- expected_buf = '''
-DEVICE="eth0"
-BOOTPROTO="static"
-NETMASK="255.255.255.0"
-IPADDR="192.168.1.5"
-ONBOOT=yes
-GATEWAY="192.168.1.254"
-BROADCAST="192.168.1.0"
-IPV6INIT=yes
-IPV6ADDR="2607:f0d0:1002:0011::2"
-IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
- expected_buf = '''
-DEVICE="eth1"
-BOOTPROTO="static"
-NETMASK="255.255.255.0"
-IPADDR="192.168.1.6"
-ONBOOT=no
-GATEWAY="192.168.1.254"
-BROADCAST="192.168.1.0"
-IPV6INIT=yes
-IPV6ADDR="2607:f0d0:1002:0011::3"
-IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network', write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network']
- expected_buf = '''
-# Created by cloud-init v. 0.7
-NETWORKING=yes
-NETWORKING_IPV6=yes
-IPV6_AUTOCONF=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_simple_write_freebsd(self):
- fbsd_distro = self._get_distro('freebsd')
-
- write_bufs = {}
- read_bufs = {
- '/etc/rc.conf': '',
- '/etc/resolv.conf': '',
- }
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
-        def replace_read(fname, read_cb=None, quiet=False):
-            # Anything we have "written" wins; otherwise fall back to
-            # the seeded read_bufs; unknown files raise IOError just as
-            # util.load_file would for a missing path.
-            if fname in write_bufs:
-                return str(write_bufs[fname])
-            if fname in read_bufs:
-                return read_bufs[fname]
-            raise IOError("%s not found" % fname)
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'subp', return_value=('vtnet0', '')))
- mocks.enter_context(
- mock.patch.object(os.path, 'exists', return_value=False))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', replace_read))
-
- fbsd_distro.apply_network(BASE_NET_CFG, False)
-
- self.assertIn('/etc/rc.conf', write_bufs)
- write_buf = write_bufs['/etc/rc.conf']
- expected_buf = '''
-ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
-ifconfig_vtnet1="DHCP"
-defaultrouter="192.168.1.254"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_apply_network_config_fallback(self):
- fbsd_distro = self._get_distro('freebsd')
-
-        # Verify that the freebsd distro does not (yet) implement
-        # _write_network_config or apply_network_config itself; if it
-        # did, this test would no longer exercise the fallback path.
- self.assertRaises(
- NotImplementedError, fbsd_distro._write_network_config,
- BASE_NET_CFG)
-
- # now run
- mynetcfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
-
- write_bufs = {}
- read_bufs = {
- '/etc/rc.conf': '',
- '/etc/resolv.conf': '',
- }
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
-        def replace_read(fname, read_cb=None, quiet=False):
-            # Anything we have "written" wins; otherwise fall back to
-            # the seeded read_bufs; unknown files raise IOError just as
-            # util.load_file would for a missing path.
-            if fname in write_bufs:
-                return str(write_bufs[fname])
-            if fname in read_bufs:
-                return read_bufs[fname]
-            raise IOError("%s not found" % fname)
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'subp', return_value=('vtnet0', '')))
- mocks.enter_context(
- mock.patch.object(os.path, 'exists', return_value=False))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', replace_read))
-
- fbsd_distro.apply_network_config(mynetcfg, bring_up=False)
-
- self.assertIn('/etc/rc.conf', write_bufs)
- write_buf = write_bufs['/etc/rc.conf']
- expected_buf = '''
-ifconfig_vtnet0="DHCP"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
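The deleted tests above all lean on one capture pattern: util.write_file is patched with a closure that records each would-be write in a dict of buffer objects, so the rendered configuration can be asserted on without touching the filesystem. A minimal standalone sketch of that pattern; this WriteBuffer is a plausible reconstruction of the helper defined earlier in the module (not shown in this hunk):

    import mock  # python2-era import; on python3: from unittest import mock

    class WriteBuffer(object):
        # Records content, mode and omode handed to the mocked write_file.
        def __init__(self):
            self.buffer = []
            self.mode = None
            self.omode = None

        def write(self, text):
            self.buffer.append(text)

        def __str__(self):
            return "".join(self.buffer)

    write_bufs = {}

    def replace_write(filename, content, mode=0o644, omode="wb"):
        buf = WriteBuffer()
        buf.mode = mode
        buf.omode = omode
        buf.write(content)
        write_bufs[filename] = buf

    # with mock.patch.object(util, 'write_file', replace_write):
    #     distro.apply_network(cfg, False)
    # str(write_bufs['/etc/rc.conf']) then holds the rendered content.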
diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py
deleted file mode 100644
index 9402b5ea..00000000
--- a/tests/unittests/test_distros/test_resolv.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from cloudinit.distros.parsers import resolv_conf
-from cloudinit.distros import rhel_util
-
-from ..helpers import TestCase
-
-import re
-import tempfile
-
-
-BASE_RESOLVE = '''
-; generated by /sbin/dhclient-script
-search blah.yahoo.com yahoo.com
-nameserver 10.15.44.14
-nameserver 10.15.30.92
-'''
-BASE_RESOLVE = BASE_RESOLVE.strip()
-
-
-class TestResolvHelper(TestCase):
- def test_parse_same(self):
- rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- rp_r = str(rp).strip()
- self.assertEqual(BASE_RESOLVE, rp_r)
-
- def test_write_works(self):
- with tempfile.NamedTemporaryFile() as fh:
- rhel_util.update_resolve_conf_file(fh.name, [], [])
-
- def test_local_domain(self):
- rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertEqual(None, rp.local_domain)
-
- rp.local_domain = "bob"
- self.assertEqual('bob', rp.local_domain)
- self.assertIn('domain bob', str(rp))
-
- def test_nameservers(self):
- rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertIn('10.15.44.14', rp.nameservers)
- self.assertIn('10.15.30.92', rp.nameservers)
- rp.add_nameserver('10.2')
- self.assertIn('10.2', rp.nameservers)
- self.assertIn('nameserver 10.2', str(rp))
- self.assertNotIn('10.3', rp.nameservers)
- self.assertEqual(len(rp.nameservers), 3)
- rp.add_nameserver('10.2')
- self.assertRaises(ValueError, rp.add_nameserver, '10.3')
- self.assertNotIn('10.3', rp.nameservers)
-
- def test_search_domains(self):
- rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertIn('yahoo.com', rp.search_domains)
- self.assertIn('blah.yahoo.com', rp.search_domains)
- rp.add_search_domain('bbb.y.com')
- self.assertIn('bbb.y.com', rp.search_domains)
- self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp)))
- self.assertIn('bbb.y.com', rp.search_domains)
- rp.add_search_domain('bbb.y.com')
- self.assertEqual(len(rp.search_domains), 3)
- rp.add_search_domain('bbb2.y.com')
- self.assertEqual(len(rp.search_domains), 4)
- rp.add_search_domain('bbb3.y.com')
- self.assertEqual(len(rp.search_domains), 5)
- rp.add_search_domain('bbb4.y.com')
- self.assertEqual(len(rp.search_domains), 6)
- self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com')
- self.assertEqual(len(rp.search_domains), 6)
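For context on the limits asserted above: the parser caps resolv.conf at three nameservers and six search domains, which is why the fourth distinct add_nameserver and the seventh distinct add_search_domain raise ValueError. A short usage sketch built only from calls these tests make:

    from cloudinit.distros.parsers import resolv_conf

    rp = resolv_conf.ResolvConf("search example.com\nnameserver 10.0.0.1")
    rp.add_nameserver('10.0.0.2')      # fine: still under the 3-server cap
    rp.add_search_domain('a.example.com')
    rp.local_domain = 'internal'       # serializes as a 'domain internal' line
    print(str(rp))                     # round-trips to resolv.conf text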
diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py
deleted file mode 100644
index 8cb55522..00000000
--- a/tests/unittests/test_distros/test_sysconfig.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import re
-
-from cloudinit.distros.parsers.sys_conf import SysConf
-
-from ..helpers import TestCase
-
-
-# Lots of good examples @
-# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt
-
-class TestSysConfHelper(TestCase):
-    # The equivalent stdlib assertion arrived in 2.7; this supports 2.6
- def assertRegMatches(self, text, regexp):
- regexp = re.compile(regexp)
- self.assertTrue(regexp.search(text),
- msg="%s must match %s!" % (text, regexp.pattern))
-
- def test_parse_no_change(self):
- contents = '''# A comment
-USESMBAUTH=no
-KEYTABLE=/usr/lib/kbd/keytables/us.map
-SHORTDATE=$(date +%y:%m:%d:%H:%M)
-HOSTNAME=blahblah
-NETMASK0=255.255.255.0
-# Inline comment
-LIST=$LOGROOT/incremental-list
-IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64'
-ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256"
-USEMD5=no'''
- conf = SysConf(contents.splitlines())
- self.assertEqual(conf['HOSTNAME'], 'blahblah')
- self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)')
- # Should be unquoted
- self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; '
- '-G ${DEVICE} rx 256 tx 256'))
- self.assertEqual(contents, str(conf))
-
- def test_parse_shell_vars(self):
- contents = 'USESMBAUTH=$XYZ'
- conf = SysConf(contents.splitlines())
- self.assertEqual(contents, str(conf))
- conf = SysConf('')
- conf['B'] = '${ZZ}d apples'
- # Should be quoted
- self.assertEqual('B="${ZZ}d apples"', str(conf))
- conf = SysConf('')
- conf['B'] = '$? d apples'
- self.assertEqual('B="$? d apples"', str(conf))
- contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"'
- conf = SysConf(contents.splitlines())
- self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf))
-
- def test_parse_adjust(self):
- contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"'
- conf = SysConf(contents.splitlines())
- # Should be unquoted
- self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64',
- conf['IPV6TO4_ROUTING'])
- conf['IPV6TO4_ROUTING'] = "blah \tblah"
- contents2 = str(conf).strip()
- # Should be requoted due to whitespace
- self.assertRegMatches(contents2,
- r'IPV6TO4_ROUTING=[\']blah\s+blah[\']')
-
- def test_parse_no_adjust_shell(self):
- conf = SysConf(''.splitlines())
- conf['B'] = ' $(time)'
- contents = str(conf)
- self.assertEqual('B= $(time)', contents)
-
- def test_parse_empty(self):
- contents = ''
- conf = SysConf(contents.splitlines())
- self.assertEqual('', str(conf).strip())
-
- def test_parse_add_new(self):
- contents = 'BLAH=b'
- conf = SysConf(contents.splitlines())
- conf['Z'] = 'd'
- contents = str(conf)
- self.assertIn("Z=d", contents)
- self.assertIn("BLAH=b", contents)
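The quoting rules pinned down above reduce to: on output, SysConf quotes values containing whitespace and leaves simple values bare, while key lookups always return the unquoted value. A sketch using only behavior shown in these tests:

    from cloudinit.distros.parsers.sys_conf import SysConf

    conf = SysConf('')
    conf['Z'] = 'd'                # simple value stays bare: Z=d
    conf['B'] = '${ZZ}d apples'    # whitespace forces: B="${ZZ}d apples"
    print(str(conf))

    conf = SysConf('IPMI_WATCHDOG_OPTIONS="timeout=60"'.splitlines())
    print(conf['IPMI_WATCHDOG_OPTIONS'])   # 'timeout=60', unquoted on access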
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
deleted file mode 100644
index a887a930..00000000
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ /dev/null
@@ -1,297 +0,0 @@
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import settings
-
-from ..helpers import TestCase
-
-
-bcfg = {
- 'name': 'bob',
- 'plain_text_passwd': 'ubuntu',
- 'home': "/home/ubuntu",
- 'shell': "/bin/bash",
- 'lock_passwd': True,
- 'gecos': "Ubuntu",
- 'groups': ["foo"]
-}
-
-
-class TestUGNormalize(TestCase):
-
- def _make_distro(self, dtype, def_user=None):
- cfg = dict(settings.CFG_BUILTIN)
- cfg['system_info']['distro'] = dtype
- paths = helpers.Paths(cfg['system_info']['paths'])
- distro_cls = distros.fetch(dtype)
- if def_user:
- cfg['system_info']['default_user'] = def_user.copy()
- distro = distro_cls(dtype, cfg['system_info'], paths)
- return distro
-
- def _norm(self, cfg, distro):
- return distros.normalize_users_groups(cfg, distro)
-
- def test_group_dict(self):
- distro = self._make_distro('ubuntu')
- g = {'groups':
- [{'ubuntu': ['foo', 'bar'],
- 'bob': 'users'},
- 'cloud-users',
- {'bob': 'users2'}]}
- (_users, groups) = self._norm(g, distro)
- self.assertIn('ubuntu', groups)
- ub_members = groups['ubuntu']
- self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members))
- self.assertIn('bob', groups)
- b_members = groups['bob']
- self.assertEqual(sorted(['users', 'users2']),
- sorted(b_members))
-
- def test_basic_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': ['bob'],
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertEqual({}, users)
-
- def test_csv_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': 'bob,joe,steve',
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_more_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': ['bob', 'joe', 'steve']
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_member_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': {
- 'bob': ['s'],
- 'joe': [],
- 'steve': [],
- }
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertEqual(['s'], groups['bob'])
- self.assertEqual([], groups['joe'])
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_users_simple_dict(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': {
- 'default': True,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- ug_cfg = {
- 'users': {
- 'default': 'yes',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- ug_cfg = {
- 'users': {
- 'default': '1',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
-
- def test_users_simple_dict_no(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': {
- 'default': False,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
- ug_cfg = {
- 'users': {
- 'default': 'no',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
-
- def test_users_simple_csv(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': 'joe,bob',
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_simple(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- 'joe',
- 'bob'
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_old_user(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'user': 'zetta',
- 'users': 'default'
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertNotIn('bob', users) # Bob is not the default now, zetta is
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- self.assertNotIn('default', users)
- ug_cfg = {
- 'user': 'zetta',
- 'users': 'default, joe'
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertNotIn('bob', users) # Bob is not the default now, zetta is
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- self.assertNotIn('default', users)
- ug_cfg = {
- 'user': 'zetta',
- 'users': ['bob', 'joe']
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- ug_cfg = {
- 'user': 'zetta',
- 'users': {
- 'bob': True,
- 'joe': True,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- ug_cfg = {
- 'user': 'zetta',
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('zetta', users)
- ug_cfg = {}
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
- self.assertEqual({}, groups)
-
- def test_users_dict_default_additional(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- {'name': 'default', 'blah': True}
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertEqual(",".join(distro.get_default_user()['groups']),
- users['bob']['groups'])
- self.assertEqual(True, users['bob']['blah'])
- self.assertEqual(True, users['bob']['default'])
-
- def test_users_dict_extract(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- 'default',
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- (name, config) = distros.extract_default(users)
- self.assertEqual(name, 'bob')
- expected_config = {}
- def_config = None
- try:
- def_config = distro.get_default_user()
- except NotImplementedError:
- pass
- if not def_config:
- def_config = {}
- expected_config.update(def_config)
-
- # Ignore these for now
- expected_config.pop('name', None)
- expected_config.pop('groups', None)
- config.pop('groups', None)
- self.assertEqual(config, expected_config)
-
- def test_users_dict_default(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- 'default',
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertEqual(",".join(distro.get_default_user()['groups']),
- users['bob']['groups'])
- self.assertEqual(True, users['bob']['default'])
-
- def test_users_dict_trans(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe',
- 'tr-me': True},
- {'name': 'bob'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'tr_me': True, 'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_dict(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe'},
- {'name': 'bob'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
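Taken together these cases fix the contract of distros.normalize_users_groups: any mix of comma-separated strings, lists, and dicts under 'users' and 'groups' normalizes to a pair of plain dicts, with the distro's default user folded in on request. A condensed sketch whose distro setup mirrors _make_distro above:

    from cloudinit import distros, helpers, settings

    cfg = dict(settings.CFG_BUILTIN)
    cfg['system_info']['distro'] = 'ubuntu'
    paths = helpers.Paths(cfg['system_info']['paths'])
    distro = distros.fetch('ubuntu')('ubuntu', cfg['system_info'], paths)

    users, groups = distros.normalize_users_groups(
        {'users': ['joe', 'bob'], 'groups': 'admins,staff'}, distro)
    # users  -> {'joe': {'default': False}, 'bob': {'default': False}}
    # groups -> has both 'admins' and 'staff' as keys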
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
deleted file mode 100644
index d6cf17fa..00000000
--- a/tests/unittests/test_ec2_util.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from . import helpers
-
-from cloudinit import ec2_utils as eu
-from cloudinit import url_helper as uh
-
-hp = helpers.import_httpretty()
-
-
-class TestEc2Util(helpers.HttprettyTestCase):
- VERSION = 'latest'
-
- @hp.activate
- def test_userdata_fetch(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- body='stuff',
- status=200)
- userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEqual('stuff', userdata.decode('utf-8'))
-
- @hp.activate
- def test_userdata_fetch_fail_not_found(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=404)
- userdata = eu.get_instance_userdata(self.VERSION, retries=0)
- self.assertEqual('', userdata)
-
- @hp.activate
- def test_userdata_fetch_fail_server_dead(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=500)
- userdata = eu.get_instance_userdata(self.VERSION, retries=0)
- self.assertEqual('', userdata)
-
- @hp.activate
- def test_userdata_fetch_fail_server_not_found(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=404)
- userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEqual('', userdata)
-
- @hp.activate
- def test_metadata_fetch_no_keys(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'ami-launch-index']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'),
- status=200, body='1')
- md = eu.get_instance_metadata(self.VERSION, retries=0)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(md['ami-launch-index'], '1')
-
- @hp.activate
- def test_metadata_fetch_key(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'public-keys/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
- status=200, body='0=my-public-key')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/0/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(1, len(md['public-keys']))
-
- @hp.activate
- def test_metadata_fetch_with_2_keys(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'public-keys/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
- status=200,
- body="\n".join(['0=my-public-key', '1=my-other-key']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/0/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/1/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(2, len(md['public-keys']))
-
- @hp.activate
- def test_metadata_fetch_bdm(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'block-device-mapping/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'block-device-mapping/'),
- status=200,
- body="\n".join(['ami', 'ephemeral0']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'block-device-mapping/ami'),
- status=200,
- body="sdb")
- hp.register_uri(hp.GET,
- uh.combine_url(base_url,
- 'block-device-mapping/ephemeral0'),
- status=200,
- body="sdc")
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- bdm = md['block-device-mapping']
- self.assertEqual(2, len(bdm))
- self.assertEqual(bdm['ami'], 'sdb')
- self.assertEqual(bdm['ephemeral0'], 'sdc')
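The pattern across these cases: get_instance_metadata fetches the .../meta-data/ listing and recurses into every entry ending in '/', so public-keys/ and block-device-mapping/ come back as nested structures. A usage sketch against the same endpoint the tests fake with httpretty:

    from cloudinit import ec2_utils as eu

    # assumes http://169.254.169.254 is reachable (or faked, as above)
    userdata = eu.get_instance_userdata('latest', retries=0)
    md = eu.get_instance_metadata('latest', retries=0, timeout=0.1)
    print(md.get('instance-id'))
    print(md.get('block-device-mapping'))  # e.g. {'ami': 'sdb', 'ephemeral0': 'sdc'}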
diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_filters/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py
deleted file mode 100644
index 395713e6..00000000
--- a/tests/unittests/test_filters/test_launch_index.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import copy
-
-from .. import helpers
-
-from six.moves import filterfalse
-
-from cloudinit.filters import launch_index
-from cloudinit import user_data as ud
-from cloudinit import util
-
-
-def count_messages(root):
- am = 0
- for m in root.walk():
- if ud.is_skippable(m):
- continue
- am += 1
- return am
-
-
-class TestLaunchFilter(helpers.ResourceUsingTestCase):
-
- def assertCounts(self, message, expected_counts):
- orig_message = copy.deepcopy(message)
- for (index, count) in expected_counts.items():
- index = util.safe_int(index)
- filtered_message = launch_index.Filter(index).apply(message)
- self.assertEqual(count_messages(filtered_message), count)
- # Ensure original message still ok/not modified
- self.assertTrue(self.equivalentMessage(message, orig_message))
-
- def equivalentMessage(self, msg1, msg2):
- msg1_count = count_messages(msg1)
- msg2_count = count_messages(msg2)
- if msg1_count != msg2_count:
- return False
- # Do some basic payload checking
-        msg1_msgs = list(filterfalse(ud.is_skippable, msg1.walk()))
-        msg2_msgs = list(filterfalse(ud.is_skippable, msg2.walk()))
-        # counts were compared above, so a pairwise zip is safe here
-        for m1_msg, m2_msg in zip(msg1_msgs, msg2_msgs):
- if m1_msg.get_charset() != m2_msg.get_charset():
- return False
- if m1_msg.is_multipart() != m2_msg.is_multipart():
- return False
- m1_py = m1_msg.get_payload(decode=True)
- m2_py = m2_msg.get_payload(decode=True)
- if m1_py != m2_py:
- return False
- return True
-
- def testMultiEmailIndex(self):
- test_data = self.readResource('filter_cloud_multipart_2.email')
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(test_data)
- self.assertTrue(count_messages(message) > 0)
- # This file should have the following
- # indexes -> amount mapping in it
- expected_counts = {
- 3: 1,
- 2: 2,
- None: 3,
- -1: 0,
- }
- self.assertCounts(message, expected_counts)
-
- def testHeaderEmailIndex(self):
- test_data = self.readResource('filter_cloud_multipart_header.email')
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(test_data)
- self.assertTrue(count_messages(message) > 0)
- # This file should have the following
- # indexes -> amount mapping in it
- expected_counts = {
- 5: 1,
- -1: 0,
- 'c': 1,
- None: 1,
- }
- self.assertCounts(message, expected_counts)
-
- def testConfigEmailIndex(self):
- test_data = self.readResource('filter_cloud_multipart_1.email')
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(test_data)
- self.assertTrue(count_messages(message) > 0)
- # This file should have the following
- # indexes -> amount mapping in it
- expected_counts = {
- 2: 1,
- -1: 0,
- None: 1,
- }
- self.assertCounts(message, expected_counts)
-
- def testNoneIndex(self):
- test_data = self.readResource('filter_cloud_multipart.yaml')
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(test_data)
- start_count = count_messages(message)
- self.assertTrue(start_count > 0)
- filtered_message = launch_index.Filter(None).apply(message)
- self.assertTrue(self.equivalentMessage(message, filtered_message))
-
- def testIndexes(self):
- test_data = self.readResource('filter_cloud_multipart.yaml')
- ud_proc = ud.UserDataProcessor(self.getCloudPaths())
- message = ud_proc.process(test_data)
- start_count = count_messages(message)
- self.assertTrue(start_count > 0)
- # This file should have the following
- # indexes -> amount mapping in it
- expected_counts = {
- 2: 2,
- 3: 2,
- 1: 2,
- 0: 1,
- 4: 1,
- 7: 0,
- -1: 0,
- 100: 0,
- # None should just give all back
- None: start_count,
- # Non ints should be ignored
- 'c': start_count,
- # Strings should be converted
- '1': 2,
- }
- self.assertCounts(message, expected_counts)
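The expected_counts tables above encode the filter's rule: Filter(idx).apply(message) keeps only parts whose launch index matches idx, None keeps everything, non-integer indexes are ignored, and numeric strings are converted. A sketch of applying it to processed user-data; 'paths' and 'blob' are placeholders for the Paths object and raw user-data string the test harness supplies:

    from cloudinit import user_data as ud
    from cloudinit.filters import launch_index

    message = ud.UserDataProcessor(paths).process(blob)  # placeholders
    kept = launch_index.Filter(2).apply(message)         # index-2 parts only
    everything = launch_index.Filter(None).apply(message)  # no-op filter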
diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_handler/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
deleted file mode 100644
index d1dca2c4..00000000
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from cloudinit.config import cc_apt_configure
-from cloudinit import util
-
-from ..helpers import TestCase
-
-import os
-import re
-import shutil
-import tempfile
-
-
-def load_tfile_or_url(*args, **kwargs):
-    return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
-
-
-class TestAptProxyConfig(TestCase):
- def setUp(self):
- super(TestAptProxyConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.pfile = os.path.join(self.tmp, "proxy.cfg")
- self.cfile = os.path.join(self.tmp, "config.cfg")
-
- def _search_apt_config(self, contents, ptype, value):
- return re.search(
- r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
- contents, flags=re.IGNORECASE)
-
- def test_apt_proxy_written(self):
- cfg = {'apt_proxy': 'myproxy'}
- cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
- self.assertTrue(os.path.isfile(self.pfile))
- self.assertFalse(os.path.isfile(self.cfile))
-
- contents = load_tfile_or_url(self.pfile)
- self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
-
- def test_apt_http_proxy_written(self):
- cfg = {'apt_http_proxy': 'myproxy'}
- cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
- self.assertTrue(os.path.isfile(self.pfile))
- self.assertFalse(os.path.isfile(self.cfile))
-
- contents = load_tfile_or_url(self.pfile)
- self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
-
- def test_apt_all_proxy_written(self):
- cfg = {'apt_http_proxy': 'myproxy_http_proxy',
- 'apt_https_proxy': 'myproxy_https_proxy',
- 'apt_ftp_proxy': 'myproxy_ftp_proxy'}
-
- values = {'http': cfg['apt_http_proxy'],
- 'https': cfg['apt_https_proxy'],
- 'ftp': cfg['apt_ftp_proxy'],
- }
-
- cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
- self.assertTrue(os.path.isfile(self.pfile))
- self.assertFalse(os.path.isfile(self.cfile))
-
- contents = load_tfile_or_url(self.pfile)
-
- for ptype, pval in values.items():
- self.assertTrue(self._search_apt_config(contents, ptype, pval))
-
- def test_proxy_deleted(self):
-        util.write_file(self.cfile, "content doesn't matter")
- cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
- self.assertFalse(os.path.isfile(self.pfile))
- self.assertFalse(os.path.isfile(self.cfile))
-
- def test_proxy_replaced(self):
-        util.write_file(self.cfile, "content doesn't matter")
- cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
- self.pfile, self.cfile)
- self.assertTrue(os.path.isfile(self.pfile))
- contents = load_tfile_or_url(self.pfile)
- self.assertTrue(self._search_apt_config(contents, "http", "foo"))
-
- def test_config_written(self):
- payload = 'this is my apt config'
- cfg = {'apt_config': payload}
-
- cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
- self.assertTrue(os.path.isfile(self.cfile))
- self.assertFalse(os.path.isfile(self.pfile))
-
- self.assertEqual(load_tfile_or_url(self.cfile), payload)
-
- def test_config_replaced(self):
-        util.write_file(self.pfile, "content doesn't matter")
- cc_apt_configure.apply_apt_config({'apt_config': "foo"},
- self.pfile, self.cfile)
- self.assertTrue(os.path.isfile(self.cfile))
- self.assertEqual(load_tfile_or_url(self.cfile), "foo")
-
- def test_config_deleted(self):
- # if no 'apt_config' is provided, delete any previously written file
-        util.write_file(self.pfile, "content doesn't matter")
- cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
- self.assertFalse(os.path.isfile(self.pfile))
- self.assertFalse(os.path.isfile(self.cfile))
-
-
-# vi: ts=4 expandtab
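The regex in _search_apt_config spells out the on-disk format these tests expect: each configured proxy becomes an Acquire::TYPE::Proxy "value"; line in the proxy file, and an empty config removes both files. A sketch of driving apply_apt_config directly, with placeholder paths standing in for the tmpdir the tests use:

    from cloudinit.config import cc_apt_configure

    pfile = '/tmp/apt-proxy.cfg'    # placeholder paths
    cfile = '/tmp/apt-config.cfg'

    cc_apt_configure.apply_apt_config(
        {'apt_http_proxy': 'http://proxy.example.com:3128'}, pfile, cfile)
    # pfile now holds a line of the form (per the regex above):
    #   Acquire::http::Proxy "http://proxy.example.com:3128";

    cc_apt_configure.apply_apt_config({}, pfile, cfile)
    # with no apt_* keys, both files are removed again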
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py
deleted file mode 100644
index acde0863..00000000
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py
+++ /dev/null
@@ -1,180 +0,0 @@
-""" test_handler_apt_configure_sources_list
-Test templating of sources list
-"""
-import logging
-import os
-import shutil
-import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.distros.debian import Distro
-
-from .. import helpers as t_help
-
-LOG = logging.getLogger(__name__)
-
-YAML_TEXT_CUSTOM_SL = """
-apt_mirror: http://archive.ubuntu.com/ubuntu/
-apt_custom_sources_list: |
- ## template:jinja
- ## Note, this file is written by cloud-init on first boot of an instance
- ## modifications made here will not survive a re-bundle.
- ## if you wish to make changes you can:
- ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
- ## or do the same in user-data
- ## b.) add sources in /etc/apt/sources.list.d
- ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
-
- # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
- # newer versions of the distribution.
- deb {{mirror}} {{codename}} main restricted
- deb-src {{mirror}} {{codename}} main restricted
- # FIND_SOMETHING_SPECIAL
-"""
-
-EXPECTED_CONVERTED_CONTENT = (
- """## Note, this file is written by cloud-init on first boot of an instance
-## modifications made here will not survive a re-bundle.
-## if you wish to make changes you can:
-## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
-## or do the same in user-data
-## b.) add sources in /etc/apt/sources.list.d
-## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
-
-# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-# newer versions of the distribution.
-deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
-deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
-# FIND_SOMETHING_SPECIAL
-""")
-
-
-def load_tfile_or_url(*args, **kwargs):
- """load_tfile_or_url
- load file and return content after decoding
- """
- return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
-
-
-class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
- """TestAptSourceConfigSourceList
- Main Class to test sources list rendering
- """
- def setUp(self):
- super(TestAptSourceConfigSourceList, self).setUp()
- self.subp = util.subp
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def apt_source_list(self, distro, mirror, mirrorcheck=None):
- """apt_source_list
- Test rendering of a source.list from template for a given distro
- """
- if mirrorcheck is None:
- mirrorcheck = mirror
-
- if isinstance(mirror, list):
- cfg = {'apt_mirror_search': mirror}
- else:
- cfg = {'apt_mirror': mirror}
- mycloud = self._get_cloud(distro)
-
- with mock.patch.object(templater, 'render_to_file') as mocktmpl:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mockisfile:
- with mock.patch.object(util, 'rename'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
-
- mockisfile.assert_any_call(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
- mocktmpl.assert_called_once_with(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro),
- '/etc/apt/sources.list',
- {'codename': '', 'primary': mirrorcheck, 'mirror': mirrorcheck})
-
- def test_apt_source_list_debian(self):
- """Test rendering of a source.list from template for debian"""
- self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
-
- def test_apt_source_list_ubuntu(self):
- """Test rendering of a source.list from template for ubuntu"""
- self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
-
- @staticmethod
- def myresolve(name):
- """Fake util.is_resolvable for mirrorfail tests"""
- if name == "does.not.exist":
- print("Faking FAIL for '%s'" % name)
- return False
- else:
- print("Faking SUCCESS for '%s'" % name)
- return True
-
- def test_apt_srcl_debian_mirrorfail(self):
-        """Test source.list rendering for debian with a failing mirror"""
- with mock.patch.object(util, 'is_resolvable',
- side_effect=self.myresolve) as mockresolve:
- self.apt_source_list('debian',
- ['http://does.not.exist',
- 'http://httpredir.debian.org/debian'],
- 'http://httpredir.debian.org/debian')
- mockresolve.assert_any_call("does.not.exist")
- mockresolve.assert_any_call("httpredir.debian.org")
-
- def test_apt_srcl_ubuntu_mirrorfail(self):
-        """Test source.list rendering for ubuntu with a failing mirror"""
- with mock.patch.object(util, 'is_resolvable',
- side_effect=self.myresolve) as mockresolve:
- self.apt_source_list('ubuntu',
- ['http://does.not.exist',
- 'http://archive.ubuntu.com/ubuntu/'],
- 'http://archive.ubuntu.com/ubuntu/')
- mockresolve.assert_any_call("does.not.exist")
- mockresolve.assert_any_call("archive.ubuntu.com")
-
- def test_apt_srcl_custom(self):
- """Test rendering from a custom source.list template"""
- cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
-
- # the second mock restores the original subp
- with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(util, 'subp', self.subp):
- with mock.patch.object(cc_apt_configure, 'get_release',
- return_value='fakerelease'):
- with mock.patch.object(Distro, 'get_primary_arch',
- return_value='amd64'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
-
- mockwrite.assert_called_once_with(
- '/etc/apt/sources.list',
- EXPECTED_CONVERTED_CONTENT,
- mode=420)
-
-
-# vi: ts=4 expandtab
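The assertion at the heart of apt_source_list pins the rendering contract: the handler picks /etc/cloud/templates/sources.list.DISTRO.tmpl and renders it to /etc/apt/sources.list with the chosen mirror substituted for both 'primary' and 'mirror'. In sketch form, with the values asserted above ('codename' is '' in these tests):

    from cloudinit import templater

    templater.render_to_file(
        '/etc/cloud/templates/sources.list.ubuntu.tmpl',
        '/etc/apt/sources.list',
        {'codename': '', 'primary': 'http://archive.ubuntu.com/ubuntu/',
         'mirror': 'http://archive.ubuntu.com/ubuntu/'})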
diff --git a/tests/unittests/test_handler/test_handler_apt_source.py b/tests/unittests/test_handler/test_handler_apt_source.py
deleted file mode 100644
index 99a4d860..00000000
--- a/tests/unittests/test_handler/test_handler_apt_source.py
+++ /dev/null
@@ -1,516 +0,0 @@
-""" test_handler_apt_source
-Testing various config variations of the apt_source config
-"""
-import os
-import re
-import shutil
-import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-from mock import call
-
-from cloudinit.config import cc_apt_configure
-from cloudinit import gpg
-from cloudinit import util
-
-from ..helpers import TestCase
-
-EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
-NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
-8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
-HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
-CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
-OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
-FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
-S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
-=ACB2
------END PGP PUBLIC KEY BLOCK-----"""
-
-
-def load_tfile_or_url(*args, **kwargs):
- """load_tfile_or_url
- load file and return content after decoding
- """
- return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
-
-
-class TestAptSourceConfig(TestCase):
- """TestAptSourceConfig
- Main Class to test apt_source configs
- """
- release = "fantastic"
-
- def setUp(self):
- super(TestAptSourceConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
- self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
- self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
- self.join = os.path.join
- # mock fallback filename into writable tmp dir
- self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/",
- "cloud_config_sources.list")
-
- patcher = mock.patch("cloudinit.config.cc_apt_configure.get_release")
- get_rel = patcher.start()
- get_rel.return_value = self.release
- self.addCleanup(patcher.stop)
-
- @staticmethod
- def _get_default_params():
-        """_get_default_params
-        Get the most basic default mirror and release info used in tests
- """
- params = {}
- params['RELEASE'] = cc_apt_configure.get_release()
- params['MIRROR'] = "http://archive.ubuntu.com/ubuntu"
- return params
-
- def myjoin(self, *args, **kwargs):
- """myjoin - redir into writable tmpdir"""
- if (args[0] == "/etc/apt/sources.list.d/" and
- args[1] == "cloud_config_sources.list" and
- len(args) == 2):
- return self.join(self.tmp, args[0].lstrip("/"), args[1])
- else:
- return self.join(*args, **kwargs)
-
- def apt_src_basic(self, filename, cfg):
-        """apt_src_basic
-        Test a fixed deb source string; it must overwrite the mirror conf
-        """
- params = self._get_default_params()
-
- cc_apt_configure.add_apt_sources(cfg, params)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = load_tfile_or_url(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "karmic-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_basic(self):
- """Test deb source string, overwrite mirror and filename"""
- cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile}
- self.apt_src_basic(self.aptlistfile, [cfg])
-
- def test_apt_src_basic_dict(self):
- """Test deb source string, overwrite mirror and filename (dict)"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}}
- self.apt_src_basic(self.aptlistfile, cfg)
-
- def apt_src_basic_tri(self, cfg):
-        """apt_src_basic_tri
-        Test three fixed deb source strings that must overwrite the mirror
-        conf in params, with filenames provided in the config; generic
-        part that checks three files with different content.
-        """
- self.apt_src_basic(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- contents = load_tfile_or_url(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "precise-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
- contents = load_tfile_or_url(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "lucid-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_basic_tri(self):
-        """Test three fixed deb source strings with filenames"""
- cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile}
- cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile2}
- cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile3}
- self.apt_src_basic_tri([cfg1, cfg2, cfg3])
-
- def test_apt_src_basic_dict_tri(self):
-        """Test three fixed deb source strings with filenames (dict)"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile2: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile3: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted')}}
- self.apt_src_basic_tri(cfg)
-
- def test_apt_src_basic_nofn(self):
-        """Test a fixed deb source string without a filename (fallback)"""
- cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_basic(self.fallbackfn, [cfg])
-
- def apt_src_replacement(self, filename, cfg):
- """apt_src_replace
- Test Autoreplacement of MIRROR and RELEASE in source specs
- """
- params = self._get_default_params()
- cc_apt_configure.add_apt_sources(cfg, params)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = load_tfile_or_url(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_replace(self):
- """Test Autoreplacement of MIRROR and RELEASE in source specs"""
- cfg = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- self.apt_src_replacement(self.aptlistfile, [cfg])
-
- def apt_src_replace_tri(self, cfg):
- """apt_src_replace_tri
- Test three autoreplacements of MIRROR and RELEASE in source specs with
- generic part
- """
- self.apt_src_replacement(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- params = self._get_default_params()
- contents = load_tfile_or_url(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "main"),
- contents, flags=re.IGNORECASE))
- contents = load_tfile_or_url(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "universe"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_replace_tri(self):
- """Test triple Autoreplacement of MIRROR and RELEASE in source specs"""
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- self.apt_src_replace_tri([cfg1, cfg2, cfg3])
-
- def test_apt_src_replace_dict_tri(self):
- """Test triple Autoreplacement in source specs (dict)"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
- 'notused': {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
- self.apt_src_replace_tri(cfg)
-
- def test_apt_src_replace_nofn(self):
- """Test Autoreplacement of MIRROR and RELEASE in source specs nofile"""
- cfg = {'source': 'deb $MIRROR $RELEASE multiverse'}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_replacement(self.fallbackfn, [cfg])
-
- def apt_src_keyid(self, filename, cfg, keynum):
- """apt_src_keyid
- Test specification of a source + keyid
- """
- params = self._get_default_params()
-
- with mock.patch.object(util, 'subp',
- return_value=('fakekey 1234', '')) as mockobj:
- cc_apt_configure.add_apt_sources(cfg, params)
-
-            # check that it added the right number of keys
- calls = []
- for _ in range(keynum):
- calls.append(call(('apt-key', 'add', '-'), 'fakekey 1234'))
- mockobj.assert_has_calls(calls, any_order=True)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = load_tfile_or_url(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_keyid(self):
- """Test specification of a source + keyid with filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile}
- self.apt_src_keyid(self.aptlistfile, [cfg], 1)
-
- def test_apt_src_keyid_tri(self):
- """Test 3x specification of a source + keyid with filename being set"""
- cfg1 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile}
- cfg2 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial universe'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile2}
- cfg3 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial multiverse'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile3}
-
- self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
- contents = load_tfile_or_url(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "universe"),
- contents, flags=re.IGNORECASE))
- contents = load_tfile_or_url(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_keyid_nofn(self):
- """Test specification of a source + keyid without filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_keyid(self.fallbackfn, [cfg], 1)
-
- def apt_src_key(self, filename, cfg):
- """apt_src_key
- Test specification of a source + key
- """
- params = self._get_default_params()
-
- with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params)
-
- mockobj.assert_called_with(('apt-key', 'add', '-'), 'fakekey 4321')
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = load_tfile_or_url(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_key(self):
- """Test specification of a source + key with filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321",
- 'filename': self.aptlistfile}
- self.apt_src_key(self.aptlistfile, cfg)
-
- def test_apt_src_key_nofn(self):
- """Test specification of a source + key without filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321"}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_key(self.fallbackfn, cfg)
-
- def test_apt_src_keyonly(self):
- """Test specifying key without source"""
- params = self._get_default_params()
- cfg = {'key': "fakekey 4242",
- 'filename': self.aptlistfile}
-
- with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params)
-
- mockobj.assert_called_once_with(('apt-key', 'add', '-'),
- 'fakekey 4242')
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_keyidonly(self):
- """Test specification of a keyid without source"""
- params = self._get_default_params()
- cfg = {'keyid': "03683F77",
- 'filename': self.aptlistfile}
-
- with mock.patch.object(util, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params)
-
- mockobj.assert_called_with(('apt-key', 'add', '-'), 'fakekey 1212')
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def apt_src_keyid_real(self, cfg, expectedkey):
- """apt_src_keyid_real
-        Test specification of a keyid without a source, up to and
-        including the key addition (add_apt_key_raw is mocked so the
-        environment is left untouched)
- """
- params = self._get_default_params()
-
- with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
- with mock.patch.object(gpg, 'get_key_by_id',
- return_value=expectedkey) as mockgetkey:
- cc_apt_configure.add_apt_sources([cfg], params)
-
- mockgetkey.assert_called_with(cfg['keyid'],
- cfg.get('keyserver',
- 'keyserver.ubuntu.com'))
- mockkey.assert_called_with(expectedkey)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_keyid_real(self):
- """test_apt_src_keyid_real - Test keyid including key add"""
- keyid = "03683F77"
- cfg = {'keyid': keyid,
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_longkeyid_real(self):
- """test_apt_src_longkeyid_real - Test long keyid including key add"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {'keyid': keyid,
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_longkeyid_ks_real(self):
- """test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {'keyid': keyid,
- 'keyserver': 'keys.gnupg.net',
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_ppa(self):
-        """Test adding a PPA"""
- params = self._get_default_params()
- cfg = {'source': 'ppa:smoser/cloud-init-test',
- 'filename': self.aptlistfile}
-
- # default matcher needed for ppa
- matcher = re.compile(r'^[\w-]+:\w').search
-
- with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params,
- aa_repo_match=matcher)
- mockobj.assert_called_once_with(['add-apt-repository',
- 'ppa:smoser/cloud-init-test'])
-
- # adding ppa should ignore filename (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_ppa_tri(self):
-        """Test adding three PPAs"""
- params = self._get_default_params()
- cfg1 = {'source': 'ppa:smoser/cloud-init-test',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'ppa:smoser/cloud-init-test2',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'ppa:smoser/cloud-init-test3',
- 'filename': self.aptlistfile3}
-
- # default matcher needed for ppa
- matcher = re.compile(r'^[\w-]+:\w').search
-
- with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg1, cfg2, cfg3], params,
- aa_repo_match=matcher)
- calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test']),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test2']),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'])]
- mockobj.assert_has_calls(calls, any_order=True)
-
- # adding ppa should ignore all filenames (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
- self.assertFalse(os.path.isfile(self.aptlistfile2))
- self.assertFalse(os.path.isfile(self.aptlistfile3))
-
- def test_convert_to_new_format(self):
- """Test the conversion of old to new format"""
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
-
- newcfg = cc_apt_configure.convert_to_new_format([cfg1, cfg2, cfg3])
- self.assertEqual(newcfg, checkcfg)
-
- newcfg2 = cc_apt_configure.convert_to_new_format(newcfg)
- self.assertEqual(newcfg2, checkcfg)
-
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_new_format(5)
-
-
-# vi: ts=4 expandtab
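test_convert_to_new_format above documents the shape change exactly: the legacy list of dicts (each carrying an explicit 'filename') becomes a dict keyed by filename, an already-converted dict passes through unchanged, and anything else raises ValueError. In sketch form:

    from cloudinit.config import cc_apt_configure

    old = [{'source': 'deb $MIRROR $RELEASE multiverse',
            'filename': '/etc/apt/sources.list.d/single-deb.list'}]
    new = cc_apt_configure.convert_to_new_format(old)
    # {'/etc/apt/sources.list.d/single-deb.list':
    #     {'filename': '/etc/apt/sources.list.d/single-deb.list',
    #      'source': 'deb $MIRROR $RELEASE multiverse'}}
    assert cc_apt_configure.convert_to_new_format(new) == new  # idempotent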
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
deleted file mode 100644
index 5e771731..00000000
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ /dev/null
@@ -1,271 +0,0 @@
-from cloudinit import cloud
-from cloudinit.config import cc_ca_certs
-from cloudinit import helpers
-from cloudinit import util
-
-from ..helpers import TestCase
-
-import logging
-import shutil
-import tempfile
-import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
-
-class TestNoConfig(unittest.TestCase):
- def setUp(self):
- super(TestNoConfig, self).setUp()
- self.name = "ca-certs"
- self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- def test_no_config(self):
- """
- Test that nothing is done if no ca-certs configuration is provided.
- """
- config = util.get_builtin_cfg()
- with ExitStack() as mocks:
- util_mock = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- certs_mock = mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'update_ca_certs'))
-
- cc_ca_certs.handle(self.name, config, self.cloud_init, self.log,
- self.args)
-
- self.assertEqual(util_mock.call_count, 0)
- self.assertEqual(certs_mock.call_count, 0)
-
-
-class TestConfig(TestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.name = "ca-certs"
- self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, None, None)
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- self.mocks = ExitStack()
- self.addCleanup(self.mocks.close)
-
- # Mock out the functions that actually modify the system
- self.mock_add = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'add_ca_certs'))
- self.mock_update = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'update_ca_certs'))
- self.mock_remove = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'remove_default_ca_certs'))
-
- def test_no_trusted_list(self):
- """
- Test that no certificates are written if the 'trusted' key is not
- present.
- """
- config = {"ca-certs": {}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_empty_trusted_list(self):
- """Test that no certificate are written if 'trusted' list is empty."""
- config = {"ca-certs": {"trusted": []}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_single_trusted(self):
- """Test that a single cert gets passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_multiple_trusted(self):
- """Test that multiple certs get passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1', 'CERT2'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_remove_default_ca_certs(self):
- """Test remove_defaults works as expected."""
- config = {"ca-certs": {"remove-defaults": True}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
-
- def test_no_remove_defaults_if_false(self):
- """Test remove_defaults is not called when config value is False."""
- config = {"ca-certs": {"remove-defaults": False}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_correct_order_for_remove_then_add(self):
- """Test remove_defaults is not called when config value is False."""
- config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
-
-
-class TestAddCaCerts(TestCase):
-
- def setUp(self):
- super(TestAddCaCerts, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- self.paths = helpers.Paths({
- 'cloud_dir': tmpdir,
- })
-
- def test_no_certs_in_list(self):
- """Test that no certificate are written if not provided."""
- with mock.patch.object(util, 'write_file') as mockobj:
- cc_ca_certs.add_ca_certs([])
- self.assertEqual(mockobj.call_count, 0)
-
- def test_single_cert_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when existing ca-certificates has trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
- expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
- def test_single_cert_no_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when existing ca-certificates has no trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\nline3"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode="wb")])
-
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
- def test_multiple_certs(self):
- """Test adding multiple certificates to the trusted CAs."""
- certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
- expected_cert_file = "\n".join(certs)
- ca_certs_content = "line1\nline2\nline3"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs(certs)
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- expected_cert_file, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode='wb')])
-
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
-
-class TestUpdateCaCerts(unittest.TestCase):
- def test_commands(self):
- with mock.patch.object(util, 'subp') as mockobj:
- cc_ca_certs.update_ca_certs()
- mockobj.assert_called_once_with(
- ["update-ca-certificates"], capture=False)
-
-
-class TestRemoveDefaultCaCerts(TestCase):
-
- def setUp(self):
- super(TestRemoveDefaultCaCerts, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- self.paths = helpers.Paths({
- 'cloud_dir': tmpdir,
- })
-
- def test_commands(self):
- with ExitStack() as mocks:
- mock_delete = mocks.enter_context(
- mock.patch.object(util, 'delete_dir_contents'))
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_subp = mocks.enter_context(mock.patch.object(util, 'subp'))
-
- cc_ca_certs.remove_default_ca_certs()
-
- mock_delete.assert_has_calls([
- mock.call("/usr/share/ca-certificates/"),
- mock.call("/etc/ssl/certs/")])
-
- mock_write.assert_called_once_with(
- "/etc/ca-certificates.conf", "", mode=0o644)
-
- mock_subp.assert_called_once_with(
- ('debconf-set-selections', '-'),
- "ca-certificates ca-certificates/trust_new_crts select no")
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
deleted file mode 100644
index 7a1bc317..00000000
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import json
-import logging
-import os
-import shutil
-import six
-import tempfile
-
-from cloudinit import cloud
-from cloudinit.config import cc_chef
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit.sources import DataSourceNone
-from cloudinit import util
-
-from .. import helpers as t_help
-
-LOG = logging.getLogger(__name__)
-
-CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
-
-
-class TestChef(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestChef, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def fetch_cloud(self, distro_kind):
- cls = distros.fetch(distro_kind)
- paths = helpers.Paths({})
- distro = cls(distro_kind, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, distro, paths, None)
- return cloud.Cloud(ds, paths, {}, distro, None)
-
- def test_no_config(self):
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- cfg = {}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- for d in cc_chef.CHEF_DIRS:
- self.assertFalse(os.path.isdir(d))
-
- @t_help.skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_basic_config(self):
- """
- test basic config looks sane
-
- # This should create a file of the format...
- # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
- log_level :info
- ssl_verify_mode :verify_none
- log_location "/var/log/chef/client.log"
- validation_client_name "bob"
- validation_key "/etc/chef/validation.pem"
- client_key "/etc/chef/client.pem"
- chef_server_url "localhost"
- environment "_default"
- node_name "iid-datasource-none"
- json_attribs "/etc/chef/firstboot.json"
- file_cache_path "/var/cache/chef"
- file_backup_path "/var/backups/chef"
- pid_file "/var/run/chef/client.pid"
- Chef::Log::Formatter.show_time = true
- """
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': "/etc/chef/vkey.pem",
- 'validation_cert': "this is my cert",
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- for d in cc_chef.CHEF_DIRS:
- self.assertTrue(os.path.isdir(d))
- c = util.load_file(cc_chef.CHEF_RB_PATH)
-
- # the content of these keys is not expected to be rendered to tmpl
- unrendered_keys = ('validation_cert',)
- for k, v in cfg['chef'].items():
- if k in unrendered_keys:
- continue
- self.assertIn(v, c)
- for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
- if k in unrendered_keys:
- continue
- # the value from the cfg overrides that in the default
- val = cfg['chef'].get(k, v)
- if isinstance(val, six.string_types):
- self.assertIn(val, c)
- c = util.load_file(cc_chef.CHEF_FB_PATH)
- self.assertEqual({}, json.loads(c))
-
- def test_firstboot_json(self):
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'run_list': ['a', 'b', 'c'],
- 'initial_attributes': {
- 'c': 'd',
- }
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- c = util.load_file(cc_chef.CHEF_FB_PATH)
- self.assertEqual(
- {
- 'run_list': ['a', 'b', 'c'],
- 'c': 'd',
- }, json.loads(c))
-
- @t_help.skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_template_deletes(self):
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'json_attribs': None,
- 'show_time': None,
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- c = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertNotIn('json_attribs', c)
- self.assertNotIn('Formatter.show_time', c)
-
- @t_help.skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_validation_cert_and_validation_key(self):
- # test validation_cert content is written to validation_key path
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- v_path = '/etc/chef/vkey.pem'
- v_cert = 'this is my cert'
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': v_path,
- 'validation_cert': v_cert
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- content = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertIn(v_path, content)
-        self.assertEqual(v_cert, util.load_file(v_path))
-
- def test_validation_cert_with_system(self):
- # test validation_cert content is not written over system file
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- v_path = '/etc/chef/vkey.pem'
- v_cert = "system"
- expected_cert = "this is the system file certificate"
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': v_path,
- 'validation_cert': v_cert
- },
- }
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- util.write_file(v_path, expected_cert)
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- content = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertIn(v_path, content)
-        self.assertEqual(expected_cert, util.load_file(v_path))
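
For reference, the configuration surface these tests cover, collected into one annotated dict (all values are placeholders):

    cfg = {
        'chef': {
            'server_url': 'localhost',
            'validation_name': 'bob',
            'validation_key': '/etc/chef/validation.pem',
            # written verbatim to the validation_key path, unless it is the
            # literal string "system", which leaves any existing file alone
            'validation_cert': 'placeholder certificate body',
            'run_list': ['a', 'b', 'c'],       # serialized into firstboot.json
            'initial_attributes': {'c': 'd'},  # merged into firstboot.json
            'json_attribs': None,              # None drops the line from client.rb
        },
    }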
diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py
deleted file mode 100644
index 80708d7b..00000000
--- a/tests/unittests/test_handler/test_handler_debug.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.config import cc_debug
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
-from .. import helpers as t_help
-
-import logging
-import shutil
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-class TestDebug(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestDebug, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, d, paths)
- if metadata:
- ds.metadata.update(metadata)
- return cloud.Cloud(ds, paths, {}, d, None)
-
- def test_debug_write(self):
- cfg = {
- 'abc': '123',
- 'c': u'\u20a0',
- 'debug': {
- 'verbose': True,
- # Does not actually write here due to mocking...
- 'output': '/var/log/cloud-init-debug.log',
- },
- }
- cc = self._get_cloud('ubuntu')
- cc_debug.handle('cc_debug', cfg, cc, LOG, [])
- contents = util.load_file('/var/log/cloud-init-debug.log')
- # Some basic sanity tests...
- self.assertNotEqual(0, len(contents))
- for k in cfg.keys():
- self.assertIn(k, contents)
-
- def test_debug_no_write(self):
- cfg = {
- 'abc': '123',
- 'debug': {
- 'verbose': False,
- # Does not actually write here due to mocking...
- 'output': '/var/log/cloud-init-debug.log',
- },
- }
- cc = self._get_cloud('ubuntu')
- cc_debug.handle('cc_debug', cfg, cc, LOG, [])
- self.assertRaises(IOError,
- util.load_file, '/var/log/cloud-init-debug.log')
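
The module's whole knob surface, as exercised here:

    cfg = {
        'debug': {
            'verbose': True,   # False suppresses the output file entirely
            # every top-level config key ends up in this dump
            'output': '/var/log/cloud-init-debug.log',
        },
    }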
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
deleted file mode 100644
index ddef8d48..00000000
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from cloudinit.config import cc_disk_setup
-from ..helpers import ExitStack, mock, TestCase
-
-
-class TestIsDiskUsed(TestCase):
-
- def setUp(self):
- super(TestIsDiskUsed, self).setUp()
- self.patches = ExitStack()
- mod_name = 'cloudinit.config.cc_disk_setup'
- self.enumerate_disk = self.patches.enter_context(
- mock.patch('{0}.enumerate_disk'.format(mod_name)))
- self.check_fs = self.patches.enter_context(
- mock.patch('{0}.check_fs'.format(mod_name)))
-
- def test_multiple_child_nodes_returns_true(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
- self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
- self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
- def test_valid_filesystem_returns_true(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
- self.check_fs.return_value = (
- mock.MagicMock(), 'ext4', mock.MagicMock())
- self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
- def test_one_child_nodes_and_no_fs_returns_false(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
- self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
- self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
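
A hedged sketch of the predicate these three cases define; not the real implementation, just the decision rule they assert, assuming the same enumerate_disk/check_fs helpers that are mocked above:

    def is_disk_used_sketch(device):
        # more than one child node means a partition table is in use
        if len(list(enumerate_disk(device))) > 1:
            return True
        # otherwise the disk is "used" only if it already carries a filesystem
        _, fs_type, _ = check_fs(device)
        return fs_type is not None   # e.g. 'ext4'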
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
deleted file mode 100644
index e653488a..00000000
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from cloudinit import cloud
-from cloudinit.config import cc_growpart
-from cloudinit import util
-
-from ..helpers import TestCase
-
-import errno
-import logging
-import os
-import re
-import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
-# growpart:
-# mode: auto # off, on, auto, 'growpart'
-# devices: ['root']
-
-HELP_GROWPART_RESIZE = """
-growpart disk partition
- rewrite partition table so that partition takes up all the space it can
- options:
- -h | --help print Usage and exit
-<SNIP>
- -u | --update  R  update the kernel partition table info after growing
- this requires kernel support and 'partx --update'
- R is one of:
- - 'auto' : [default] update partition if possible
-<SNIP>
- Example:
- - growpart /dev/sda 1
- Resize partition 1 on /dev/sda
-"""
-
-HELP_GROWPART_NO_RESIZE = """
-growpart disk partition
- rewrite partition table so that partition takes up all the space it can
- options:
- -h | --help print Usage and exit
-<SNIP>
- Example:
- - growpart /dev/sda 1
- Resize partition 1 on /dev/sda
-"""
-
-
-class TestDisabled(unittest.TestCase):
- def setUp(self):
- super(TestDisabled, self).setUp()
- self.name = "growpart"
- self.cloud_init = None
- self.log = logging.getLogger("TestDisabled")
- self.args = []
-
- self.handle = cc_growpart.handle
-
- def test_mode_off(self):
- # Test that nothing is done if mode is off.
-
- # this really only verifies that resizer_factory isn't called
- config = {'growpart': {'mode': 'off'}}
-
- with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj:
- self.handle(self.name, config, self.cloud_init, self.log,
- self.args)
- self.assertEqual(mockobj.call_count, 0)
-
-
-class TestConfig(TestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.name = "growpart"
- self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, None, None)
- self.log = logging.getLogger("TestConfig")
- self.args = []
- os.environ = {}
-
- self.cloud_init = None
- self.handle = cc_growpart.handle
-
- def test_no_resizers_auto_is_fine(self):
- with mock.patch.object(
- util, 'subp',
- return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
-
- config = {'growpart': {'mode': 'auto'}}
- self.handle(self.name, config, self.cloud_init, self.log,
- self.args)
-
- mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
-
- def test_no_resizers_mode_growpart_is_exception(self):
- with mock.patch.object(
- util, 'subp',
- return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
- config = {'growpart': {'mode': "growpart"}}
- self.assertRaises(
- ValueError, self.handle, self.name, config,
- self.cloud_init, self.log, self.args)
-
- mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
-
- def test_mode_auto_prefers_growpart(self):
- with mock.patch.object(
- util, 'subp',
- return_value=(HELP_GROWPART_RESIZE, "")) as mockobj:
- ret = cc_growpart.resizer_factory(mode="auto")
- self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
-
- mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
-
- def test_handle_with_no_growpart_entry(self):
- # if no 'growpart' entry in config, then mode=auto should be used
-
- myresizer = object()
- retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),)
-
- with ExitStack() as mocks:
- factory = mocks.enter_context(
- mock.patch.object(cc_growpart, 'resizer_factory',
- return_value=myresizer))
- rsdevs = mocks.enter_context(
- mock.patch.object(cc_growpart, 'resize_devices',
- return_value=retval))
- mocks.enter_context(
- mock.patch.object(cc_growpart, 'RESIZERS',
- (('mysizer', object),)
- ))
-
- self.handle(self.name, {}, self.cloud_init, self.log, self.args)
-
- factory.assert_called_once_with('auto')
- rsdevs.assert_called_once_with(myresizer, ['/'])
-
-
-class TestResize(unittest.TestCase):
- def setUp(self):
- super(TestResize, self).setUp()
- self.name = "growpart"
- self.log = logging.getLogger("TestResize")
-
- def test_simple_devices(self):
- # test simple device list
-        # this patches out devent2dev, os.stat, and device_part_info,
-        # so in the end it does not exercise much of the real code path
- devs = ["/dev/XXda1", "/dev/YYda2"]
- devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5,
- st_nlink=1, st_uid=0, st_gid=6, st_size=0,
- st_atime=0, st_mtime=0, st_ctime=0)
- enoent = ["/dev/NOENT"]
- real_stat = os.stat
- resize_calls = []
-
- class myresizer(object):
- def resize(self, diskdev, partnum, partdev):
- resize_calls.append((diskdev, partnum, partdev))
- if partdev == "/dev/YYda2":
- return (1024, 2048)
- return (1024, 1024) # old size, new size
-
- def mystat(path):
- if path in devs:
- return devstat_ret
- if path in enoent:
- e = OSError("%s: does not exist" % path)
- e.errno = errno.ENOENT
- raise e
- return real_stat(path)
-
- try:
- opinfo = cc_growpart.device_part_info
- cc_growpart.device_part_info = simple_device_part_info
- os.stat = mystat
-
- resized = cc_growpart.resize_devices(myresizer(), devs + enoent)
-
- def find(name, res):
- for f in res:
- if f[0] == name:
- return f
- return None
-
- self.assertEqual(cc_growpart.RESIZE.NOCHANGE,
- find("/dev/XXda1", resized)[1])
- self.assertEqual(cc_growpart.RESIZE.CHANGED,
- find("/dev/YYda2", resized)[1])
- self.assertEqual(cc_growpart.RESIZE.SKIPPED,
- find(enoent[0], resized)[1])
- # self.assertEqual(resize_calls,
- # [("/dev/XXda", "1", "/dev/XXda1"),
- # ("/dev/YYda", "2", "/dev/YYda2")])
- finally:
- cc_growpart.device_part_info = opinfo
- os.stat = real_stat
-
-
-def simple_device_part_info(devpath):
-    # simplistic: return ('/dev/vda', '1') for devpath '/dev/vda1'
- ret = re.search("([^0-9]*)([0-9]*)$", devpath)
- x = (ret.group(1), ret.group(2))
- return x
-
-
-class Bunch(object):
- def __init__(self, **kwds):
- self.__dict__.update(kwds)
-
-
-# vi: ts=4 expandtab
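
resize_devices, as TestResize pins it down, returns one (device, action, message) tuple per requested device. A sketch, where my_resizer stands for any object with a resize(diskdev, partnum, partdev) method:

    results = cc_growpart.resize_devices(my_resizer, ['/dev/XXda1', '/dev/NOENT'])
    for dev, action, _msg in results:
        # action is one of RESIZE.NOCHANGE (already maximal),
        # RESIZE.CHANGED (grown), or RESIZE.SKIPPED (e.g. missing device)
        print(dev, action)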
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
deleted file mode 100644
index c91908f4..00000000
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Based on test_handler_set_hostname.py
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.config import cc_locale
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from .. import helpers as t_help
-
-from configobj import ConfigObj
-
-from six import BytesIO
-
-import logging
-import shutil
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLocale(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestLocale, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- def test_set_locale_sles(self):
-
- cfg = {
- 'locale': 'My.Locale',
- }
- cc = self._get_cloud('sles')
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
-
- contents = util.load_file('/etc/sysconfig/language', decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg))
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
deleted file mode 100644
index 6f90defb..00000000
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ /dev/null
@@ -1,134 +0,0 @@
-from cloudinit.config import cc_lxd
-from cloudinit.sources import DataSourceNoCloud
-from cloudinit import (distros, helpers, cloud)
-from .. import helpers as t_help
-
-import logging
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLxd(t_help.TestCase):
- lxd_cfg = {
- 'lxd': {
- 'init': {
- 'network_address': '0.0.0.0',
- 'storage_backend': 'zfs',
- 'storage_pool': 'poolname',
- }
- }
- }
-
- def setUp(self):
- super(TestLxd, self).setUp()
-
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_init(self, mock_util):
- cc = self._get_cloud('ubuntu')
- mock_util.which.return_value = True
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, [])
- self.assertTrue(mock_util.which.called)
- init_call = mock_util.subp.call_args_list[0][0][0]
- self.assertEqual(init_call,
- ['lxd', 'init', '--auto',
- '--network-address=0.0.0.0',
- '--storage-backend=zfs',
- '--storage-pool=poolname'])
-
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_install(self, mock_util):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- mock_util.which.return_value = None
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, [])
- self.assertTrue(cc.distro.install_packages.called)
- install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
- self.assertEqual(sorted(install_pkg), ['lxd', 'zfs'])
-
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_init_does_nothing(self, mock_util):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, LOG, [])
- self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_util.subp.called)
-
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_lxd_does_nothing(self, mock_util):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc_lxd.handle('cc_lxd', {'package_update': True}, cc, LOG, [])
- self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_util.subp.called)
-
- def test_lxd_debconf_new_full(self):
- data = {"mode": "new",
- "name": "testbr0",
- "ipv4_address": "10.0.8.1",
- "ipv4_netmask": "24",
- "ipv4_dhcp_first": "10.0.8.2",
- "ipv4_dhcp_last": "10.0.8.254",
- "ipv4_dhcp_leases": "250",
- "ipv4_nat": "true",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true",
- "domain": "lxd"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "true",
- "lxd/bridge-name": "testbr0",
- "lxd/bridge-ipv4": "true",
- "lxd/bridge-ipv4-address": "10.0.8.1",
- "lxd/bridge-ipv4-netmask": "24",
- "lxd/bridge-ipv4-dhcp-first": "10.0.8.2",
- "lxd/bridge-ipv4-dhcp-last": "10.0.8.254",
- "lxd/bridge-ipv4-dhcp-leases": "250",
- "lxd/bridge-ipv4-nat": "true",
- "lxd/bridge-ipv6": "true",
- "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
- "lxd/bridge-ipv6-netmask": "64",
- "lxd/bridge-ipv6-nat": "true",
- "lxd/bridge-domain": "lxd"})
-
- def test_lxd_debconf_new_partial(self):
- data = {"mode": "new",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "true",
- "lxd/bridge-ipv6": "true",
- "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
- "lxd/bridge-ipv6-netmask": "64",
- "lxd/bridge-ipv6-nat": "true"})
-
- def test_lxd_debconf_existing(self):
- data = {"mode": "existing",
- "name": "testbr0"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "false",
- "lxd/use-existing-bridge": "true",
- "lxd/bridge-name": "testbr0"})
-
- def test_lxd_debconf_none(self):
- data = {"mode": "none"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "false",
- "lxd/bridge-name": ""})
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
deleted file mode 100644
index 6aefb93d..00000000
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from cloudinit import (cloud, distros, helpers, util)
-from cloudinit.config import cc_mcollective
-from cloudinit.sources import DataSourceNoCloud
-
-from .. import helpers as t_help
-
-import configobj
-import logging
-import os
-import shutil
-from six import BytesIO
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-STOCK_CONFIG = """\
-main_collective = mcollective
-collectives = mcollective
-libdir = /usr/share/mcollective/plugins
-logfile = /var/log/mcollective.log
-loglevel = info
-daemonize = 1
-
-# Plugins
-securityprovider = psk
-plugin.psk = unset
-
-connector = activemq
-plugin.activemq.pool.size = 1
-plugin.activemq.pool.1.host = stomp1
-plugin.activemq.pool.1.port = 61613
-plugin.activemq.pool.1.user = mcollective
-plugin.activemq.pool.1.password = marionette
-
-# Facts
-factsource = yaml
-plugin.yaml = /etc/mcollective/facts.yaml
-"""
-
-
-class TestConfig(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- # "./": make os.path.join behave correctly with abs path as second arg
- self.server_cfg = os.path.join(
- self.tmp, "./" + cc_mcollective.SERVER_CFG)
- self.pubcert_file = os.path.join(
- self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
-        self.pricert_file = os.path.join(
-            self.tmp, "./" + cc_mcollective.PRICERT_FILE)
-
- def test_basic_config(self):
- cfg = {
- 'mcollective': {
- 'conf': {
- 'loglevel': 'debug',
- 'connector': 'rabbitmq',
- 'logfile': '/var/log/mcollective.log',
- 'ttl': '4294957',
- 'collectives': 'mcollective',
- 'main_collective': 'mcollective',
- 'securityprovider': 'psk',
- 'daemonize': '1',
- 'factsource': 'yaml',
- 'direct_addressing': '1',
- 'plugin.psk': 'unset',
- 'libdir': '/usr/share/mcollective/plugins',
- 'identity': '1',
- },
- },
- }
- expected = cfg['mcollective']['conf']
-
- self.patchUtils(self.tmp)
- cc_mcollective.configure(cfg['mcollective']['conf'])
- contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
- contents = configobj.ConfigObj(BytesIO(contents))
- self.assertEqual(expected, dict(contents))
-
- def test_existing_config_is_saved(self):
- cfg = {'loglevel': 'warn'}
- util.write_file(self.server_cfg, STOCK_CONFIG)
- cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
- self.assertTrue(os.path.exists(self.server_cfg))
- self.assertTrue(os.path.exists(self.server_cfg + ".old"))
- self.assertEqual(util.load_file(self.server_cfg + ".old"), STOCK_CONFIG)
-
- def test_existing_updated(self):
- cfg = {'loglevel': 'warn'}
- util.write_file(self.server_cfg, STOCK_CONFIG)
- cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
- cfgobj = configobj.ConfigObj(self.server_cfg)
- self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
-
-    def test_certificates_written(self):
- # check public-cert and private-cert keys in config get written
- cfg = {'loglevel': 'debug',
- 'public-cert': "this is my public-certificate",
- 'private-cert': "secret private certificate"}
-
- cc_mcollective.configure(config=cfg,
- server_cfg=self.server_cfg, pricert_file=self.pricert_file,
- pubcert_file=self.pubcert_file)
-
- found = configobj.ConfigObj(self.server_cfg)
-
-        # make sure these didn't get written in
- self.assertFalse('public-cert' in found)
- self.assertFalse('private-cert' in found)
-
- # these need updating to the specified paths
- self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
- self.assertEqual(found['plugin.ssl_server_private'], self.pricert_file)
-
- # and the security provider should be ssl
- self.assertEqual(found['securityprovider'], 'ssl')
-
- self.assertEqual(
- util.load_file(self.pricert_file), cfg['private-cert'])
- self.assertEqual(
- util.load_file(self.pubcert_file), cfg['public-cert'])
-
-
-class TestHandler(t_help.TestCase):
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- @t_help.mock.patch("cloudinit.config.cc_mcollective.util")
- def test_mcollective_install(self, mock_util):
- cc = self._get_cloud('ubuntu')
- cc.distro = t_help.mock.MagicMock()
- mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
- cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
- self.assertTrue(cc.distro.install_packages.called)
- install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
- self.assertEqual(install_pkg, ('mcollective',))
-
- self.assertTrue(mock_util.subp.called)
- self.assertEqual(mock_util.subp.call_args_list[0][0][0],
- ['service', 'mcollective', 'restart'])
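
The certificate behavior pinned down above, summarized: the cert values themselves never land in server.cfg; they are written to separate files whose paths replace them, and securityprovider is forced to ssl. A sketch (cert bodies are placeholders):

    cfg = {
        'loglevel': 'debug',
        'public-cert': 'placeholder public certificate',    # -> PUBCERT_FILE
        'private-cert': 'placeholder private certificate',  # -> PRICERT_FILE
    }
    # the default keyword arguments target the real /etc/mcollective paths,
    # which is why the tests above pass tmpdir-based overrides
    cc_mcollective.configure(config=cfg)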
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
deleted file mode 100644
index 355674b2..00000000
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os.path
-import shutil
-import tempfile
-
-from cloudinit.config import cc_mounts
-
-from .. import helpers as test_helpers
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-
-class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestSanitizeDevname, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
- self.patchOS(self.new_root)
-
- def _touch(self, path):
- path = os.path.join(self.new_root, path.lstrip('/'))
- basedir = os.path.dirname(path)
- if not os.path.exists(basedir):
- os.makedirs(basedir)
- open(path, 'a').close()
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def mock_existence_of_disk(self, disk_path):
- self._touch(disk_path)
- self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1]))
-
- def mock_existence_of_partition(self, disk_path, partition_number):
- self.mock_existence_of_disk(disk_path)
- self._touch(disk_path + str(partition_number))
- disk_name = disk_path.split('/')[-1]
- self._makedirs(os.path.join('/sys/block',
- disk_name,
- disk_name + str(partition_number)))
-
- def test_existent_full_disk_path_is_returned(self):
- disk_path = '/dev/sda'
- self.mock_existence_of_disk(disk_path)
- self.assertEqual(disk_path,
- cc_mounts.sanitize_devname(disk_path,
- lambda x: None,
- mock.Mock()))
-
- def test_existent_disk_name_returns_full_path(self):
- disk_name = 'sda'
- disk_path = '/dev/' + disk_name
- self.mock_existence_of_disk(disk_path)
- self.assertEqual(disk_path,
- cc_mounts.sanitize_devname(disk_name,
- lambda x: None,
- mock.Mock()))
-
- def test_existent_meta_disk_is_returned(self):
- actual_disk_path = '/dev/sda'
- self.mock_existence_of_disk(actual_disk_path)
- self.assertEqual(
- actual_disk_path,
- cc_mounts.sanitize_devname('ephemeral0',
- lambda x: actual_disk_path,
- mock.Mock()))
-
- def test_existent_meta_partition_is_returned(self):
- disk_name, partition_part = '/dev/sda', '1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.1',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_existent_meta_partition_with_p_is_returned(self):
- disk_name, partition_part = '/dev/sda', 'p1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.1',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_first_partition_returned_if_existent_disk_is_partitioned(self):
- disk_name, partition_part = '/dev/sda', '1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_nth_partition_returned_if_requested(self):
- disk_name, partition_part = '/dev/sda', '3'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.3',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_transformer_returning_none_returns_none(self):
- self.assertIsNone(
- cc_mounts.sanitize_devname(
- 'ephemeral0', lambda x: None, mock.Mock()))
-
- def test_missing_device_returns_none(self):
- self.assertIsNone(
- cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock()))
-
- def test_missing_sys_returns_none(self):
- disk_path = '/dev/sda'
- self._makedirs(disk_path)
- self.assertIsNone(
- cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
- def test_existent_disk_but_missing_partition_returns_none(self):
- disk_path = '/dev/sda'
- self.mock_existence_of_disk(disk_path)
- self.assertIsNone(
- cc_mounts.sanitize_devname(
- 'ephemeral0.1', lambda x: disk_path, mock.Mock()))
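
In brief, the mapping these cases establish; the transformer turns metadata names such as 'ephemeral0' into a device path, and the third argument is only ever a Mock in these tests:

    from unittest import mock
    from cloudinit.config import cc_mounts

    log = mock.Mock()

    def to_sda(name):   # transformer: metadata name -> device path
        return '/dev/sda'

    # assuming /dev/sda exists and carries partition /dev/sda1:
    cc_mounts.sanitize_devname('/dev/sda', None, log)      # -> '/dev/sda'
    cc_mounts.sanitize_devname('sda', None, log)           # -> '/dev/sda'
    cc_mounts.sanitize_devname('ephemeral0', to_sda, log)  # -> '/dev/sda1'
    # ('/dev/sda' instead, when the disk has no partitions)
    cc_mounts.sanitize_devname('ephemeral0.3', to_sda, log)       # -> '/dev/sda3'
    cc_mounts.sanitize_devname('ephemeral0', lambda x: None, log)  # -> None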
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
deleted file mode 100644
index feff319d..00000000
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import sys
-
-from cloudinit.config import cc_power_state_change as psc
-
-from .. import helpers as t_help
-from ..helpers import mock
-
-
-class TestLoadPowerState(t_help.TestCase):
- def setUp(self):
-        super(TestLoadPowerState, self).setUp()
-
- def test_no_config(self):
- # completely empty config should mean do nothing
- (cmd, _timeout, _condition) = psc.load_power_state({})
- self.assertEqual(cmd, None)
-
- def test_irrelevant_config(self):
- # no power_state field in config should return None for cmd
- (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'})
- self.assertEqual(cmd, None)
-
- def test_invalid_mode(self):
- cfg = {'power_state': {'mode': 'gibberish'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
-
- cfg = {'power_state': {'mode': ''}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
-
- def test_empty_mode(self):
- cfg = {'power_state': {'message': 'goodbye'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
-
- def test_valid_modes(self):
- cfg = {'power_state': {}}
- for mode in ('halt', 'poweroff', 'reboot'):
- cfg['power_state']['mode'] = mode
- check_lps_ret(psc.load_power_state(cfg), mode=mode)
-
- def test_invalid_delay(self):
- cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
-
- def test_valid_delay(self):
- cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
- for delay in ("now", "+1", "+30"):
- cfg['power_state']['delay'] = delay
- check_lps_ret(psc.load_power_state(cfg))
-
- def test_message_present(self):
- cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
- ret = psc.load_power_state(cfg)
- check_lps_ret(psc.load_power_state(cfg))
- self.assertIn(cfg['power_state']['message'], ret[0])
-
- def test_no_message(self):
- # if message is not present, then no argument should be passed for it
- cfg = {'power_state': {'mode': 'poweroff'}}
- (cmd, _timeout, _condition) = psc.load_power_state(cfg)
- self.assertNotIn("", cmd)
- check_lps_ret(psc.load_power_state(cfg))
-        self.assertEqual(len(cmd), 3)
-
- def test_condition_null_raises(self):
- cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
-
- def test_condition_default_is_true(self):
- cfg = {'power_state': {'mode': 'poweroff'}}
- _cmd, _timeout, cond = psc.load_power_state(cfg)
- self.assertEqual(cond, True)
-
-
-class TestCheckCondition(t_help.TestCase):
- def cmd_with_exit(self, rc):
-        return [sys.executable, '-c', 'import sys; sys.exit(%s)' % rc]
-
- def test_true_is_true(self):
- self.assertEqual(psc.check_condition(True), True)
-
- def test_false_is_false(self):
- self.assertEqual(psc.check_condition(False), False)
-
- def test_cmd_exit_zero_true(self):
- self.assertEqual(psc.check_condition(self.cmd_with_exit(0)), True)
-
- def test_cmd_exit_one_false(self):
- self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False)
-
- def test_cmd_exit_nonzero_warns(self):
- mocklog = mock.Mock()
- self.assertEqual(
- psc.check_condition(self.cmd_with_exit(2), mocklog), False)
- self.assertEqual(mocklog.warn.call_count, 1)
-
-
-def check_lps_ret(psc_return, mode=None):
- if len(psc_return) != 3:
- raise TypeError("length returned = %d" % len(psc_return))
-
- errs = []
- cmd = psc_return[0]
- timeout = psc_return[1]
- condition = psc_return[2]
-
- if 'shutdown' not in psc_return[0][0]:
- errs.append("string 'shutdown' not in cmd")
-
- if condition is None:
- errs.append("condition was not returned")
-
- if mode is not None:
- opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
- if opt not in psc_return[0]:
- errs.append("opt '%s' not in cmd: %s" % (opt, cmd))
-
- if len(cmd) != 3 and len(cmd) != 4:
- errs.append("Invalid command length: %s" % len(cmd))
-
- try:
- float(timeout)
- except Exception:
- errs.append("timeout failed convert to float")
-
- if len(errs):
- lines = ["Errors in result: %s" % str(psc_return)] + errs
- raise Exception('\n'.join(lines))
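
Pulling the above together: load_power_state returns a (cmd, timeout, condition) triple, with cmd None when no power_state is configured. A worked example consistent with check_lps_ret; the exact argv order is an assumption:

    from cloudinit.config import cc_power_state_change as psc

    cfg = {'power_state': {'mode': 'reboot', 'delay': '+5',
                           'message': 'rebooting for maintenance'}}
    cmd, timeout, condition = psc.load_power_state(cfg)
    # cmd resembles ['shutdown', '-r', '+5', 'rebooting for maintenance'];
    # bad modes or delays raise TypeError; condition defaults to True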
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
deleted file mode 100644
index 38636063..00000000
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ /dev/null
@@ -1,174 +0,0 @@
-import os
-import shutil
-import tempfile
-
-from cloudinit.config.cc_rsyslog import (
- apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config,
- parse_remotes_line, remotes_to_rsyslog_cfg)
-from cloudinit import util
-
-from .. import helpers as t_help
-
-
-class TestLoadConfig(t_help.TestCase):
- def setUp(self):
- super(TestLoadConfig, self).setUp()
- self.basecfg = {
- 'config_filename': DEF_FILENAME,
- 'config_dir': DEF_DIR,
- 'service_reload_command': DEF_RELOAD,
- 'configs': [],
- 'remotes': {},
- }
-
- def test_legacy_full(self):
- found = load_config({
- 'rsyslog': ['*.* @192.168.1.1'],
- 'rsyslog_dir': "mydir",
- 'rsyslog_filename': "myfilename"})
- self.basecfg.update({
- 'configs': ['*.* @192.168.1.1'],
- 'config_dir': "mydir",
- 'config_filename': 'myfilename',
- 'service_reload_command': 'auto'}
- )
-
- self.assertEqual(found, self.basecfg)
-
- def test_legacy_defaults(self):
- found = load_config({
- 'rsyslog': ['*.* @192.168.1.1']})
- self.basecfg.update({
- 'configs': ['*.* @192.168.1.1']})
- self.assertEqual(found, self.basecfg)
-
- def test_new_defaults(self):
- self.assertEqual(load_config({}), self.basecfg)
-
- def test_new_configs(self):
- cfgs = ['*.* myhost', '*.* my2host']
- self.basecfg.update({'configs': cfgs})
- self.assertEqual(
- load_config({'rsyslog': {'configs': cfgs}}),
- self.basecfg)
-
-
-class TestApplyChanges(t_help.TestCase):
- def setUp(self):
-        super(TestApplyChanges, self).setUp()
-        self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_simple(self):
- cfgline = "*.* foohost"
- changed = apply_rsyslog_changes(
- configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp)
-
- fname = os.path.join(self.tmp, "foo.cfg")
- self.assertEqual([fname], changed)
- self.assertEqual(
- util.load_file(fname), cfgline + "\n")
-
- def test_multiple_files(self):
- configs = [
- '*.* foohost',
- {'content': 'abc', 'filename': 'my.cfg'},
- {'content': 'filefoo-content',
- 'filename': os.path.join(self.tmp, 'mydir/mycfg')},
- ]
-
- changed = apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
-
- expected = [
- (os.path.join(self.tmp, "default.cfg"),
- "*.* foohost\n"),
- (os.path.join(self.tmp, "my.cfg"), "abc\n"),
- (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
- ]
- self.assertEqual([f[0] for f in expected], changed)
- actual = []
- for fname, _content in expected:
- util.load_file(fname)
- actual.append((fname, util.load_file(fname),))
- self.assertEqual(expected, actual)
-
- def test_repeat_def(self):
- configs = ['*.* foohost', "*.warn otherhost"]
-
- changed = apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
-
- fname = os.path.join(self.tmp, "default.cfg")
- self.assertEqual([fname], changed)
-
-        expected_content = '\n'.join(configs) + '\n'
- found_content = util.load_file(fname)
- self.assertEqual(expected_content, found_content)
-
- def test_multiline_content(self):
- configs = ['line1', 'line2\nline3\n']
-
- apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
-
- fname = os.path.join(self.tmp, "default.cfg")
-        expected_content = '\n'.join(configs)
- found_content = util.load_file(fname)
- self.assertEqual(expected_content, found_content)
-
-
-class TestParseRemotesLine(t_help.TestCase):
- def test_valid_port(self):
- r = parse_remotes_line("foo:9")
- self.assertEqual(9, r.port)
-
- def test_invalid_port(self):
- with self.assertRaises(ValueError):
- parse_remotes_line("*.* foo:abc")
-
- def test_valid_ipv6(self):
- r = parse_remotes_line("*.* [::1]")
- self.assertEqual("*.* @[::1]", str(r))
-
- def test_valid_ipv6_with_port(self):
- r = parse_remotes_line("*.* [::1]:100")
- self.assertEqual(r.port, 100)
- self.assertEqual(r.addr, "::1")
- self.assertEqual("*.* @[::1]:100", str(r))
-
- def test_invalid_multiple_colon(self):
- with self.assertRaises(ValueError):
- parse_remotes_line("*.* ::1:100")
-
- def test_name_in_string(self):
- r = parse_remotes_line("syslog.host", name="foobar")
- self.assertEqual("*.* @syslog.host # foobar", str(r))
-
-
-class TestRemotesToSyslog(t_help.TestCase):
- def test_simple(self):
-        # str rendered line must appear in remotes_to_rsyslog_cfg return
- mycfg = "*.* myhost"
- myline = str(parse_remotes_line(mycfg, name="myname"))
- r = remotes_to_rsyslog_cfg({'myname': mycfg})
- lines = r.splitlines()
- self.assertEqual(1, len(lines))
- self.assertTrue(myline in r.splitlines())
-
- def test_header_footer(self):
- header = "#foo head"
- footer = "#foo foot"
- r = remotes_to_rsyslog_cfg(
- {'myname': "*.* myhost"}, header=header, footer=footer)
- lines = r.splitlines()
-        self.assertEqual(header, lines[0])
-        self.assertEqual(footer, lines[-1])
-
- def test_with_empty_or_null(self):
- mycfg = "*.* myhost"
- myline = str(parse_remotes_line(mycfg, name="myname"))
- r = remotes_to_rsyslog_cfg(
- {'myname': mycfg, 'removed': None, 'removed2': ""})
- lines = r.splitlines()
- self.assertEqual(1, len(lines))
- self.assertTrue(myline in r.splitlines())
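
For reference, the remotes rendering these tests assert: each entry becomes a '*.* @host[:port] # name' line, IPv6 addresses keep their brackets, and None or empty values drop the entry:

    from cloudinit.config.cc_rsyslog import remotes_to_rsyslog_cfg

    out = remotes_to_rsyslog_cfg(
        {'myname': '*.* [::1]:100', 'removed': None},
        header='#top', footer='#bottom')
    # -> '#top\n*.* @[::1]:100 # myname\n#bottom' (modulo exact whitespace)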
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
deleted file mode 100644
index a0390da9..00000000
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Based on test_handler_set_hostname.py
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.config import cc_seed_random
-
-import gzip
-import tempfile
-
-from six import BytesIO
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
-from .. import helpers as t_help
-
-import logging
-
-LOG = logging.getLogger(__name__)
-
-
-class TestRandomSeed(t_help.TestCase):
- def setUp(self):
- super(TestRandomSeed, self).setUp()
- self._seed_file = tempfile.mktemp()
- self.unapply = []
-
- # by default 'which' has nothing in its path
- self.apply_patches([(util, 'which', self._which)])
- self.apply_patches([(util, 'subp', self._subp)])
- self.subp_called = []
- self.whichdata = {}
-
- def tearDown(self):
- apply_patches([i for i in reversed(self.unapply)])
- util.del_file(self._seed_file)
-
- def apply_patches(self, patches):
- ret = apply_patches(patches)
- self.unapply += ret
-
- def _which(self, program):
- return self.whichdata.get(program)
-
- def _subp(self, *args, **kwargs):
- # supports subp calling with cmd as args or kwargs
- if 'args' not in kwargs:
- kwargs['args'] = args[0]
- self.subp_called.append(kwargs)
- return
-
- def _compress(self, text):
- contents = BytesIO()
- gz_fh = gzip.GzipFile(mode='wb', fileobj=contents)
- gz_fh.write(text)
- gz_fh.close()
- return contents.getvalue()
-
- def _get_cloud(self, distro, metadata=None):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- ubuntu_distro = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, ubuntu_distro, paths)
- if metadata:
- ds.metadata = metadata
- return cloud.Cloud(ds, paths, {}, ubuntu_distro, None)
-
- def test_append_random(self):
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': 'tiny-tim-was-here',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("tiny-tim-was-here", contents)
-
- def test_append_random_unknown_encoding(self):
- data = self._compress(b"tiny-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'special_encoding',
- }
- }
- self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg,
- self._get_cloud('ubuntu'), LOG, [])
-
- def test_append_random_gzip(self):
- data = self._compress(b"tiny-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'gzip',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("tiny-toe", contents)
-
- def test_append_random_gz(self):
- data = self._compress(b"big-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'gz',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("big-toe", contents)
-
- def test_append_random_base64(self):
- data = util.b64e('bubbles')
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'base64',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("bubbles", contents)
-
- def test_append_random_b64(self):
- data = util.b64e('kit-kat')
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'b64',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("kit-kat", contents)
-
- def test_append_random_metadata(self):
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': 'tiny-tim-was-here',
- }
- }
- c = self._get_cloud('ubuntu', {'random_seed': '-so-was-josh'})
- cc_seed_random.handle('test', cfg, c, LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual('tiny-tim-was-here-so-was-josh', contents)
-
- def test_seed_command_provided_and_available(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'pollinate': '/usr/bin/pollinate'}
- cfg = {'random_seed': {'command': ['pollinate', '-q']}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- subp_args = [f['args'] for f in self.subp_called]
- self.assertIn(['pollinate', '-q'], subp_args)
-
- def test_seed_command_not_provided(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {}
- cc_seed_random.handle('test', {}, c, LOG, [])
-
- # subp should not have been called as which would say not available
- self.assertFalse(self.subp_called)
-
- def test_unavailable_seed_command_and_required_raises_error(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {}
- cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'],
- 'command_required': True}}
- self.assertRaises(ValueError, cc_seed_random.handle,
- 'test', cfg, c, LOG, [])
-
- def test_seed_command_and_required(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'foo': 'foo'}
- cfg = {'random_seed': {'command_required': True, 'command': ['foo']}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- self.assertIn(['foo'], [f['args'] for f in self.subp_called])
-
- def test_file_in_environment_for_command(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'foo': 'foo'}
- cfg = {'random_seed': {'command_required': True, 'command': ['foo'],
- 'file': self._seed_file}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
-        # this just insists that the first time subp was called,
-        # RANDOM_SEED_FILE was set correctly in its environment
- subp_env = [f['env'] for f in self.subp_called]
- self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file)
-
-
-def apply_patches(patches):
- ret = []
- for (ref, name, replace) in patches:
- if replace is None:
- continue
- orig = getattr(ref, name)
- setattr(ref, name, replace)
- ret.append((ref, name, orig))
- return ret
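
The full knob surface covered above, annotated; any datasource-provided 'random_seed' metadata is appended after the configured data, and the command is given RANDOM_SEED_FILE in its environment:

    cfg = {
        'random_seed': {
            'file': '/dev/urandom',          # destination for the decoded data
            'data': 'c2VlZA==',              # placeholder payload
            'encoding': 'b64',               # also: 'base64', 'gz', 'gzip'; raw otherwise
            'command': ['pollinate', '-q'],  # skipped when not found on PATH...
            'command_required': False,       # ...unless required, then ValueError
        },
    }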
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
deleted file mode 100644
index 7effa124..00000000
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from cloudinit.config import cc_set_hostname
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from .. import helpers as t_help
-
-from configobj import ConfigObj
-import logging
-import shutil
-from six import BytesIO
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-class TestHostname(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestHostname, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def test_write_hostname_rhel(self):
- cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
- }
- distro = self._fetch_distro('rhel')
- paths = helpers.Paths({})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file("/etc/sysconfig/network", decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
- dict(n_cfg))
-
- def test_write_hostname_debian(self):
- cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
- }
- distro = self._fetch_distro('debian')
- paths = helpers.Paths({})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('blah', contents.strip())
-
- def test_write_hostname_sles(self):
- cfg = {
- 'hostname': 'blah.blah.blah.suse.com',
- }
- distro = self._fetch_distro('sles')
- paths = helpers.Paths({})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, [])
- contents = util.load_file("/etc/HOSTNAME")
- self.assertEqual('blah', contents.strip())
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
deleted file mode 100644
index 57dce1bc..00000000
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ /dev/null
@@ -1,306 +0,0 @@
-from cloudinit.config.cc_snappy import (
- makeop, get_package_ops, render_snap_op)
-from cloudinit import util
-
-from .. import helpers as t_help
-
-import os
-import shutil
-import tempfile
-import yaml
-
-ALLOWED = (dict, list, int, str)
-
-
-class TestInstallPackages(t_help.TestCase):
- def setUp(self):
- super(TestInstallPackages, self).setUp()
- self.unapply = []
-
- # intercept util.subp so these tests never run real commands
- self.apply_patches([(util, 'subp', self._subp)])
- self.subp_called = []
- self.snapcmds = []
- self.tmp = tempfile.mkdtemp(prefix="TestInstallPackages")
-
- def tearDown(self):
- apply_patches([i for i in reversed(self.unapply)])
- shutil.rmtree(self.tmp)
-
- def apply_patches(self, patches):
- ret = apply_patches(patches)
- self.unapply += ret
-
- def populate_tmp(self, files):
- return t_help.populate_dir(self.tmp, files)
-
- def _subp(self, *args, **kwargs):
- # supports subp calling with cmd as args or kwargs
- if 'args' not in kwargs:
- kwargs['args'] = args[0]
- self.subp_called.append(kwargs)
- args = kwargs['args']
- # here we basically parse the snappy command invoked
- # and append to snapcmds a list of (mode, pkg, config)
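- # e.g. args ['snappy', 'config', 'pkg1', '-'] with data 'k: v'
- # is recorded as ['config', 'pkg1', 'k: v']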
- if args[0:2] == ['snappy', 'config']:
- if args[3] == "-":
- config = kwargs.get('data', '')
- else:
- with open(args[3], "rb") as fp:
- config = yaml.safe_load(fp.read())
- self.snapcmds.append(['config', args[2], config])
- elif args[0:2] == ['snappy', 'install']:
- config = None
- pkg = None
- for arg in args[2:]:
- if arg.startswith("-"):
- continue
- if not pkg:
- pkg = arg
- elif not config:
- cfgfile = arg
- if cfgfile == "-":
- config = kwargs.get('data', '')
- elif cfgfile:
- with open(cfgfile, "rb") as fp:
- config = yaml.safe_load(fp.read())
- self.snapcmds.append(['install', pkg, config])
-
- def test_package_ops_1(self):
- ret = get_package_ops(
- packages=['pkg1', 'pkg2', 'pkg3'],
- configs={'pkg2': b'mycfg2'}, installed=[])
- self.assertEqual(
- ret, [makeop('install', 'pkg1', None, None),
- makeop('install', 'pkg2', b'mycfg2', None),
- makeop('install', 'pkg3', None, None)])
-
- def test_package_ops_config_only(self):
- ret = get_package_ops(
- packages=None,
- configs={'pkg2': b'mycfg2'}, installed=['pkg1', 'pkg2'])
- self.assertEqual(
- ret, [makeop('config', 'pkg2', b'mycfg2')])
-
- def test_package_ops_install_and_config(self):
- ret = get_package_ops(
- packages=['pkg3', 'pkg2'],
- configs={'pkg2': b'mycfg2', 'xinstalled': b'xcfg'},
- installed=['xinstalled'])
- self.assertEqual(
- ret, [makeop('install', 'pkg3'),
- makeop('install', 'pkg2', b'mycfg2'),
- makeop('config', 'xinstalled', b'xcfg')])
-
- def test_package_ops_install_long_config_short(self):
- # a package can be installed by its full name, with config under the short name
- cfg = {'k1': 'k2'}
- ret = get_package_ops(
- packages=['config-example.canonical'],
- configs={'config-example': cfg}, installed=[])
- self.assertEqual(
- ret, [makeop('install', 'config-example.canonical', cfg)])
-
- def test_package_ops_with_file(self):
- self.populate_tmp(
- {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg",
- "snapf2.snap": b"foo2", "foo.bar": "ignored"})
- ret = get_package_ops(
- packages=['pkg1'], configs={}, installed=[], fspath=self.tmp)
- self.assertEqual(
- ret,
- [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
- cfgfile="snapf1.config"),
- makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
- makeop('install', 'pkg1')])
-
- def test_package_ops_common_filename(self):
- # fish package name from filename
- # package names likely look like: pkgname.namespace_version_arch.snap
-
- # find filenames
- self.populate_tmp(
- {"pkg-ws.smoser_0.3.4_all.snap": "pkg-ws-snapdata",
- "pkg-ws.config": "pkg-ws-config",
- "pkg1.smoser_1.2.3_all.snap": "pkg1.snapdata",
- "pkg1.smoser.config": "pkg1.smoser.config-data",
- "pkg1.config": "pkg1.config-data",
- "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata",
- "pkg2.smoser_0.0_amd64.config": "pkg2.config"})
-
- ret = get_package_ops(
- packages=[], configs={}, installed=[], fspath=self.tmp)
- self.assertEqual(
- ret,
- [makeop_tmpd(self.tmp, 'install', 'pkg-ws.smoser',
- path="pkg-ws.smoser_0.3.4_all.snap",
- cfgfile="pkg-ws.config"),
- makeop_tmpd(self.tmp, 'install', 'pkg1.smoser',
- path="pkg1.smoser_1.2.3_all.snap",
- cfgfile="pkg1.smoser.config"),
- makeop_tmpd(self.tmp, 'install', 'pkg2.smoser',
- path="pkg2.smoser_0.0_amd64.snap",
- cfgfile="pkg2.smoser_0.0_amd64.config"),
- ])
-
- def test_package_ops_config_overrides_file(self):
- # config data overrides local file .config
- self.populate_tmp(
- {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg"})
- ret = get_package_ops(
- packages=[], configs={'snapf1': 'snapf1cfg-config'},
- installed=[], fspath=self.tmp)
- self.assertEqual(
- ret, [makeop_tmpd(self.tmp, 'install', 'snapf1',
- path="snapf1.snap", config="snapf1cfg-config")])
-
- def test_package_ops_namespacing(self):
- cfgs = {
- 'config-example': {'k1': 'v1'},
- 'pkg1': {'p1': 'p2'},
- 'ubuntu-core': {'c1': 'c2'},
- 'notinstalled.smoser': {'s1': 's2'},
- }
- ret = get_package_ops(
- packages=['config-example.canonical'], configs=cfgs,
- installed=['config-example.smoser', 'pkg1.canonical',
- 'ubuntu-core'])
-
- expected_configs = [
- makeop('config', 'pkg1', config=cfgs['pkg1']),
- makeop('config', 'ubuntu-core', config=cfgs['ubuntu-core'])]
- expected_installs = [
- makeop('install', 'config-example.canonical',
- config=cfgs['config-example'])]
-
- installs = [i for i in ret if i['op'] == 'install']
- configs = [c for c in ret if c['op'] == 'config']
-
- self.assertEqual(installs, expected_installs)
- # configs are not ordered
- self.assertEqual(len(configs), len(expected_configs))
- self.assertTrue(all(found in expected_configs for found in configs))
-
- def test_render_op_localsnap(self):
- self.populate_tmp({"snapf1.snap": b"foo1"})
- op = makeop_tmpd(self.tmp, 'install', 'snapf1',
- path='snapf1.snap')
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', op['path'], None]])
-
- def test_render_op_localsnap_localconfig(self):
- self.populate_tmp(
- {"snapf1.snap": b"foo1", 'snapf1.config': b'snapf1cfg'})
- op = makeop_tmpd(self.tmp, 'install', 'snapf1',
- path='snapf1.snap', cfgfile='snapf1.config')
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', op['path'], 'snapf1cfg']])
-
- def test_render_op_snap(self):
- op = makeop('install', 'snapf1')
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', 'snapf1', None]])
-
- def test_render_op_snap_config(self):
- mycfg = {'key1': 'value1'}
- name = "snapf1"
- op = makeop('install', name, config=mycfg)
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', name, {'config': {name: mycfg}}]])
-
- def test_render_op_config_bytes(self):
- name = "snapf1"
- mycfg = b'myconfig'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
-
- def test_render_op_config_string(self):
- name = 'snapf1'
- mycfg = 'myconfig: foo\nhisconfig: bar\n'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
-
- def test_render_op_config_dict(self):
- # config entry for package can be a dict, not a string blob
- mycfg = {'foo': 'bar'}
- name = 'snapf1'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- # snapcmds is a list of 3-entry lists. data_found will be the
- # blob of config data passed to the invoked snappy command
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_op_config_list(self):
- # config entry for package can be a list, not a string blob
- mycfg = ['foo', 'bar', 'wark', {'f1': 'b1'}]
- name = "snapf1"
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_op_config_int(self):
- # config entry for package can be an int, not a string blob
- mycfg = 1
- name = 'snapf1'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_long_configs_short(self):
- # installing a namespaced package should yield an un-namespaced config
- mycfg = {'k1': 'k2'}
- name = 'snapf1'
- op = makeop('install', name + ".smoser", config=mycfg)
- render_snap_op(**op)
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_does_not_pad_cfgfile(self):
- # package_ops with cfgfile should not modify --file= content.
- mydata = "foo1: bar1\nk: [l1, l2, l3]\n"
- self.populate_tmp(
- {"snapf1.snap": b"foo1", "snapf1.config": mydata.encode()})
- ret = get_package_ops(
- packages=[], configs={}, installed=[], fspath=self.tmp)
- self.assertEqual(
- ret,
- [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
- cfgfile="snapf1.config")])
-
- # now the op was ok, but test that render didn't mess it up.
- render_snap_op(**ret[0])
- data_found = self.snapcmds[0][2]
- # the data found gets loaded in the snapcmd interpretation
- # so this comparison is a bit lossy, but input to snappy config
- # is expected to be yaml loadable, so it should be OK.
- self.assertEqual(yaml.safe_load(mydata), data_found)
-
-
-def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
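- # like makeop, but with 'path' and 'cfgfile' made absolute under tmpd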
- if cfgfile:
- cfgfile = os.path.sep.join([tmpd, cfgfile])
- if path:
- path = os.path.sep.join([tmpd, path])
- return makeop(op=op, name=name, config=config, path=path, cfgfile=cfgfile)
-
-
-def apply_patches(patches):
- ret = []
- for (ref, name, replace) in patches:
- if replace is None:
- continue
- orig = getattr(ref, name)
- setattr(ref, name, replace)
- ret.append((ref, name, orig))
- return ret
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py
deleted file mode 100644
index b7e6b03d..00000000
--- a/tests/unittests/test_handler/test_handler_timezone.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Based on test_handler_set_hostname.py
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.config import cc_timezone
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from .. import helpers as t_help
-
-from configobj import ConfigObj
-import logging
-import shutil
-from six import BytesIO
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-class TestTimezone(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestTimezone, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- self.patchOS(self.new_root)
-
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- def test_set_timezone_sles(self):
-
- cfg = {
- 'timezone': 'Tatooine/Bestine',
- }
- cc = self._get_cloud('sles')
-
- # Create a dummy timezone file
- dummy_contents = '0123456789abcdefgh'
- util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'],
- dummy_contents)
-
- cc_timezone.handle('cc_timezone', cfg, cc, LOG, [])
-
- contents = util.load_file('/etc/sysconfig/clock', decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg))
-
- contents = util.load_file('/etc/localtime')
- self.assertEqual(dummy_contents, contents.strip())
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py
deleted file mode 100644
index 466e45f8..00000000
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from cloudinit.config.cc_write_files import write_files
-from cloudinit import log as logging
-from cloudinit import util
-
-from ..helpers import FilesystemMockingTestCase
-
-import base64
-import gzip
-import shutil
-import six
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-YAML_TEXT = """
-write_files:
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /usr/bin/hello
- permissions: '0755'
- - content: !!binary |
- Zm9vYmFyCg==
- path: /wark
- permissions: '0755'
- - content: |
- hi mom line 1
- hi mom line 2
- path: /tmp/message
-"""
-
-YAML_CONTENT_EXPECTED = {
- '/usr/bin/hello': "#!/bin/sh\necho hello world\n",
- '/wark': "foobar\n",
- '/tmp/message': "hi mom line 1\nhi mom line 2\n",
-}
-
-
-class TestWriteFiles(FilesystemMockingTestCase):
- def setUp(self):
- super(TestWriteFiles, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_simple(self):
- self.patchUtils(self.tmp)
- expected = "hello world\n"
- filename = "/tmp/my.file"
- write_files(
- "test_simple", [{"content": expected, "path": filename}], LOG)
- self.assertEqual(util.load_file(filename), expected)
-
- def test_yaml_binary(self):
- self.patchUtils(self.tmp)
- data = util.load_yaml(YAML_TEXT)
- write_files("testname", data['write_files'], LOG)
- for path, content in YAML_CONTENT_EXPECTED.items():
- self.assertEqual(util.load_file(path), content)
-
- def test_all_decodings(self):
- self.patchUtils(self.tmp)
-
- # build a 'files' array that has a dictionary of encodings
- # for 'gz', 'gzip', 'gz+base64' ...
- utf8_valid = b"foobzr"
- utf8_invalid = b'ab\xaadef'
- files = []
- expected = []
-
- gz_aliases = ('gz', 'gzip')
- gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64')
- b64_aliases = ('base64', 'b64')
-
- datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid))
- for name, data in datum:
- gz = (_gzip_bytes(data), gz_aliases)
- gz_b64 = (base64.b64encode(_gzip_bytes(data)), gz_b64_aliases)
- b64 = (base64.b64encode(data), b64_aliases)
- for content, aliases in (gz, gz_b64, b64):
- for enc in aliases:
- cur = {'content': content,
- 'path': '/tmp/file-%s-%s' % (name, enc),
- 'encoding': enc}
- files.append(cur)
- expected.append((cur['path'], data))
-
- write_files("test_decoding", files, LOG)
-
- for path, content in expected:
- self.assertEqual(util.load_file(path, decode=False), content)
-
- # make sure we actually wrote *some* files.
- flen_expected = (
- len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum))
- self.assertEqual(len(expected), flen_expected)
-
-
-def _gzip_bytes(data):
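- # gzip-compress 'data' in memory and return the compressed bytes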
- buf = six.BytesIO()
- fp = None
- try:
- fp = gzip.GzipFile(fileobj=buf, mode="wb")
- fp.write(data)
- fp.close()
- return buf.getvalue()
- finally:
- if fp:
- fp.close()
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
deleted file mode 100644
index 28b060f8..00000000
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from cloudinit.config import cc_yum_add_repo
-from cloudinit import util
-
-from .. import helpers
-
-import configobj
-import logging
-import shutil
-from six import BytesIO
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-class TestConfig(helpers.FilesystemMockingTestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_bad_config(self):
- cfg = {
- 'yum_repos': {
- 'epel-testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- # Missing this should cause the repo not to be written
- # 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'enabled': False,
- 'gpgcheck': True,
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'failovermethod': 'priority',
- },
- },
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- self.assertRaises(IOError, util.load_file,
- "/etc/yum.repos.d/epel_testing.repo")
-
- def test_write_config(self):
- cfg = {
- 'yum_repos': {
- 'epel-testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'enabled': False,
- 'gpgcheck': True,
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'failovermethod': 'priority',
- },
- },
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/epel_testing.repo",
- decode=False)
- contents = configobj.ConfigObj(BytesIO(contents))
- expected = {
- 'epel_testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- 'failovermethod': 'priority',
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'enabled': '0',
- 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'gpgcheck': '1',
- }
- }
- self.assertEqual(expected, dict(contents))
diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py
deleted file mode 100644
index 943a5723..00000000
--- a/tests/unittests/test_helpers.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Tests of the built-in user data handlers."""
-
-import os
-
-from . import helpers as test_helpers
-
-from cloudinit import sources
-
-
-class MyDataSource(sources.DataSource):
- _instance_id = None
-
- def get_instance_id(self):
- return self._instance_id
-
-
-class TestPaths(test_helpers.ResourceUsingTestCase):
- def test_get_ipath_and_instance_id_with_slashes(self):
- myds = MyDataSource(sys_cfg={}, distro=None, paths={})
- myds._instance_id = "/foo/bar"
- safe_iid = "_foo_bar"
- mypaths = self.getCloudPaths(myds)
-
- self.assertEqual(
- os.path.join(mypaths.cloud_dir, 'instances', safe_iid),
- mypaths.get_ipath())
-
- def test_get_ipath_and_empty_instance_id_returns_none(self):
- myds = MyDataSource(sys_cfg={}, distro=None, paths={})
- myds._instance_id = None
- mypaths = self.getCloudPaths(myds)
-
- self.assertEqual(None, mypaths.get_ipath())
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
deleted file mode 100644
index a33ec184..00000000
--- a/tests/unittests/test_merging.py
+++ /dev/null
@@ -1,257 +0,0 @@
-from . import helpers
-
-from cloudinit.handlers import cloud_config
-from cloudinit.handlers import (CONTENT_START, CONTENT_END)
-
-from cloudinit import helpers as c_helpers
-from cloudinit import util
-
-import collections
-import glob
-import os
-import random
-import re
-import six
-import string
-
-SOURCE_PAT = "source*.*yaml"
-EXPECTED_PAT = "expected%s.yaml"
-TYPES = [dict, str, list, tuple, None]
-TYPES.extend(six.integer_types)
-
-
-def _old_mergedict(src, cand):
- """
- Merge values from C{cand} into C{src}.
- If C{src} already has a key, C{cand} will not override it.
- Nested dictionaries are merged recursively.
- """
- if isinstance(src, dict) and isinstance(cand, dict):
- for (k, v) in cand.items():
- if k not in src:
- src[k] = v
- else:
- src[k] = _old_mergedict(src[k], v)
- return src
-
-
-def _old_mergemanydict(*args):
- out = {}
- for a in args:
- out = _old_mergedict(out, a)
- return out
-
-
-def _random_str(rand):
- base = ''
- for _i in range(rand.randint(1, 2 ** 8)):
- base += rand.choice(string.ascii_letters + string.digits)
- return base
-
-
-class _NoMoreException(Exception):
- pass
-
-
-def _make_dict(current_depth, max_depth, rand):
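- # recursively build a random structure of dicts, lists, tuples, ints
- # and strings; _NoMoreException prunes branches at max_depth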
- if current_depth >= max_depth:
- raise _NoMoreException()
- if current_depth == 0:
- t = dict
- else:
- t = rand.choice(TYPES)
- base = None
- if t in [None]:
- return base
- if t in [dict, list, tuple]:
- if t in [dict]:
- amount = rand.randint(0, 5)
- keys = [_random_str(rand) for _i in range(0, amount)]
- base = {}
- for k in keys:
- try:
- base[k] = _make_dict(current_depth + 1, max_depth, rand)
- except _NoMoreException:
- pass
- elif t in [list, tuple]:
- base = []
- amount = rand.randint(0, 5)
- for _i in range(0, amount):
- try:
- base.append(_make_dict(current_depth + 1, max_depth, rand))
- except _NoMoreException:
- pass
- if t in [tuple]:
- base = tuple(base)
- elif t in six.integer_types:
- base = rand.randint(0, 2 ** 8)
- elif t in [str]:
- base = _random_str(rand)
- return base
-
-
-def make_dict(max_depth, seed=None):
- max_depth = max(1, max_depth)
- rand = random.Random(seed)
- return _make_dict(0, max_depth, rand)
-
-
-class TestSimpleRun(helpers.ResourceUsingTestCase):
- def _load_merge_files(self):
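- # pair each merge_sources/sourceN-M.yaml with its expectedN.yaml and
- # return [(source_file_contents, [expected, expected_fn]), ...]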
- merge_root = self.resourceLocation('merge_sources')
- tests = []
- source_ids = collections.defaultdict(list)
- expected_files = {}
- for fn in glob.glob(os.path.join(merge_root, SOURCE_PAT)):
- base_fn = os.path.basename(fn)
- file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn)
- if not file_id:
- raise IOError("File %s does not have a numeric identifier"
- % (fn))
- file_id = int(file_id.group(1))
- source_ids[file_id].append(fn)
- expected_fn = os.path.join(merge_root, EXPECTED_PAT % (file_id))
- if not os.path.isfile(expected_fn):
- raise IOError("No expected file found at %s" % (expected_fn))
- expected_files[file_id] = expected_fn
- for i in sorted(source_ids.keys()):
- source_file_contents = []
- for fn in sorted(source_ids[i]):
- source_file_contents.append([fn, util.load_file(fn)])
- expected = util.load_yaml(util.load_file(expected_files[i]))
- entry = [source_file_contents, [expected, expected_files[i]]]
- tests.append(entry)
- return tests
-
- def test_seed_runs(self):
- test_dicts = []
- for i in range(1, 10):
- base_dicts = []
- for j in range(1, 10):
- base_dicts.append(make_dict(5, i * j))
- test_dicts.append(base_dicts)
- for test in test_dicts:
- c = _old_mergemanydict(*test)
- d = util.mergemanydict(test)
- self.assertEqual(c, d)
-
- def test_merge_cc_samples(self):
- tests = self._load_merge_files()
- paths = c_helpers.Paths({})
- cc_handler = cloud_config.CloudConfigPartHandler(paths)
- cc_handler.cloud_fn = None
- for (payloads, (expected_merge, expected_fn)) in tests:
- cc_handler.handle_part(None, CONTENT_START, None,
- None, None, None)
- merging_fns = []
- for (fn, contents) in payloads:
- cc_handler.handle_part(None, None, "%s.yaml" % (fn),
- contents, None, {})
- merging_fns.append(fn)
- merged_buf = cc_handler.cloud_buf
- cc_handler.handle_part(None, CONTENT_END, None,
- None, None, None)
- fail_msg = "Equality failure on checking %s with %s: %s != %s"
- fail_msg = fail_msg % (expected_fn,
- ",".join(merging_fns), merged_buf,
- expected_merge)
- self.assertEqual(expected_merge, merged_buf, msg=fail_msg)
-
- def test_compat_merges_dict(self):
- a = {
- '1': '2',
- 'b': 'c',
- }
- b = {
- 'b': 'e',
- }
- c = _old_mergedict(a, b)
- d = util.mergemanydict([a, b])
- self.assertEqual(c, d)
-
- def test_compat_merges_dict2(self):
- a = {
- 'Blah': 1,
- 'Blah2': 2,
- 'Blah3': 3,
- }
- b = {
- 'Blah': 1,
- 'Blah2': 2,
- 'Blah3': [1],
- }
- c = _old_mergedict(a, b)
- d = util.mergemanydict([a, b])
- self.assertEqual(c, d)
-
- def test_compat_merges_list(self):
- a = {'b': [1, 2, 3]}
- b = {'b': [4, 5]}
- c = {'b': [6, 7]}
- e = _old_mergemanydict(a, b, c)
- f = util.mergemanydict([a, b, c])
- self.assertEqual(e, f)
-
- def test_compat_merges_str(self):
- a = {'b': "hi"}
- b = {'b': "howdy"}
- c = {'b': "hallo"}
- e = _old_mergemanydict(a, b, c)
- f = util.mergemanydict([a, b, c])
- self.assertEqual(e, f)
-
- def test_compat_merge_sub_dict(self):
- a = {
- '1': '2',
- 'b': {
- 'f': 'g',
- 'e': 'c',
- 'h': 'd',
- 'hh': {
- '1': 2,
- },
- }
- }
- b = {
- 'b': {
- 'e': 'c',
- 'hh': {
- '3': 4,
- }
- }
- }
- c = _old_mergedict(a, b)
- d = util.mergemanydict([a, b])
- self.assertEqual(c, d)
-
- def test_compat_merge_sub_dict2(self):
- a = {
- '1': '2',
- 'b': {
- 'f': 'g',
- }
- }
- b = {
- 'b': {
- 'e': 'c',
- }
- }
- c = _old_mergedict(a, b)
- d = util.mergemanydict([a, b])
- self.assertEqual(c, d)
-
- def test_compat_merge_sub_list(self):
- a = {
- '1': '2',
- 'b': {
- 'f': ['1'],
- }
- }
- b = {
- 'b': {
- 'f': [],
- }
- }
- c = _old_mergedict(a, b)
- d = util.mergemanydict([a, b])
- self.assertEqual(c, d)
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
deleted file mode 100644
index 41b9a6d0..00000000
--- a/tests/unittests/test_net.py
+++ /dev/null
@@ -1,689 +0,0 @@
-from cloudinit import net
-from cloudinit.net import cmdline
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit.net import sysconfig
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-from .helpers import dir2dict
-from .helpers import mock
-from .helpers import TestCase
-
-import base64
-import copy
-import gzip
-import io
-import json
-import os
-import shutil
-import tempfile
-import textwrap
-import yaml
-
-DHCP_CONTENT_1 = """
-DEVICE='eth0'
-PROTO='dhcp'
-IPV4ADDR='192.168.122.89'
-IPV4BROADCAST='192.168.122.255'
-IPV4NETMASK='255.255.255.0'
-IPV4GATEWAY='192.168.122.1'
-IPV4DNS0='192.168.122.1'
-IPV4DNS1='0.0.0.0'
-HOSTNAME='foohost'
-DNSDOMAIN=''
-NISDOMAIN=''
-ROOTSERVER='192.168.122.1'
-ROOTPATH=''
-filename=''
-UPTIME='21'
-DHCPLEASETIME='3600'
-DOMAINSEARCH='foo.com'
-"""
-
-DHCP_EXPECTED_1 = {
- 'name': 'eth0',
- 'type': 'physical',
- 'subnets': [{'broadcast': '192.168.122.255',
- 'control': 'manual',
- 'gateway': '192.168.122.1',
- 'dns_search': ['foo.com'],
- 'type': 'dhcp',
- 'netmask': '255.255.255.0',
- 'dns_nameservers': ['192.168.122.1']}],
-}
-
-
-STATIC_CONTENT_1 = """
-DEVICE='eth1'
-PROTO='static'
-IPV4ADDR='10.0.0.2'
-IPV4BROADCAST='10.0.0.255'
-IPV4NETMASK='255.255.255.0'
-IPV4GATEWAY='10.0.0.1'
-IPV4DNS0='10.0.1.1'
-IPV4DNS1='0.0.0.0'
-HOSTNAME='foohost'
-UPTIME='21'
-DHCPLEASETIME='3600'
-DOMAINSEARCH='foo.com'
-"""
-
-STATIC_EXPECTED_1 = {
- 'name': 'eth1',
- 'type': 'physical',
- 'subnets': [{'broadcast': '10.0.0.255', 'control': 'manual',
- 'gateway': '10.0.0.1',
- 'dns_search': ['foo.com'], 'type': 'static',
- 'netmask': '255.255.255.0',
- 'dns_nameservers': ['10.0.1.1']}],
-}
-
-# Examples (and expected outputs for various renderers).
-OS_SAMPLES = [
- {
- 'in_data': {
- "services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }],
- "links": [
- {
- "ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
- },
- ],
- },
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
- },
- 'out_sysconfig': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-BOOTPROTO=static
-DEFROUTE=yes
-DEVICE=eth0
-GATEWAY=172.19.3.254
-HWADDR=fa:16:3e:ed:9a:59
-IPADDR=172.19.1.34
-NETMASK=255.255.252.0
-NM_CONTROLLED=no
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=no
-""".lstrip()),
- ('etc/sysconfig/network-scripts/route-eth0',
- """
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-ADDRESS0=0.0.0.0
-GATEWAY0=172.19.3.254
-NETMASK0=0.0.0.0
-""".lstrip()),
- ('etc/resolv.conf',
- """
-; Created by cloud-init on instance boot automatically, do not edit.
-;
-nameserver 172.19.0.12
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
- }
-]
-
-EXAMPLE_ENI = """
-auto lo
-iface lo inet loopback
- dns-nameservers 10.0.0.1
- dns-search foo.com
-
-auto eth0
-iface eth0 inet static
- address 1.2.3.12
- netmask 255.255.255.248
- broadcast 1.2.3.15
- gateway 1.2.3.9
- dns-nameservers 69.9.160.191 69.9.191.4
-auto eth1
-iface eth1 inet static
- address 10.248.2.4
- netmask 255.255.255.248
- broadcast 10.248.2.7
-"""
-
-RENDERED_ENI = """
-auto lo
-iface lo inet loopback
- dns-nameservers 10.0.0.1
- dns-search foo.com
-
-auto eth0
-iface eth0 inet static
- address 1.2.3.12
- broadcast 1.2.3.15
- dns-nameservers 69.9.160.191 69.9.191.4
- gateway 1.2.3.9
- netmask 255.255.255.248
-
-auto eth1
-iface eth1 inet static
- address 10.248.2.4
- broadcast 10.248.2.7
- netmask 255.255.255.248
-""".lstrip()
-
-NETWORK_CONFIGS = {
- 'small': {
- 'expected_eni': textwrap.dedent("""\
- auto lo
- iface lo inet loopback
- dns-nameservers 1.2.3.4 5.6.7.8
- dns-search wark.maas
-
- iface eth1 inet manual
-
- auto eth99
- iface eth99 inet dhcp
- post-up ifup eth99:1
-
-
- auto eth99:1
- iface eth99:1 inet static
- address 192.168.21.3/24
- dns-nameservers 8.8.8.8 8.8.4.4
- dns-search barley.maas sach.maas
- post-up route add default gw 65.61.151.37 || true
- pre-down route del default gw 65.61.151.37 || true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""
- version: 1
- config:
- # Physical interfaces.
- - type: physical
- name: eth99
- mac_address: "c0:d6:9f:2c:e8:80"
- subnets:
- - type: dhcp4
- - type: static
- address: 192.168.21.3/24
- dns_nameservers:
- - 8.8.8.8
- - 8.8.4.4
- dns_search: barley.maas sach.maas
- routes:
- - gateway: 65.61.151.37
- netmask: 0.0.0.0
- network: 0.0.0.0
- metric: 2
- - type: physical
- name: eth1
- mac_address: "cf:d6:af:48:e8:80"
- - type: nameserver
- address:
- - 1.2.3.4
- - 5.6.7.8
- search:
- - wark.maas
- """),
- },
- 'all': {
- 'expected_eni': ("""\
-auto lo
-iface lo inet loopback
- dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4
- dns-search barley.maas wark.maas foobar.maas
-
-iface eth0 inet manual
-
-auto eth1
-iface eth1 inet manual
- bond-master bond0
- bond-mode active-backup
-
-auto eth2
-iface eth2 inet manual
- bond-master bond0
- bond-mode active-backup
-
-iface eth3 inet manual
-
-iface eth4 inet manual
-
-# control-manual eth5
-iface eth5 inet dhcp
-
-auto bond0
-iface bond0 inet6 dhcp
- bond-mode active-backup
- bond-slaves none
- hwaddress aa:bb:cc:dd:ee:ff
-
-auto br0
-iface br0 inet static
- address 192.168.14.2/24
- bridge_ports eth3 eth4
- bridge_stp off
- post-up ifup br0:1
-
-
-auto br0:1
-iface br0:1 inet6 static
- address 2001:1::1/64
-
-auto bond0.200
-iface bond0.200 inet dhcp
- vlan-raw-device bond0
- vlan_id 200
-
-auto eth0.101
-iface eth0.101 inet static
- address 192.168.0.2/24
- dns-nameservers 192.168.0.10 10.23.23.134
- dns-search barley.maas sacchromyces.maas brettanomyces.maas
- gateway 192.168.0.1
- mtu 1500
- vlan-raw-device eth0
- vlan_id 101
- post-up ifup eth0.101:1
-
-
-auto eth0.101:1
-iface eth0.101:1 inet static
- address 192.168.2.10/24
-
-post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-"""),
- 'yaml': textwrap.dedent("""
- version: 1
- config:
- # Physical interfaces.
- - type: physical
- name: eth0
- mac_address: "c0:d6:9f:2c:e8:80"
- - type: physical
- name: eth1
- mac_address: "aa:d6:9f:2c:e8:80"
- - type: physical
- name: eth2
- mac_address: "c0:bb:9f:2c:e8:80"
- - type: physical
- name: eth3
- mac_address: "66:bb:9f:2c:e8:80"
- - type: physical
- name: eth4
- mac_address: "98:bb:9f:2c:e8:80"
- # specify how ifupdown should treat iface
- # control is one of ['auto', 'hotplug', 'manual']
- # with manual meaning ifup/ifdown should not affect the iface
- # useful for things like iscsi root + dhcp
- - type: physical
- name: eth5
- mac_address: "98:bb:9f:2c:e8:8a"
- subnets:
- - type: dhcp
- control: manual
- # VLAN interface.
- - type: vlan
- name: eth0.101
- vlan_link: eth0
- vlan_id: 101
- mtu: 1500
- subnets:
- - type: static
- address: 192.168.0.2/24
- gateway: 192.168.0.1
- dns_nameservers:
- - 192.168.0.10
- - 10.23.23.134
- dns_search:
- - barley.maas
- - sacchromyces.maas
- - brettanomyces.maas
- - type: static
- address: 192.168.2.10/24
- # Bond.
- - type: bond
- name: bond0
- # if 'mac_address' is omitted, the MAC is taken from
- # the first slave.
- mac_address: "aa:bb:cc:dd:ee:ff"
- bond_interfaces:
- - eth1
- - eth2
- params:
- bond-mode: active-backup
- subnets:
- - type: dhcp6
- # A Bond VLAN.
- - type: vlan
- name: bond0.200
- vlan_link: bond0
- vlan_id: 200
- subnets:
- - type: dhcp4
- # A bridge.
- - type: bridge
- name: br0
- bridge_interfaces:
- - eth3
- - eth4
- ipv4_conf:
- rp_filter: 1
- proxy_arp: 0
- forwarding: 1
- ipv6_conf:
- autoconf: 1
- disable_ipv6: 1
- use_tempaddr: 1
- forwarding: 1
- # basically anything in /proc/sys/net/ipv6/conf/.../
- params:
- bridge_stp: 'off'
- bridge_fd: 0
- bridge_maxwait: 0
- subnets:
- - type: static
- address: 192.168.14.2/24
- - type: static
- address: 2001:1::1/64 # default to /64
- # A global nameserver.
- - type: nameserver
- address: 8.8.8.8
- search: barley.maas
- # global nameservers and search in list form
- - type: nameserver
- address:
- - 4.4.4.4
- - 8.8.4.4
- search:
- - wark.maas
- - foobar.maas
- # A global route.
- - type: route
- destination: 10.0.0.0/8
- gateway: 11.0.0.1
- metric: 3
- """).lstrip(),
- }
-}
-
-
-def _setup_test(tmp_dir, mock_get_devicelist, mock_sys_netdev_info,
- mock_sys_dev_path):
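- # fake a single 'eth1000' NIC in a mocked sysfs so that fallback
- # network config generation has a device to discover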
- mock_get_devicelist.return_value = ['eth1000']
- dev_characteristics = {
- 'eth1000': {
- "bridge": False,
- "carrier": False,
- "dormant": False,
- "operstate": "down",
- "address": "07-1C-C6-75-A4-BE",
- }
- }
-
- def netdev_info(name, field):
- return dev_characteristics[name][field]
-
- mock_sys_netdev_info.side_effect = netdev_info
-
- def sys_dev_path(devname, path=""):
- return tmp_dir + devname + "/" + path
-
- for dev in dev_characteristics:
- os.makedirs(os.path.join(tmp_dir, dev))
- with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh:
- fh.write("down")
-
- mock_sys_dev_path.side_effect = sys_dev_path
-
-
-class TestSysConfigRendering(TestCase):
-
- @mock.patch("cloudinit.net.sys_dev_path")
- @mock.patch("cloudinit.net.sys_netdev_info")
- @mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_sys_netdev_info,
- mock_sys_dev_path):
- tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmp_dir)
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_sys_netdev_info, mock_sys_dev_path)
-
- network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
-
- render_dir = os.path.join(tmp_dir, "render")
- os.makedirs(render_dir)
-
- renderer = sysconfig.Renderer()
- renderer.render_network_state(render_dir, ns)
-
- render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000'
- with open(os.path.join(render_dir, render_file)) as fh:
- content = fh.read()
- expected_content = """
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-BOOTPROTO=dhcp
-DEVICE=eth1000
-HWADDR=07-1C-C6-75-A4-BE
-NM_CONTROLLED=no
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=no
-""".lstrip()
- self.assertEqual(expected_content, content)
-
- def test_openstack_rendering_samples(self):
- tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmp_dir)
- render_dir = os.path.join(tmp_dir, "render")
- for os_sample in OS_SAMPLES:
- ex_input = os_sample['in_data']
- ex_mac_addrs = os_sample['in_macs']
- network_cfg = openstack.convert_net_json(
- ex_input, known_macs=ex_mac_addrs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
- renderer = sysconfig.Renderer()
- renderer.render_network_state(render_dir, ns)
- for fn, expected_content in os_sample.get('out_sysconfig', []):
- with open(os.path.join(render_dir, fn)) as fh:
- self.assertEqual(expected_content, fh.read())
-
-
-class TestEniNetRendering(TestCase):
-
- @mock.patch("cloudinit.net.sys_dev_path")
- @mock.patch("cloudinit.net.sys_netdev_info")
- @mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_sys_netdev_info,
- mock_sys_dev_path):
- tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmp_dir)
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_sys_netdev_info, mock_sys_dev_path)
-
- network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
-
- render_dir = os.path.join(tmp_dir, "render")
- os.makedirs(render_dir)
-
- renderer = eni.Renderer(
- {'links_path_prefix': None,
- 'eni_path': 'interfaces', 'netrules_path': None,
- })
- renderer.render_network_state(render_dir, ns)
-
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
- contents = fh.read()
-
- expected = """
-auto lo
-iface lo inet loopback
-
-auto eth1000
-iface eth1000 inet dhcp
-"""
- self.assertEqual(expected.lstrip(), contents.lstrip())
-
-
-class TestEniNetworkStateToEni(TestCase):
- mycfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
- my_mac = 'c0:d6:9f:2c:e8:80'
-
- def test_no_header(self):
- rendered = eni.network_state_to_eni(
- network_state=network_state.parse_net_config_data(self.mycfg),
- render_hwaddress=True)
- self.assertIn(self.my_mac, rendered)
- self.assertIn("hwaddress", rendered)
-
- def test_with_header(self):
- header = "# hello world\n"
- rendered = eni.network_state_to_eni(
- network_state=network_state.parse_net_config_data(self.mycfg),
- header=header, render_hwaddress=True)
- self.assertIn(header, rendered)
- self.assertIn(self.my_mac, rendered)
-
- def test_no_hwaddress(self):
- rendered = eni.network_state_to_eni(
- network_state=network_state.parse_net_config_data(self.mycfg),
- render_hwaddress=False)
- self.assertNotIn(self.my_mac, rendered)
- self.assertNotIn("hwaddress", rendered)
-
-
-class TestCmdlineConfigParsing(TestCase):
- simple_cfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}]}
-
- def test_cmdline_convert_dhcp(self):
- found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1)
- self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
-
- def test_cmdline_convert_static(self):
- found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1)
- self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
-
- def test_config_from_cmdline_net_cfg(self):
- files = []
- pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
- ('net-eth1.cfg', STATIC_CONTENT_1))
-
- macs = {'eth1': 'b8:ae:ed:75:ff:2b',
- 'eth0': 'b8:ae:ed:75:ff:2a'}
-
- dhcp = copy.deepcopy(DHCP_EXPECTED_1)
- dhcp['mac_address'] = macs['eth0']
-
- static = copy.deepcopy(STATIC_EXPECTED_1)
- static['mac_address'] = macs['eth1']
-
- expected = {'version': 1, 'config': [dhcp, static]}
- with util.tempdir() as tmpd:
- for fname, content in pairs:
- fp = os.path.join(tmpd, fname)
- files.append(fp)
- util.write_file(fp, content)
-
- found = cmdline.config_from_klibc_net_cfg(files=files,
- mac_addrs=macs)
- self.assertEqual(found, expected)
-
- def test_cmdline_with_b64(self):
- data = base64.b64encode(json.dumps(self.simple_cfg).encode())
- encoded_text = data.decode()
- raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
- found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
- self.assertEqual(found, self.simple_cfg)
-
- def test_cmdline_with_b64_gz(self):
- data = _gzip_data(json.dumps(self.simple_cfg).encode())
- encoded_text = base64.b64encode(data).decode()
- raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
- found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
- self.assertEqual(found, self.simple_cfg)
-
-
-class TestEniRoundTrip(TestCase):
- def setUp(self):
- super(TestEniRoundTrip, self).setUp()
- self.tmp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp_dir)
-
- def _render_and_read(self, network_config=None, state=None, eni_path=None,
- links_prefix=None, netrules_path=None):
- if network_config:
- ns = network_state.parse_net_config_data(network_config)
- elif state:
- ns = state
- else:
- raise ValueError("Expected data or state, got neither")
-
- if eni_path is None:
- eni_path = 'etc/network/interfaces'
-
- renderer = eni.Renderer(
- config={'eni_path': eni_path, 'links_path_prefix': links_prefix,
- 'netrules_path': netrules_path})
-
- renderer.render_network_state(self.tmp_dir, ns)
- return dir2dict(self.tmp_dir)
-
- def testsimple_convert_and_render(self):
- network_config = eni.convert_eni_data(EXAMPLE_ENI)
- files = self._render_and_read(network_config=network_config)
- self.assertEqual(
- RENDERED_ENI.splitlines(),
- files['/etc/network/interfaces'].splitlines())
-
- def testsimple_render_all(self):
- entry = NETWORK_CONFIGS['all']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
-
- def testsimple_render_small(self):
- entry = NETWORK_CONFIGS['small']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
-
-
-def _gzip_data(data):
- with io.BytesIO() as iobuf:
- gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf)
- gzfp.write(data)
- gzfp.close()
- return iobuf.getvalue()
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
deleted file mode 100644
index 38fd75b6..00000000
--- a/tests/unittests/test_pathprefix2dict.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from cloudinit import util
-
-from .helpers import TestCase, populate_dir
-
-import shutil
-import tempfile
-
-
-class TestPathPrefix2Dict(TestCase):
-
- def setUp(self):
- super(TestPathPrefix2Dict, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_required_only(self):
- dirdata = {'f1': b'f1content', 'f2': b'f2content'}
- populate_dir(self.tmp, dirdata)
-
- ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
- self.assertEqual(dirdata, ret)
-
- def test_required_missing(self):
- dirdata = {'f1': b'f1content'}
- populate_dir(self.tmp, dirdata)
- kwargs = {'required': ['f1', 'f2']}
- self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
-
- def test_no_required_and_optional(self):
- dirdata = {'f1': b'f1c', 'f2': b'f2c'}
- populate_dir(self.tmp, dirdata)
-
- ret = util.pathprefix2dict(self.tmp, required=None,
- optional=['f1', 'f2'])
- self.assertEqual(dirdata, ret)
-
- def test_required_and_optional(self):
- dirdata = {'f1': b'f1c', 'f2': b'f2c'}
- populate_dir(self.tmp, dirdata)
-
- ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
- self.assertEqual(dirdata, ret)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py
deleted file mode 100644
index bcf01475..00000000
--- a/tests/unittests/test_registry.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from cloudinit.registry import DictRegistry
-
-from .helpers import (mock, TestCase)
-
-
-class TestDictRegistry(TestCase):
-
- def test_added_item_included_in_output(self):
- registry = DictRegistry()
- item_key, item_to_register = 'test_key', mock.Mock()
- registry.register_item(item_key, item_to_register)
- self.assertEqual({item_key: item_to_register},
- registry.registered_items)
-
- def test_registry_starts_out_empty(self):
- self.assertEqual({}, DictRegistry().registered_items)
-
- def test_modifying_registered_items_isnt_exposed_to_other_callers(self):
- registry = DictRegistry()
- registry.registered_items['test_item'] = mock.Mock()
- self.assertEqual({}, registry.registered_items)
-
- def test_keys_cannot_be_replaced(self):
- registry = DictRegistry()
- item_key = 'test_key'
- registry.register_item(item_key, mock.Mock())
- self.assertRaises(ValueError,
- registry.register_item, item_key, mock.Mock())
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
deleted file mode 100644
index 20ca23df..00000000
--- a/tests/unittests/test_reporting.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-# vi: ts=4 expandtab
-
-from cloudinit import reporting
-from cloudinit.reporting import events
-from cloudinit.reporting import handlers
-
-import mock
-
-from .helpers import TestCase
-
-
-def _fake_registry():
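- # a stand-in handler registry holding two mock handlers, 'a' and 'b'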
- return mock.Mock(registered_items={'a': mock.MagicMock(),
- 'b': mock.MagicMock()})
-
-
-class TestReportStartEvent(TestCase):
-
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
- def test_report_start_event_passes_something_with_as_string_to_handlers(
- self, instantiated_handler_registry):
- event_name, event_description = 'my_test_event', 'my description'
- events.report_start_event(event_name, event_description)
- expected_string_representation = ': '.join(
- ['start', event_name, event_description])
- for _, handler in (
- instantiated_handler_registry.registered_items.items()):
- self.assertEqual(1, handler.publish_event.call_count)
- event = handler.publish_event.call_args[0][0]
- self.assertEqual(expected_string_representation, event.as_string())
-
-
-class TestReportFinishEvent(TestCase):
-
- def _report_finish_event(self, result=events.status.SUCCESS):
- event_name, event_description = 'my_test_event', 'my description'
- events.report_finish_event(
- event_name, event_description, result=result)
- return event_name, event_description
-
- def assertHandlersPassedObjectWithAsString(
- self, handlers, expected_as_string):
- for _, handler in handlers.items():
- self.assertEqual(1, handler.publish_event.call_count)
- event = handler.publish_event.call_args[0][0]
- self.assertEqual(expected_as_string, event.as_string())
-
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
- def test_report_finish_event_passes_something_with_as_string_to_handlers(
- self, instantiated_handler_registry):
- event_name, event_description = self._report_finish_event()
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.SUCCESS,
- event_description])
- self.assertHandlersPassedObjectWithAsString(
- instantiated_handler_registry.registered_items,
- expected_string_representation)
-
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
- def test_reporting_successful_finish_has_sensible_string_repr(
- self, instantiated_handler_registry):
- event_name, event_description = self._report_finish_event(
- result=events.status.SUCCESS)
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.SUCCESS,
- event_description])
- self.assertHandlersPassedObjectWithAsString(
- instantiated_handler_registry.registered_items,
- expected_string_representation)
-
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
- def test_reporting_unsuccessful_finish_has_sensible_string_repr(
- self, instantiated_handler_registry):
- event_name, event_description = self._report_finish_event(
- result=events.status.FAIL)
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.FAIL, event_description])
- self.assertHandlersPassedObjectWithAsString(
- instantiated_handler_registry.registered_items,
- expected_string_representation)
-
- def test_invalid_result_raises_attribute_error(self):
- self.assertRaises(ValueError, self._report_finish_event, ("BOGUS",))
-
-
-class TestReportingEvent(TestCase):
-
- def test_as_string(self):
- event_type, name, description = 'test_type', 'test_name', 'test_desc'
- event = events.ReportingEvent(event_type, name, description)
- expected_string_representation = ': '.join(
- [event_type, name, description])
- self.assertEqual(expected_string_representation, event.as_string())
-
- def test_as_dict(self):
- event_type, name, desc = 'test_type', 'test_name', 'test_desc'
- event = events.ReportingEvent(event_type, name, desc)
- expected = {'event_type': event_type, 'name': name,
- 'description': desc, 'origin': 'cloudinit'}
-
- # allow for timestamp to differ, but must be present
- as_dict = event.as_dict()
- self.assertIn('timestamp', as_dict)
- del as_dict['timestamp']
-
- self.assertEqual(expected, as_dict)
-
-
-class TestFinishReportingEvent(TestCase):
- def test_as_has_result(self):
- result = events.status.SUCCESS
- name, desc = 'test_name', 'test_desc'
- event = events.FinishReportingEvent(name, desc, result)
- ret = event.as_dict()
- self.assertTrue('result' in ret)
- self.assertEqual(ret['result'], result)
-
-
-class TestBaseReportingHandler(TestCase):
-
- def test_base_reporting_handler_is_abstract(self):
- regexp = r".*abstract.*publish_event.*"
- self.assertRaisesRegexp(TypeError, regexp, handlers.ReportingHandler)
-
-
-class TestLogHandler(TestCase):
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
- def test_appropriate_logger_used(self, getLogger):
- event_type, event_name = 'test_type', 'test_name'
- event = events.ReportingEvent(event_type, event_name, 'description')
- reporting.handlers.LogHandler().publish_event(event)
- self.assertEqual(
- [mock.call(
- 'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))],
- getLogger.call_args_list)
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
- def test_single_log_message_at_info_published(self, getLogger):
- event = events.ReportingEvent('type', 'name', 'description')
- reporting.handlers.LogHandler().publish_event(event)
- self.assertEqual(1, getLogger.return_value.log.call_count)
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
- def test_log_message_uses_event_as_string(self, getLogger):
- event = events.ReportingEvent('type', 'name', 'description')
- reporting.handlers.LogHandler(level="INFO").publish_event(event)
- self.assertIn(event.as_string(),
- getLogger.return_value.log.call_args[0][1])
-
-
-class TestDefaultRegisteredHandler(TestCase):
-
- def test_log_handler_registered_by_default(self):
- registered_items = (
- reporting.instantiated_handler_registry.registered_items)
- for _, item in registered_items.items():
- if isinstance(item, reporting.handlers.LogHandler):
- break
- else:
- self.fail('No reporting LogHandler registered by default.')
-
-
-class TestReportingConfiguration(TestCase):
-
- @mock.patch.object(reporting, 'instantiated_handler_registry')
- def test_empty_configuration_doesnt_add_handlers(
- self, instantiated_handler_registry):
- reporting.update_configuration({})
- self.assertEqual(
- 0, instantiated_handler_registry.register_item.call_count)
-
- @mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
- def test_looks_up_handler_by_type_and_adds_it(self, available_handlers):
- handler_type_name = 'test_handler'
- handler_cls = mock.Mock()
- available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_name = 'my_test_handler'
- reporting.update_configuration(
- {handler_name: {'type': handler_type_name}})
- self.assertEqual(
- {handler_name: handler_cls.return_value},
- reporting.instantiated_handler_registry.registered_items)
-
- @mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
- def test_uses_non_type_parts_of_config_dict_as_kwargs(
- self, available_handlers):
- handler_type_name = 'test_handler'
- handler_cls = mock.Mock()
- available_handlers.registered_items = {handler_type_name: handler_cls}
- extra_kwargs = {'foo': 'bar', 'bar': 'baz'}
- handler_config = extra_kwargs.copy()
- handler_config.update({'type': handler_type_name})
- handler_name = 'my_test_handler'
- reporting.update_configuration({handler_name: handler_config})
- self.assertEqual(
- handler_cls.return_value,
- reporting.instantiated_handler_registry.registered_items[
- handler_name])
- self.assertEqual([mock.call(**extra_kwargs)],
- handler_cls.call_args_list)
-
- @mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
- def test_handler_config_not_modified(self, available_handlers):
- handler_type_name = 'test_handler'
- handler_cls = mock.Mock()
- available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_config = {'type': handler_type_name, 'foo': 'bar'}
- expected_handler_config = handler_config.copy()
- reporting.update_configuration({'my_test_handler': handler_config})
- self.assertEqual(expected_handler_config, handler_config)
-
- @mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
- def test_handlers_removed_if_falseish_specified(self, available_handlers):
- handler_type_name = 'test_handler'
- handler_cls = mock.Mock()
- available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_name = 'my_test_handler'
- reporting.update_configuration(
- {handler_name: {'type': handler_type_name}})
- self.assertEqual(
- 1, len(reporting.instantiated_handler_registry.registered_items))
- reporting.update_configuration({handler_name: None})
- self.assertEqual(
- 0, len(reporting.instantiated_handler_registry.registered_items))
-
-
-class TestReportingEventStack(TestCase):
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
- def test_start_and_finish_success(self, report_start, report_finish):
- with events.ReportEventStack(name="myname", description="mydesc"):
- pass
- self.assertEqual(
- [mock.call('myname', 'mydesc')], report_start.call_args_list)
- self.assertEqual(
- [mock.call('myname', 'mydesc', events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
- def test_finish_exception_defaults_fail(self, report_start, report_finish):
- name = "myname"
- desc = "mydesc"
- try:
- with events.ReportEventStack(name, description=desc):
- raise ValueError("This didnt work")
- except ValueError:
- pass
- self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
- self.assertEqual(
- [mock.call(name, desc, events.status.FAIL, post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
- def test_result_on_exception_used(self, report_start, report_finish):
- name = "myname"
- desc = "mydesc"
- try:
- with events.ReportEventStack(
- name, desc, result_on_exception=events.status.WARN):
- raise ValueError("This didnt work")
- except ValueError:
- pass
- self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
- self.assertEqual(
- [mock.call(name, desc, events.status.WARN, post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_start_event')
- def test_child_fullname_respects_parent(self, report_start):
- parent_name = "topname"
- c1_name = "c1name"
- c2_name = "c2name"
- c2_expected_fullname = '/'.join([parent_name, c1_name, c2_name])
- c1_expected_fullname = '/'.join([parent_name, c1_name])
-
- parent = events.ReportEventStack(parent_name, "topdesc")
- c1 = events.ReportEventStack(c1_name, "c1desc", parent=parent)
- c2 = events.ReportEventStack(c2_name, "c2desc", parent=c1)
- with c1:
- report_start.assert_called_with(c1_expected_fullname, "c1desc")
- with c2:
- report_start.assert_called_with(c2_expected_fullname, "c2desc")
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
- def test_child_result_bubbles_up(self, report_start, report_finish):
- parent = events.ReportEventStack("topname", "topdesc")
- child = events.ReportEventStack("c_name", "c_desc", parent=parent)
- with parent:
- with child:
- child.result = events.status.WARN
-
- report_finish.assert_called_with(
- "topname", "topdesc", events.status.WARN, post_files=[])
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- def test_message_used_in_finish(self, report_finish):
- with events.ReportEventStack("myname", "mydesc",
- message="mymessage"):
- pass
- self.assertEqual(
- [mock.call("myname", "mymessage", events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- def test_message_updatable(self, report_finish):
- with events.ReportEventStack("myname", "mydesc") as c:
- c.message = "all good"
- self.assertEqual(
- [mock.call("myname", "all good", events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_start_event')
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- def test_reporting_disabled_does_not_report_events(
-            self, report_finish, report_start):
- with events.ReportEventStack("a", "b", reporting_enabled=False):
- pass
- self.assertEqual(report_start.call_count, 0)
- self.assertEqual(report_finish.call_count, 0)
-
- @mock.patch('cloudinit.reporting.events.report_start_event')
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- def test_reporting_child_default_to_parent(
-            self, report_finish, report_start):
- parent = events.ReportEventStack(
- "pname", "pdesc", reporting_enabled=False)
- child = events.ReportEventStack("cname", "cdesc", parent=parent)
- with parent:
- with child:
- pass
- pass
- self.assertEqual(report_start.call_count, 0)
- self.assertEqual(report_finish.call_count, 0)
-
- def test_reporting_event_has_sane_repr(self):
- myrep = events.ReportEventStack("fooname", "foodesc",
- reporting_enabled=True).__repr__()
- self.assertIn("fooname", myrep)
- self.assertIn("foodesc", myrep)
- self.assertIn("True", myrep)
-
- def test_set_invalid_result_raises_value_error(self):
- f = events.ReportEventStack("myname", "mydesc")
- self.assertRaises(ValueError, setattr, f, "result", "BOGUS")
-
-
-class TestStatusAccess(TestCase):
-    def test_invalid_status_access_raises_attribute_error(self):
- self.assertRaises(AttributeError, getattr, events.status, "BOGUS")
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
deleted file mode 100644
index 891dbe77..00000000
--- a/tests/unittests/test_rh_subscription.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-
-from cloudinit.config import cc_rh_subscription
-from cloudinit import util
-
-from .helpers import TestCase, mock
-
-
-class GoodTests(TestCase):
- def setUp(self):
- super(GoodTests, self).setUp()
- self.name = "cc_rh_subscription"
- self.cloud_init = None
- self.log = logging.getLogger("good_tests")
- self.args = []
- self.handle = cc_rh_subscription.handle
- self.SM = cc_rh_subscription.SubscriptionManager
-
- self.config = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks'
- }}
- self.config_full = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'auto-attach': True,
- 'service-level': 'self-support',
- 'add-pool': ['pool1', 'pool2', 'pool3'],
- 'enable-repo': ['repo1', 'repo2', 'repo3'],
- 'disable-repo': ['repo4', 'repo5']
- }}
-
- def test_already_registered(self):
- '''
-        Emulates a system that is already registered. Ensures it gets
-        a non-ProcessExecutionError result back from is_registered()
- '''
- with mock.patch.object(cc_rh_subscription.SubscriptionManager,
- '_sub_man_cli') as mockobj:
- self.SM.log_success = mock.MagicMock()
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(mockobj.call_count, 1)
-
- def test_simple_registration(self):
- '''
- Simple registration with username and password
- '''
- self.SM.log_success = mock.MagicMock()
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (reg, 'bar')])
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertIn(mock.call(['identity']),
- self.SM._sub_man_cli.call_args_list)
- self.assertIn(mock.call(['register', '--username=scooby@do.com',
- '--password=scooby-snacks'],
- logstring_val=True),
- self.SM._sub_man_cli.call_args_list)
-
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
-
- def test_full_registration(self):
- '''
- Registration with auto-attach, service-level, adding pools,
- and enabling and disabling yum repos
- '''
- call_lists = []
- call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
- call_lists.append(['repos', '--enable=repo2', '--enable=repo3',
- '--disable=repo5'])
- call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
- self.SM.log_success = mock.MagicMock()
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (reg, 'bar'),
- ('Service level set to: self-support', ''),
- ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
- ('Repo ID: repo1\nRepo ID: repo5\n', ''),
- ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: '
- 'repo4', ''),
- ('', '')])
- self.handle(self.name, self.config_full, self.cloud_init,
- self.log, self.args)
- for call in call_lists:
- self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list)
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(self.SM._sub_man_cli.call_count, 9)
-
-
-class TestBadInput(TestCase):
- name = "cc_rh_subscription"
- cloud_init = None
- log = logging.getLogger("bad_tests")
- args = []
- SM = cc_rh_subscription.SubscriptionManager
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
-
- config_no_password = {'rh_subscription':
- {'username': 'scooby@do.com'
- }}
-
- config_no_key = {'rh_subscription':
- {'activation-key': '1234abcde',
- }}
-
- config_service = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'service-level': 'self-support'
- }}
-
- config_badpool = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'add-pool': 'not_a_list'
- }}
- config_badrepo = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'enable-repo': 'not_a_list'
- }}
- config_badkey = {'rh_subscription':
- {'activation-key': 'abcdef1234',
- 'fookey': 'bar',
- 'org': '123',
- }}
-
- def setUp(self):
- super(TestBadInput, self).setUp()
- self.handle = cc_rh_subscription.handle
-
- def test_no_password(self):
- '''
- Attempt to register without the password key/value
- '''
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
- self.handle(self.name, self.config_no_password, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 0)
-
- def test_no_org(self):
- '''
- Attempt to register without the org key/value
- '''
- self.input_is_missing_data(self.config_no_key)
-
- def test_service_level_without_auto(self):
- '''
- Attempt to register using service-level without the auto-attach key
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
- self.handle(self.name, self.config_service, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
- self.assertEqual(self.SM.log_warn.call_count, 2)
-
- def test_pool_not_a_list(self):
- '''
- Register with pools that are not in the format of a list
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
- self.handle(self.name, self.config_badpool, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
- self.assertEqual(self.SM.log_warn.call_count, 2)
-
- def test_repo_not_a_list(self):
- '''
- Register with repos that are not in the format of a list
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
- self.handle(self.name, self.config_badrepo, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM.log_warn.call_count, 3)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
-
- def test_bad_key_value(self):
- '''
- Attempt to register with a key that we don't know
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
- self.handle(self.name, self.config_badkey, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM.log_warn.call_count, 2)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
-
- def input_is_missing_data(self, config):
- '''
-        Helper method for tests that have missing required information
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError])
- self.handle(self.name, config, self.cloud_init,
- self.log, self.args)
- self.SM._sub_man_cli.assert_called_with(['identity'])
- self.assertEqual(self.SM.log_warn.call_count, 4)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_runs/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
deleted file mode 100644
index ce43798e..00000000
--- a/tests/unittests/test_runs/test_merge_run.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import os
-import shutil
-import tempfile
-
-from .. import helpers
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import stages
-from cloudinit import util
-
-
-class TestMergeRun(helpers.FilesystemMockingTestCase):
- def _patchIn(self, root):
- self.patchOS(root)
- self.patchUtils(root)
-
- def test_none_ds(self):
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.replicateTestRoot('simple_ubuntu', new_root)
- cfg = {
- 'datasource_list': ['None'],
- 'cloud_init_modules': ['write-files'],
- }
- ud = self.readResource('user_data.1.txt')
- cloud_cfg = util.yaml_dumps(cfg)
- util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
- self._patchIn(new_root)
-
-        # Now start verifying what's created
- initer = stages.Init()
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.datasource.userdata_raw = ud
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mirrors = initer.distro.get_option('package_mirrors')
- self.assertEqual(1, len(mirrors))
- mirror = mirrors[0]
- self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])
- mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
- self.assertTrue(len(failures) == 0)
- self.assertTrue(os.path.exists('/etc/blah.ini'))
- self.assertIn('write-files', which_ran)
- contents = util.load_file('/etc/blah.ini')
- self.assertEqual(contents, 'blah')
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
deleted file mode 100644
index 07e7b1a8..00000000
--- a/tests/unittests/test_runs/test_simple_run.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import os
-import shutil
-import tempfile
-
-from .. import helpers
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import stages
-from cloudinit import util
-
-
-class TestSimpleRun(helpers.FilesystemMockingTestCase):
- def _patchIn(self, root):
- self.patchOS(root)
- self.patchUtils(root)
-
- def _pp_root(self, root, repatch=True):
- for (dirpath, dirnames, filenames) in os.walk(root):
- print(dirpath)
- for f in filenames:
- joined = os.path.join(dirpath, f)
- if os.path.islink(joined):
- print("f %s - (symlink)" % (f))
- else:
- print("f %s" % (f))
- for d in dirnames:
- joined = os.path.join(dirpath, d)
- if os.path.islink(joined):
- print("d %s - (symlink)" % (d))
- else:
- print("d %s" % (d))
- if repatch:
- self._patchIn(root)
-
- def test_none_ds(self):
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.replicateTestRoot('simple_ubuntu', new_root)
- cfg = {
- 'datasource_list': ['None'],
- 'write_files': [
- {
- 'path': '/etc/blah.ini',
- 'content': 'blah',
- 'permissions': 0o755,
- },
- ],
- 'cloud_init_modules': ['write-files'],
- }
- cloud_cfg = util.yaml_dumps(cfg)
- util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
- self._patchIn(new_root)
-
-        # Now start verifying what's created
- initer = stages.Init()
- initer.read_cfg()
- initer.initialize()
- self.assertTrue(os.path.exists("/var/lib/cloud"))
- for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
- self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))
-
- initer.fetch()
- iid = initer.instancify()
- self.assertEqual(iid, 'iid-datasource-none')
- initer.update()
- self.assertTrue(os.path.islink("var/lib/cloud/instance"))
-
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
-
- mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
- self.assertTrue(len(failures) == 0)
- self.assertTrue(os.path.exists('/etc/blah.ini'))
- self.assertIn('write-files', which_ran)
- contents = util.load_file('/etc/blah.ini')
- self.assertEqual(contents, 'blah')
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
deleted file mode 100644
index 9aeb1cde..00000000
--- a/tests/unittests/test_sshutil.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from mock import patch
-
-from . import helpers as test_helpers
-from cloudinit import ssh_util
-
-
-VALID_CONTENT = {
- 'dsa': (
- "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
- "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa"
- "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa"
- "jzRAAAAFQDMPO96qXd4F5A+5b2f2MO7SpVomQAAAIBpC3K2zIbDLqBBs1fn7rsv"
- "KcJvwihdlVjG7UXsDB76P2GNqVG+IlYPpJZ8TO/B/fzTMtrdXp9pSm9OY1+BgN4"
- "REsZ2WNcvfgY33aWaEM+ieCcQigvxrNAF2FTVcbUIIxAn6SmHuQSWrLSfdHc8H7"
- "hsrgeUPPdzjBD/cv2ZmqwZ1AAAAIAplIsScrJut5wJMgyK1JG0Kbw9JYQpLe95P"
- "obB069g8+mYR8U0fysmTEdR44mMu0VNU5E5OhTYoTGfXrVrkR134LqFM2zpVVbE"
- "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/"
- "5z7u2rVAlDw=="
- ),
- 'ecdsa': (
- "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ"
- "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/"
- "Ql7lc2leWL7CY="
- ),
- 'rsa': (
- "AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz"
- "emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD"
- "c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q"
- "7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhT"
- "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
- "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
- ),
-}
-
-TEST_OPTIONS = (
- "no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
- 'command="echo \'Please login as the user \"ubuntu\" rather than the'
- 'user \"root\".\';echo;sleep 10"')
-
-
-class TestAuthKeyLineParser(test_helpers.TestCase):
- def test_simple_parse(self):
- # test key line with common 3 fields (keytype, base64, comment)
- parser = ssh_util.AuthKeyLineParser()
- for ktype in ['rsa', 'ecdsa', 'dsa']:
- content = VALID_CONTENT[ktype]
- comment = 'user-%s@host' % ktype
- line = ' '.join((ktype, content, comment,))
- key = parser.parse(line)
-
- self.assertEqual(key.base64, content)
- self.assertFalse(key.options)
- self.assertEqual(key.comment, comment)
- self.assertEqual(key.keytype, ktype)
-
- def test_parse_no_comment(self):
- # test key line with key type and base64 only
- parser = ssh_util.AuthKeyLineParser()
- for ktype in ['rsa', 'ecdsa', 'dsa']:
- content = VALID_CONTENT[ktype]
- line = ' '.join((ktype, content,))
- key = parser.parse(line)
-
- self.assertEqual(key.base64, content)
- self.assertFalse(key.options)
- self.assertFalse(key.comment)
- self.assertEqual(key.keytype, ktype)
-
- def test_parse_with_keyoptions(self):
- # test key line with options in it
- parser = ssh_util.AuthKeyLineParser()
- options = TEST_OPTIONS
- for ktype in ['rsa', 'ecdsa', 'dsa']:
- content = VALID_CONTENT[ktype]
- comment = 'user-%s@host' % ktype
- line = ' '.join((options, ktype, content, comment,))
- key = parser.parse(line)
-
- self.assertEqual(key.base64, content)
- self.assertEqual(key.options, options)
- self.assertEqual(key.comment, comment)
- self.assertEqual(key.keytype, ktype)
-
- def test_parse_with_options_passed_in(self):
- # test key line with key type and base64 only
- parser = ssh_util.AuthKeyLineParser()
-
- baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host"))
- myopts = "no-port-forwarding,no-agent-forwarding"
-
- key = parser.parse("allowedopt" + " " + baseline)
- self.assertEqual(key.options, "allowedopt")
-
- key = parser.parse("overridden_opt " + baseline, options=myopts)
- self.assertEqual(key.options, myopts)
-
- def test_parse_invalid_keytype(self):
- parser = ssh_util.AuthKeyLineParser()
- key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']]))
-
- self.assertFalse(key.valid())
-
-
-class TestParseSSHConfig(test_helpers.TestCase):
-
- def setUp(self):
- self.load_file_patch = patch('cloudinit.ssh_util.util.load_file')
- self.load_file = self.load_file_patch.start()
- self.isfile_patch = patch('cloudinit.ssh_util.os.path.isfile')
- self.isfile = self.isfile_patch.start()
- self.isfile.return_value = True
-
- def tearDown(self):
- self.load_file_patch.stop()
- self.isfile_patch.stop()
-
- def test_not_a_file(self):
- self.isfile.return_value = False
- self.load_file.side_effect = IOError
- ret = ssh_util.parse_ssh_config('not a real file')
- self.assertEqual([], ret)
-
- def test_empty_file(self):
- self.load_file.return_value = ''
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual([], ret)
-
- def test_comment_line(self):
- comment_line = '# This is a comment'
- self.load_file.return_value = comment_line
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual(1, len(ret))
- self.assertEqual(comment_line, ret[0].line)
-
- def test_blank_lines(self):
- lines = ['', '\t', ' ']
- self.load_file.return_value = '\n'.join(lines)
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual(len(lines), len(ret))
- for line in ret:
- self.assertEqual('', line.line)
-
- def test_lower_case_config(self):
- self.load_file.return_value = 'foo bar'
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
-
- def test_upper_case_config(self):
- self.load_file.return_value = 'Foo Bar'
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('Bar', ret[0].value)
-
- def test_lower_case_with_equals(self):
- self.load_file.return_value = 'foo=bar'
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
-
- def test_upper_case_with_equals(self):
- self.load_file.return_value = 'Foo=bar'
- ret = ssh_util.parse_ssh_config('some real file')
- self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
deleted file mode 100644
index 94b6e061..00000000
--- a/tests/unittests/test_templating.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-from . import helpers as test_helpers
-import textwrap
-
-from cloudinit import templater
-
-try:
- import Cheetah
- HAS_CHEETAH = True
- Cheetah # make pyflakes happy, as Cheetah is not used here
-except ImportError:
- HAS_CHEETAH = False
-
-
-class TestTemplates(test_helpers.TestCase):
- def test_render_basic(self):
- in_data = textwrap.dedent("""
- ${b}
-
- c = d
- """)
- in_data = in_data.strip()
- expected_data = textwrap.dedent("""
- 2
-
- c = d
- """)
- out_data = templater.basic_render(in_data, {'b': 2})
- self.assertEqual(expected_data.strip(), out_data)
-
- @test_helpers.skipIf(not HAS_CHEETAH, 'cheetah renderer not available')
- def test_detection(self):
- blob = "## template:cheetah"
-
- (template_type, renderer, contents) = templater.detect_template(blob)
- self.assertIn("cheetah", template_type)
- self.assertEqual("", contents.strip())
-
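-        # without a '## template:' header, detection defaults to cheetah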
- blob = "blahblah $blah"
- (template_type, renderer, contents) = templater.detect_template(blob)
- self.assertIn("cheetah", template_type)
- self.assertEqual(blob, contents)
-
- blob = '##template:something-new'
- self.assertRaises(ValueError, templater.detect_template, blob)
-
- def test_render_cheetah(self):
- blob = '''## template:cheetah
-$a,$b'''
- c = templater.render_string(blob, {"a": 1, "b": 2})
- self.assertEqual("1,2", c)
-
- def test_render_jinja(self):
- blob = '''## template:jinja
-{{a}},{{b}}'''
- c = templater.render_string(blob, {"a": 1, "b": 2})
- self.assertEqual("1,2", c)
-
- def test_render_default(self):
- blob = '''$a,$b'''
- c = templater.render_string(blob, {"a": 1, "b": 2})
- self.assertEqual("1,2", c)
-
- def test_render_basic_deeper(self):
- hn = 'myfoohost.yahoo.com'
- expected_data = "h=%s\nc=d\n" % hn
- in_data = "h=$hostname.canonical_name\nc=d\n"
- params = {
- "hostname": {
- "canonical_name": hn,
- },
- }
- out_data = templater.render_string(in_data, params)
- self.assertEqual(expected_data, out_data)
-
- def test_render_basic_no_parens(self):
- hn = "myfoohost"
- in_data = "h=$hostname\nc=d\n"
- expected_data = "h=%s\nc=d\n" % hn
- out_data = templater.basic_render(in_data, {'hostname': hn})
- self.assertEqual(expected_data, out_data)
-
- def test_render_basic_parens(self):
- hn = "myfoohost"
- in_data = "h = ${hostname}\nc=d\n"
- expected_data = "h = %s\nc=d\n" % hn
- out_data = templater.basic_render(in_data, {'hostname': hn})
- self.assertEqual(expected_data, out_data)
-
- def test_render_basic2(self):
- mirror = "mymirror"
- codename = "zany"
- in_data = "deb $mirror $codename-updates main contrib non-free"
- ex_data = "deb %s %s-updates main contrib non-free" % (mirror,
- codename)
-
- out_data = templater.basic_render(in_data,
- {'mirror': mirror,
- 'codename': codename})
- self.assertEqual(ex_data, out_data)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
deleted file mode 100644
index 37a984ac..00000000
--- a/tests/unittests/test_util.py
+++ /dev/null
@@ -1,489 +0,0 @@
-from __future__ import print_function
-
-import logging
-import os
-import shutil
-import stat
-import tempfile
-
-import six
-import yaml
-
-from cloudinit import importer, util
-from . import helpers
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-
-class FakeSelinux(object):
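-    """Minimal stand-in for the selinux module used via util.SeLinuxGuard."""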
-
- def __init__(self, match_what):
- self.match_what = match_what
- self.restored = []
-
- def matchpathcon(self, path, mode):
- if path == self.match_what:
- return
- else:
- raise OSError("No match!")
-
- def is_selinux_enabled(self):
- return True
-
- def restorecon(self, path, recursive):
- self.restored.append(path)
-
-
-class TestGetCfgOptionListOrStr(helpers.TestCase):
- def test_not_found_no_default(self):
- """None is returned if key is not found and no default given."""
- config = {}
- result = util.get_cfg_option_list(config, "key")
- self.assertEqual(None, result)
-
- def test_not_found_with_default(self):
- """Default is returned if key is not found."""
- config = {}
- result = util.get_cfg_option_list(config, "key", default=["DEFAULT"])
- self.assertEqual(["DEFAULT"], result)
-
- def test_found_with_default(self):
- """Default is not returned if key is found."""
- config = {"key": ["value1"]}
- result = util.get_cfg_option_list(config, "key", default=["DEFAULT"])
- self.assertEqual(["value1"], result)
-
- def test_found_convert_to_list(self):
- """Single string is converted to one element list."""
- config = {"key": "value1"}
- result = util.get_cfg_option_list(config, "key")
- self.assertEqual(["value1"], result)
-
- def test_value_is_none(self):
- """If value is None empty list is returned."""
- config = {"key": None}
- result = util.get_cfg_option_list(config, "key")
- self.assertEqual([], result)
-
-
-class TestWriteFile(helpers.TestCase):
- def setUp(self):
- super(TestWriteFile, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_basic_usage(self):
- """Verify basic usage with default args."""
- path = os.path.join(self.tmp, "NewFile.txt")
- contents = "Hey there"
-
- util.write_file(path, contents)
-
- self.assertTrue(os.path.exists(path))
- self.assertTrue(os.path.isfile(path))
- with open(path) as f:
- create_contents = f.read()
- self.assertEqual(contents, create_contents)
- file_stat = os.stat(path)
- self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
-
- def test_dir_is_created_if_required(self):
- """Verifiy that directories are created is required."""
- dirname = os.path.join(self.tmp, "subdir")
- path = os.path.join(dirname, "NewFile.txt")
- contents = "Hey there"
-
- util.write_file(path, contents)
-
- self.assertTrue(os.path.isdir(dirname))
- self.assertTrue(os.path.isfile(path))
-
- def test_custom_mode(self):
- """Verify custom mode works properly."""
- path = os.path.join(self.tmp, "NewFile.txt")
- contents = "Hey there"
-
- util.write_file(path, contents, mode=0o666)
-
- self.assertTrue(os.path.exists(path))
- self.assertTrue(os.path.isfile(path))
- file_stat = os.stat(path)
- self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode))
-
- def test_custom_omode(self):
- """Verify custom omode works properly."""
- path = os.path.join(self.tmp, "NewFile.txt")
- contents = "Hey there"
-
- # Create file first with basic content
- with open(path, "wb") as f:
- f.write(b"LINE1\n")
- util.write_file(path, contents, omode="a")
-
- self.assertTrue(os.path.exists(path))
- self.assertTrue(os.path.isfile(path))
- with open(path) as f:
- create_contents = f.read()
- self.assertEqual("LINE1\nHey there", create_contents)
-
- def test_restorecon_if_possible_is_called(self):
- """Make sure the selinux guard is called correctly."""
- my_file = os.path.join(self.tmp, "my_file")
- with open(my_file, "w") as fp:
- fp.write("My Content")
-
- fake_se = FakeSelinux(my_file)
-
- with mock.patch.object(importer, 'import_module',
- return_value=fake_se) as mockobj:
- with util.SeLinuxGuard(my_file) as is_on:
- self.assertTrue(is_on)
-
- self.assertEqual(1, len(fake_se.restored))
- self.assertEqual(my_file, fake_se.restored[0])
-
- mockobj.assert_called_once_with('selinux')
-
-
-class TestDeleteDirContents(helpers.TestCase):
- def setUp(self):
- super(TestDeleteDirContents, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def assertDirEmpty(self, dirname):
- self.assertEqual([], os.listdir(dirname))
-
- def test_does_not_delete_dir(self):
- """Ensure directory itself is not deleted."""
- util.delete_dir_contents(self.tmp)
-
- self.assertTrue(os.path.isdir(self.tmp))
- self.assertDirEmpty(self.tmp)
-
- def test_deletes_files(self):
- """Single file should be deleted."""
- with open(os.path.join(self.tmp, "new_file.txt"), "wb") as f:
- f.write(b"DELETE ME")
-
- util.delete_dir_contents(self.tmp)
-
- self.assertDirEmpty(self.tmp)
-
- def test_deletes_empty_dirs(self):
- """Empty directories should be deleted."""
- os.mkdir(os.path.join(self.tmp, "new_dir"))
-
- util.delete_dir_contents(self.tmp)
-
- self.assertDirEmpty(self.tmp)
-
- def test_deletes_nested_dirs(self):
- """Nested directories should be deleted."""
- os.mkdir(os.path.join(self.tmp, "new_dir"))
- os.mkdir(os.path.join(self.tmp, "new_dir", "new_subdir"))
-
- util.delete_dir_contents(self.tmp)
-
- self.assertDirEmpty(self.tmp)
-
- def test_deletes_non_empty_dirs(self):
- """Non-empty directories should be deleted."""
- os.mkdir(os.path.join(self.tmp, "new_dir"))
- f_name = os.path.join(self.tmp, "new_dir", "new_file.txt")
- with open(f_name, "wb") as f:
- f.write(b"DELETE ME")
-
- util.delete_dir_contents(self.tmp)
-
- self.assertDirEmpty(self.tmp)
-
- def test_deletes_symlinks(self):
- """Symlinks should be deleted."""
- file_name = os.path.join(self.tmp, "new_file.txt")
- link_name = os.path.join(self.tmp, "new_file_link.txt")
- with open(file_name, "wb") as f:
- f.write(b"DELETE ME")
- os.symlink(file_name, link_name)
-
- util.delete_dir_contents(self.tmp)
-
- self.assertDirEmpty(self.tmp)
-
-
-class TestKeyValStrings(helpers.TestCase):
- def test_keyval_str_to_dict(self):
- expected = {'1': 'one', '2': 'one+one', 'ro': True}
- cmdline = "1=one ro 2=one+one"
- self.assertEqual(expected, util.keyval_str_to_dict(cmdline))
-
-
-class TestGetCmdline(helpers.TestCase):
- def test_cmdline_reads_debug_env(self):
- os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123'
- self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline())
-
-
-class TestLoadYaml(helpers.TestCase):
- mydefault = "7b03a8ebace993d806255121073fed52"
-
- def test_simple(self):
- mydata = {'1': "one", '2': "two"}
- self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata)
-
- def test_nonallowed_returns_default(self):
- # for now, anything not in the allowed list just returns the default.
- myyaml = yaml.dump({'1': "one"})
- self.assertEqual(util.load_yaml(blob=myyaml,
- default=self.mydefault,
- allowed=(str,)),
- self.mydefault)
-
- def test_bogus_returns_default(self):
- badyaml = "1\n 2:"
- self.assertEqual(util.load_yaml(blob=badyaml,
- default=self.mydefault),
- self.mydefault)
-
- def test_unsafe_types(self):
- # should not load complex types
- unsafe_yaml = yaml.dump((1, 2, 3,))
- self.assertEqual(util.load_yaml(blob=unsafe_yaml,
- default=self.mydefault),
- self.mydefault)
-
- def test_python_unicode(self):
-        # the python/unicode complex type is explicitly allowed
- myobj = {'1': six.text_type("FOOBAR")}
- safe_yaml = yaml.dump(myobj)
- self.assertEqual(util.load_yaml(blob=safe_yaml,
- default=self.mydefault),
- myobj)
-
-
-class TestMountinfoParsing(helpers.ResourceUsingTestCase):
- def test_invalid_mountinfo(self):
- line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
- "rw,errors=remount-ro,data=ordered")
- elements = line.split()
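-        # mountinfo lines need at least 10 fields; anything shorter
-        # must fail to parse and yield None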
- for i in range(len(elements) + 1):
- lines = [' '.join(elements[0:i])]
- if i < 10:
- expected = None
- else:
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
-
- def test_precise_ext4_root(self):
-
- lines = self.readResource('mountinfo_precise_ext4.txt').splitlines()
-
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
-
- expected = ('/dev/md0', 'ext4', '/boot')
- self.assertEqual(expected, util.parse_mount_info('/boot', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
-
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/home', lines))
- self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
-
- expected = ('tmpfs', 'tmpfs', '/run')
- self.assertEqual(expected, util.parse_mount_info('/run', lines))
-
- expected = ('none', 'tmpfs', '/run/lock')
- self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
-
- def test_raring_btrfs_root(self):
- lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines()
-
- expected = ('/dev/vda1', 'btrfs', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
-
- expected = ('/dev/vda1', 'btrfs', '/home')
- self.assertEqual(expected, util.parse_mount_info('/home', lines))
- self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
-
- expected = ('tmpfs', 'tmpfs', '/run')
- self.assertEqual(expected, util.parse_mount_info('/run', lines))
-
- expected = ('none', 'tmpfs', '/run/lock')
- self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
-
-
-class TestReadDMIData(helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestReadDMIData, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
- self.patchOS(self.new_root)
- self.patchUtils(self.new_root)
-
- def _create_sysfs_parent_directory(self):
- util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
-
- def _create_sysfs_file(self, key, content):
- """Mocks the sys path found on Linux systems."""
- self._create_sysfs_parent_directory()
- dmi_key = "/sys/class/dmi/id/{0}".format(key)
- util.write_file(dmi_key, content)
-
- def _configure_dmidecode_return(self, key, content, error=None):
- """
-        To test a missing sys path and call-outs to dmidecode, this
-        function fakes the results that dmidecode would return.
- """
- def _dmidecode_subp(cmd):
- if cmd[-1] != key:
- raise util.ProcessExecutionError()
- return (content, error)
-
- self.patched_funcs.enter_context(
- mock.patch.object(util, 'which', lambda _: True))
- self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', _dmidecode_subp))
-
- def patch_mapping(self, new_mapping):
- self.patched_funcs.enter_context(
- mock.patch('cloudinit.util.DMIDECODE_TO_DMI_SYS_MAPPING',
- new_mapping))
-
- def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
- self.patch_mapping({'mapped-key': 'mapped-value'})
- expected_dmi_value = 'sys-used-correctly'
- self._create_sysfs_file('mapped-value', expected_dmi_value)
- self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
- self.assertEqual(expected_dmi_value, util.read_dmi_data('mapped-key'))
-
- def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
- self.patch_mapping({})
- self._create_sysfs_parent_directory()
- expected_dmi_value = 'dmidecode-used'
- self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
- self.assertEqual(expected_dmi_value,
- util.read_dmi_data('use-dmidecode'))
-
- def test_none_returned_if_neither_source_has_data(self):
- self.patch_mapping({})
- self._configure_dmidecode_return('key', 'value')
- self.assertEqual(None, util.read_dmi_data('expect-fail'))
-
- def test_none_returned_if_dmidecode_not_in_path(self):
- self.patched_funcs.enter_context(
- mock.patch.object(util, 'which', lambda _: False))
- self.patch_mapping({})
- self.assertEqual(None, util.read_dmi_data('expect-fail'))
-
- def test_dots_returned_instead_of_foxfox(self):
-        # uninitialized dmi values show as \xff; expect the empty string back
- my_len = 32
- dmi_value = b'\xff' * my_len + b'\n'
- expected = ""
- dmi_key = 'system-product-name'
- sysfs_key = 'product_name'
- self._create_sysfs_file(sysfs_key, dmi_value)
- self.assertEqual(expected, util.read_dmi_data(dmi_key))
-
-
-class TestMultiLog(helpers.FilesystemMockingTestCase):
-
- def _createConsole(self, root):
- os.mkdir(os.path.join(root, 'dev'))
- open(os.path.join(root, 'dev', 'console'), 'a').close()
-
- def setUp(self):
- super(TestMultiLog, self).setUp()
- self.root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.root)
- self.patchOS(self.root)
- self.patchUtils(self.root)
- self.patchOpen(self.root)
- self.stdout = six.StringIO()
- self.stderr = six.StringIO()
- self.patchStdoutAndStderr(self.stdout, self.stderr)
-
- def test_stderr_used_by_default(self):
- logged_string = 'test stderr output'
- util.multi_log(logged_string)
- self.assertEqual(logged_string, self.stderr.getvalue())
-
- def test_stderr_not_used_if_false(self):
- util.multi_log('should not see this', stderr=False)
- self.assertEqual('', self.stderr.getvalue())
-
- def test_logs_go_to_console_by_default(self):
- self._createConsole(self.root)
- logged_string = 'something very important'
- util.multi_log(logged_string)
- self.assertEqual(logged_string, open('/dev/console').read())
-
- def test_logs_dont_go_to_stdout_if_console_exists(self):
- self._createConsole(self.root)
- util.multi_log('something')
- self.assertEqual('', self.stdout.getvalue())
-
- def test_logs_go_to_stdout_if_console_does_not_exist(self):
- logged_string = 'something very important'
- util.multi_log(logged_string)
- self.assertEqual(logged_string, self.stdout.getvalue())
-
- def test_logs_go_to_log_if_given(self):
- log = mock.MagicMock()
- logged_string = 'something very important'
- util.multi_log(logged_string, log=log)
- self.assertEqual([((mock.ANY, logged_string), {})],
- log.log.call_args_list)
-
- def test_newlines_stripped_from_log_call(self):
- log = mock.MagicMock()
- expected_string = 'something very important'
- util.multi_log('{0}\n'.format(expected_string), log=log)
- self.assertEqual((mock.ANY, expected_string), log.log.call_args[0])
-
- def test_log_level_defaults_to_debug(self):
- log = mock.MagicMock()
- util.multi_log('message', log=log)
- self.assertEqual((logging.DEBUG, mock.ANY), log.log.call_args[0])
-
- def test_given_log_level_used(self):
- log = mock.MagicMock()
- log_level = mock.Mock()
- util.multi_log('message', log=log, log_level=log_level)
- self.assertEqual((log_level, mock.ANY), log.log.call_args[0])
-
-
-class TestMessageFromString(helpers.TestCase):
-
- def test_unicode_not_messed_up(self):
- roundtripped = util.message_from_string(u'\n').as_string()
- self.assertNotIn('\x00', roundtripped)
-
-
-class TestReadSeeded(helpers.TestCase):
- def setUp(self):
- super(TestReadSeeded, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_unicode_not_messed_up(self):
- ud = b"userdatablob"
- helpers.populate_dir(
- self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
- sdir = self.tmp + os.path.sep
- (found_md, found_ud) = util.read_seeded(sdir)
-
- self.assertEqual(found_md, {'key1': 'val1'})
- self.assertEqual(found_ud, ud)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
deleted file mode 100644
index d5c7367b..00000000
--- a/tests/unittests/test_vmware_config_file.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import sys
-import unittest
-
-from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-
-logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
-logger = logging.getLogger(__name__)
-
-
-class TestVmwareConfigFile(unittest.TestCase):
-
- def test_utility_methods(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf.clear()
-
- self.assertEqual(0, len(cf), "clear size")
-
- cf._insertKey(" PASSWORD|-PASS ", " foo ")
- cf._insertKey("BAR", " ")
-
- self.assertEqual(2, len(cf), "insert size")
- self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
- self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
- self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
- "keepPassword")
- self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"),
- "removePassword")
- self.assertFalse("FOO" in cf, "hasFoo")
- self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
- self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
- self.assertTrue("BAR" in cf, "hasBar")
- self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
- self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
-
- def test_configfile_static_2nics(self):
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- conf = Config(cf)
-
- self.assertEqual('myhost1', conf.host_name, "hostName")
- self.assertEqual('Africa/Abidjan', conf.timezone, "tz")
- self.assertTrue(conf.utc, "utc")
-
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- conf.name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- conf.dns_suffixes,
- "suffixes")
-
- nics = conf.nics
- ipv40 = nics[0].staticIpv4
-
- self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
- self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
- self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0")
- self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0")
- self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
- self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0")
- self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1")
-
- self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
- self.assertEqual('fc00:10:20:87::154',
- nics[0].staticIpv6[0].ip,
- "ipv6Addr0")
-
- self.assertEqual('NIC2', nics[1].name, "nic1")
- self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
-
- def test_config_file_dhcp_2nics(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- conf = Config(cf)
- nics = conf.nics
- self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
- self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
diff --git a/tools/21-cloudinit.conf b/tools/21-cloudinit.conf
deleted file mode 100644
index c65325c1..00000000
--- a/tools/21-cloudinit.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-# Log cloudinit generated log messages to file
-:syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-init.log
-
-# comment out the following line to allow CLOUDINIT messages through.
-# Doing so means you'll also get CLOUDINIT messages in /var/log/syslog
-& ~
diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh
deleted file mode 100755
index 8e0469ed..00000000
--- a/tools/Z99-cloud-locale-test.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/sh
-# vi: ts=4 noexpandtab
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-# Author: Scott Moser <scott.moser@ubuntu.com>
-# (c) 2012, Canonical Group, Ltd.
-#
-# Purpose: Detect invalid locale settings and inform the user
-# of how to fix them.
-#
-
-locale_warn() {
- local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
- local w1 w2 w3 w4 remain
-
- # if shell is zsh, act like sh only for this function (-L).
-    # The behavior change will not permanently affect the user's shell.
- [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh
-
- # locale is expected to output either:
- # VARIABLE=
- # VARIABLE="value"
- # locale: Cannot set LC_SOMETHING to default locale
- while read -r w1 w2 w3 w4 remain; do
- case "$w1" in
- locale:) bad_names="${bad_names} ${w4}";;
- *)
- key=${w1%%=*}
- val=${w1#*=}
- val=${val#\"}
- val=${val%\"}
- vars="${vars} $key=$val";;
- esac
- done
- for bad in $bad_names; do
- for var in ${vars}; do
- [ "${bad}" = "${var%=*}" ] || continue
- val=${var#*=}
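-            # append ${val} to bad_lcs only if it is not already listed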
- [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] &&
- bad_lcs="${bad_lcs} ${val}"
- bad_kv="${bad_kv} $bad=$val"
- break
- done
- done
- bad_lcs=${bad_lcs# }
- bad_kv=${bad_kv# }
- [ -n "$bad_lcs" ] || return 0
-
- printf "_____________________________________________________________________\n"
- printf "WARNING! Your environment specifies an invalid locale.\n"
- printf " The unknown environment variables are:\n %s\n" "$bad_kv"
- printf " This can affect your user experience significantly, including the\n"
- printf " ability to manage packages. You may install the locales by running:\n\n"
-
- local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
- local pkgs=""
- if [ -e "$sfile" ]; then
- for bad in ${bad_lcs}; do
- grep -q -i "${bad}" "$sfile" &&
- to_gen="${to_gen} ${bad}" ||
- invalid="${invalid} ${bad}"
- done
- else
- printf " sudo apt-get install locales\n"
- to_gen=$bad_lcs
- fi
- to_gen=${to_gen# }
-
- local pkgs=""
- for bad in ${to_gen}; do
- pkgs="${pkgs} language-pack-${bad%%_*}"
- done
- pkgs=${pkgs# }
-
- if [ -n "${pkgs}" ]; then
- printf " sudo apt-get install ${pkgs# }\n"
- printf " or\n"
- printf " sudo locale-gen ${to_gen# }\n"
- printf "\n"
- fi
- for bad in ${invalid}; do
- printf "WARNING: '${bad}' is an invalid locale\n"
- done
-
- printf "To see all available language packs, run:\n"
- printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n"
- printf "To disable this message for all users, run:\n"
- printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n"
- printf "_____________________________________________________________________\n\n"
-
- # only show the message once
- : > ~/.cloud-locale-test.skip 2>/dev/null || :
-}
-
-[ -f ~/.cloud-locale-test.skip -o -f /var/lib/cloud/instance/locale-check.skip ] ||
- locale 2>&1 | locale_warn
-
-unset locale_warn
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
deleted file mode 100755
index 8436498e..00000000
--- a/tools/build-on-freebsd
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/sh
-# Since there is no official FreeBSD port yet, we need some way of building
-# and installing cloud-init. This script takes care of both, and can
-# optionally do a first run at the end.
-
-fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
-
-# Check dependencies:
-depschecked=/tmp/c-i.dependencieschecked
-pkgs="
- dmidecode
- e2fsprogs
- gpart
- py27-Jinja2
- py27-argparse
- py27-boto
- py27-cheetah
- py27-configobj
- py27-jsonpatch
- py27-jsonpointer
- py27-oauth
- py27-prettytable
- py27-requests
- py27-serial
- py27-six
- py27-yaml
- python
- sudo
-"
-[ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages"
-touch $depschecked
-
-# Required but unavailable port/pkg: py27-jsonpatch py27-jsonpointer
-# Luckily, the install step will take care of this by installing them from pypi...
-
-# Build the code and install in /usr/local/:
-python setup.py build
-python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd
-
-# Install the correct config file:
-cp config/cloud.cfg-freebsd /usr/local/etc/cloud/cloud.cfg
-
-# Enable cloud-init in /etc/rc.conf:
-sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf
-echo 'cloudinit_enable="YES"' >> /etc/rc.conf
-
-echo "Installation completed."
-
-if [ "$1" = "run" ]; then
- echo "Ok, now let's see if it works."
-
- # Backup SSH keys
- mv /etc/ssh/ssh_host_* /tmp/
-
- # Remove old metadata
- rm -rf /var/lib/cloud
-
- # Just log everything, quick&dirty
- rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg
-
- # Start:
- /usr/local/etc/rc.d/cloudinit start
-
- # Restore SSH keys
- mv /tmp/ssh_host_* /etc/ssh/
-fi
diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug
deleted file mode 100755
index 1f08e0cb..00000000
--- a/tools/ccfg-merge-debug
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/python
-
-from cloudinit import handlers
-from cloudinit.handlers import cloud_config as cc_part
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import user_data as ud
-
-import argparse
-import os
-import shutil
-import tempfile
-
-
-def main():
- parser = argparse.ArgumentParser(
- description='test cloud-config merging')
- parser.add_argument("--output", "-o", metavar="file",
- help="specify output file", default="-")
- parser.add_argument('--verbose', '-v', action='count', default=0)
- parser.add_argument('files', nargs='+')
-
- args = parser.parse_args()
-
- if args.verbose:
- level = (logging.WARN, logging.INFO,
- logging.DEBUG)[min(args.verbose, 2)]
- logging.setupBasicLogging(level)
-
- outfile = args.output
- if args.output == "-":
- outfile = "/dev/stdout"
-
- tempd = tempfile.mkdtemp()
- handler_dir = os.path.join(tempd, "hdir")
- data = None # the 'init' object
- frequency = PER_INSTANCE
-
- paths = helpers.Paths({})
-
- # make a '#include <f1>' style
- udproc = ud.UserDataProcessor(paths=paths)
- user_data_msg = udproc.process("#include\n" +
- '\n'.join([os.path.abspath(f) for f in args.files]))
-
- ccph = cc_part.CloudConfigPartHandler(paths=paths)
- ccph.cloud_fn = outfile
-
- c_handlers = helpers.ContentHandlers()
- c_handlers.register(ccph)
-
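-    # invoke each handler's begin hook once, even if it is registered
-    # for multiple content types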
- called = []
- for (_ctype, mod) in c_handlers.items():
- if mod in called:
- continue
- handlers.call_begin(mod, data, frequency)
- called.append(mod)
-
- # Walk the user data
- part_data = {
- 'handlers': c_handlers,
-        # Any new handlers that are encountered get written here
- 'handlerdir': handler_dir,
- 'data': data,
- # The default frequency if handlers don't have one
- 'frequency': frequency,
- # This will be used when new handlers are found
-        # to help write their contents to files with numbered
- # names...
- 'handlercount': 0,
- 'excluded': [],
- }
-
- handlers.walk(user_data_msg, handlers.walker_callback, data=part_data)
-
- # Give callbacks opportunity to finalize
- called = []
- for (_ctype, mod) in c_handlers.items():
- if mod in called:
- continue
- handlers.call_end(mod, data, frequency)
- called.append(mod)
-
- shutil.rmtree(tempd)
-
-if __name__ == "__main__":
- main()
-
-# vi: ts=4 expandtab
diff --git a/tools/cloud-init-per b/tools/cloud-init-per
deleted file mode 100755
index 5d9a2864..00000000
--- a/tools/cloud-init-per
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/sh
-
-DATA_PRE="/var/lib/cloud/sem/bootper"
-INST_PRE="/var/lib/cloud/instance/sem/bootper"
-
-Usage() {
- cat <<EOF
-Usage: ${0##*/} frequency name cmd [ arg1 [ arg2 [ ... ] ] ]
- run cmd with arguments provided.
-
- This utility can make it easier to use boothooks or bootcmd
- on a per "once" or "always" basis.
-
- If frequency is:
- * once: run only once (do not re-run for new instance-id)
- * instance: run only the first boot for a given instance-id
- * always: run every boot
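-
-  Example (script path is illustrative):
-   ${0##*/} instance mysetup /usr/local/bin/mysetup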
-
-EOF
-}
-error() { echo "$@" 1>&2; }
-fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
-
-# support the old 'cloud-init-run-module freq name "execute" cmd arg1'
-# if < 3 arguments, it will fail below on usage.
-if [ "${0##*/}" = "cloud-init-run-module" ]; then
- if [ $# -le 2 -o "$3" = "execute" ]; then
- error "Warning: ${0##*/} is deprecated. Please use cloud-init-per."
- freq=$1; name=$2;
- [ $# -le 2 ] || shift 3;
- set -- "$freq" "$name" "$@"
- else
- fail "legacy cloud-init-run-module only supported with module 'execute'"
- fi
-fi
-
-[ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; }
-[ $# -ge 3 ] || { Usage 1>&2; exit 1; }
-freq=$1
-name=$2
-shift 2;
-
-[ "${name#*/}" = "${name}" ] || fail "name cannot contain a /"
-[ "$(id -u)" = "0" ] || fail "must be root"
-
-case "$freq" in
- once|always) sem="${DATA_PRE}.$name.$freq";;
- instance) sem="${INST_PRE}.$name.$freq";;
- *) Usage 1>&2; fail "invalid frequency: $freq";;
-esac
-
-[ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" ||
- fail "failed to make directory for ${sem}"
-
-[ "$freq" != "always" -a -e "$sem" ] && exit 0
-"$@"
-ret=$?
-printf "%s\t%s\n" "$ret" "$(date +%s)" > "$sem" ||
- fail "failed to write to $sem"
-exit $ret
diff --git a/tools/hacking.py b/tools/hacking.py
deleted file mode 100755
index 716c1154..00000000
--- a/tools/hacking.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012, Cloudscaling
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""cloudinit HACKING file compliance testing (based off of nova hacking.py)
-
-built on top of pep8.py
-"""
-
-import inspect
-import logging
-import re
-import sys
-
-import pep8
-
-# Don't need this for testing
-logging.disable('LOG')
-
-# N1xx comments
-# N2xx except
-# N3xx imports
-# N4xx docstrings
-# N[5-9]XX (future use)
-
-DOCSTRING_TRIPLE = ['"""', "'''"]
-VERBOSE_MISSING_IMPORT = False
-_missingImport = set([])
-
-
-def import_normalize(line):
- # convert "from x import y" to "import x.y"
- # handle "from x import y as z" to "import x.y as z"
- split_line = line.split()
- if (line.startswith("from ") and "," not in line and
- split_line[2] == "import" and split_line[3] != "*" and
- split_line[1] != "__future__" and
- (len(split_line) == 4 or (len(split_line) == 6 and
- split_line[4] == "as"))):
- return "import %s.%s" % (split_line[1], split_line[3])
- else:
- return line
-
-
-def cloud_import_alphabetical(physical_line, line_number, lines):
- """Check for imports in alphabetical order.
-
- HACKING guide recommendation for imports:
- imports in human alphabetical order
- N306
- """
- # handle import x
- # use .lower since capitalization shouldn't dictate order
- split_line = import_normalize(physical_line.strip()).lower().split()
- split_previous = import_normalize(lines[line_number - 2])
- split_previous = split_previous.strip().lower().split()
- # with or without "as y"
- length = [2, 4]
- if (len(split_line) in length and len(split_previous) in length and
- split_line[0] == "import" and split_previous[0] == "import"):
- if split_line[1] < split_previous[1]:
- return (0, "N306: imports not in alphabetical order (%s, %s)"
- % (split_previous[1], split_line[1]))
-
-
-def cloud_docstring_start_space(physical_line):
- """Check for docstring not start with space.
-
- HACKING guide recommendation for docstring:
- Docstring should not start with space
- N401
- """
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- if (pos != -1 and len(physical_line) > pos + 1):
- if (physical_line[pos + 3] == ' '):
- return (pos,
- "N401: one line docstring should not start with a space")
-
-
-def cloud_todo_format(physical_line):
- """Check for 'TODO()'.
-
- HACKING guide recommendation for TODO:
- Include your name with TODOs as in "#TODO(termie)"
- N101
- """
- pos = physical_line.find('TODO')
- pos1 = physical_line.find('TODO(')
- pos2 = physical_line.find('#') # make sure it's a comment
- if (pos != pos1 and pos2 >= 0 and pos2 < pos):
- return pos, "N101: Use TODO(NAME)"
-
-
-def cloud_docstring_one_line(physical_line):
-    """Check that a one-line docstring ends in a period.
-
-    HACKING guide recommendation for one-line docstrings:
-    A one line docstring looks like this and ends in a period.
- N402
- """
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
- if (pos != -1 and end and len(physical_line) > pos + 4):
- if (physical_line[-5] != '.'):
- return pos, "N402: one line docstring needs a period"
-
-
-def cloud_docstring_multiline_end(physical_line):
-    """Check that a multi-line docstring ends on a new line.
-
-    HACKING guide recommendation for docstrings:
-    a docstring should end on a new line
- N403
- """
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- if (pos != -1 and len(physical_line) == pos):
- if (physical_line[pos + 3] == ' '):
- return (pos, "N403: multi line docstring end on new line")
-
-
-current_file = ""
-
-
-def readlines(filename):
- """Record the current file being tested."""
- pep8.current_file = filename
- return open(filename).readlines()
-
-
-def add_cloud():
- """Monkey patch pep8 for cloud-init guidelines.
-
- Look for functions that start with cloud_
- and add them to pep8 module.
-
- Assumes you know how to write pep8.py checks
- """
- for name, function in globals().items():
- if not inspect.isfunction(function):
- continue
- if name.startswith("cloud_"):
-            setattr(pep8, name, function)
-
-if __name__ == "__main__":
- # NOVA based 'hacking.py' error codes start with an N
- pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
- add_cloud()
- pep8.current_file = current_file
- pep8.readlines = readlines
- try:
- pep8._main()
- finally:
- if len(_missingImport) > 0:
- print >> sys.stderr, ("%i imports missing in this test environment"
- % len(_missingImport))
diff --git a/tools/make-dist-tarball b/tools/make-dist-tarball
deleted file mode 100755
index 5b078515..00000000
--- a/tools/make-dist-tarball
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-
-Usage() {
- cat <<EOF
-Usage: ${0##*/} version
-    make a release tarball of 'version'
-    must be run in a bzr checkout, and 'version' must be an existing tag
-
-EOF
-}
-
-topdir="$PWD"
-tag="$1"
-
-[ -n "$tag" ] || { Usage 1>&2 ; exit 1; }
-
-out="${topdir}/cloud-init-${tag}.tar.gz"
-
-bzr export --format=tgz --root="cloud-init-$tag" \
- "--revision=tag:${tag}" "$out" "$topdir" &&
- echo "Wrote ${out}"
diff --git a/tools/make-mime.py b/tools/make-mime.py
deleted file mode 100755
index 72b29fb9..00000000
--- a/tools/make-mime.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/python
-
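-# Example usage (illustrative filenames): build a multipart MIME user-data
-# blob from a cloud-config part and a shell-script part:
-#   ./make-mime.py -a config.yaml:cloud-config -a boot.sh:x-shellscript > user-data
-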
-import argparse
-import sys
-
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
-
-KNOWN_CONTENT_TYPES = [
- 'text/x-include-once-url',
- 'text/x-include-url',
- 'text/cloud-config-archive',
- 'text/upstart-job',
- 'text/cloud-config',
- 'text/part-handler',
- 'text/x-shellscript',
- 'text/cloud-boothook',
-]
-
-
-def file_content_type(text):
- try:
- filename, content_type = text.split(":", 1)
- return (open(filename, 'r'), filename, content_type.strip())
-    except (ValueError, IOError):
-        raise argparse.ArgumentTypeError("Invalid value for %r" % (text))
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("-a", "--attach",
- dest="files",
- type=file_content_type,
- action='append',
- default=[],
- required=True,
- metavar="<file>:<content-type>",
- help="attach the given file in the specified "
- "content type")
- args = parser.parse_args()
- sub_messages = []
- for i, (fh, filename, format_type) in enumerate(args.files):
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition',
- 'attachment; filename="%s"' % (filename))
- content_type = sub_message.get_content_type().lower()
- if content_type not in KNOWN_CONTENT_TYPES:
- sys.stderr.write(("WARNING: content type %r for attachment %s "
- "may be incorrect!\n") % (content_type, i + 1))
- sub_messages.append(sub_message)
- combined_message = MIMEMultipart()
- for msg in sub_messages:
- combined_message.attach(msg)
- print(combined_message)
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/tools/make-tarball b/tools/make-tarball
deleted file mode 100755
index b7039150..00000000
--- a/tools/make-tarball
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-set -e
-
-find_root() {
- local topd
- if [ -z "${CLOUD_INIT_TOP_D}" ]; then
- topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
- else
- topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
- fi
- [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
- ROOT_DIR="$topd"
-}
-
-if ! find_root; then
- echo "Unable to locate 'setup.py' file that should" \
- "exist in the cloud-init root directory." 1>&2
- exit 1;
-fi
-
-REVNO=$(bzr revno "$ROOT_DIR")
-
-VERSION=$("$ROOT_DIR/tools/read-version")
-
-if [ ! -z "$1" ]; then
-    ARCHIVE_FN="$1"
-else
-    ARCHIVE_FN="$PWD/cloud-init-$VERSION~bzr$REVNO.tar.gz"
-fi
-
-export_uncommitted=""
-if [ "${UNCOMMITTED:-0}" != "0" ]; then
- export_uncommitted="--uncommitted"
-fi
-
-bzr export ${export_uncommitted} \
- --format=tgz --root="cloud-init-$VERSION~bzr$REVNO" \
- "--revision=${REVNO}" "${ARCHIVE_FN}" "$ROOT_DIR"
-
-echo "$ARCHIVE_FN"
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
deleted file mode 100755
index 1c746f17..00000000
--- a/tools/mock-meta.py
+++ /dev/null
@@ -1,454 +0,0 @@
-#!/usr/bin/python
-
-# Provides a somewhat random, somewhat compatible, somewhat useful mock version of
-# http://docs.amazonwebservices.com
-# /AWSEC2/2007-08-29/DeveloperGuide/AESDG-chapter-instancedata.htm
-
-"""
-To use this to mimic the EC2 metadata service entirely, run it like:
- # Where 'eth0' is *some* interface.
- sudo ifconfig eth0:0 169.254.169.254 netmask 255.255.255.255
-
- sudo ./mock-meta.py -a 169.254.169.254 -p 80
-
-Then:
- wget -q http://169.254.169.254/latest/meta-data/instance-id -O -; echo
- curl --silent http://169.254.169.254/latest/meta-data/instance-id ; echo
- ec2metadata --instance-id
-"""
-
-import functools
-import httplib
-import json
-import logging
-import os
-import random
-import string
-import sys
-import yaml
-
-from optparse import OptionParser
-
-from BaseHTTPServer import (HTTPServer, BaseHTTPRequestHandler)
-
-log = logging.getLogger('meta-server')
-
-EC2_VERSIONS = [
- '1.0',
- '2007-01-19',
- '2007-03-01',
- '2007-08-29',
- '2007-10-10',
- '2007-12-15',
- '2008-02-01',
- '2008-09-01',
- '2009-04-04',
-]
-
-BLOCK_DEVS = [
- 'ami',
- 'ephemeral0',
- 'root',
-]
-
-DEV_PREFIX = 'v'  # This seems to vary a lot depending on images...
-DEV_MAPPINGS = {
- 'ephemeral0': '%sda2' % (DEV_PREFIX),
- 'root': '/dev/%sda1' % (DEV_PREFIX),
- 'ami': '%sda1' % (DEV_PREFIX),
- 'swap': '%sda3' % (DEV_PREFIX),
-}
-
-META_CAPABILITIES = [
- 'aki-id',
- 'ami-id',
- 'ami-launch-index',
- 'ami-manifest-path',
- 'ari-id',
- 'block-device-mapping/',
- 'hostname',
- 'instance-action',
- 'instance-id',
- 'instance-type',
- 'local-hostname',
- 'local-ipv4',
- 'placement/',
- 'product-codes',
- 'public-hostname',
- 'public-ipv4',
- 'public-keys/',
- 'reservation-id',
- 'security-groups'
-]
-
-PUB_KEYS = {
- 'brickies': [
- ('ssh-rsa '
- 'AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemN'
- 'Sj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxz'
- 'xtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJ'
- 'tO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7'
- 'u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN'
- '+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== brickies'),
- '',
- ],
-}
-
-INSTANCE_TYPES = [
- 'm1.large',
- 'm1.medium',
- 'm1.small',
- 'm1.xlarge',
-]
-
-AVAILABILITY_ZONES = [
- "us-east-1a",
- "us-east-1b",
- "us-east-1c",
- "us-east-1d",
- 'eu-west-1a',
- 'eu-west-1b',
- 'us-west-1',
-]
-
-PLACEMENT_CAPABILITIES = {
- 'availability-zone': AVAILABILITY_ZONES,
-}
-
-NOT_IMPL_RESPONSE = json.dumps({})
-
-
-class WebException(Exception):
- def __init__(self, code, msg):
- Exception.__init__(self, msg)
- self.code = code
-
-
-def yamlify(data):
- formatted = yaml.dump(data,
- line_break="\n",
- indent=4,
- explicit_start=True,
- explicit_end=True,
- default_flow_style=False)
- return formatted
-
-
-def format_text(text):
- if not len(text):
- return "<<"
- lines = text.splitlines()
- nlines = []
- for line in lines:
- nlines.append("<< %s" % line)
- return "\n".join(nlines)
-
-
-def traverse(keys, mp):
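-    # e.g. traverse(['a', 'b'], {'a': {'b': 1}}) -> 1; returns None when a
-    # key along the path is missing or an intermediate value is not a dict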
- result = dict(mp)
- for k in keys:
- try:
- result = result.get(k)
- except (AttributeError, TypeError):
- result = None
- break
- return result
-
-
-ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)]
-
-
-def id_generator(size=6, lower=False):
- txt = ''.join(random.choice(ID_CHARS) for x in range(size))
- if lower:
- return txt.lower()
- else:
- return txt
-
-
-def get_ssh_keys():
- keys = {}
- keys.update(PUB_KEYS)
-
-    # Nice helper to add in the running user's key (if they have one)
- key_pth = os.path.expanduser('~/.ssh/id_rsa.pub')
- if not os.path.isfile(key_pth):
- key_pth = os.path.expanduser('~/.ssh/id_dsa.pub')
-
- if os.path.isfile(key_pth):
- with open(key_pth, 'rb') as fh:
- contents = fh.read()
- keys[os.getlogin()] = [contents, '']
-
- return keys
-
-
-class MetaDataHandler(object):
-
- def __init__(self, opts):
- self.opts = opts
- self.instances = {}
-
- def get_data(self, params, who, **kwargs):
- if not params:
- # Show the root level capabilities when
- # no params are passed...
- caps = sorted(META_CAPABILITIES)
- return "\n".join(caps)
- action = params[0]
- action = action.lower()
- if action == 'instance-id':
- return 'i-%s' % (id_generator(lower=True))
- elif action == 'ami-launch-index':
- return "%s" % random.choice([0, 1, 2, 3])
- elif action == 'aki-id':
- return 'aki-%s' % (id_generator(lower=True))
- elif action == 'ami-id':
- return 'ami-%s' % (id_generator(lower=True))
- elif action == 'ari-id':
- return 'ari-%s' % (id_generator(lower=True))
- elif action == 'block-device-mapping':
- nparams = params[1:]
- if not nparams:
- return "\n".join(BLOCK_DEVS)
- else:
- subvalue = traverse(nparams, DEV_MAPPINGS)
- if not subvalue:
- return "\n".join(sorted(list(DEV_MAPPINGS.keys())))
- else:
- return str(subvalue)
- elif action in ['hostname', 'local-hostname', 'public-hostname']:
-            # Just echo back their own hostname, i.e. the one they called in on...
- return "%s" % (who)
- elif action == 'instance-type':
- return random.choice(INSTANCE_TYPES)
- elif action == 'ami-manifest-path':
- return 'my-amis/spamd-image.manifest.xml'
- elif action == 'security-groups':
- return 'default'
- elif action in ['local-ipv4', 'public-ipv4']:
-            # Just echo back their own IP, i.e. the one they called in on...
- return "%s" % (kwargs.get('client_ip', '10.0.0.1'))
- elif action == 'reservation-id':
- return "r-%s" % (id_generator(lower=True))
- elif action == 'product-codes':
- return "%s" % (id_generator(size=8))
- elif action == 'public-keys':
- nparams = params[1:]
-            # This is a weird kludge (why, amazon, why!!!):
-            # public-keys is messed up; listing /latest/meta-data/public-keys/
-            # shows something like '0=brickies', but a GET of
-            # /latest/meta-data/public-keys/0=brickies will fail.
-            # You have to know to GET '/latest/meta-data/public-keys/0', which
-            # lists 'openssh-key', and then fetch that.
-            # This hunk of code just re-works the object for that.
- avail_keys = get_ssh_keys()
- key_ids = sorted(list(avail_keys.keys()))
- if nparams:
-                maybe_key = nparams[0]
-                try:
-                    key_id = int(maybe_key)
-                    key_name = key_ids[key_id]
-                except (ValueError, IndexError):
-                    raise WebException(httplib.BAD_REQUEST,
-                                       "Unknown key id %r" % maybe_key)
- # Extract the possible sub-params
- result = traverse(nparams[1:], {
- "openssh-key": "\n".join(avail_keys[key_name]),
- })
- if isinstance(result, (dict)):
- # TODO(harlowja): This might not be right??
- result = "\n".join(sorted(result.keys()))
- if not result:
- result = ''
- return result
- else:
- contents = []
- for (i, key_id) in enumerate(key_ids):
- contents.append("%s=%s" % (i, key_id))
- return "\n".join(contents)
- elif action == 'placement':
- nparams = params[1:]
- if not nparams:
- pcaps = sorted(PLACEMENT_CAPABILITIES.keys())
- return "\n".join(pcaps)
- else:
- pentry = nparams[0].strip().lower()
- if pentry == 'availability-zone':
- zones = PLACEMENT_CAPABILITIES[pentry]
- return "%s" % random.choice(zones)
- else:
- return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ''))
- else:
- log.warn(("Did not implement action %s, "
- "returning empty response: %r"),
- action, NOT_IMPL_RESPONSE)
- return NOT_IMPL_RESPONSE
-
-
-class UserDataHandler(object):
-
- def __init__(self, opts):
- self.opts = opts
-
- def _get_user_blob(self, **kwargs):
- blob = None
- if self.opts['user_data_file'] is not None:
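-            # despite the name, this option holds the file's contents,
-            # read once at startup in extract_opts()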
- blob = self.opts['user_data_file']
- if not blob:
- blob_mp = {
- 'hostname': kwargs.get('who', 'localhost'),
- }
- lines = [
- "#cloud-config",
- yamlify(blob_mp),
- ]
- blob = "\n".join(lines)
- return blob.strip()
-
- def get_data(self, params, who, **kwargs):
- if not params:
- return self._get_user_blob(who=who)
- return NOT_IMPL_RESPONSE
-
-
-# Seem to need to use globals since we can't pass
-# data into the request handler instances...
-# Puke!
-meta_fetcher = None
-user_fetcher = None
-
-
-class Ec2Handler(BaseHTTPRequestHandler):
-
- def _get_versions(self):
- versions = ['latest'] + EC2_VERSIONS
- versions = sorted(versions)
- return "\n".join(versions)
-
- def log_message(self, fmt, *args):
- msg = "%s - %s" % (self.address_string(), fmt % (args))
- log.info(msg)
-
- def _find_method(self, path):
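-        # Requested paths look like /<version>/<meta-data|user-data>/<params...>,
-        # e.g. /latest/meta-data/instance-id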
- # Puke! (globals)
- func_mapping = {
- 'user-data': user_fetcher.get_data,
- 'meta-data': meta_fetcher.get_data,
- }
- segments = [piece for piece in path.split('/') if len(piece)]
- log.info("Received segments %s", segments)
- if not segments:
- return self._get_versions
- date = segments[0].strip().lower()
- if date not in self._get_versions():
- raise WebException(httplib.BAD_REQUEST,
- "Unknown version format %r" % date)
- if len(segments) < 2:
- raise WebException(httplib.BAD_REQUEST, "No action provided")
- look_name = segments[1].lower()
- if look_name not in func_mapping:
- raise WebException(httplib.BAD_REQUEST,
- "Unknown requested data %r" % look_name)
- base_func = func_mapping[look_name]
- who = self.address_string()
- ip_from = self.client_address[0]
- if who == ip_from:
- # Nothing resolved, so just use 'localhost'
- who = 'localhost'
- kwargs = {
- 'params': list(segments[2:]),
- 'who': who,
- 'client_ip': ip_from,
- }
- return functools.partial(base_func, **kwargs)
-
- def _do_response(self):
- who = self.client_address
- log.info("Got a call from %s for path %s", who, self.path)
- try:
- func = self._find_method(self.path)
- data = func()
- if not data:
- data = ''
- self.send_response(httplib.OK)
- self.send_header("Content-Type", "binary/octet-stream")
- self.send_header("Content-Length", len(data))
- log.info("Sending data (len=%s):\n%s", len(data),
- format_text(data))
- self.end_headers()
- self.wfile.write(data)
- except RuntimeError as e:
- log.exception("Error somewhere in the server.")
- self.send_error(httplib.INTERNAL_SERVER_ERROR, message=str(e))
- except WebException as e:
- code = e.code
- log.exception(str(e))
- self.send_error(code, message=str(e))
-
- def do_GET(self):
- self._do_response()
-
- def do_POST(self):
- self._do_response()
-
-
-def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):
- root_logger = logging.getLogger()
- console_logger = logging.StreamHandler(sys.stdout)
- console_logger.setFormatter(logging.Formatter(fmt))
- root_logger.addHandler(console_logger)
- root_logger.setLevel(log_level)
-
-
-def extract_opts():
- parser = OptionParser()
- parser.add_option("-p", "--port", dest="port", action="store", type=int,
- default=80, metavar="PORT",
- help=("port from which to serve traffic"
- " (default: %default)"))
- parser.add_option("-a", "--addr", dest="address", action="store", type=str,
- default='0.0.0.0', metavar="ADDRESS",
- help=("address from which to serve traffic"
- " (default: %default)"))
- parser.add_option("-f", '--user-data-file', dest='user_data_file',
- action='store', metavar='FILE',
-                      help=("user data filename to serve back to "
-                            "incoming requests"))
- (options, args) = parser.parse_args()
- out = dict()
- out['extra'] = args
- out['port'] = options.port
- out['user_data_file'] = None
- out['address'] = options.address
- if options.user_data_file:
- if not os.path.isfile(options.user_data_file):
- parser.error("Option -f specified a non-existent file")
- with open(options.user_data_file, 'rb') as fh:
- out['user_data_file'] = fh.read()
- return out
-
-
-def setup_fetchers(opts):
- global meta_fetcher
- global user_fetcher
- meta_fetcher = MetaDataHandler(opts)
- user_fetcher = UserDataHandler(opts)
-
-
-def run_server():
- # Using global here since it doesn't seem like we
- # can pass opts into a request handler constructor...
- opts = extract_opts()
- setup_logging(logging.DEBUG)
- setup_fetchers(opts)
- log.info("CLI opts: %s", opts)
- server_address = (opts['address'], opts['port'])
- server = HTTPServer(server_address, Ec2Handler)
- sa = server.socket.getsockname()
- log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1])
- server.serve_forever()
-
-
-if __name__ == '__main__':
- run_server()
diff --git a/tools/motd-hook b/tools/motd-hook
deleted file mode 100755
index 8c482e8c..00000000
--- a/tools/motd-hook
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-#
-# 92-ec2-upgrade-available - update-motd script
-#
-# Copyright (C) 2010 Canonical Ltd.
-#
-# Authors: Scott Moser <smoser@ubuntu.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, version 3 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# Determining if updates are available is possibly slow, so
-# a cronjob runs occasionally and updates a file with information
-# on the latest available release (if newer than current).
-
-BUILD_FILE=/var/lib/cloud/data/available.build
-
-[ -s "${BUILD_FILE}" ] || exit 0
-
-read suite build_name name serial other < "${BUILD_FILE}"
-
-cat <<EOF
-A newer build of the Ubuntu ${suite} ${build_name} image is available.
-It is named '${name}' and has build serial '${serial}'.
-EOF
diff --git a/tools/read-dependencies b/tools/read-dependencies
deleted file mode 100755
index 6a6f3e12..00000000
--- a/tools/read-dependencies
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import re
-import sys
-
-if 'CLOUD_INIT_TOP_D' in os.environ:
- topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
-else:
- topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-
-for fname in ("setup.py", "requirements.txt"):
- if not os.path.isfile(os.path.join(topd, fname)):
-        sys.stderr.write("Unable to locate '%s' file that should "
-                         "exist in cloud-init root directory.\n" % fname)
- sys.exit(1)
-
-if len(sys.argv) > 1:
- reqfile = sys.argv[1]
-else:
- reqfile = "requirements.txt"
-
-with open(os.path.join(topd, reqfile), "r") as fp:
- for line in fp:
- if not line.strip() or line.startswith("#"):
- continue
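-        # strip any version specifier, e.g. "pyyaml>=3.10" -> "pyyaml"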
- sys.stdout.write(re.split("[>=.<]*", line)[0].strip() + "\n")
-
-sys.exit(0)
diff --git a/tools/read-version b/tools/read-version
deleted file mode 100755
index d02651e9..00000000
--- a/tools/read-version
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import re
-import sys
-
-if 'CLOUD_INIT_TOP_D' in os.environ:
- topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
-else:
- topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-
-for fname in ("setup.py", "ChangeLog"):
- if not os.path.isfile(os.path.join(topd, fname)):
-        sys.stderr.write("Unable to locate '%s' file that should "
-                         "exist in cloud-init root directory.\n" % fname)
- sys.exit(1)
-
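-# ChangeLog version stanzas begin with a header like "0.7.7:"
-# (a bare MAJOR.MINOR.PATCH followed by a colon)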
-vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
-
-with open(os.path.join(topd, "ChangeLog"), "r") as fp:
- for line in fp:
- if vermatch.match(line):
- sys.stdout.write(line.strip()[:-1] + "\n")
- break
-
-sys.exit(0)
diff --git a/tools/run-pep8 b/tools/run-pep8
deleted file mode 100755
index 4bd0bbfb..00000000
--- a/tools/run-pep8
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
-
-CR="
-"
-[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose=""
-
-set -f
-if [ $# -eq 0 ]; then unset IFS
- IFS="$CR"
- files=( "${bin_files[@]}" "${pycheck_dirs[@]}" )
- unset IFS
-else
- files=( "$@" )
-fi
-
-myname=${0##*/}
-cmd=( "${myname#run-}" $verbose "${files[@]}" )
-echo "Running: " "${cmd[@]}" 1>&2
-exec "${cmd[@]}"
diff --git a/tools/run-pyflakes b/tools/run-pyflakes
deleted file mode 100755
index b3759a94..00000000
--- a/tools/run-pyflakes
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-PYTHON_VERSION=${PYTHON_VERSION:-2}
-CR="
-"
-pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
-
-set -f
-if [ $# -eq 0 ]; then
- files=( "${pycheck_dirs[@]}" )
-else
- files=( "$@" )
-fi
-
-cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" )
-
-echo "Running: " "${cmd[@]}" 1>&2
-exec "${cmd[@]}"
diff --git a/tools/run-pyflakes3 b/tools/run-pyflakes3
deleted file mode 100755
index e9f0863d..00000000
--- a/tools/run-pyflakes3
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@"
diff --git a/tools/tox-venv b/tools/tox-venv
deleted file mode 100755
index 76ed5076..00000000
--- a/tools/tox-venv
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-
-error() { echo "$@" 1>&2; }
-fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
-Usage() {
- cat <<EOF
-Usage: ${0##*/} tox-environment [command [args]]
- run command with provided arguments in the provided tox environment
-    command defaults to \${SHELL:-/bin/bash}.
-
- invoke with '--list' to show available environments
-EOF
-}
-list_toxes() {
- local td="$1" pre="$2" d=""
-    ( cd "${td:-$tox_d}" &&
- for d in *; do [ -f "$d/bin/activate" ] && echo "${pre}$d"; done)
-}
-
-[ $# -eq 0 ] && { Usage 1>&2; exit 1; }
-[ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; }
-
-env="$1"
-shift
-tox_d="${0%/*}/../.tox"
-activate="$tox_d/$env/bin/activate"
-
-
-[ -d "$tox_d" ] || fail "$tox_d: not a dir. maybe run 'tox'?"
-
-[ "$env" = "-l" -o "$env" = "--list" ] && { list_toxes ; exit ; }
-
-if [ ! -f "$activate" ]; then
- error "$env: not a valid tox environment?"
- error "try one of:"
- list_toxes "$tox_d" " "
- fail
-fi
-. "$activate"
-
-[ "$#" -gt 0 ] || set -- ${SHELL:-/bin/bash}
-debian_chroot="tox:$env" exec "$@"
diff --git a/tools/uncloud-init b/tools/uncloud-init
deleted file mode 100755
index 2574d482..00000000
--- a/tools/uncloud-init
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-# vi: ts=4 noexpandtab
-
-# This script is meant to "kvmify" an image. It's not meant to be
-# terribly robust, nor is it a good idea to ever run it in a "real" image.
-# Its only intended method of invocation is from the kernel as 'init',
-# in which case it will invoke /sbin/init after it is done:
-#   init=/path/to/kvmify-init
-
-KEY="xupdate"
-UMOUNT=""
-RMDIR=""
-MARK=/var/lib/cloud/sem/uncloud-init.once
-ROOT_RW=""
-
-doexec() {
- if [ -n "$ROOT_RW" ]; then
- mkdir -p "${MARK%/*}";
- date > "${MARK}";
- fi
- cleanup;
- log "invoking /sbin/init $*"
- exec /sbin/init "$@";
-}
-log() { echo "::${0##*/}:" "$@"; }
-cleanup() {
- [ -z "${UMOUNT}" ] || { umount "${UMOUNT}" && unset UMOUNT; }
- [ -z "${RMDIR}" ] || { rm -Rf "${RMDIR}" && unset RMDIR; }
- [ -z "${ROOT_RW}" ] || { mount -o remount,ro / ; unset ROOT_RW; }
-}
-
-updateFrom() {
- local dev=$1 fmt=$2
- local mp="";
-
- [ "${fmt}" = "tar" -o "${fmt}" = "mnt" ] ||
- { log FAIL "unknown format ${fmt}"; return 1; }
-
- log INFO "updating from ${dev} format ${fmt}"
- [ ! -e "${dev}" -a -e "/dev/${dev}" ] && dev="/dev/${dev}"
- [ -e "${dev}" ] || { echo "no file $dev"; return 2; }
-
- mp=$(mktemp -d "${TEMPDIR:-/tmp}/update.XXXXXX") &&
- RMDIR="${mp}" ||
- { log FAIL "failed to mktemp"; return 1; }
-
- if [ "$fmt" = "tar" ]; then
- dd "if=${dev}" | ( tar -C "${mp}" -xf - ) ||
- { log FAIL "failed to extract ${dev}"; return 1; }
- elif [ "$fmt" = "mnt" ]; then
- mount -o ro "${dev}" "${mp}" && UMOUNT=${mp} ||
- { log FAIL "failed mount ${mp}"; return 1; }
- else
- log FAIL "unknown format ${fmt}"; return 1;
- fi
-
- if [ -d "${mp}/updates" ]; then
- rsync -av "${mp}/updates/" "/" ||
- { log FAIL "failed rsync updates/ /"; return 1; }
- fi
- if [ -f "${mp}/updates.tar" ]; then
- tar -C / -xvf "${mp}/updates.tar" ||
- { log FAIL "failed tar -C / -xvf ${mp}/updates.tar"; return 1; }
- fi
- script="${mp}/updates.script"
- if [ -f "${script}" -a -x "${script}" ]; then
- MP_DIR=${mp} "${mp}/updates.script" ||
- { log FAIL "failed to run updates.script"; return 1; }
- fi
-}
-
-fail() { { [ $# -eq 0 ] && log "FAILING" ; } || log "$@"; exit 1; }
-
-[ -s "$MARK" ] && { log "already updated" ; doexec "$@"; }
-
-mount -o remount,rw / || fail "failed to mount rw"
-ROOT_RW=1
-
-if [ ! -e /proc/cmdline ]; then
- mount -t proc /proc /proc
- read cmdline < /proc/cmdline
- umount /proc
-else
- read cmdline < /proc/cmdline
-fi
-
-ubuntu_pass=""
-
-for x in ${cmdline}; do
- case "$x" in
- ${KEY}=*)
- val=${x#${KEY}=}
- dev=${val%:*}
- [ "${dev}" = "${val}" ] && fmt="" || fmt=${val#${dev}:}
- log "update from ${dev},${fmt}"
- updateFrom "${dev}" "${fmt}" || fail "update failed"
- log "end update ${dev},${fmt}"
- ;;
- ubuntu-pass=*|ubuntu_pass=*) ubuntu_pass=${x#*=};;
- helpmount) helpmount=1;;
- root=*) rootspec=${x#root=};;
- esac
-done
-
-if [ "${ubuntu_pass}" = "R" -o "${ubuntu_pass}" = "random" ]; then
- ubuntu_pass=$(python -c 'import string, random;
-random.seed(); print "".join(random.sample(string.letters+string.digits, 8))')
-    log "setting ubuntu pass = ${ubuntu_pass}"
- printf "\n===\nubuntu_pass = %s\n===\n" "${ubuntu_pass}" >/dev/ttyS0
-fi
-
-[ -z "${ubuntu_pass}" ] ||
- printf "ubuntu:%s\n" "${ubuntu_pass}" > /root/ubuntu-user-pass
-
-if [ -e /root/ubuntu-user-pass ]; then
- log "changing ubuntu user's password!"
- chpasswd < /root/ubuntu-user-pass ||
- log "FAIL: failed changing pass"
-fi
-
-cp /etc/init/tty2.conf /etc/init/ttyS0.conf &&
- sed -i s,tty2,ttyS0,g /etc/init/ttyS0.conf 2>/dev/null &&
- log "enabled console on ttyS0"
-
-pa=PasswordAuthentication
-sed -i "s,${pa} no,${pa} yes," /etc/ssh/sshd_config 2>/dev/null &&
- log "enabled passwd auth in ssh" ||
- log "failed to enable passwd ssh"
-
-grep -q vga16fb /etc/modprobe.d/blacklist.conf || {
- echo "blacklist vga16fb" >> /etc/modprobe.d/blacklist.conf &&
- log "blacklisted vga16fb"
-}
-
-#lstr="${rootspec}"
-#if ! grep -q "^${lstr}[[:space:]]" /etc/fstab; then
-#   log "changing / in /etc/fstab to agree with cmdline (${lstr}) (bug 509841)"
-# sed -i "s,^\([^[[:space:]#]\+\)\([[:space:]]\+\)/\([[:space:]]\+\),${lstr}\2/\3," /etc/fstab
-#fi
-
-doexec "$@"
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
deleted file mode 100755
index ed9037d9..00000000
--- a/tools/validate-yaml.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python3
-
-"""Try to read a YAML file and report any errors.
-"""
-
-import sys
-import yaml
-
-
-if __name__ == "__main__":
- bads = 0
- for fn in sys.argv[1:]:
- sys.stdout.write("%s" % (fn))
- try:
- fh = open(fn, 'r')
- yaml.safe_load(fh.read())
- fh.close()
- sys.stdout.write(" - ok\n")
- except Exception as e:
- sys.stdout.write(" - bad (%s)\n" % (e))
- bads += 1
- if bads > 0:
- sys.exit(1)
- else:
- sys.exit(0)
diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints
deleted file mode 100755
index 6c3451fd..00000000
--- a/tools/write-ssh-key-fingerprints
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-
-logger_opts="-p user.info -t ec2"
-
-# rhel's version of logger does not support the long
-# form of -s (--stderr), so use the short form.
-logger_opts="$logger_opts -s"
-
-# Redirect stderr to stdout
-exec 2>&1
-
-fp_blist=",${1},"
-key_blist=",${2},"
-{
-echo
-echo "#############################################################"
-echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----"
-for f in /etc/ssh/ssh_host_*key.pub; do
- [ -f "$f" ] || continue
- read ktype line < "$f"
- # skip the key if its type is in the blacklist
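-    # (${fp_blist#*,$ktype,} strips through a leading ",$ktype,"; if the
-    #  expansion leaves the string unchanged, $ktype is not blacklisted)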
- [ "${fp_blist#*,$ktype,}" = "${fp_blist}" ] || continue
- ssh-keygen -l -f "$f"
-done
-echo "-----END SSH HOST KEY FINGERPRINTS-----"
-echo "#############################################################"
-
-} | logger $logger_opts
-
-echo "-----BEGIN SSH HOST KEY KEYS-----"
-for f in /etc/ssh/ssh_host_*key.pub; do
- [ -f "$f" ] || continue
- read ktype line < "$f"
- # skip the key if its type is in the blacklist
- [ "${key_blist#*,$ktype,}" = "${key_blist}" ] || continue
-    cat "$f"
-done
-echo "-----END SSH HOST KEY KEYS-----"
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index e7a6f22c..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,30 +0,0 @@
-[tox]
-envlist = py27,py3,flake8
-recreate = True
-
-[testenv]
-commands = python -m nose {posargs:tests}
-deps = -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
-setenv =
- LC_ALL = en_US.utf-8
-
-[testenv:flake8]
-basepython = python3
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
-
-# https://github.com/gabrielfalcao/HTTPretty/issues/223
-setenv =
- LC_ALL = en_US.utf-8
-
-[testenv:py3]
-basepython = python3
-
-[testenv:py26]
-commands = nosetests {posargs:tests}
-setenv =
- LC_ALL = C
-
-[flake8]
-ignore = H404,H405,H105,H301,H104,H403,H101
-exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
diff --git a/udev/66-azure-ephemeral.rules b/udev/66-azure-ephemeral.rules
deleted file mode 100644
index b9c5c3ef..00000000
--- a/udev/66-azure-ephemeral.rules
+++ /dev/null
@@ -1,18 +0,0 @@
-# Azure specific rules
-ACTION!="add|change", GOTO="cloud_init_end"
-SUBSYSTEM!="block", GOTO="cloud_init_end"
-ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end"
-ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end"
-
-# The root disk has a GUID with 0000 as the second group;
-# the ephemeral resource disk has 0001 as the second group.
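-# e.g. a device_id of "{00000000-0001-8899-0000-000000000000}" (hypothetical
-# value) would mark the ephemeral resource disk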
-ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names"
-ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names"
-GOTO="cloud_init_end"
-
-# Create the symlinks
-LABEL="ci_azure_names"
-ENV{DEVTYPE}=="disk", SYMLINK+="disk/cloud/$env{fabric_name}"
-ENV{DEVTYPE}=="partition", SYMLINK+="disk/cloud/$env{fabric_name}-part%n"
-
-LABEL="cloud_init_end"
diff --git a/upstart/cloud-config.conf b/upstart/cloud-config.conf
deleted file mode 100644
index 2c3ef67b..00000000
--- a/upstart/cloud-config.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# cloud-config - Handle applying the settings specified in cloud-config
-description "Handle applying cloud-config"
-emits cloud-config
-
-start on (filesystem and started rsyslog)
-console output
-task
-
-exec cloud-init modules --mode=config
diff --git a/upstart/cloud-final.conf b/upstart/cloud-final.conf
deleted file mode 100644
index 72ae5052..00000000
--- a/upstart/cloud-final.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# cloud-final.conf - run "final" jobs
-# this runs around traditional "rc.local" time,
-# after all cloud-config jobs have run
-description "execute cloud user/final scripts"
-
-start on (stopped rc RUNLEVEL=[2345] and stopped cloud-config)
-console output
-task
-
-exec cloud-init modules --mode=final
diff --git a/upstart/cloud-init-blocknet.conf b/upstart/cloud-init-blocknet.conf
deleted file mode 100644
index be09e7d8..00000000
--- a/upstart/cloud-init-blocknet.conf
+++ /dev/null
@@ -1,83 +0,0 @@
-# cloud-init-blocknet
-# the purpose of this job is
-# * to block networking from coming up until cloud-init-local has run
-# * to time out if that does not happen in a reasonable amount of time
-description "block networking until cloud-init-local"
-start on (starting network-interface
- or starting network-manager
- or starting networking)
-stop on stopped cloud-init-local
-
-instance $JOB${INTERFACE:+/}${INTERFACE:-}
-export INTERFACE
-task
-
-script
- set +e # you cannot trap TERM reliably with 'set -e'
- SLEEP_CHILD=""
-
- static_network_up() {
- local emitted="/run/network/static-network-up-emitted"
- # /run/network/static-network-up-emitted is written by
- # upstart (via /etc/network/if-up.d/upstart). its presense would
-    # upstart (via /etc/network/if-up.d/upstart). Its presence would
- [ -e "$emitted" -o -e "/var/$emitted" ]
- }
- msg() {
- local uptime="" idle="" msg=""
- if [ -r /proc/uptime ]; then
- read uptime idle < /proc/uptime
- fi
- msg="${UPSTART_INSTANCE}${uptime:+[${uptime}]}: $*"
- echo "$msg"
- }
-
- handle_sigterm() {
-    # if we received SIGTERM then it probably came from upstart
-    # as a result of 'stop on stopped cloud-init-local'
- msg "got sigterm"
- if [ -n "$SLEEP_CHILD" ]; then
- if ! kill $SLEEP_CHILD 2>/dev/null; then
- [ ! -d "/proc/$SLEEP_CHILD" ] ||
- msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
- fi
- fi
- msg "stopped"
- exit 0
- }
-
- dowait() {
- msg "blocking $1 seconds"
- # all this 'exec -a' does is get me a nicely named process in 'ps'
- # ie, 'sleep-block-network-interface.eth1'
- if [ -x /bin/bash ]; then
- bash -c 'exec -a sleep-block-$1 sleep $2' -- "$UPSTART_INSTANCE" "$1" &
- else
- sleep "$1" &
- fi
- SLEEP_CHILD=$!
- msg "sleepchild=$SLEEP_CHILD"
- wait $SLEEP_CHILD
- SLEEP_CHILD=""
- }
-
- trap handle_sigterm TERM
-
- if [ -n "$INTERFACE" -a "${INTERFACE#lo}" != "${INTERFACE}" ]; then
- msg "ignoring interface ${INTERFACE}";
- exit 0;
- fi
-
- # static_network_up already occurred
- static_network_up && { msg "static_network_up already"; exit 0; }
-
-    # local-finished records cloud-init-local's success or failure
- lfin="/run/cloud-init/local-finished"
- disable="/etc/cloud/no-blocknet"
- [ -f "$lfin" ] && { msg "$lfin found"; exit 0; }
- [ -f "$disable" ] && { msg "$disable found"; exit 0; }
-
- dowait 120
- msg "gave up waiting for $lfin"
- exit 1
-end script
diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf
deleted file mode 100644
index 6bdbe77e..00000000
--- a/upstart/cloud-init-container.conf
+++ /dev/null
@@ -1,57 +0,0 @@
-# in an lxc container, events for network interfaces do not
-# get created or may be missed. This helps cloud-init-nonet along
-# by emitting those events if they have not been emitted.
-
-start on container
-stop on static-network-up
-task
-
-emits net-device-added
-
-console output
-
-script
- # if we are inside a container, then we may have to emit the ifup
- # events for 'auto' network devices.
- set -f
-
- # from /etc/network/if-up.d/upstart
- MARK_DEV_PREFIX="/run/network/ifup."
- MARK_STATIC_NETWORK_EMITTED="/run/network/static-network-up-emitted"
-    # if all the static network interfaces are already up, nothing to do
- [ -f "$MARK_STATIC_NETWORK_EMITTED" ] && exit 0
-
-    # ifquery will exit with failure if there is no /run/network directory.
-    # Normally that would get created by one of network-interface.conf
-    # or networking.conf, but it is possible that we're running
-    # before either of those has.
- mkdir -p /run/network
-
- # get list of all 'auto' interfaces. if there are none, nothing to do.
- auto_list=$(ifquery --list --allow auto 2>/dev/null) || :
- [ -z "$auto_list" ] && exit 0
- set -- ${auto_list}
- [ "$*" = "lo" ] && exit 0
-
-    # we only want to emit for interfaces that actually exist, so filter
-    # out anything that does not.
- for iface in "$@"; do
- [ "$iface" = "lo" ] && continue
- # skip interfaces that are already up
- [ -f "${MARK_DEV_PREFIX}${iface}" ] && continue
-
-        if [ -d /sys/class/net ]; then
-            # if sysfs is mounted and there is no /sys/class/net/$iface,
-            # then there is no such device to bring up
-            [ -e "/sys/class/net/$iface" ] || continue
- else
- # sys wasn't mounted, so just check via 'ifconfig'
- ifconfig "$iface" >/dev/null 2>&1 || continue
- fi
- initctl emit --no-wait net-device-added "INTERFACE=$iface" &&
- emitted="$emitted $iface" ||
- echo "warn: ${UPSTART_JOB} failed to emit net-device-added INTERFACE=$iface"
- done
-
- [ -z "${emitted# }" ] ||
- echo "${UPSTART_JOB}: emitted ifup for ${emitted# }"
-end script
diff --git a/upstart/cloud-init-local.conf b/upstart/cloud-init-local.conf
deleted file mode 100644
index 5def043d..00000000
--- a/upstart/cloud-init-local.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-# cloud-init - the initial cloud-init job
-# crawls metadata service, emits cloud-config
-start on mounted MOUNTPOINT=/ and mounted MOUNTPOINT=/run
-
-task
-
-console output
-
-script
- lfin=/run/cloud-init/local-finished
- ret=0
- cloud-init init --local || ret=$?
- [ -r /proc/uptime ] && read up idle < /proc/uptime || up="N/A"
- echo "$ret up $up" > "$lfin"
- exit $ret
-end script
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
deleted file mode 100644
index 6abf6573..00000000
--- a/upstart/cloud-init-nonet.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# cloud-init-nonet
-# the purpose of this job is
-# * to block running of cloud-init until all network interfaces
-# configured in /etc/network/interfaces are up
-# * to time out if they do not all come up in a reasonable amount of time
-start on mounted MOUNTPOINT=/ and stopped cloud-init-local
-stop on static-network-up
-task
-
-console output
-
-script
- set +e # you cannot trap TERM reliably with 'set -e'
- SLEEP_CHILD=""
-
- static_network_up() {
- local emitted="/run/network/static-network-up-emitted"
- # /run/network/static-network-up-emitted is written by
-    # upstart (via /etc/network/if-up.d/upstart). Its presence would
- # indicate that static-network-up has already fired.
- [ -e "$emitted" -o -e "/var/$emitted" ]
- }
- msg() {
- local uptime="" idle=""
- if [ -r /proc/uptime ]; then
- read uptime idle < /proc/uptime
- fi
- echo "$UPSTART_JOB${uptime:+[${uptime}]}:" "$1"
- }
-
- handle_sigterm() {
- # if we received sigterm and static networking is up then it probably
- # came from upstart as a result of 'stop on static-network-up'
- if [ -n "$SLEEP_CHILD" ]; then
- if ! kill $SLEEP_CHILD 2>/dev/null; then
- [ ! -d "/proc/$SLEEP_CHILD" ] ||
- msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
- fi
- fi
- if static_network_up; then
- msg "static networking is now up"
- exit 0
- fi
-    msg "received SIGTERM, networking not up"
- exit 2
- }
-
- dowait() {
- [ $# -eq 2 ] || msg "waiting $1 seconds for network device"
- sleep "$1" &
- SLEEP_CHILD=$!
- wait $SLEEP_CHILD
- SLEEP_CHILD=""
- }
-
- trap handle_sigterm TERM
-
- # static_network_up already occurred
- static_network_up && exit 0
-
- dowait 5 silent
- dowait 10
- dowait 115
- msg "gave up waiting for a network device."
- : > /var/lib/cloud/data/no-net
-end script
diff --git a/upstart/cloud-init.conf b/upstart/cloud-init.conf
deleted file mode 100644
index 41ddd284..00000000
--- a/upstart/cloud-init.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# cloud-init - the initial cloud-init job
-# crawls metadata service, emits cloud-config
-start on mounted MOUNTPOINT=/ and stopped cloud-init-nonet
-
-task
-
-console output
-
-exec /usr/bin/cloud-init init
diff --git a/upstart/cloud-log-shutdown.conf b/upstart/cloud-log-shutdown.conf
deleted file mode 100644
index 278b9c06..00000000
--- a/upstart/cloud-log-shutdown.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-# log shutdowns and reboots to the console (/dev/console)
-# this is useful for correlating logs
-start on runlevel PREVLEVEL=2
-
-task
-console output
-
-script
- # runlevel(7) says INIT_HALT will be set to HALT or POWEROFF
- date=$(date --utc)
- case "$RUNLEVEL:$INIT_HALT" in
- 6:*) mode="reboot";;
- 0:HALT) mode="halt";;
- 0:POWEROFF) mode="poweroff";;
- 0:*) mode="shutdown-unknown";;
- esac
- { read seconds idle < /proc/uptime; } 2>/dev/null || :
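-    # (${seconds%.*} in the message below drops the fractional part of uptime)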
- echo "$date: shutting down for $mode${seconds:+ [up ${seconds%.*}s]}."
-end script