diff --git a/ansible/.gitignore b/ansible/.gitignore deleted file mode 100644 index 7193038e5..000000000 --- a/ansible/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.vagrant/ -*.retry diff --git a/ansible/LICENSE b/ansible/LICENSE deleted file mode 100644 index 64a33ddf1..000000000 --- a/ansible/LICENSE +++ /dev/null @@ -1,192 +0,0 @@ -Copyright (C) 2017 Tendermint - - - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ansible/README.rst b/ansible/README.rst deleted file mode 100644 index 5c416c018..000000000 --- a/ansible/README.rst +++ /dev/null @@ -1,291 +0,0 @@ -Using Ansible -============= - -.. figure:: assets/a_plus_t.png - :alt: Ansible plus Tendermint - - Ansible plus Tendermint - -The playbooks in `our ansible directory `__ -run ansible `roles `__ which: - -- install and configure basecoind or ethermint -- start/stop basecoind or ethermint and reset their configuration - -Prerequisites ------------- - -- Ansible 2.0 or higher -- SSH key to the servers - -Optional for DigitalOcean droplets: - -- DigitalOcean API Token -- python dopy package - -For a description of how to get a DigitalOcean API Token, see the explanation -in the `using terraform tutorial <./terraform-digitalocean.html>`__. - -Optional for Amazon AWS instances: - -- Amazon AWS API access key ID and secret access key. - -The cloud inventory scripts come from the ansible team at their -`GitHub `__ page. You can get the -latest version from the ``contrib/inventory`` folder. - -Setup ------ - -Ansible requires a "command machine" or "local machine" or "orchestrator -machine" to run on. This can be your laptop or any machine that can run -ansible. (It does not have to be part of the cloud network that hosts -your servers.) - -Use the official `Ansible installation -guide `__ to -install Ansible. Here are a few examples of basic installation commands: - -Ubuntu/Debian: - -:: - - sudo apt-get install ansible - -CentOS/RedHat: - -:: - - sudo yum install epel-release - sudo yum install ansible - -Mac OSX: If you have `Homebrew `__ installed, then it's: - -:: - - brew install ansible - -If not, you can install it using ``pip``: - -:: - - sudo easy_install pip - sudo pip install ansible - -To make life easier, you can start an SSH Agent and load your SSH -key(s). This way ansible will have an uninterrupted way of connecting to -your servers. - -:: - - ssh-agent > ~/.ssh/ssh.env - source ~/.ssh/ssh.env - - ssh-add private.key - -Subsequently, as long as the agent is running, you can use -``source ~/.ssh/ssh.env`` to load the keys to the current session. Note: -On Mac OSX, you can add the ``-K`` option to ssh-add to store the -passphrase in your keychain. The security of this feature is debated but -it is convenient. - -Optional cloud dependencies -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are using a cloud provider to host your servers, you need the -following dependencies installed on your local machine.
- -DigitalOcean inventory dependencies: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Ubuntu/Debian: - -:: - - sudo apt-get install python-pip - sudo pip install dopy - -CentOS/RedHat: - -:: - - sudo yum install python-pip - sudo pip install dopy - -Mac OSX: - -:: - - sudo pip install dopy - -Amazon AWS inventory dependencies: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Ubuntu/Debian: - -:: - - sudo apt-get install python-boto - -CentOS/RedHat: - -:: - - sudo yum install python-boto - -Mac OSX: - -:: - - sudo pip install boto - -Refreshing the DigitalOcean inventory -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you just finished creating droplets, the local DigitalOcean inventory -cache is not up-to-date. To refresh it, run: - -:: - - DO_API_TOKEN="" - python -u inventory/digital_ocean.py --refresh-cache 1> /dev/null - -Refreshing the Amazon AWS inventory -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you just finished creating Amazon AWS EC2 instances, the local AWS -inventory cache is not up-to-date. To refresh it, run: - -:: - - AWS_ACCESS_KEY_ID='' - AWS_SECRET_ACCESS_KEY='' - python -u inventory/ec2.py --refresh-cache 1> /dev/null - -Note: you don't need the access key and secret key set if you are -running ansible on an Amazon AMI instance with the proper IAM -permissions set. - -Running the playbooks --------------------- - -The playbooks are locked down to only run if the environment variable -``TF_VAR_TESTNET_NAME`` is populated. This is a precaution so you don't -accidentally run the playbook on all your servers. - -The variable ``TF_VAR_TESTNET_NAME`` contains the testnet name which -ansible translates into an ansible group. If you used Terraform to -create the servers, it was the testnet name used there. - -If the playbook cannot connect to the servers because of public key -denial, your SSH Agent is not set up properly. Alternatively you can add -the SSH key to ansible using the ``--private-key`` option. - -If you need to connect to the nodes as root but your local username is -different, use the ansible option ``-u root`` to tell ansible to connect -to the servers and authenticate as the root user. - -If you secured your server and you need to ``sudo`` for root access, use -the ``-b`` or ``--become`` option to tell ansible to sudo to root -after connecting to the server. In the Terraform-DigitalOcean example, -if you created the ec2-user by adding the ``noroot=true`` option (or if -you are simply on Amazon AWS), you need to add the options -``-u ec2-user -b`` to ansible to tell it to connect as the ec2-user and -then sudo to root to run the playbook. - -DigitalOcean -~~~~~~~~~~~~ - -:: - - DO_API_TOKEN="" - TF_VAR_TESTNET_NAME="testnet-servers" - ansible-playbook -i inventory/digital_ocean.py install.yml -e service=basecoind - -Amazon AWS -~~~~~~~~~~ - -:: - - AWS_ACCESS_KEY_ID='' - AWS_SECRET_ACCESS_KEY='' - TF_VAR_TESTNET_NAME="testnet-servers" - ansible-playbook -i inventory/ec2.py install.yml -e service=basecoind - -Installing custom versions -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default ansible installs the tendermint, basecoind or ethermint binary -versions from the latest release in the repository. If you build your -own version of the binaries, you can tell ansible to install that -instead.
- -:: - - GOPATH="" - go get -u github.com/tendermint/basecoin/cmd/basecoind - - DO_API_TOKEN="" - TF_VAR_TESTNET_NAME="testnet-servers" - ansible-playbook -i inventory/digital_ocean.py install.yml -e service=basecoind -e release_install=false - -Alternatively you can change the variable settings in -``group_vars/all``. - -Other commands and roles ------------------------- - -There are a few extra playbooks to make managing your servers easier. - -- install.yml - Install basecoind or ethermint applications. (Tendermint - gets installed automatically.) Use the ``service`` parameter to - define which application to install. Defaults to ``basecoind``. -- reset.yml - Stop the application, reset the configuration and data, - then start the application again. You need to pass - ``-e service=``, like ``-e service=basecoind``. It will - restart the underlying tendermint application too. -- restart.yml - Restart a service on all nodes. You need to pass - ``-e service=``, like ``-e service=basecoind``. It will - restart the underlying tendermint application too. -- stop.yml - Stop the application. You need to pass - ``-e service=``. -- status.yml - Check the service status and print it. You need to pass - ``-e service=``. -- start.yml - Start the application. You need to pass - ``-e service=``. -- ubuntu16-patch.yml - Ubuntu 16.04 does not have the minimum required - python package installed to be able to run ansible. If you are using - Ubuntu, run this playbook first on the target machines. This will - install the python package that is required for ansible to work - correctly on the remote nodes. -- upgrade.yml - Upgrade the ``service`` on your testnet. It will stop - the service and restart it at the end. It will only work if the - upgraded version is backward compatible with the installed version. -- upgrade-reset.yml - Upgrade the ``service`` on your testnet and reset - the database. It will stop the service and restart it at the end. It - will work for upgrades where the new version is not - backward-compatible with the installed version; however, it will - reset the testnet to its default. - -The roles are self-sufficient under the ``roles/`` folder. - -- install - install the application defined in the ``service`` - parameter. It can install release packages and update them with - custom-compiled binaries. -- unsafe\_reset - delete the database for a service, including the - tendermint database. -- config - configure the application defined in ``service``. It also - configures the underlying tendermint service. Check - ``group_vars/all`` for options. -- stop - stop an application. Requires the ``service`` parameter set. -- status - check the status of an application. Requires the ``service`` - parameter set. -- start - start an application. Requires the ``service`` parameter set. - -Default variables ----------------- - -Default variables are documented under ``group_vars/all``. You can set the -parameters there to deploy a previously created genesis.json file -(instead of dynamically creating it) or to deploy custom -built binaries instead of a released version. diff --git a/ansible/Vagrantfile b/ansible/Vagrantfile deleted file mode 100644 index 117d7e18d..000000000 --- a/ansible/Vagrantfile +++ /dev/null @@ -1,18 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - # All Vagrant configuration is done here. The most common configuration - # options are documented and commented below. For a complete reference, - # please see the online documentation at vagrantup.com. - - # Every Vagrant virtual environment requires a box to build off of. - config.vm.box = "ubuntu/trusty64" - - config.vm.provision :ansible do |ansible| - ansible.playbook = "install.yml" - end -end diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg deleted file mode 100644 index 045c1ea60..000000000 --- a/ansible/ansible.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[defaults] -retry_files_enabled = False -host_key_checking = False - diff --git a/ansible/app_options_files/dev_money b/ansible/app_options_files/dev_money deleted file mode 100644 index ff0b6958d..000000000 --- a/ansible/app_options_files/dev_money +++ /dev/null @@ -1,16 +0,0 @@ - "accounts": [{ - "pub_key": { - "type": "ed25519", - "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - }, - "coins": [ - { - "denom": "mycoin", - "amount": 9007199254740992 - } - ] - }], - "plugin_options": [ - "coin/issuer", {"app": "sigs", "addr": "1B1BE55F969F54064628A63B9559E7C21C925165"} - ] - diff --git a/ansible/app_options_files/empty b/ansible/app_options_files/empty deleted file mode 100644 index 8b1378917..000000000 --- a/ansible/app_options_files/empty +++ /dev/null @@ -1 +0,0 @@ - diff --git a/ansible/app_options_files/public_testnet b/ansible/app_options_files/public_testnet deleted file mode 100644 index 67318e63d..000000000 --- a/ansible/app_options_files/public_testnet +++ /dev/null @@ -1,75 +0,0 @@ - "accounts": [ - { - "name": "greg", - "address": "42960119BC3D724F6FA0E2883C0DCF550C59D1B2", - "coins": [ - { - "denom": "fermion", - "amount": 1000000 - }, - { - "denom": "gregcoin", - "amount": 1000 - } - ] - }, - { - "name": "bucky", - "address": "5CAFE3CD0FEE7A5DD98B366B19A201D428A79FB6", - "coins": [ - { - "denom": "fermion", - "amount": 10000 - }, - { - "denom": "buckycoin", - "amount": 1000 - } - ] - }, - { - "name": "fabo", - "address": "9C145AAAE1E7AD8735BC1B2173B092CEF6FD8557", - "coins": [ - { - "denom": "fermion", - "amount": 100 - }, - { - "denom": "fabocoin", - "amount": 1000 - } - ] - }, - { - "name": "mattbell", - "address": "C2BA52AC0E98907ED7DC7FBFE85FCF3D4BD4D018", - "coins": [ - { - "denom": "fermion", - "amount": 100 - }, - { - "denom": "tokenmatt", - "amount": 1000 - } - ] - }, - { - "name": "fabo", - "address": "527E2333EF0B6E5FFB6E62FFA68B3707E08F2286", - "coins": [ - { - "denom": "fermion", - "amount": 100 - }, - { - "denom": "tokenfabo", - "amount": 1000 - } - ] - } - ], - "plugin_options": [ - "coin/issuer", {"app": "sigs", "addr": "B01C264BFE9CBD45458256E613A6F07061A3A6B6"} - ] diff --git a/ansible/app_options_files/relay b/ansible/app_options_files/relay deleted file mode 100644 index 65ddf0c11..000000000 --- a/ansible/app_options_files/relay +++ /dev/null @@ -1,20 +0,0 @@ - "accounts": [ - { - "name": "relay", - "address": "1B1BE55F969F54064628A63B9559E7C21C925165", - "pub_key": { - "type": "ed25519", - "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - }, - "coins": [ - { - "denom": "mycoin", - "amount": 9007199254740992 - } - ] - } - ], - "plugin_options": [ - "coin/issuer", {"app": "sigs", "addr": "1B1BE55F969F54064628A63B9559E7C21C925165"} - ] - diff --git a/ansible/assets/a_plus_t.png b/ansible/assets/a_plus_t.png deleted file mode 100644 index 8f5bc5e95..000000000 
Binary files a/ansible/assets/a_plus_t.png and /dev/null differ diff --git a/ansible/getconfigtoml.yml b/ansible/getconfigtoml.yml deleted file mode 100644 index 055ed923d..000000000 --- a/ansible/getconfigtoml.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -#variable "service" is required - -- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" - roles: - - getconfigtoml - diff --git a/ansible/getfile.yml b/ansible/getfile.yml deleted file mode 100644 index 9dcee0fa8..000000000 --- a/ansible/getfile.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -#variable "source" is required - -- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" - roles: - - getfile - diff --git a/ansible/group_vars/all b/ansible/group_vars/all deleted file mode 100644 index bd14134cc..000000000 --- a/ansible/group_vars/all +++ /dev/null @@ -1,41 +0,0 @@ ---- -### -### Tendermint installation -### - -## This file shows and sets the global defaults for the role variables. - -## -## install -## - -## service variable defines which service is going to be managed. It can be set to basecoind or ethermint. -service: basecoind - -## release_install indicates if the install role should look for a privately built binary after installing the service package. If set to false, the privately built binary in the GOPATH is going to override the binary on the target systems. -#release_install: true - -## binary stores the path to the privately built service binary, if there is any. By default it uses the GOPATH environment variable. -#binary: "{{ lookup('env','GOPATH') | default('') }}/bin/{{service}}" - -## -## config -## - -## tendermint_genesis_file contains the path and filename to a previously generated genesis.json for the underlying tendermint service. If undefined, the json file is dynamically generated. -#tendermint_genesis_file: "" - -## service_genesis_file contains the path and filename to a previously generated genesis.json for the service. If undefined, the json file is dynamically generated. -#service_genesis_file: "" - -## testnet_name is used to find seed IPs and public keys and set the chain_id in genesis.json and config.toml -#testnet_name: testnet1 - -## app_options_file contains a path and filename which will be included in a generated service genesis.json file on all nodes. The content will be dumped into the app_options dictionary in the service genesis.json. -#app_options_file: "app_options_files/dev_money" - -## Internal use only. validators indicates if the nodes are validator nodes. The tendermint genesis.json will contain their public keys. -#validators: true - -## Internal use only. seeds contains the list of servers (with ports) that are validators in a testnet. Only effective if validators == false. If validators == true, then all nodes will be automatically included here.
-#seeds: "" diff --git a/ansible/install.yml b/ansible/install.yml deleted file mode 100644 index 82c34dae0..000000000 --- a/ansible/install.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -#variable "service" is required - -- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" - any_errors_fatal: "{{validators | default(true) | bool}}" - roles: - - install - - {role: generic-service, when: service == 'tendermint'} - - {role: config, testnet_name: "{{lookup('env','TF_VAR_TESTNET_NAME')}}", tags: reconfig } - - start - diff --git a/ansible/inventory/COPYING b/ansible/inventory/COPYING deleted file mode 100644 index 10926e87f..000000000 --- a/ansible/inventory/COPYING +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. 
The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - -<one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<https://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<https://www.gnu.org/licenses/why-not-lgpl.html>. - diff --git a/ansible/inventory/digital_ocean.ini b/ansible/inventory/digital_ocean.ini deleted file mode 100644 index b809554b2..000000000 --- a/ansible/inventory/digital_ocean.ini +++ /dev/null @@ -1,34 +0,0 @@ -# Ansible DigitalOcean external inventory script settings -# - -[digital_ocean] - -# The module needs your DigitalOcean API Token. -# It may also be specified on the command line via --api-token -# or via the environment variables DO_API_TOKEN or DO_API_KEY -# -#api_token = 123456abcdefg - - -# API calls to DigitalOcean may be slow. For this reason, we cache the results -# of an API call. Set this to the path you want cache files to be written to. -# One file will be written to this directory: -# - ansible-digital_ocean.cache -# -cache_path = /tmp - - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# -cache_max_age = 300 - -# Use the private network IP address instead of the public when available.
-# -use_private_network = False - -# Pass variables to every group, e.g.: -# -# group_variables = { 'ansible_user': 'root' } -# -group_variables = {} diff --git a/ansible/inventory/digital_ocean.py b/ansible/inventory/digital_ocean.py deleted file mode 100755 index 24ba64370..000000000 --- a/ansible/inventory/digital_ocean.py +++ /dev/null @@ -1,471 +0,0 @@ -#!/usr/bin/env python - -''' -DigitalOcean external inventory script -====================================== - -Generates Ansible inventory of DigitalOcean Droplets. - -In addition to the --list and --host options used by Ansible, there are options -for generating JSON of other DigitalOcean data. This is useful when creating -droplets. For example, --regions will return all the DigitalOcean Regions. -This information can also be easily found in the cache file, whose default -location is /tmp/ansible-digital_ocean.cache. - -The --pretty (-p) option pretty-prints the output for better human readability. - ----- -Although the cache stores all the information received from DigitalOcean, -the cache is not used for current droplet information (in --list, --host, ---all, and --droplets). This is so that accurate droplet information is always -found. You can force this script to use the cache with --force-cache. - ----- -Configuration is read from `digital_ocean.ini`, then from environment variables, -and finally from command-line arguments. - -Most notably, the DigitalOcean API Token must be specified. It can be specified -in the INI file or with the following environment variables: - export DO_API_TOKEN='abc123' or - export DO_API_KEY='abc123' - -Alternatively, it can be passed on the command-line with --api-token. - -If you specify DigitalOcean credentials in the INI file, a handy way to -get them into your environment (e.g., to use the digital_ocean module) -is to use the output of the --env option with export: - export $(digital_ocean.py --env) - ----- -The following groups are generated from --list: - - ID (droplet ID) - - NAME (droplet NAME) - - image_ID - - image_NAME - - distro_NAME (distribution NAME from image) - - region_NAME - - size_NAME - - status_STATUS - -For each host, the following variables are registered: - - do_backup_ids - - do_created_at - - do_disk - - do_features - list - - do_id - - do_image - object - - do_ip_address - - do_private_ip_address - - do_kernel - object - - do_locked - - do_memory - - do_name - - do_networks - object - - do_next_backup_window - - do_region - object - - do_size - object - - do_size_slug - - do_snapshot_ids - list - - do_status - - do_tags - - do_vcpus - - do_volume_ids - ------ -``` -usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] - [--droplets] [--regions] [--images] [--sizes] - [--ssh-keys] [--domains] [--pretty] - [--cache-path CACHE_PATH] - [--cache-max_age CACHE_MAX_AGE] - [--force-cache] - [--refresh-cache] - [--api-token API_TOKEN] - -Produce an Ansible Inventory file based on DigitalOcean credentials - -optional arguments: - -h, --help show this help message and exit - --list List all active Droplets as Ansible inventory - (default: True) - --host HOST Get all Ansible inventory variables about a specific - Droplet - --all List all DigitalOcean information as JSON - --droplets List Droplets as JSON - --regions List Regions as JSON - --images List Images as JSON - --sizes List Sizes as JSON - --ssh-keys List SSH keys as JSON - --domains List Domains as JSON - --pretty, -p Pretty-print results - --cache-path CACHE_PATH - Path to the cache files (default: .)
- --cache-max_age CACHE_MAX_AGE - Maximum age of the cached items (default: 0) - --force-cache Only use data from the cache - --refresh-cache Force refresh of cache by making API requests to - DigitalOcean (default: False - use cache files) - --api-token API_TOKEN, -a API_TOKEN - DigitalOcean API Token -``` - -''' - -# (c) 2013, Evan Wies -# -# Inspired by the EC2 inventory plugin: -# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -import os -import sys -import re -import argparse -from time import time -import ConfigParser -import ast - -try: - import json -except ImportError: - import simplejson as json - -try: - from dopy.manager import DoManager -except ImportError as e: - sys.exit("failed=True msg='`dopy` library required for this script'") - - -class DigitalOceanInventory(object): - - ########################################################################### - # Main execution path - ########################################################################### - - def __init__(self): - ''' Main execution path ''' - - # DigitalOceanInventory data - self.data = {} # All DigitalOcean data - self.inventory = {} # Ansible Inventory - - # Define defaults - self.cache_path = '.' - self.cache_max_age = 0 - self.use_private_network = False - self.group_variables = {} - - # Read settings, environment variables, and CLI arguments - self.read_settings() - self.read_environment() - self.read_cli_args() - - # Verify credentials were set - if not hasattr(self, 'api_token'): - sys.stderr.write('''Could not find values for DigitalOcean api_token. 
-They must be specified via either ini file, command line argument (--api-token), -or environment variables (DO_API_TOKEN)\n''') - sys.exit(-1) - - # env command, show DigitalOcean credentials - if self.args.env: - print("DO_API_TOKEN=%s" % self.api_token) - sys.exit(0) - - # Manage cache - self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" - self.cache_refreshed = False - - if self.is_cache_valid(): - self.load_from_cache() - if len(self.data) == 0: - if self.args.force_cache: - sys.stderr.write('''Cache is empty and --force-cache was specified\n''') - sys.exit(-1) - - self.manager = DoManager(None, self.api_token, api_version=2) - - # Pick the json_data to print based on the CLI command - if self.args.droplets: - self.load_from_digital_ocean('droplets') - json_data = {'droplets': self.data['droplets']} - elif self.args.regions: - self.load_from_digital_ocean('regions') - json_data = {'regions': self.data['regions']} - elif self.args.images: - self.load_from_digital_ocean('images') - json_data = {'images': self.data['images']} - elif self.args.sizes: - self.load_from_digital_ocean('sizes') - json_data = {'sizes': self.data['sizes']} - elif self.args.ssh_keys: - self.load_from_digital_ocean('ssh_keys') - json_data = {'ssh_keys': self.data['ssh_keys']} - elif self.args.domains: - self.load_from_digital_ocean('domains') - json_data = {'domains': self.data['domains']} - elif self.args.all: - self.load_from_digital_ocean() - json_data = self.data - elif self.args.host: - json_data = self.load_droplet_variables_for_host() - else: # '--list' this is last to make it default - self.load_from_digital_ocean('droplets') - self.build_inventory() - json_data = self.inventory - - if self.cache_refreshed: - self.write_to_cache() - - if self.args.pretty: - print(json.dumps(json_data, sort_keys=True, indent=2)) - else: - print(json.dumps(json_data)) - # That's all she wrote... 
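For orientation, the `--list` branch above prints JSON in the dynamic-inventory shape that Ansible consumes. Below is a minimal sketch of that output for a single hypothetical droplet; the address, group names, and hostvars are illustrative placeholders rather than values from a real account, but the structure mirrors what build_inventory() assembles (an 'all' group, per-attribute groups, and per-host variables under '_meta' in the 'do_' namespace):

```
# Minimal sketch of the JSON emitted by `digital_ocean.py --list --pretty`.
# The droplet address and slugs here are hypothetical examples.
import json

inventory = {
    "all": {
        "hosts": ["203.0.113.10"],   # one entry per droplet address
        "vars": {},                  # group_variables from digital_ocean.ini
    },
    "region_nyc1": {"hosts": ["203.0.113.10"], "vars": {}},
    "status_active": {"hosts": ["203.0.113.10"], "vars": {}},
    "_meta": {
        "hostvars": {
            "203.0.113.10": {
                "do_name": "example-droplet",
                "do_size_slug": "s-1vcpu-1gb",
            }
        }
    },
}

print(json.dumps(inventory, sort_keys=True, indent=2))
```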
- - ########################################################################### - # Script configuration - ########################################################################### - - def read_settings(self): - ''' Reads the settings from the digital_ocean.ini file ''' - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') - - # Credentials - if config.has_option('digital_ocean', 'api_token'): - self.api_token = config.get('digital_ocean', 'api_token') - - # Cache related - if config.has_option('digital_ocean', 'cache_path'): - self.cache_path = config.get('digital_ocean', 'cache_path') - if config.has_option('digital_ocean', 'cache_max_age'): - self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') - - # Private IP Address - if config.has_option('digital_ocean', 'use_private_network'): - self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') - - # Group variables - if config.has_option('digital_ocean', 'group_variables'): - self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) - - def read_environment(self): - ''' Reads the settings from environment variables ''' - # Setup credentials - if os.getenv("DO_API_TOKEN"): - self.api_token = os.getenv("DO_API_TOKEN") - if os.getenv("DO_API_KEY"): - self.api_token = os.getenv("DO_API_KEY") - - def read_cli_args(self): - ''' Command line argument processing ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') - - parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') - parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') - - parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') - parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') - parser.add_argument('--regions', action='store_true', help='List Regions as JSON') - parser.add_argument('--images', action='store_true', help='List Images as JSON') - parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') - parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') - parser.add_argument('--domains', action='store_true', help='List Domains as JSON') - - parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') - - parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') - parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') - parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') - parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, - help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') - - parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') - parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') - - self.args = parser.parse_args() - - if self.args.api_token: - self.api_token = self.args.api_token - - # Make --list default if none of the other commands are specified - if (not self.args.droplets and not self.args.regions and - not self.args.images and not self.args.sizes and - not self.args.ssh_keys and not 
self.args.domains and - not self.args.all and not self.args.host): - self.args.list = True - - ########################################################################### - # Data Management - ########################################################################### - - def load_from_digital_ocean(self, resource=None): - '''Get JSON from DigitalOcean API''' - if self.args.force_cache and os.path.isfile(self.cache_filename): - return - # We always get fresh droplets - if self.is_cache_valid() and not (resource == 'droplets' or resource is None): - return - if self.args.refresh_cache: - resource = None - - if resource == 'droplets' or resource is None: - self.data['droplets'] = self.manager.all_active_droplets() - self.cache_refreshed = True - if resource == 'regions' or resource is None: - self.data['regions'] = self.manager.all_regions() - self.cache_refreshed = True - if resource == 'images' or resource is None: - self.data['images'] = self.manager.all_images(filter=None) - self.cache_refreshed = True - if resource == 'sizes' or resource is None: - self.data['sizes'] = self.manager.sizes() - self.cache_refreshed = True - if resource == 'ssh_keys' or resource is None: - self.data['ssh_keys'] = self.manager.all_ssh_keys() - self.cache_refreshed = True - if resource == 'domains' or resource is None: - self.data['domains'] = self.manager.all_domains() - self.cache_refreshed = True - - def build_inventory(self): - '''Build Ansible inventory of droplets''' - self.inventory = { - 'all': { - 'hosts': [], - 'vars': self.group_variables - }, - '_meta': {'hostvars': {}} - } - - # add all droplets by id and name - for droplet in self.data['droplets']: - # when using private_networking, the API reports the private one in "ip_address". - if 'private_networking' in droplet['features'] and not self.use_private_network: - for net in droplet['networks']['v4']: - if net['type'] == 'public': - dest = net['ip_address'] - else: - continue - else: - dest = droplet['ip_address'] - - self.inventory['all']['hosts'].append(dest) - - self.inventory[droplet['id']] = [dest] - self.inventory[droplet['name']] = [dest] - - # groups that are always present - for group in ('region_' + droplet['region']['slug'], - 'image_' + str(droplet['image']['id']), - 'size_' + droplet['size']['slug'], - 'distro_' + self.to_safe(droplet['image']['distribution']), - 'status_' + droplet['status']): - if group not in self.inventory: - self.inventory[group] = {'hosts': [], 'vars': {}} - self.inventory[group]['hosts'].append(dest) - - # groups that are not always present - for group in (droplet['image']['slug'], - droplet['image']['name']): - if group: - image = 'image_' + self.to_safe(group) - if image not in self.inventory: - self.inventory[image] = {'hosts': [], 'vars': {}} - self.inventory[image]['hosts'].append(dest) - - if droplet['tags']: - for tag in droplet['tags']: - if tag not in self.inventory: - self.inventory[tag] = {'hosts': [], 'vars': {}} - self.inventory[tag]['hosts'].append(dest) - - # hostvars - info = self.do_namespace(droplet) - self.inventory['_meta']['hostvars'][dest] = info - - def load_droplet_variables_for_host(self): - '''Generate a JSON response to a --host call''' - host = int(self.args.host) - droplet = self.manager.show_droplet(host) - info = self.do_namespace(droplet) - return {'droplet': info} - - ########################################################################### - # Cache Management - ########################################################################### - - def is_cache_valid(self): - ''' 
Determines whether the cache file has expired or is still valid ''' - if os.path.isfile(self.cache_filename): - mod_time = os.path.getmtime(self.cache_filename) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - return True - return False - - def load_from_cache(self): - ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' - try: - cache = open(self.cache_filename, 'r') - json_data = cache.read() - cache.close() - data = json.loads(json_data) - except IOError: - data = {'data': {}, 'inventory': {}} - - self.data = data['data'] - self.inventory = data['inventory'] - - def write_to_cache(self): - ''' Writes data in JSON format to a file ''' - data = {'data': self.data, 'inventory': self.inventory} - json_data = json.dumps(data, sort_keys=True, indent=2) - - cache = open(self.cache_filename, 'w') - cache.write(json_data) - cache.close() - - ########################################################################### - # Utilities - ########################################################################### - - def push(self, my_dict, key, element): - ''' Pushes an element onto a list that may not yet be defined in the dict ''' - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - return re.sub(r"[^A-Za-z0-9\-\.]", "_", word) - - def do_namespace(self, data): - ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' - info = {} - for k, v in data.items(): - info['do_' + k] = v - return info - - -########################################################################### -# Run the script -DigitalOceanInventory() diff --git a/ansible/inventory/ec2.ini b/ansible/inventory/ec2.ini deleted file mode 100644 index e11a69cc1..000000000 --- a/ansible/inventory/ec2.ini +++ /dev/null @@ -1,209 +0,0 @@ -# Ansible EC2 external inventory script settings -# - -[ec2] - -# to talk to a private eucalyptus instance uncomment these lines -# and edit eucalyptus_host to be the host name of your cloud controller -#eucalyptus = True -#eucalyptus_host = clc.cloud.domain.org - -# AWS regions to make calls to. Set this to 'all' to make requests to all regions -# in AWS and merge the results together. Alternatively, set this to a comma -# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not -# provide the 'regions_exclude' option. If this is set to 'auto', the AWS_REGION or -# AWS_DEFAULT_REGION environment variable will be read to determine the region. -regions = all -regions_exclude = us-gov-west-1, cn-north-1 - -# When generating inventory, Ansible needs to know how to address a server. -# Each EC2 instance has a lot of variables associated with it. Here is the list: -# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance -# Below are 2 variables that are used as the address of a server: -# - destination_variable -# - vpc_destination_variable - -# This is the normal destination variable to use. If you are running Ansible -# from outside EC2, then 'public_dns_name' makes the most sense. If you are -# running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'. The key of an EC2 tag -# may optionally be used; however the boto instance variables hold precedence -# in the event of a collision.
-destination_variable = public_dns_name - -# This allows you to override the inventory_name with an ec2 variable, instead -# of using the destination_variable above. Addressing (aka ansible_ssh_host) -# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. -#hostname_variable = tag_Name - -# For servers inside a VPC, using DNS names may not make sense. When an instance -# has 'subnet_id' set, this variable is used. If the subnet is public, setting -# this to 'ip_address' will return the public IP address. For instances in a -# private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from within EC2. The key of an EC2 tag may optionally be used; however -# the boto instance variables hold precedence in the event of a collision. -# WARNING: instances that are in a private VPC _without_ a public IP address -# will not be listed in the inventory until you set: -# vpc_destination_variable = private_ip_address -vpc_destination_variable = ip_address - -# The following two settings allow flexible ansible host naming based on a -# python format string and a comma-separated list of ec2 tags. Note that: -# -# 1) If the tags referenced are not present for some instances, empty strings -# will be substituted in the format string. -# 2) This overrides both destination_variable and vpc_destination_variable. -# -#destination_format = {0}.{1}.example.com -#destination_format_tags = Name,environment - -# To tag instances on EC2 with the resource records that point to them from -# Route53, set 'route53' to True. -route53 = False - -# To use Route53 records as the inventory hostnames, uncomment and set -# to equal the domain name you wish to use. You must also have 'route53' (above) -# set to True. -# route53_hostnames = .example.com - -# To exclude RDS instances from the inventory, uncomment and set to False. -#rds = False - -# To exclude ElastiCache instances from the inventory, uncomment and set to False. -#elasticache = False - -# Additionally, you can specify the list of zones to exclude from lookups in -# 'route53_excluded_zones' as a comma-separated list. -# route53_excluded_zones = samplezone1.com, samplezone2.com - -# By default, only EC2 instances in the 'running' state are returned. Set -# 'all_instances' to True to return all instances regardless of state. -all_instances = False - -# By default, only EC2 instances in the 'running' state are returned. Specify -# EC2 instance states to return as a comma-separated list. This -# option is overridden when 'all_instances' is True. -# instance_states = pending, running, shutting-down, terminated, stopping, stopped - -# By default, only RDS instances in the 'available' state are returned. Set -# 'all_rds_instances' to True to return all RDS instances regardless of state. -all_rds_instances = False - -# Include RDS cluster information (Aurora etc.) -include_rds_clusters = False - -# By default, only ElastiCache clusters and nodes in the 'available' state -# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes' -# to True to return all ElastiCache clusters and nodes, regardless of state. -# -# Note that all_elasticache_nodes only applies to listed clusters. That means -# if you set all_elasticache_clusters to False, no nodes will be returned from -# unavailable clusters, regardless of their state and of what you set for -# all_elasticache_nodes. -all_elasticache_replication_groups = False -all_elasticache_clusters = False -all_elasticache_nodes = False - -# API calls to EC2 are slow.
For this reason, we cache the results of an API -call. Set this to the path you want cache files to be written to. Two files -will be written to this directory: - - ansible-ec2.cache - - ansible-ec2.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# Organize groups into a nested hierarchy instead of a flat namespace. -nested_groups = False - -# Replace dashes ('-') when creating groups to avoid issues with Ansible -replace_dash_in_groups = True - -# If set to true, any tag of the form "a,b,c" is expanded into a list -# and the results are used to create additional tag_* inventory groups. -expand_csv_tags = False - -# The EC2 inventory output can become very large. To manage its size, -# configure which groups should be created. -group_by_instance_id = True -group_by_region = True -group_by_availability_zone = True -group_by_aws_account = False -group_by_ami_id = True -group_by_instance_type = True -group_by_instance_state = False -group_by_key_pair = True -group_by_vpc_id = True -group_by_security_group = True -group_by_tag_keys = True -group_by_tag_none = True -group_by_route53_names = True -group_by_rds_engine = True -group_by_rds_parameter_group = True -group_by_elasticache_engine = True -group_by_elasticache_cluster = True -group_by_elasticache_parameter_group = True -group_by_elasticache_replication_group = True - -# If you only want to include hosts that match a certain regular expression -# pattern_include = staging-* - -# If you want to exclude any hosts that match a certain regular expression -# pattern_exclude = staging-* - -# Instance filters can be used to control which instances are retrieved for -# inventory. For the full list of possible filters, please read the EC2 API -# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters -# Filters are key/value pairs separated by '='; to list multiple filters, use -# a comma-separated list. See examples below. - -# If you want to apply multiple filters simultaneously, set stack_filters to -# True. Default behaviour is to combine the results of all filters. Stacking -# allows the use of multiple conditions to filter down, for example by -# environment and type of host. -stack_filters = False - -# Retrieve only instances with (key=value) env=staging tag -# instance_filters = tag:env=staging - -# Retrieve only instances with role=webservers OR role=dbservers tag -# instance_filters = tag:role=webservers,tag:role=dbservers - -# Retrieve only t1.micro instances OR instances with tag env=staging -# instance_filters = instance-type=t1.micro,tag:env=staging - -# You can also use wildcards in filter values. The example below lists instances whose -# tag Name value matches webservers1* -# (ex. webservers15, webservers1a, webservers123 etc) -# instance_filters = tag:Name=webservers1* - -# An IAM role can be assumed, so all requests are run as that role. -# This can be useful for connecting across different accounts, or to limit user -# access -# iam_role = role-arn - -# A boto configuration profile may be used to separate out credentials -# see http://boto.readthedocs.org/en/latest/boto_config_tut.html -# boto_profile = some-boto-profile-name - - -[credentials] - -# The AWS credentials can optionally be specified here.
Credentials specified -# here are ignored if the environment variable AWS_ACCESS_KEY_ID or -# AWS_PROFILE is set, or if the boto_profile property above is set. -# -# Supplying AWS credentials here is not recommended, as it introduces -# non-trivial security concerns. When going down this route, please make sure -# to set access permissions for this file correctly, e.g. handle it the same -# way as you would a private SSH key. -# -# Unlike the boto and AWS configure files, this section does not support -# profiles. -# -# aws_access_key_id = AXXXXXXXXXXXXXX -# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX -# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/ansible/inventory/ec2.py b/ansible/inventory/ec2.py deleted file mode 100755 index 9614c5fe9..000000000 --- a/ansible/inventory/ec2.py +++ /dev/null @@ -1,1595 +0,0 @@ -#!/usr/bin/env python - -''' -EC2 external inventory script -================================= - -Generates inventory that Ansible can understand by making API requests to -AWS EC2 using the Boto library. - -NOTE: This script assumes Ansible is being executed where the environment -variables needed for Boto have already been set: - export AWS_ACCESS_KEY_ID='AK123' - export AWS_SECRET_ACCESS_KEY='abc123' - -An optional region environment variable can be set when region is 'auto'. - -This script also assumes there is an ec2.ini file alongside it. To specify a -different path to ec2.ini, define the EC2_INI_PATH environment variable: - - export EC2_INI_PATH=/path/to/my_ec2.ini - -If you're using eucalyptus, you need to set the above variables and -you need to define: - - export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus - -If you're using boto profiles (requires boto>=2.24.0), you can choose a profile -using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using -the AWS_PROFILE variable: - - AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml - -For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html - -When run against a specific host, this script returns the following variables: - - ec2_ami_launch_index - - ec2_architecture - - ec2_association - - ec2_attachTime - - ec2_attachment - - ec2_attachmentId - - ec2_block_devices - - ec2_client_token - - ec2_deleteOnTermination - - ec2_description - - ec2_deviceIndex - - ec2_dns_name - - ec2_eventsSet - - ec2_group_name - - ec2_hypervisor - - ec2_id - - ec2_image_id - - ec2_instanceState - - ec2_instance_type - - ec2_ipOwnerId - - ec2_ip_address - - ec2_item - - ec2_kernel - - ec2_key_name - - ec2_launch_time - - ec2_monitored - - ec2_monitoring - - ec2_networkInterfaceId - - ec2_ownerId - - ec2_persistent - - ec2_placement - - ec2_platform - - ec2_previous_state - - ec2_private_dns_name - - ec2_private_ip_address - - ec2_publicIp - - ec2_public_dns_name - - ec2_ramdisk - - ec2_reason - - ec2_region - - ec2_requester_id - - ec2_root_device_name - - ec2_root_device_type - - ec2_security_group_ids - - ec2_security_group_names - - ec2_shutdown_state - - ec2_sourceDestCheck - - ec2_spot_instance_request_id - - ec2_state - - ec2_state_code - - ec2_state_reason - - ec2_status - - ec2_subnet_id - - ec2_tenancy - - ec2_virtualization_type - - ec2_vpc_id - -These variables are pulled out of a boto.ec2.instance object. There is a lack of -consistency with variable spellings (camelCase and underscores) since this -just loops through all variables the object exposes. It is preferred to use the -ones with underscores when multiple exist.
- -In addition, if an instance has AWS Tags associated with it, each tag is a new -variable named: - - ec2_tag_[Key] = [Value] - -Security groups are comma-separated in 'ec2_security_group_ids' and -'ec2_security_group_names'. -''' - -# (c) 2012, Peter Sankauskas -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -import sys -import os -import argparse -import re -from time import time -import boto -from boto import ec2 -from boto import rds -from boto import elasticache -from boto import route53 -from boto import sts -import six - -from ansible.module_utils import ec2 as ec2_utils - -HAS_BOTO3 = False -try: - import boto3 - HAS_BOTO3 = True -except ImportError: - pass - -from six.moves import configparser -from collections import defaultdict - -try: - import json -except ImportError: - import simplejson as json - - -class Ec2Inventory(object): - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - self.aws_account_id = None - - # Index of hostname (address) to instance ID - self.index = {} - - # Boto profile to use (if any) - self.boto_profile = None - - # AWS credentials. 
- self.credentials = {} - - # Read settings and parse CLI arguments - self.parse_cli_args() - self.read_settings() - - # Make sure that profile_name is not passed at all if not set - # as pre 2.24 boto will fall over otherwise - if self.boto_profile: - if not hasattr(boto.ec2.EC2Connection, 'profile_name'): - self.fail_with_error("boto version must be >= 2.24 to use profile") - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the ec2.ini file ''' - - scriptbasename = __file__ - scriptbasename = os.path.basename(scriptbasename) - scriptbasename = scriptbasename.replace('.py', '') - - defaults = { - 'ec2': { - 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) - } - } - - if six.PY3: - config = configparser.ConfigParser() - else: - config = configparser.SafeConfigParser() - ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) - ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) - config.read(ec2_ini_path) - - # is eucalyptus? 
- self.eucalyptus_host = None - self.eucalyptus = False - if config.has_option('ec2', 'eucalyptus'): - self.eucalyptus = config.getboolean('ec2', 'eucalyptus') - if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): - self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') - - # Regions - self.regions = [] - configRegions = config.get('ec2', 'regions') - if (configRegions == 'all'): - if self.eucalyptus_host: - self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) - else: - configRegions_exclude = config.get('ec2', 'regions_exclude') - for regionInfo in ec2.regions(): - if regionInfo.name not in configRegions_exclude: - self.regions.append(regionInfo.name) - else: - self.regions = configRegions.split(",") - if 'auto' in self.regions: - env_region = os.environ.get('AWS_REGION') - if env_region is None: - env_region = os.environ.get('AWS_DEFAULT_REGION') - self.regions = [env_region] - - # Destination addresses - self.destination_variable = config.get('ec2', 'destination_variable') - self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') - - if config.has_option('ec2', 'hostname_variable'): - self.hostname_variable = config.get('ec2', 'hostname_variable') - else: - self.hostname_variable = None - - if config.has_option('ec2', 'destination_format') and \ - config.has_option('ec2', 'destination_format_tags'): - self.destination_format = config.get('ec2', 'destination_format') - self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') - else: - self.destination_format = None - self.destination_format_tags = None - - # Route53 - self.route53_enabled = config.getboolean('ec2', 'route53') - if config.has_option('ec2', 'route53_hostnames'): - self.route53_hostnames = config.get('ec2', 'route53_hostnames') - else: - self.route53_hostnames = None - self.route53_excluded_zones = [] - if config.has_option('ec2', 'route53_excluded_zones'): - self.route53_excluded_zones.extend( - config.get('ec2', 'route53_excluded_zones', '').split(',')) - - # Include RDS instances? - self.rds_enabled = True - if config.has_option('ec2', 'rds'): - self.rds_enabled = config.getboolean('ec2', 'rds') - - # Include RDS cluster instances? - if config.has_option('ec2', 'include_rds_clusters'): - self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') - else: - self.include_rds_clusters = False - - # Include ElastiCache instances? - self.elasticache_enabled = True - if config.has_option('ec2', 'elasticache'): - self.elasticache_enabled = config.getboolean('ec2', 'elasticache') - - # Return all EC2 instances? - if config.has_option('ec2', 'all_instances'): - self.all_instances = config.getboolean('ec2', 'all_instances') - else: - self.all_instances = False - - # Instance states to be gathered in inventory. Default is 'running'. - # Setting 'all_instances' to 'yes' overrides this option. - ec2_valid_instance_states = [ - 'pending', - 'running', - 'shutting-down', - 'terminated', - 'stopping', - 'stopped' - ] - self.ec2_instance_states = [] - if self.all_instances: - self.ec2_instance_states = ec2_valid_instance_states - elif config.has_option('ec2', 'instance_states'): - for instance_state in config.get('ec2', 'instance_states').split(','): - instance_state = instance_state.strip() - if instance_state not in ec2_valid_instance_states: - continue - self.ec2_instance_states.append(instance_state) - else: - self.ec2_instance_states = ['running'] - - # Return all RDS instances? 
(if RDS is enabled) - if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: - self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') - else: - self.all_rds_instances = False - - # Return all ElastiCache replication groups? (if ElastiCache is enabled) - if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: - self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') - else: - self.all_elasticache_replication_groups = False - - # Return all ElastiCache clusters? (if ElastiCache is enabled) - if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: - self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') - else: - self.all_elasticache_clusters = False - - # Return all ElastiCache nodes? (if ElastiCache is enabled) - if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: - self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') - else: - self.all_elasticache_nodes = False - - # boto configuration profile (prefer CLI argument then environment variables then config file) - self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE') - if config.has_option('ec2', 'boto_profile') and not self.boto_profile: - self.boto_profile = config.get('ec2', 'boto_profile') - - # AWS credentials (prefer environment variables) - if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or - os.environ.get('AWS_PROFILE')): - if config.has_option('credentials', 'aws_access_key_id'): - aws_access_key_id = config.get('credentials', 'aws_access_key_id') - else: - aws_access_key_id = None - if config.has_option('credentials', 'aws_secret_access_key'): - aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') - else: - aws_secret_access_key = None - if config.has_option('credentials', 'aws_security_token'): - aws_security_token = config.get('credentials', 'aws_security_token') - else: - aws_security_token = None - if aws_access_key_id: - self.credentials = { - 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key - } - if aws_security_token: - self.credentials['security_token'] = aws_security_token - - # Cache related - cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) - if self.boto_profile: - cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - cache_name = 'ansible-ec2' - cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) - if cache_id: - cache_name = '%s-%s' % (cache_name, cache_id) - self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) - self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) - self.cache_max_age = config.getint('ec2', 'cache_max_age') - - if config.has_option('ec2', 'expand_csv_tags'): - self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') - else: - self.expand_csv_tags = False - - # Configure nested groups instead of flat namespace. 
- if config.has_option('ec2', 'nested_groups'): - self.nested_groups = config.getboolean('ec2', 'nested_groups') - else: - self.nested_groups = False - - # Replace dash or not in group names - if config.has_option('ec2', 'replace_dash_in_groups'): - self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') - else: - self.replace_dash_in_groups = True - - # IAM role to assume for connection - if config.has_option('ec2', 'iam_role'): - self.iam_role = config.get('ec2', 'iam_role') - else: - self.iam_role = None - - # Configure which groups should be created. - group_by_options = [ - 'group_by_instance_id', - 'group_by_region', - 'group_by_availability_zone', - 'group_by_ami_id', - 'group_by_instance_type', - 'group_by_instance_state', - 'group_by_key_pair', - 'group_by_vpc_id', - 'group_by_security_group', - 'group_by_tag_keys', - 'group_by_tag_none', - 'group_by_route53_names', - 'group_by_rds_engine', - 'group_by_rds_parameter_group', - 'group_by_elasticache_engine', - 'group_by_elasticache_cluster', - 'group_by_elasticache_parameter_group', - 'group_by_elasticache_replication_group', - 'group_by_aws_account', - ] - for option in group_by_options: - if config.has_option('ec2', option): - setattr(self, option, config.getboolean('ec2', option)) - else: - setattr(self, option, True) - - # Do we need to just include hosts that match a pattern? - try: - pattern_include = config.get('ec2', 'pattern_include') - if pattern_include and len(pattern_include) > 0: - self.pattern_include = re.compile(pattern_include) - else: - self.pattern_include = None - except configparser.NoOptionError: - self.pattern_include = None - - # Do we need to exclude hosts that match a pattern? - try: - pattern_exclude = config.get('ec2', 'pattern_exclude') - if pattern_exclude and len(pattern_exclude) > 0: - self.pattern_exclude = re.compile(pattern_exclude) - else: - self.pattern_exclude = None - except configparser.NoOptionError: - self.pattern_exclude = None - - # Do we want to stack multiple filters? - if config.has_option('ec2', 'stack_filters'): - self.stack_filters = config.getboolean('ec2', 'stack_filters') - else: - self.stack_filters = False - - # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
- self.ec2_instance_filters = defaultdict(list) - if config.has_option('ec2', 'instance_filters'): - - filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] - - for instance_filter in filters: - instance_filter = instance_filter.strip() - if not instance_filter or '=' not in instance_filter: - continue - filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] - if not filter_key: - continue - self.ec2_instance_filters[filter_key].append(filter_value) - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', - help='Use boto profile for connections to EC2') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - if self.route53_enabled: - self.get_route53_records() - - for region in self.regions: - self.get_instances_by_region(region) - if self.rds_enabled: - self.get_rds_instances_by_region(region) - if self.elasticache_enabled: - self.get_elasticache_clusters_by_region(region) - self.get_elasticache_replication_groups_by_region(region) - if self.include_rds_clusters: - self.include_rds_clusters_by_region(region) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def connect(self, region): - ''' create connection to api server''' - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) - conn.APIVersion = '2010-08-31' - else: - conn = self.connect_to_aws(ec2, region) - return conn - - def boto_fix_security_token_in_profile(self, connect_args): - ''' monkey patch for boto issue boto/boto#2100 ''' - profile = 'profile ' + self.boto_profile - if boto.config.has_option(profile, 'aws_security_token'): - connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') - return connect_args - - def connect_to_aws(self, module, region): - connect_args = self.credentials - - # only pass the profile name if it's set (as it is not supported by older boto versions) - if self.boto_profile: - connect_args['profile_name'] = self.boto_profile - self.boto_fix_security_token_in_profile(connect_args) - - if self.iam_role: - sts_conn = sts.connect_to_region(region, **connect_args) - role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') - connect_args['aws_access_key_id'] = role.credentials.access_key - connect_args['aws_secret_access_key'] = role.credentials.secret_key - connect_args['security_token'] = role.credentials.session_token - - conn = module.connect_to_region(region, **connect_args) - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) - return conn - - def get_instances_by_region(self, region): - ''' Makes an AWS EC2 API call to the list of instances in a particular - region ''' - - try: - conn = self.connect(region) - reservations = [] - if self.ec2_instance_filters: - if self.stack_filters: - filters_dict = {} - for filter_key, filter_values in self.ec2_instance_filters.items(): - filters_dict[filter_key] = filter_values - reservations.extend(conn.get_all_instances(filters=filters_dict)) - else: - for filter_key, filter_values in self.ec2_instance_filters.items(): - reservations.extend(conn.get_all_instances(filters={filter_key: filter_values})) - else: - reservations = conn.get_all_instances() - - # Pull the tags back in a second step - # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not - # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` - instance_ids = [] - for reservation in reservations: - instance_ids.extend([instance.id for instance in reservation.instances]) - - max_filter_value = 199 - tags = [] - for i in range(0, len(instance_ids), max_filter_value): - tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]})) - - tags_by_instance_id = defaultdict(dict) - for tag in tags: - tags_by_instance_id[tag.res_id][tag.name] = tag.value - - if (not self.aws_account_id) and reservations: - self.aws_account_id = reservations[0].owner_id - - for reservation in reservations: - for instance in reservation.instances: - instance.tags = tags_by_instance_id[instance.id] - self.add_instance(instance, region) - - except boto.exception.BotoServerError as e: - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - else: - backend = 'Eucalyptus' if self.eucalyptus else 'AWS' - error = "Error connecting to %s backend.\n%s" % (backend, e.message) - self.fail_with_error(error, 'getting EC2 instances') - - def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular - region ''' - - if not HAS_BOTO3: - self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again", - "getting RDS instances") - - client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) - db_instances = client.describe_db_instances() - - try: - conn = self.connect_to_aws(rds, region) - if conn: - marker = None - while True: - instances = conn.get_all_dbinstances(marker=marker) - marker = instances.marker - for index, instance in enumerate(instances): - # Add tags to instances. 
- instance.arn = db_instances['DBInstances'][index]['DBInstanceArn'] - tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList'] - instance.tags = {} - for tag in tags: - instance.tags[tag['Key']] = tag['Value'] - - self.add_rds_instance(instance, region) - if not marker: - break - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS RDS is down:\n%s" % e.message - self.fail_with_error(error, 'getting RDS instances') - - def include_rds_clusters_by_region(self, region): - if not HAS_BOTO3: - self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", - "getting RDS clusters") - - client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) - - marker, clusters = '', [] - while marker is not None: - resp = client.describe_db_clusters(Marker=marker) - clusters.extend(resp["DBClusters"]) - marker = resp.get('Marker', None) - - account_id = boto.connect_iam().get_user().arn.split(':')[4] - c_dict = {} - for c in clusters: - # remove these datetime objects as there is no serialisation to json - # currently in place and we don't need the data yet - if 'EarliestRestorableTime' in c: - del c['EarliestRestorableTime'] - if 'LatestRestorableTime' in c: - del c['LatestRestorableTime'] - - if self.ec2_instance_filters == {}: - matches_filter = True - else: - matches_filter = False - - try: - # arn:aws:rds:<region>:<account number>:<resourcetype>:<name> - tags = client.list_tags_for_resource( - ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) - c['Tags'] = tags['TagList'] - - if self.ec2_instance_filters: - for filter_key, filter_values in self.ec2_instance_filters.items(): - # get AWS tag key e.g. tag:env will be 'env' - tag_name = filter_key.split(":", 1)[1] - # Filter values is a list (if you put multiple values for the same tag name) - matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) - - if matches_filter: - # it matches a filter, so stop looking for further matches - break - - except Exception as e: - if e.message.find('DBInstanceNotFound') >= 0: - # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster. - # Ignore errors when trying to find tags for these - pass - - # ignore empty clusters caused by AWS bug - if len(c['DBClusterMembers']) == 0: - continue - elif matches_filter: - c_dict[c['DBClusterIdentifier']] = c - - self.inventory['db_clusters'] = c_dict - - def get_elasticache_clusters_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache clusters (with - nodes' info) in a particular region.''' - - # ElastiCache boto module doesn't provide a get_all_instances method, - # that's why we need to call describe directly (it would be called by - # the shorthand method anyway...)
- try: - conn = self.connect_to_aws(elasticache, region) - if conn: - # show_cache_node_info = True - # because we also want nodes' information - response = conn.describe_cache_clusters(None, None, None, True) - - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS ElastiCache is down:\n%s" % e.message - self.fail_with_error(error, 'getting ElastiCache clusters') - - try: - # Boto also doesn't provide wrapper classes for CacheClusters or - # CacheNodes. Because of that we can't make use of the get_list - # method in the AWSQueryConnection. Let's do the work manually - clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] - - except KeyError as e: - error = "ElastiCache query to AWS failed (unexpected format)." - self.fail_with_error(error, 'getting ElastiCache clusters') - - for cluster in clusters: - self.add_elasticache_cluster(cluster, region) - - def get_elasticache_replication_groups_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache replication groups - in a particular region.''' - - # ElastiCache boto module doesn't provide a get_all_instances method, - # that's why we need to call describe directly (it would be called by - # the shorthand method anyway...) - try: - conn = self.connect_to_aws(elasticache, region) - if conn: - response = conn.describe_replication_groups() - - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message - self.fail_with_error(error, 'getting ElastiCache clusters') - - try: - # Boto also doesn't provide wrapper classes for ReplicationGroups. - # Because of that we can't make use of the get_list method in the - # AWSQueryConnection. Let's do the work manually - replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] - - except KeyError as e: - error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache clusters') - - for replication_group in replication_groups: - self.add_elasticache_replication_group(replication_group, region) - - def get_auth_error_message(self): - ''' create an informative error message if there is an issue authenticating''' - errors = ["Authentication error retrieving ec2 inventory."] - if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: - errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') - else: - errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') - - boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] - boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) - if len(boto_config_found) > 0: - errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) - else: - errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) - - return '\n'.join(errors) - - def fail_with_error(self, err_msg, err_operation=None): - '''log an error to std err for ansible-playbook to consume and exit''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def get_instance(self, region, instance_id): - conn = self.connect(region) - - reservations = conn.get_all_instances([instance_id]) - for reservation in reservations: - for instance in reservation.instances: - return instance - - def add_instance(self, instance, region): - ''' Adds an instance to the inventory and index, as long as it is - addressable ''' - - # Only return instances with desired instance states - if instance.state not in self.ec2_instance_states: - return - - # Select the best destination address - if self.destination_format and self.destination_format_tags: - dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags]) - elif instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) - else: - dest = getattr(instance, self.destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.destination_variable, None) - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Set the inventory name - hostname = None - if self.hostname_variable: - if self.hostname_variable.startswith('tag_'): - hostname = instance.tags.get(self.hostname_variable[4:], None) - else: - hostname = getattr(instance, self.hostname_variable) - - # set the hostname from route53 - if self.route53_enabled and self.route53_hostnames: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - if name.endswith(self.route53_hostnames): - hostname = name - - # If we can't get a nice hostname, use the destination address - if not hostname: - hostname = dest - # to_safe strips hostname characters like dots, so don't strip route53 hostnames - elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): - hostname = hostname.lower() - else: - hostname = self.to_safe(hostname).lower() - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(hostname): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(hostname): - return - - # Add to index - self.index[hostname] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [hostname] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, hostname) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.placement) - self.push_group(self.inventory, 'zones', instance.placement) - - # Inventory: Group by Amazon Machine Image (AMI) ID - if self.group_by_ami_id: - ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'images', ami_id) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by instance state - if self.group_by_instance_state: - state_name = self.to_safe('instance_state_' + instance.state) - self.push(self.inventory, state_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'instance_states', state_name) - - # Inventory: Group by key pair - if self.group_by_key_pair and instance.key_name: - key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'keys', key_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - for group in instance.groups: - key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, hostname) - if self.nested_groups: - 
self.push_group(self.inventory, 'security_groups', key) - except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', - 'Please upgrade boto >= 2.3.0.'])) - - # Inventory: Group by AWS account ID - if self.group_by_aws_account: - self.push(self.inventory, self.aws_account_id, dest) - if self.nested_groups: - self.push_group(self.inventory, 'accounts', self.aws_account_id) - - # Inventory: Group by tag keys - if self.group_by_tag_keys: - for k, v in instance.tags.items(): - if self.expand_csv_tags and v and ',' in v: - values = map(lambda x: x.strip(), v.split(',')) - else: - values = [v] - - for v in values: - if v: - key = self.to_safe("tag_" + k + "=" + v) - else: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - if v: - self.push_group(self.inventory, self.to_safe("tag_" + k), key) - - # Inventory: Group by Route53 domain names if enabled - if self.route53_enabled and self.group_by_route53_names: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - self.push(self.inventory, name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'route53', name) - - # Global Tag: instances without tags - if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', hostname) - - self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) - self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest - - def add_rds_instance(self, instance, region): - ''' Adds an RDS instance to the inventory and index, as long as it is - addressable ''' - - # Only want available instances unless all_rds_instances is True - if not self.all_rds_instances and instance.status != 'available': - return - - # Select the best destination address - dest = instance.endpoint[0] - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Set the inventory name - hostname = None - if self.hostname_variable: - if self.hostname_variable.startswith('tag_'): - hostname = instance.tags.get(self.hostname_variable[4:], None) - else: - hostname = getattr(instance, self.hostname_variable) - - # If we can't get a nice hostname, use the destination address - if not hostname: - hostname = dest - - hostname = self.to_safe(hostname).lower() - - # Add to index - self.index[hostname] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [hostname] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, hostname) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.availability_zone) - self.push_group(self.inventory, 'zones', instance.availability_zone) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - if instance.security_group: - key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', - 'Please upgrade boto >= 2.3.0.'])) - - # Inventory: Group by engine - if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) - if self.nested_groups: - self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) - - # Inventory: Group by parameter group - if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) - if self.nested_groups: - self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) - - # Global Tag: all RDS instances - self.push(self.inventory, 'rds', hostname) - - self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) - self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest - - def add_elasticache_cluster(self, cluster, region): - ''' Adds an ElastiCache cluster to the inventory and index, as long as - its nodes are addressable ''' - - # Only want available clusters unless all_elasticache_clusters is True - if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': - return - - # Select the best destination address - if 'ConfigurationEndpoint' in cluster and
cluster['ConfigurationEndpoint']: - # Memcached cluster - dest = cluster['ConfigurationEndpoint']['Address'] - is_redis = False - else: - # Redis single node cluster - # Because all Redis clusters are single nodes, we'll merge the - # info from the cluster with info about the node - dest = cluster['CacheNodes'][0]['Endpoint']['Address'] - is_redis = True - - if not dest: - # Skip clusters we cannot address (e.g. private VPC subnet) - return - - # Add to index - self.index[dest] = [region, cluster['CacheClusterId']] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[cluster['CacheClusterId']] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) - - # Inventory: Group by region - if self.group_by_region and not is_redis: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone and not is_redis: - self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) - self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) - - # Inventory: Group by node type - if self.group_by_instance_type and not is_redis: - type_name = self.to_safe('type_' + cluster['CacheNodeType']) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for ElastiCache) - - # Inventory: Group by security group - if self.group_by_security_group and not is_redis: - - # Check for the existence of the 'SecurityGroups' key and also if - # this key has some value. When the cluster is not placed in a SG - # the query can return None here and cause an error.
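- # For example (values illustrative), the describe output may carry either
- # cluster['SecurityGroups'] = None
- # or
- # cluster['SecurityGroups'] = [{'SecurityGroupId': 'sg-0a1b2c3d'}]
- # which is why both the key and the None case are tested below.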
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: - for security_group in cluster['SecurityGroups']: - key = self.to_safe("security_group_" + security_group['SecurityGroupId']) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - # Inventory: Group by engine - if self.group_by_elasticache_engine and not is_redis: - self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) - - # Inventory: Group by parameter group - if self.group_by_elasticache_parameter_group: - self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) - - # Inventory: Group by replication group - if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: - self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) - - # Global Tag: all ElastiCache clusters - self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) - - host_info = self.get_host_info_dict_from_describe_dict(cluster) - - self.inventory["_meta"]["hostvars"][dest] = host_info - - # Add the nodes - for node in cluster['CacheNodes']: - self.add_elasticache_node(node, cluster, region) - - def add_elasticache_node(self, node, cluster, region): - ''' Adds an ElastiCache node to the inventory and index, as long as - it is addressable ''' - - # Only want available nodes unless all_elasticache_nodes is True - if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': - return - - # Select the best destination address - dest = node['Endpoint']['Address'] - - if not dest: - # Skip nodes we cannot address (e.g. 
private VPC subnet) - return - - node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) - - # Add to index - self.index[dest] = [region, node_id] - - # Inventory: Group by node ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[node_id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', node_id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) - self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) - - # Inventory: Group by node type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + cluster['CacheNodeType']) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for ElastiCache) - - # Inventory: Group by security group - if self.group_by_security_group: - - # Check for the existence of the 'SecurityGroups' key and also if - # this key has some value. When the cluster is not placed in a SG - # the query can return None here and cause an error. - if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: - for security_group in cluster['SecurityGroups']: - key = self.to_safe("security_group_" + security_group['SecurityGroupId']) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - # Inventory: Group by engine - if self.group_by_elasticache_engine: - self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) - - # Inventory: Group by parameter group (done at cluster level) - - # Inventory: Group by replication group (done at cluster level) - - # Inventory: Group by ElastiCache Cluster - if self.group_by_elasticache_cluster: - self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) - - # Global Tag: all ElastiCache nodes - self.push(self.inventory, 'elasticache_nodes', dest) - - host_info = self.get_host_info_dict_from_describe_dict(node) - - if dest in self.inventory["_meta"]["hostvars"]: - self.inventory["_meta"]["hostvars"][dest].update(host_info) - else: - self.inventory["_meta"]["hostvars"][dest] = host_info - - def add_elasticache_replication_group(self, replication_group, region): - ''' Adds an ElastiCache replication group to the inventory and index ''' - - # Only want available clusters unless all_elasticache_replication_groups is True - if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': - return - - # Skip clusters we cannot address (e.g. 
private VPC subnet or clustered redis) - if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \ - replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None: - return - - # Select the best destination address (PrimaryEndpoint) - dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] - - # Add to index - self.index[dest] = [region, replication_group['ReplicationGroupId']] - - # Inventory: Group by ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[replication_group['ReplicationGroupId']] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone (doesn't apply to replication groups) - - # Inventory: Group by node type (doesn't apply to replication groups) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for replication groups) - - # Inventory: Group by security group (doesn't apply to replication groups) - # Check this value at the cluster level - - # Inventory: Group by engine (replication groups are always Redis) - if self.group_by_elasticache_engine: - self.push(self.inventory, 'elasticache_redis', dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', 'redis') - - # Global Tag: all ElastiCache replication groups - self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) - - host_info = self.get_host_info_dict_from_describe_dict(replication_group) - - self.inventory["_meta"]["hostvars"][dest] = host_info - - def get_route53_records(self): - ''' Get and store the map of resource records to domain names that - point to them. ''' - - if self.boto_profile: - r53_conn = route53.Route53Connection(profile_name=self.boto_profile) - else: - r53_conn = route53.Route53Connection() - all_zones = r53_conn.get_zones() - - route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones] - - self.route53_records = {} - - for zone in route53_zones: - rrsets = r53_conn.get_all_rrsets(zone.id) - - for record_set in rrsets: - record_name = record_set.name - - if record_name.endswith('.'): - record_name = record_name[:-1] - - for resource in record_set.resource_records: - self.route53_records.setdefault(resource, set()) - self.route53_records[resource].add(record_name) - - def get_instance_route53_names(self, instance): - ''' Check if an instance is referenced in the records we have from - Route53. If it is, return the list of domain names pointing to said - instance. If nothing points to it, return an empty list.
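-
- For example (illustrative values): with a Route53 record mapping
- api.example.com to 10.0.0.5, an instance whose private_ip_address is
- 10.0.0.5 yields ['api.example.com'].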
''' - - instance_attributes = ['public_dns_name', 'private_dns_name', - 'ip_address', 'private_ip_address'] - - name_list = set() - - for attrib in instance_attributes: - try: - value = getattr(instance, attrib) - except AttributeError: - continue - - if value in self.route53_records: - name_list.update(self.route53_records[value]) - - return list(name_list) - - def get_host_info_dict_from_instance(self, instance): - instance_vars = {} - for key in vars(instance): - value = getattr(instance, key) - key = self.to_safe('ec2_' + key) - - # Handle complex types - # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 - if key == 'ec2__state': - instance_vars['ec2_state'] = instance.state or '' - instance_vars['ec2_state_code'] = instance.state_code - elif key == 'ec2__previous_state': - instance_vars['ec2_previous_state'] = instance.previous_state or '' - instance_vars['ec2_previous_state_code'] = instance.previous_state_code - elif isinstance(value, (int, bool)): - instance_vars[key] = value - elif isinstance(value, six.string_types): - instance_vars[key] = value.strip() - elif value is None: - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2__placement': - instance_vars['ec2_placement'] = value.zone - elif key == 'ec2_tags': - for k, v in value.items(): - if self.expand_csv_tags and ',' in v: - v = list(map(lambda x: x.strip(), v.split(','))) - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': - group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) - instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) - elif key == 'ec2_block_device_mapping': - instance_vars["ec2_block_devices"] = {} - for k, v in value.items(): - instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id - else: - pass - # TODO Product codes if someone finds them useful - # print key - # print type(value) - # print value - - instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id - - return instance_vars - - def get_host_info_dict_from_describe_dict(self, describe_dict): - ''' Parses the dictionary returned by the API call into a flat list - of parameters. This method should be used only when 'describe' is - used directly because Boto doesn't provide specific classes. ''' - - # I really don't agree with prefixing everything with 'ec2' - # because EC2, RDS and ElastiCache are different services. - # I'm just following the pattern used until now to not break any - # compatibility. 
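- # For example, a describe key such as 'CacheClusterId' comes out of
- # uncammelize() as 'cache_cluster_id' and is stored below as
- # 'ec2_cache_cluster_id' (transformation inferred from the code below).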
- - host_info = {} - for key in describe_dict: - value = describe_dict[key] - key = self.to_safe('ec2_' + self.uncammelize(key)) - - # Handle complex types - - # Target: Memcached Cache Clusters - if key == 'ec2_configuration_endpoint' and value: - host_info['ec2_configuration_endpoint_address'] = value['Address'] - host_info['ec2_configuration_endpoint_port'] = value['Port'] - - # Target: Cache Nodes and Redis Cache Clusters (single node) - if key == 'ec2_endpoint' and value: - host_info['ec2_endpoint_address'] = value['Address'] - host_info['ec2_endpoint_port'] = value['Port'] - - # Target: Redis Replication Groups - if key == 'ec2_node_groups' and value: - host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] - host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] - replica_count = 0 - for node in value[0]['NodeGroupMembers']: - if node['CurrentRole'] == 'primary': - host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] - host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] - host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] - elif node['CurrentRole'] == 'replica': - host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] - host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] - host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] - replica_count += 1 - - # Target: Redis Replication Groups - if key == 'ec2_member_clusters' and value: - host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) - - # Target: All Cache Clusters - elif key == 'ec2_cache_parameter_group': - host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) - host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] - host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] - - # Target: Almost everything - elif key == 'ec2_security_groups': - - # Skip if SecurityGroups is None - # (it is possible to have the key defined but no value in it). 
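- # For example (ids illustrative):
- # value = [{'SecurityGroupId': 'sg-11111111'}, {'SecurityGroupId': 'sg-22222222'}]
- # flattens below to host_info['ec2_security_group_ids'] = 'sg-11111111,sg-22222222'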
- if value is not None: - sg_ids = [] - for sg in value: - sg_ids.append(sg['SecurityGroupId']) - host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) - - # Target: Everything - # Preserve booleans and integers - elif isinstance(value, (int, bool)): - host_info[key] = value - - # Target: Everything - # Sanitize string values - elif isinstance(value, six.string_types): - host_info[key] = value.strip() - - # Target: Everything - # Replace None by an empty string - elif value is None: - host_info[key] = '' - - else: - # Remove non-processed complex types - pass - - return host_info - - def get_host_info(self): - ''' Get variables about a specific host ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - (region, instance_id) = self.index[self.args.host] - - instance = self.get_instance(region, instance_id) - return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) - - def push(self, my_dict, key, element): - ''' Push an element onto an array that may not have been defined in - the dict ''' - group_info = my_dict.setdefault(key, []) - if isinstance(group_info, dict): - host_list = group_info.setdefault('hosts', []) - host_list.append(element) - else: - group_info.append(element) - - def push_group(self, my_dict, key, element): - ''' Push a group as a child of another group. ''' - parent_group = my_dict.setdefault(key, {}) - if not isinstance(parent_group, dict): - parent_group = my_dict[key] = {'hosts': parent_group} - child_groups = parent_group.setdefault('children', []) - if element not in child_groups: - child_groups.append(element) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - with open(self.cache_path_cache, 'r') as f: - json_inventory = f.read() - return json_inventory - - def load_index_from_cache(self): - ''' Reads the index from the cache file and sets self.index ''' - - with open(self.cache_path_index, 'rb') as f: - self.index = json.load(f) - - def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(data, True) - with open(filename, 'w') as f: - f.write(json_data) - - def uncammelize(self, key): - temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = r"[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += r"\-" - return re.sub(regex + "]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -if __name__ == '__main__': - # Run the script - Ec2Inventory() diff --git a/ansible/jsonconfig.yml b/ansible/jsonconfig.yml deleted file mode 100644 index de1f5b459..000000000 --- a/ansible/jsonconfig.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -#variable "service" is required - -- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" - roles: - - jsonconfig - diff --git
a/ansible/reset.yml b/ansible/reset.yml deleted file mode 100644 index d18075062..000000000 --- a/ansible/reset.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" - become: yes - roles: - - stop - - unsafe_reset - - start - diff --git a/ansible/restart.yml b/ansible/restart.yml deleted file mode 100644 index 6690a41f6..000000000 --- a/ansible/restart.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -#variable "service" is required - -- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" - roles: - - stop - - start diff --git a/ansible/roles/config/defaults/main.yml b/ansible/roles/config/defaults/main.yml deleted file mode 100644 index 4b9281a5e..000000000 --- a/ansible/roles/config/defaults/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -#genesis_file: "" -app_options_file: "app_options_files/public_testnet" -seeds: "" -testnet_name: testnet1 -validators: true - diff --git a/ansible/roles/config/tasks/main.yml b/ansible/roles/config/tasks/main.yml deleted file mode 100644 index 85c37e3b3..000000000 --- a/ansible/roles/config/tasks/main.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- - -- name: gather tendermint public keys - when: (validators == true or validators == 'true') and genesis_file is not defined - tags: reconfig-toml,reconfig-genesis - command: "/usr/bin/tendermint show_validator --home /etc/{{service}} --log_level error" - register: pubkeys - changed_when: false - -- name: gather tendermint peer IDs - when: genesis_file is not defined - tags: reconfig-toml - command: "/usr/bin/tendermint show_node_id --home /etc/{{service}} --log_level error" - register: nodeids - changed_when: false - -- name: resetting permissions from root after gathering public keys - tags: reconfig-toml,reconfig-genesis - file: "path=/etc/{{service}} owner={{service}} group={{service}} recurse=yes" - -- name: register tendermint public keys as host facts - when: (validators == true or validators == 'true') and genesis_file is not defined - tags: reconfig-toml,reconfig-genesis - set_fact: "pubkey='{{pubkeys.stdout}}'" - connection: local - -- name: register node ids as host facts - when: genesis_file is not defined - tags: reconfig-toml - set_fact: "nodeid='{{nodeids.stdout}}'" - connection: local - -- name: copy generated genesis.json - genesis_time will be updated - when: (validators == true or validators == 'true') and (genesis_file is not defined) - tags: reconfig-genesis - template: - src: genesis.json.j2 - dest: "/etc/{{service}}/config/genesis.json" - owner: "{{service}}" - group: "{{service}}" - -- name: copy pre-created genesis.json - when: genesis_file is defined - tags: reconfig-genesis - copy: "src={{genesis_file}} dest=/etc/{{service}}/config/genesis.json owner={{service}} group={{service}}" - -- name: copy tendermint config.toml - tags: reconfig-toml - when: validators == true or validators == 'true' - template: - src: config.toml.j2 - dest: "/etc/{{service}}/config/config.toml" - owner: "{{service}}" - group: "{{service}}" - -- name: Copy validator network files for non-validators - when: validators == false or validators == 'false' - tags: reconfig-toml,reconfig-genesis - get_url: "url={{item['src']}} dest={{item['dst']}} force=yes" - with_items: - - { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/{{service}}/genesis.json" , dst: 
"/etc/{{service}}/config/genesis.json" } - - { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/config.toml" , dst: "/etc/{{service}}/config/config.toml" } - -- name: Set validator network files permissions for non-validators - when: validators == false or validators == 'false' - tags: reconfig-toml,reconfig-genesis - file: "path={{item}} owner={{service}} group={{service}}" - with_items: - - "/etc/{{service}}/config/genesis.json" - - "/etc/{{service}}/config/config.toml" - diff --git a/ansible/roles/config/templates/config.toml.j2 b/ansible/roles/config/templates/config.toml.j2 deleted file mode 100644 index 020baf61e..000000000 --- a/ansible/roles/config/templates/config.toml.j2 +++ /dev/null @@ -1,221 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:46658" - -# A custom human readable name for this node -moniker = "{{inventory_hostname}}" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -{% if service == 'tendermint' %} - -# Database backend: leveldb | memdb -db_backend = "memdb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "mempool:error,*:debug" - -{% else %} - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" -#log_level = "mempool:error,*:debug" - -{% endif %} - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:46657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:46656" - -# Comma separated list of seed nodes to connect to -seeds = "{{ seeds | default() }}" - -# Comma separated list of nodes to keep persistent connections to -{% set comma = joiner(",") %}persistent_peers = "{% for host in 
((groups[testnet_name]|default([]))+(groups['tag_Environment_'~(testnet_name|regex_replace('-','_'))]|default([])))|difference(inventory_hostname) %}{{ comma() }}{{hostvars[host]["nodeid"]}}@{{hostvars[host]["inventory_hostname"]}}:46656{% endfor %}" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -#max_num_peers = 50 -max_num_peers = 300 - -# Maximum size of a message packet payload, in bytes -{% if service == 'tendermint' %} -max_msg_packet_payload_size = 65536 -{% else %} -max_msg_packet_payload_size = 1024 -{% endif %} - -# Rate at which packets can be sent, in bytes/second -{% if service == 'tendermint' %} -send_rate = 51200000 # 50 MB/s -{% else %} -send_rate = 512000 -{% endif %} - -# Rate at which packets can be received, in bytes/second -{% if service == 'tendermint' %} -recv_rate = 51200000 # 50 MB/s -{% else %} -recv_rate = 512000 -{% endif %} - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which the node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -##### mempool configuration options ##### -[mempool] - -{% if service == 'tendermint' %} -recheck = false -{% else %} -recheck = true -{% endif %} -recheck_empty = true -broadcast = true -{% if service == 'tendermint' %} -wal_dir = "" -{% else %} -wal_dir = "data/mempool.wal" -{% endif %} - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" -{% if service == 'tendermint' %} -wal_light = true -{% else %} -wal_light = false -{% endif %} - -# All timeouts are in milliseconds -{% if service == 'tendermint' %} -timeout_propose = 10000 -{% else %} -timeout_propose = 3000 -{% endif %} -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -{% if service == 'tendermint' %} -timeout_commit = 1 -{% else %} -timeout_commit = 1000 -{% endif %} - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -{% if service == 'tendermint' %} -skip_timeout_commit = true -{% else %} -skip_timeout_commit = false -{% endif %} - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -{% if service == 'tendermint' %} -create_empty_blocks = false -{% else %} -create_empty_blocks = true -create_empty_blocks_interval = 60 -{% endif %} - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) - -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags.
Note this may not be -# desirable (see the comment above). IndexTags takes precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). -index_all_tags = false - diff --git a/ansible/roles/config/templates/genesis.json.j2 b/ansible/roles/config/templates/genesis.json.j2 deleted file mode 100644 index 6ca0f876d..000000000 --- a/ansible/roles/config/templates/genesis.json.j2 +++ /dev/null @@ -1,50 +0,0 @@ -{ - "genesis_time":"{{ansible_date_time.iso8601}}", - "chain_id":"{{testnet_name}}", - "validators": - [ -{% if (validators == true) or (validators == 'true') %} -{% set comma = joiner(",") %} -{% for host in (groups[testnet_name]|default([]))+(groups['tag_Environment_'~(testnet_name|regex_replace('-','_'))]|default([])) %} - {{ comma() }} - { - "pub_key": { - "data": "{{hostvars[host]["pubkey"]["data"]}}", - "type": "{{hostvars[host]["pubkey"]["type"]}}" - }, - "power":1000, - "name":"{{hostvars[host]["inventory_hostname"]}}" - } -{% endfor %} -{% endif %} - ], - "app_hash":"", -{% if service == 'basecoind' %} - "app_state": { -{% else %} - "app_options": { -{% endif %} -{% if app_options_file is defined %} -{% include app_options_file %} -{% endif %} - } -{% if service == 'ethermint' %} - , - "config": { - "chainId": 15, - "homesteadBlock": 0, - "eip155Block": 0, - "eip158Block": 0 - }, - "nonce": "0xdeadbeefdeadbeef", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "difficulty": "0x40", - "gasLimit": "0x8000000", - "alloc": { - "0x7eff122b94897ea5b0e2a9abf47b86337fafebdc": { "balance": "10000000000000000000000000000000000" }, - "0xc6713982649D9284ff56c32655a9ECcCDA78422A": { "balance": "10000000000000000000000000000000000" } - } -{% endif %} -} diff --git a/ansible/roles/generic-service/tasks/main.yml b/ansible/roles/generic-service/tasks/main.yml deleted file mode 100644 index e66ee7eaa..000000000 --- a/ansible/roles/generic-service/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- - -- name: Create service group - group: "name={{service}}" - -- name: Create service user - user: "name={{service}} group={{service}} home=/etc/{{service}}" - -- name: Change user folder to more permissive - file: "path=/etc/{{service}} mode=0755" - -- name: Create tendermint service - template: "src=systemd.service.j2 dest=/etc/systemd/system/{{service}}.service" - -- name: Reload systemd services - systemd: "name={{service}} daemon_reload=yes enabled=no" - -- name: Initialize tendermint - command: "/usr/bin/tendermint init --home /etc/{{service}}" - become: yes - become_user: "{{service}}" - diff --git a/ansible/roles/generic-service/templates/systemd.service.j2 b/ansible/roles/generic-service/templates/systemd.service.j2 deleted file mode 100644 index c38cca468..000000000 --- a/ansible/roles/generic-service/templates/systemd.service.j2 +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description={{service}} server -Requires=network-online.target -After=network-online.target - -[Service] -Environment="TMHOME=/etc/{{service}}" -Restart=on-failure -User={{service}} -Group={{service}} -PermissionsStartOnly=true -ExecStart=/usr/bin/tendermint node{{(service=='tendermint')|ternary(' --proxy_app=dummy','')}} -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/ansible/roles/getconfigtoml/tasks/main.yml b/ansible/roles/getconfigtoml/tasks/main.yml deleted file mode 100644 index
5daab5b6f..000000000 --- a/ansible/roles/getconfigtoml/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: Get config.toml from node - fetch: "dest={{ destination | default('.') }}/config.toml flat=yes src=/etc/{{service}}/config/config.toml" - run_once: yes - diff --git a/ansible/roles/getfile/tasks/main.yml b/ansible/roles/getfile/tasks/main.yml deleted file mode 100644 index 7f3156fc1..000000000 --- a/ansible/roles/getfile/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: Get file from node - fetch: "dest={{ destination | default('.') }}/{{ source | basename }} flat=yes src='{{source}}'" - run_once: yes - diff --git a/ansible/roles/install/defaults/main.yml b/ansible/roles/install/defaults/main.yml deleted file mode 100644 index f9e5a31d7..000000000 --- a/ansible/roles/install/defaults/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -release_install: true -binary: "{{ lookup('env','GOPATH') | default('') }}/bin/{{service}}" -devops_path: false - diff --git a/ansible/roles/install/tasks/centos.yml b/ansible/roles/install/tasks/centos.yml deleted file mode 100644 index 06db3ba21..000000000 --- a/ansible/roles/install/tasks/centos.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- - -#Three commands to install a service on CentOS/RedHat -#wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | rpm --import - -#wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo -#yum update && yum install basecoin - -#This has a bug in Ansible 2.3: https://github.com/ansible/ansible/issues/20711 -#- name: Add repository key on CentOS/RedHat -# when: ansible_os_family == "RedHat" -# rpm_key: key=https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint - -#Workaround -- name: Download repository key for CentOS/RedHat - when: ansible_os_family == "RedHat" - get_url: "url=https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64/RPM-GPG-KEY-Tendermint dest=/root/RPM-GPG-KEY-Tendermint force=yes checksum=sha256:a8c61d4061697d2595562c703dbafbdfdcfa7f0c75a523ac84d5609d1b444abe" -- name: Import repository key for CentOS/RedHat - when: ansible_os_family == "RedHat" - command: "rpm --import /root/RPM-GPG-KEY-Tendermint" - -- name: Install tendermint repository on CentOS/RedHat - when: ansible_os_family == "RedHat" - yum_repository: - name: tendermint - baseurl: https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64 - description: "Tendermint repo" - gpgcheck: yes - gpgkey: https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64/RPM-GPG-KEY-Tendermint -# repo_gpgcheck: yes - -- name: Install package on CentOS/RedHat - when: ansible_os_family == "RedHat" - yum: "pkg={{service}} update_cache=yes state=latest" - -# The below commands are required so that the tomlconfig playbook can run. 
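- # (A sketch of the manual equivalent, assuming the tomlconfig module needs
- # the Python toml library at runtime:
- #   yum install epel-release python2-pip && pip install toml)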
- -- name: Install epel-release on CentOS/RedHat - when: ansible_os_family == "RedHat" - yum: "pkg=epel-release update_cache=yes state=latest" - -- name: Install pip on CentOS/RedHat - when: ansible_os_family == "RedHat" - yum: "pkg={{item}} state=latest" - with_items: - - python2-pip - - python-virtualenv - - unzip - - tar -#For show_validator command: - - tendermint - -- name: Install toml - when: ansible_os_family == "RedHat" - pip: name=toml - diff --git a/ansible/roles/install/tasks/debian.yml b/ansible/roles/install/tasks/debian.yml deleted file mode 100644 index 706f53fd0..000000000 --- a/ansible/roles/install/tasks/debian.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- - -#Three commands to install a service on Debian/Ubuntu -#wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add - -#wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list -#apt-get update && apt-get install basecoin - -- name: Add repository key on Debian/Ubuntu - when: ansible_os_family == "Debian" - apt_key: - url: https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64/RPM-GPG-KEY-Tendermint - id: 2122CBE9 - -- name: Install tendermint repository on Debian/Ubuntu - when: ansible_os_family == "Debian" - apt_repository: - repo: deb https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}debian stable main - -- name: Install package on Debian/Ubuntu - when: ansible_os_family == "Debian" - apt: "pkg={{service}} update_cache=yes state=latest" - -# The below command is required to use the tomlconfig playbook. - -- name: Install package on Debian/Ubuntu - when: ansible_os_family == "Debian" - apt: "pkg={{item}} state=latest" - with_items: - - python-toml - - unzip - - tar -#For show_validator command: - - tendermint - diff --git a/ansible/roles/install/tasks/main.yml b/ansible/roles/install/tasks/main.yml deleted file mode 100644 index 939b1acec..000000000 --- a/ansible/roles/install/tasks/main.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- - -- name: Set timezone - when: timezone is defined - file: path=/etc/localtime state=link src=/usr/share/zoneinfo/{{timezone}} force=yes - -- name: Disable journald rate-limiting - lineinfile: "dest=/etc/systemd/journald.conf regexp={{item.regexp}} line='{{item.line}}'" - with_items: - - { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" } - - { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" } - -- name: Create journal directory for permanent logs - file: path=/var/log/journal state=directory - -- name: Set journal folder with systemd-tmpfiles - command: "systemd-tmpfiles --create --prefix /var/log/journal" - -- name: Restart journald - service: name=systemd-journald state=restarted - -- name: Ability to get the core dump on SIGABRT - shell: "ulimit -c unlimited" - -#TODO include is deprecated in Ansible 2.4.0 and will be removed in 2.8.0 -#Replace it with include_tasks - -- include: debian.yml - when: ansible_os_family == "Debian" - -- include: centos.yml - when: ansible_os_family == "RedHat" - -- name: copy compiled binary - when: not release_install|bool - copy: - src: "{{binary}}" - dest: /usr/local/bin - mode: 0755 - diff --git a/ansible/roles/jsonconfig/library/jsonconfig.py b/ansible/roles/jsonconfig/library/jsonconfig.py deleted file mode 100644 index 11f9146e6..000000000 --- a/ansible/roles/jsonconfig/library/jsonconfig.py +++ /dev/null @@ 
-1,360 +0,0 @@ -#!/usr/bin/python - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -DOCUMENTATION = ''' ---- -module: jsonconfig - -short_description: Ensure a particular configuration is added to a json-formatted configuration file - -version_added: "2.4" - -description: - - This module will add configuration to a json-formatted configuration file. - -options: - dest: - description: - - The file to modify. - required: true - aliases: [ name, destfile ] - json: - description: - - The configuration in json format to apply. - required: false - default: '{}' - merge: - description: - - Used with C(state=present). If specified, it will merge the configuration. Otherwise - the configuration will be overwritten. - required: false - choices: [ "yes", "no" ] - default: "yes" - state: - description: - - Whether the configuration should be there or not. - required: false - choices: [ present, absent ] - default: "present" - create: - description: - - Used with C(state=present). If specified, the file will be created - if it does not already exist. By default it will fail if the file - is missing. - required: false - choices: [ "yes", "no" ] - default: "no" - backup: - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" - others: - description: - - All arguments accepted by the M(file) module also work here. - required: false - -extends_documentation_fragment: - - files - - validate - -author: - - "Greg Szabo (@greg-szabo)" -''' - -EXAMPLES = ''' -# Add a new section to a json file -- name: Add comment section - jsonconfig: - dest: /etc/something.json - json: '{ "comment": { "comment1": "mycomment" } }' - -# Rewrite a json file with the configuration -- name: Create or overwrite config.json - jsonconfig: - dest: /etc/config.json - json: '{ "regedit": { "freshfile": true } }' - merge: no - create: yes - -''' - -RETURN = ''' -changed: - description: True if the configuration changed.
-    type: bool
-msg:
-    description: Description of the change
-    type: str
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import b
-from ansible.module_utils._text import to_bytes, to_native
-import tempfile
-import json
-import copy
-import os
-
-
-def write_changes(module, b_lines, dest):
-
-    tmpfd, tmpfile = tempfile.mkstemp()
-    f = os.fdopen(tmpfd, 'wb')
-    f.writelines(b_lines)
-    f.close()
-
-    validate = module.params.get('validate', None)
-    valid = not validate
-    if validate:
-        if "%s" not in validate:
-            module.fail_json(msg="validate must contain %%s: %s" % (validate))
-        (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
-        valid = rc == 0
-        if rc != 0:
-            module.fail_json(msg='failed to validate: '
-                                 'rc:%s error:%s' % (rc, err))
-    if valid:
-        module.atomic_move(tmpfile,
-                           to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
-                           unsafe_writes=module.params['unsafe_writes'])
-
-
-def check_file_attrs(module, changed, message, diff):
-
-    file_args = module.load_file_common_arguments(module.params)
-    if module.set_fs_attributes_if_different(file_args, False, diff=diff):
-        if changed:
-            message += " and "
-        changed = True
-        message += "ownership, perms or SE linux context changed"
-
-    return message, changed
-
-
-# Merge dict d2 into dict d1 and return a new object
-def deepmerge(d1, d2):
-    if d1 is None:
-        return copy.deepcopy(d2)
-    if d2 is None:
-        return copy.deepcopy(d1)
-    if d1 == d2:
-        return copy.deepcopy(d1)
-    if isinstance(d1, dict) and isinstance(d2, dict):
-        result = {}
-        for key in set(d1) | set(d2):
-            da = db = None
-            if key in d1:
-                da = d1[key]
-            if key in d2:
-                db = d2[key]
-            result[key] = deepmerge(da, db)
-        return result
-    else:
-        return copy.deepcopy(d2)
-
-
-# Remove dict d2 from dict d1 and return a new object
-def deepdiff(d1, d2):
-    if d1 is None or d2 is None:
-        return None
-    if d1 == d2:
-        return None
-    if isinstance(d1, dict) and isinstance(d2, dict):
-        result = {}
-        for key in d1.keys():
-            if key in d2:
-                dd = deepdiff(d1[key], d2[key])
-                if dd is not None:
-                    result[key] = dd
-            else:
-                result[key] = d1[key]
-        return result
-    else:
-        return None
-
-
-def present(module, dest, conf, merge, create, backup):
-
-    diff = {'before': '',
-            'after': '',
-            'before_header': '%s (content)' % dest,
-            'after_header': '%s (content)' % dest}
-
-    b_dest = to_bytes(dest, errors='surrogate_or_strict')
-    if not os.path.exists(b_dest):
-        if not create:
-            module.fail_json(rc=257, msg='Destination %s does not exist!' % dest)
-        b_destpath = os.path.dirname(b_dest)
-        if not os.path.exists(b_destpath) and not module.check_mode:
-            os.makedirs(b_destpath)
-        b_lines = []
-    else:
-        f = open(b_dest, 'rb')
-        b_lines = f.readlines()
-        f.close()
-
-    lines = to_native(b('').join(b_lines))
-
-    if module._diff:
-        diff['before'] = lines
-
-    jsonconfig = json.loads(lines) if lines else {}
-    config = json.loads(conf)
-
-    if not isinstance(config, dict):
-        module.fail_json(msg="Invalid value in json parameter: {0}".format(config))
-
-    b_lines_new = b_lines
-    msg = ''
-    changed = False
-
-    if not merge:
-        if jsonconfig != config:
-            b_lines_new = [to_bytes(json.dumps(config, sort_keys=True, indent=4, separators=(',', ': ')))]
-            msg = 'config overwritten'
-            changed = True
-    else:
-        mergedconfig = deepmerge(jsonconfig, config)
-        if jsonconfig != mergedconfig:
-            b_lines_new = [to_bytes(json.dumps(mergedconfig, sort_keys=True, indent=4, separators=(',', ': ')))]
-            msg = 'config merged'
-            changed = True
-
-    if module._diff:
-        diff['after'] = to_native(b('').join(b_lines_new))
-
-    backupdest = ""
-    if changed and not module.check_mode:
-        if backup and os.path.exists(b_dest):
-            backupdest = module.backup_local(dest)
-        write_changes(module, b_lines_new, dest)
-
-    if module.check_mode and not os.path.exists(b_dest):
-        module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
-
-    attr_diff = {}
-    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
-
-    attr_diff['before_header'] = '%s (file attributes)' % dest
-    attr_diff['after_header'] = '%s (file attributes)' % dest
-
-    difflist = [diff, attr_diff]
-    module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
-
-
-def absent(module, dest, conf, backup):
-
-    b_dest = to_bytes(dest, errors='surrogate_or_strict')
-    if not os.path.exists(b_dest):
-        module.exit_json(changed=False, msg="file not present")
-
-    diff = {'before': '',
-            'after': '',
-            'before_header': '%s (content)' % dest,
-            'after_header': '%s (content)' % dest}
-
-    f = open(b_dest, 'rb')
-    b_lines = f.readlines()
-    f.close()
-
-    lines = to_native(b('').join(b_lines))
-    jsonconfig = json.loads(lines) if lines else {}
-    config = json.loads(conf)
-
-    if not isinstance(config, dict):
-        module.fail_json(msg="Invalid value in json parameter: {0}".format(config))
-
-    if module._diff:
-        diff['before'] = lines
-
-    b_lines_new = b_lines
-    msg = ''
-    changed = False
-
-    diffconfig = deepdiff(jsonconfig, config)
-    if diffconfig is None:
-        diffconfig = {}
-    if jsonconfig != diffconfig:
-        b_lines_new = [to_bytes(json.dumps(diffconfig, sort_keys=True, indent=4, separators=(',', ': ')))]
-        msg = 'config removed'
-        changed = True
-
-    if module._diff:
-        diff['after'] = to_native(b('').join(b_lines_new))
-
-    backupdest = ""
-    if changed and not module.check_mode:
-        if backup:
-            backupdest = module.backup_local(dest)
-        write_changes(module, b_lines_new, dest)
-
-    attr_diff = {}
-    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
-
-    attr_diff['before_header'] = '%s (file attributes)' % dest
-    attr_diff['after_header'] = '%s (file attributes)' % dest
-
-    difflist = [diff, attr_diff]
-    module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
-
-
-def main():
-
-    # define the available arguments/parameters that a user can pass to
-    # the module
-    module_args = dict(
-        dest=dict(type='str', required=True),
-        json=dict(type='str', required=True),
-        merge=dict(type='bool', default=True),
-        state=dict(default='present', choices=['absent', 'present']),
-        create=dict(type='bool', default=False),
-        backup=dict(type='bool', default=False),
-        validate=dict(default=None, type='str')
-    )
-
-    # the AnsibleModule object will be our abstraction for working with Ansible;
-    # this includes instantiation, a couple of common attributes such as the
-    # args/params passed to the execution, as well as whether the module
-    # supports check mode
-    module = AnsibleModule(
-        argument_spec=module_args,
-        add_file_common_args=True,
-        supports_check_mode=True
-    )
-
-    params = module.params
-    create = params['create']
-    merge = params['merge']
-    backup = params['backup']
-    dest = params['dest']
-
-    b_dest = to_bytes(dest, errors='surrogate_or_strict')
-
-    if os.path.isdir(b_dest):
-        module.fail_json(rc=256, msg='Destination %s is a directory!' % dest)
-
-    conf = params['json']
-
-    if params['state'] == 'present':
-        present(module, dest, conf, merge, create, backup)
-    else:
-        absent(module, dest, conf, backup)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible/roles/jsonconfig/tasks/main.yml b/ansible/roles/jsonconfig/tasks/main.yml
deleted file mode 100644
index 277be1095..000000000
--- a/ansible/roles/jsonconfig/tasks/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-- name: Update
-  jsonconfig: "dest='{{destination}}' json='{{jsonconfig}}' state={{(remove | default(false) | bool) | ternary('absent','present')}}"
-
diff --git a/ansible/roles/setfile/tasks/main.yml b/ansible/roles/setfile/tasks/main.yml
deleted file mode 100644
index 442072dda..000000000
--- a/ansible/roles/setfile/tasks/main.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-
-- name: Download file if necessary
-  when: source | regex_search('^https?://')
-  get_url: "url={{source}} dest={{localdir}}/{{source | basename | regex_replace('\\?.*$','')}}"
-  register: downloaded
-  connection: local
-  run_once: yes
-  become: no
-
-- name: Figure out file source
-  set_fact:
-    compiledsource: "{{ (downloaded.skipped is defined) | ternary(source, downloaded.dest) }}"
-  connection: local
-  become: no
-
-- name: Extract file to destination
-  when: compiledsource | regex_search('\\.(zip|tar|tar\\.gz|tgz|tb2|tbz|tbz2|tar\\.bz2|txz|tar\\.xz)$')
-  register: extractcopy
-  unarchive:
-    src: "{{compiledsource}}"
-    dest: "{{destination}}"
-
-- name: Copy non-zipped file to destination
-  when: extractcopy.skipped is defined
-  copy: "src='{{compiledsource}}' dest='{{destination}}'"
-
diff --git a/ansible/roles/start/tasks/main.yml b/ansible/roles/start/tasks/main.yml
deleted file mode 100644
index 6bc611c91..000000000
--- a/ansible/roles/start/tasks/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-- name: start service
-  service: "name={{service}} state=started"
-
diff --git a/ansible/roles/status/tasks/main.yml b/ansible/roles/status/tasks/main.yml
deleted file mode 100644
index c9229d400..000000000
--- a/ansible/roles/status/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-
-- name: application service status
-  command: "service {{service}} status"
-  changed_when: false
-  register: status
-
-- name: Result
-  debug: var=status.stdout_lines
-
-#- name: tendermint service status
-#  when: service != 'tendermint'
-#  command: "service {{service}}-server status"
-#  changed_when: false
-#  register: tendermintstatus
-
-#- name: Result
-#  when: service != 'tendermint'
-#  debug: var=tendermintstatus.stdout_lines
-
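The jsonconfig module above (and the tomlconfig module below) both hinge on the recursive ``deepmerge``/``deepdiff`` helpers. A minimal standalone sketch of their semantics; the sample keys are hypothetical, loosely modeled on a Tendermint config:

::

    import copy

    # Merge semantics: nested dicts are merged key by key; on a conflict
    # between scalars (or mixed types), the second argument wins.
    def deepmerge(d1, d2):
        if d1 is None:
            return copy.deepcopy(d2)
        if d2 is None or d1 == d2:
            return copy.deepcopy(d1)
        if isinstance(d1, dict) and isinstance(d2, dict):
            return {k: deepmerge(d1.get(k), d2.get(k)) for k in set(d1) | set(d2)}
        return copy.deepcopy(d2)

    # Diff semantics: any leaf that d2 also names is dropped; keys d2
    # does not mention are kept.
    def deepdiff(d1, d2):
        if d1 is None or d2 is None or d1 == d2:
            return None
        if not (isinstance(d1, dict) and isinstance(d2, dict)):
            return None
        result = {}
        for k, v in d1.items():
            if k not in d2:
                result[k] = v
            else:
                dd = deepdiff(v, d2[k])
                if dd is not None:
                    result[k] = dd
        return result

    current = {"p2p": {"laddr": "tcp://0.0.0.0:46656", "seeds": ""}}
    overlay = {"p2p": {"seeds": "1.2.3.4:46656"}, "moniker": "node0"}

    merged = deepmerge(current, overlay)
    # {'p2p': {'laddr': 'tcp://0.0.0.0:46656', 'seeds': '1.2.3.4:46656'},
    #  'moniker': 'node0'}

    removed = deepdiff(merged, overlay)
    # {'p2p': {'laddr': 'tcp://0.0.0.0:46656'}}

This is why ``state=absent`` strips every leaf key named in the supplied configuration while leaving unmentioned sibling keys in place.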
diff --git a/ansible/roles/stop/tasks/main.yml b/ansible/roles/stop/tasks/main.yml
deleted file mode 100644
index 7db356f22..000000000
--- a/ansible/roles/stop/tasks/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-- name: stop service
-  service: "name={{service}} state=stopped"
-
diff --git a/ansible/roles/tomlconfig/defaults/main.yml b/ansible/roles/tomlconfig/defaults/main.yml
deleted file mode 100644
index 2bb8c2927..000000000
--- a/ansible/roles/tomlconfig/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-destination: /etc/{{service}}/config.toml
-
diff --git a/ansible/roles/tomlconfig/library/tomlconfig.py b/ansible/roles/tomlconfig/library/tomlconfig.py
deleted file mode 100644
index fbb10b823..000000000
--- a/ansible/roles/tomlconfig/library/tomlconfig.py
+++ /dev/null
@@ -1,386 +0,0 @@
-#!/usr/bin/python
-
-ANSIBLE_METADATA = {
-    'metadata_version': '1.1',
-    'status': ['preview'],
-    'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: tomlconfig
-
-short_description: Ensure a particular configuration is added to a toml-formatted configuration file
-
-version_added: "2.4"
-
-description:
-    - This module will add configuration to a toml-formatted configuration file.
-
-options:
-  dest:
-    description:
-      - The file to modify.
-    required: true
-    aliases: [ name, destfile ]
-  json:
-    description:
-      - The configuration in json format to apply. Either C(json) or C(toml) has to be present.
-    required: false
-    default: '{}'
-  toml:
-    description:
-      - The configuration in toml format to apply. Either C(json) or C(toml) has to be present.
-    default: ''
-  merge:
-    description:
-      - Used with C(state=present). If specified, it will merge the configuration. Otherwise
-        the configuration will be overwritten.
-    required: false
-    choices: [ "yes", "no" ]
-    default: "yes"
-  state:
-    description:
-      - Whether the configuration should be there or not.
-    required: false
-    choices: [ present, absent ]
-    default: "present"
-  create:
-    description:
-      - Used with C(state=present). If specified, the file will be created
-        if it does not already exist. By default it will fail if the file
-        is missing.
-    required: false
-    choices: [ "yes", "no" ]
-    default: "no"
-  backup:
-    description:
-      - Create a backup file including the timestamp information so you can
-        get the original file back if you somehow clobbered it incorrectly.
-    required: false
-    choices: [ "yes", "no" ]
-    default: "no"
-  others:
-    description:
-      - All arguments accepted by the M(file) module also work here.
-    required: false
-
-extends_documentation_fragment:
-    - files
-    - validate
-
-author:
-    - "Greg Szabo (@greg-szabo)"
-'''
-
-EXAMPLES = '''
-# Add a new section to a toml file
-- name: Add comment section
-  tomlconfig:
-    dest: /etc/config.toml
-    json: '{ "comment": { "comment1": "mycomment" } }'
-
-# Rewrite a toml file with the configuration
-- name: Create or overwrite config.toml
-  tomlconfig:
-    dest: /etc/config.toml
-    json: '{ "regedit": { "freshfile": true } }'
-    merge: no
-    create: yes
-'''
-
-RETURN = '''
-changed:
-    description: True if the configuration changed.
-    type: bool
-msg:
-    description: Description of the change
-    type: str
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import b
-from ansible.module_utils._text import to_bytes, to_native
-import tempfile
-import toml as pytoml
-import json
-import copy
-import os
-
-
-def write_changes(module, b_lines, dest):
-
-    tmpfd, tmpfile = tempfile.mkstemp()
-    f = os.fdopen(tmpfd, 'wb')
-    f.writelines(b_lines)
-    f.close()
-
-    validate = module.params.get('validate', None)
-    valid = not validate
-    if validate:
-        if "%s" not in validate:
-            module.fail_json(msg="validate must contain %%s: %s" % (validate))
-        (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
-        valid = rc == 0
-        if rc != 0:
-            module.fail_json(msg='failed to validate: '
-                                 'rc:%s error:%s' % (rc, err))
-    if valid:
-        module.atomic_move(tmpfile,
-                           to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
-                           unsafe_writes=module.params['unsafe_writes'])
-
-
-def check_file_attrs(module, changed, message, diff):
-
-    file_args = module.load_file_common_arguments(module.params)
-    if module.set_fs_attributes_if_different(file_args, False, diff=diff):
-        if changed:
-            message += " and "
-        changed = True
-        message += "ownership, perms or SE linux context changed"
-
-    return message, changed
-
-
-# Merge dict d2 into dict d1 and return a new object
-def deepmerge(d1, d2):
-    if d1 is None:
-        return copy.deepcopy(d2)
-    if d2 is None:
-        return copy.deepcopy(d1)
-    if d1 == d2:
-        return copy.deepcopy(d1)
-    if isinstance(d1, dict) and isinstance(d2, dict):
-        result = {}
-        for key in set(d1) | set(d2):
-            da = db = None
-            if key in d1:
-                da = d1[key]
-            if key in d2:
-                db = d2[key]
-            result[key] = deepmerge(da, db)
-        return result
-    else:
-        return copy.deepcopy(d2)
-
-
-# Remove dict d2 from dict d1 and return a new object
-def deepdiff(d1, d2):
-    if d1 is None or d2 is None:
-        return None
-    if d1 == d2:
-        return None
-    if isinstance(d1, dict) and isinstance(d2, dict):
-        result = {}
-        for key in d1.keys():
-            if key in d2:
-                dd = deepdiff(d1[key], d2[key])
-                if dd is not None:
-                    result[key] = dd
-            else:
-                result[key] = d1[key]
-        return result
-    else:
-        return None
-
-
-def present(module, dest, conf, jsonbool, merge, create, backup):
-
-    diff = {'before': '',
-            'after': '',
-            'before_header': '%s (content)' % dest,
-            'after_header': '%s (content)' % dest}
-
-    b_dest = to_bytes(dest, errors='surrogate_or_strict')
-    if not os.path.exists(b_dest):
-        if not create:
-            module.fail_json(rc=257, msg='Destination %s does not exist!' % dest)
-        b_destpath = os.path.dirname(b_dest)
-        if not os.path.exists(b_destpath) and not module.check_mode:
-            os.makedirs(b_destpath)
-        b_lines = []
-    else:
-        f = open(b_dest, 'rb')
-        b_lines = f.readlines()
-        f.close()
-
-    lines = to_native(b('').join(b_lines))
-
-    if module._diff:
-        diff['before'] = lines
-
-    tomlconfig = pytoml.loads(lines)
-    if jsonbool:
-        config = json.loads(conf)
-    else:
-        config = pytoml.loads(conf)
-
-    if not isinstance(config, dict):
-        if jsonbool:
-            module.fail_json(msg="Invalid value in json parameter: {0}".format(config))
-        else:
-            module.fail_json(msg="Invalid value in toml parameter: {0}".format(config))
-
-    b_lines_new = b_lines
-    msg = ''
-    changed = False
-
-    if not merge:
-        if tomlconfig != config:
-            b_lines_new = [to_bytes(pytoml.dumps(config))]
-            msg = 'config overwritten'
-            changed = True
-    else:
-        mergedconfig = deepmerge(tomlconfig, config)
-        if tomlconfig != mergedconfig:
-            b_lines_new = [to_bytes(pytoml.dumps(mergedconfig))]
-            msg = 'config merged'
-            changed = True
-
-    if module._diff:
-        diff['after'] = to_native(b('').join(b_lines_new))
-
-    backupdest = ""
-    if changed and not module.check_mode:
-        if backup and os.path.exists(b_dest):
-            backupdest = module.backup_local(dest)
-        write_changes(module, b_lines_new, dest)
-
-    if module.check_mode and not os.path.exists(b_dest):
-        module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
-
-    attr_diff = {}
-    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
-
-    attr_diff['before_header'] = '%s (file attributes)' % dest
-    attr_diff['after_header'] = '%s (file attributes)' % dest
-
-    difflist = [diff, attr_diff]
-    module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
-
-
-def absent(module, dest, conf, jsonbool, backup):
-
-    b_dest = to_bytes(dest, errors='surrogate_or_strict')
-    if not os.path.exists(b_dest):
-        module.exit_json(changed=False, msg="file not present")
-
-    diff = {'before': '',
-            'after': '',
-            'before_header': '%s (content)' % dest,
-            'after_header': '%s (content)' % dest}
-
-    f = open(b_dest, 'rb')
-    b_lines = f.readlines()
-    f.close()
-
-    lines = to_native(b('').join(b_lines))
-    tomlconfig = pytoml.loads(lines)
-    if jsonbool:
-        config = json.loads(conf)
-    else:
-        config = pytoml.loads(conf)
-
-    if not isinstance(config, dict):
-        if jsonbool:
-            module.fail_json(msg="Invalid value in json parameter: {0}".format(config))
-        else:
-            module.fail_json(msg="Invalid value in toml parameter: {0}".format(config))
-
-    if module._diff:
-        diff['before'] = lines
-
-    b_lines_new = b_lines
-    msg = ''
-    changed = False
-
-    diffconfig = deepdiff(tomlconfig, config)
-    if diffconfig is None:
-        diffconfig = {}
-    if tomlconfig != diffconfig:
-        b_lines_new = [to_bytes(pytoml.dumps(diffconfig))]
-        msg = 'config removed'
-        changed = True
-
-    if module._diff:
-        diff['after'] = to_native(b('').join(b_lines_new))
-
-    backupdest = ""
-    if changed and not module.check_mode:
-        if backup:
-            backupdest = module.backup_local(dest)
-        write_changes(module, b_lines_new, dest)
-
-    attr_diff = {}
-    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
-
-    attr_diff['before_header'] = '%s (file attributes)' % dest
-    attr_diff['after_header'] = '%s (file attributes)' % dest
-
-    difflist = [diff, attr_diff]
-
-    module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
-
-
-def main():
-
-    # define the available arguments/parameters that a user can pass to
-    # the module
-    module_args = dict(
-        dest=dict(type='str', required=True),
-        json=dict(type='str', default=None),
-        toml=dict(type='str', default=None),
-        merge=dict(type='bool', default=True),
-        state=dict(default='present', choices=['absent', 'present']),
-        create=dict(type='bool', default=False),
-        backup=dict(type='bool', default=False),
-        validate=dict(default=None, type='str')
-    )
-
-    # the AnsibleModule object will be our abstraction for working with Ansible;
-    # this includes instantiation, a couple of common attributes such as the
-    # args/params passed to the execution, as well as whether the module
-    # supports check mode
-    module = AnsibleModule(
-        argument_spec=module_args,
-        mutually_exclusive=[['json', 'toml']],
-        required_one_of=[['json', 'toml']],
-        add_file_common_args=True,
-        supports_check_mode=True
-    )
-
-    params = module.params
-    create = params['create']
-    merge = params['merge']
-    backup = params['backup']
-    dest = params['dest']
-
-    b_dest = to_bytes(dest, errors='surrogate_or_strict')
-
-    if os.path.isdir(b_dest):
-        module.fail_json(rc=256, msg='Destination %s is a directory!' % dest)
-
-    par_json, par_toml, jsonbool = params['json'], params['toml'], False
-    if par_json is None:
-        conf = par_toml
-    else:
-        conf = par_json
-        jsonbool = True
-
-    if params['state'] == 'present':
-        present(module, dest, conf, jsonbool, merge, create, backup)
-    else:
-        absent(module, dest, conf, jsonbool, backup)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible/roles/tomlconfig/tasks/main.yml b/ansible/roles/tomlconfig/tasks/main.yml
deleted file mode 100644
index 8fe47e821..000000000
--- a/ansible/roles/tomlconfig/tasks/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: Update config.toml with json
-  when: jsonconfig is defined
-  tomlconfig: "dest='{{destination}}' json='{{jsonconfig}}' state={{(remove | default(false) | bool) | ternary('absent','present')}}"
-
-- name: Update config.toml with toml
-  when: tomlconfig is defined
-  tomlconfig: "dest='{{destination}}' toml='{{tomlconfig}}' state={{(remove | default(false) | bool) | ternary('absent','present')}}"
-
diff --git a/ansible/roles/unsafe_reset/tasks/main.yml b/ansible/roles/unsafe_reset/tasks/main.yml
deleted file mode 100644
index 7cf8aebf8..000000000
--- a/ansible/roles/unsafe_reset/tasks/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- shell: "ethermint --datadir /etc/ethermint unsafe_reset_all"
-  when: "service == 'ethermint'"
-  become_user: ethermint
-
-- command: "{{service}} node unsafe_reset_all --home=/etc/{{service}}"
-  become_user: "{{service}}"
-
-- file: "path=/etc/{{service}}/config/addrbook.json state=absent"
-
-
diff --git a/ansible/setfile.yml b/ansible/setfile.yml
deleted file mode 100644
index 22aff779e..000000000
--- a/ansible/setfile.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-#variable "source" is required
-#variable "destination" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  gather_facts: no
-  roles:
-    - setfile
-
diff --git a/ansible/start.yml b/ansible/start.yml
deleted file mode 100644
index 699c7052b..000000000
--- a/ansible/start.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-#variable "service" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  roles:
-    - start
-
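These playbooks all resolve their target hosts from the ``TF_VAR_TESTNET_NAME`` environment variable, and most expect a ``service`` variable. A hypothetical invocation; the inventory path and service name below are placeholders, not part of this repository:

::

    export TF_VAR_TESTNET_NAME="testnet-servers"
    ansible-playbook -i inventory -e service=tendermint stop.yml
    ansible-playbook -i inventory -e service=tendermint start.yml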
diff --git a/ansible/status.yml b/ansible/status.yml
deleted file mode 100644
index 2839f0563..000000000
--- a/ansible/status.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-#variable "service" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  roles:
-    - status
-
diff --git a/ansible/stop.yml b/ansible/stop.yml
deleted file mode 100644
index f89e3cf22..000000000
--- a/ansible/stop.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-#variable "service" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  roles:
-    - stop
-
diff --git a/ansible/tomlconfig.yml b/ansible/tomlconfig.yml
deleted file mode 100644
index d410588ea..000000000
--- a/ansible/tomlconfig.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-#variable "service" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  roles:
-    - tomlconfig
-
diff --git a/ansible/ubuntu16-patch.yml b/ansible/ubuntu16-patch.yml
deleted file mode 100644
index 89c8864d0..000000000
--- a/ansible/ubuntu16-patch.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-#Ubuntu 16.04 does not include Python in the standard DigitalOcean image. This "patch" installs it so the rest of the Ansible playbooks can work properly.
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  gather_facts: no
-  tasks:
-    - raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
-
diff --git a/ansible/upgrade-reset.yml b/ansible/upgrade-reset.yml
deleted file mode 100644
index 58688f7ce..000000000
--- a/ansible/upgrade-reset.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-#variable "service" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
-  roles:
-    - stop
-    - install
-    - unsafe_reset
-    - start
-
diff --git a/ansible/upgrade.yml b/ansible/upgrade.yml
deleted file mode 100644
index 42ebad009..000000000
--- a/ansible/upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-#variable "service" is required
-
-- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}"
-  roles:
-    - stop
-    - install
-    - start
-
diff --git a/terraform-digitalocean/.gitignore b/terraform-digitalocean/.gitignore
deleted file mode 100644
index 2ded1561a..000000000
--- a/terraform-digitalocean/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-cluster/networking.tf
-networking-output.tf
-
diff --git a/terraform-digitalocean/README.rst b/terraform-digitalocean/README.rst
deleted file mode 100644
index 95af507f0..000000000
--- a/terraform-digitalocean/README.rst
+++ /dev/null
@@ -1,111 +0,0 @@
-Using Terraform
-===============
-
-This is a generic `Terraform `__
-configuration that sets up DigitalOcean droplets. See the
-`terraform-digitalocean `__
-for the required files.
-
-Prerequisites
--------------
-
-- Install `HashiCorp Terraform `__ on a Linux
-  machine.
-- Create a `DigitalOcean API
-  token `__ with
-  read and write capability.
-- Create a private/public key pair for SSH. This is needed to log onto
-  your droplets as well as by Ansible to connect for configuration
-  changes.
-- Set up the public SSH key at the `DigitalOcean security
-  page `__.
-  `Here `__'s
-  a tutorial.
-- Find out your SSH key ID at DigitalOcean by running the below
-  command on your Linux box:
-
-::
-
-    DO_API_TOKEN=""
-    curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_API_TOKEN" "https://api.digitalocean.com/v2/account/keys"
-
-Initialization
---------------
-
-If this is your first time using Terraform, you have to initialize it by
-running the below command. (Note: initialization can be run multiple
-times.)
-
-::
-
-    terraform init
-
-After initialization it's good practice to create a new Terraform
-environment for the droplets so they are always managed together.
-
-::
-
-    TESTNET_NAME="testnet-servers"
-    terraform env new "$TESTNET_NAME"
-
-Note that this ``terraform env`` command is only available in Terraform
-``v0.9`` and up.
-
-Execution
----------
-
-The below command will create 4 nodes in DigitalOcean. They will be
-named ``testnet-servers-node0`` to ``testnet-servers-node3`` and they
-will be tagged as ``testnet-servers``.
-
-::
-
-    DO_API_TOKEN=""
-    SSH_IDS="[ \"\" ]"
-    terraform apply -var TESTNET_NAME="testnet-servers" -var servers=4 -var DO_API_TOKEN="$DO_API_TOKEN" -var ssh_keys="$SSH_IDS"
-
-Note: ``ssh_keys`` is a list of strings. You can add multiple keys. For
-example: ``["1234567","9876543"]``.
-
-Alternatively you can use the default settings. The number of default
-servers is 4 and the testnet name is ``tf-testnet1``. Variables can also
-be defined as environment variables instead of on the command line.
-Environment variables that start with ``TF_VAR_`` will be translated
-into the Terraform configuration. For example, the number of servers can
-be overridden by setting the ``TF_VAR_servers`` variable.
-
-::
-
-    TF_VAR_DO_API_TOKEN=""
-    TF_VAR_TESTNET_NAME="testnet-servers"
-    terraform apply
-
-Security
---------
-
-DigitalOcean uses the root user by default on its droplets. This is fine
-as long as SSH keys are used. However, some people still would like to
-disable root and use an alternative user to connect to the droplets,
-then ``sudo`` from there. Terraform can do this, but it requires an SSH
-agent running on the machine where Terraform is run, with one of the SSH
-keys of the droplets added to the agent. (This will be needed for Ansible
-too, so it's worth setting it up here. Check out the
-`ansible `__
-page for more information.) After setting up the SSH key, run
-``terraform apply`` with ``-var noroot=true`` to create your droplets.
-Terraform will create a user called ``ec2-user`` and move the SSH keys
-over, disabling SSH login for root. It also adds ``ec2-user`` to the
-sudoers file, so after logging in as ec2-user you can ``sudo`` to
-``root``.
-
-DigitalOcean announced firewalls, but the current version of Terraform
-(0.9.8 as of this writing) does not support them yet. Fortunately it is
-quite easy to set them up through the web interface (and not that bad
-through the `RESTful
-API `__
-either). When adding droplets to a firewall rule, you can add tags. All
-droplets in a testnet are tagged with the testnet name, so it's enough to
-define the testnet name in the firewall rule. It is not necessary to add
-the nodes one-by-one. Also, the firewall rule "remembers" the testnet
-name tag, so if you change the servers but keep the name, the firewall
-rules will still apply.
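Since the firewall rule only needs the testnet tag, the API call is short. A rough sketch; the rule below is illustrative only, and the exact request schema should be checked against the DigitalOcean firewall API documentation:

::

    DO_API_TOKEN=""
    curl -X POST -H "Content-Type: application/json" \
         -H "Authorization: Bearer $DO_API_TOKEN" \
         -d '{"name":"testnet-servers-fw","tags":["testnet-servers"],"inbound_rules":[{"protocol":"tcp","ports":"22","sources":{"addresses":["0.0.0.0/0","::/0"]}}]}' \
         "https://api.digitalocean.com/v2/firewalls"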
diff --git a/terraform-digitalocean/cluster/main.tf b/terraform-digitalocean/cluster/main.tf
deleted file mode 100644
index daab601c0..000000000
--- a/terraform-digitalocean/cluster/main.tf
+++ /dev/null
@@ -1,23 +0,0 @@
-resource "digitalocean_tag" "cluster" {
-  name = "${var.name}"
-}
-
-resource "digitalocean_droplet" "cluster" {
-  name     = "${var.name}-node${count.index}"
-  image    = "${var.image_id}"
-  size     = "${var.instance_size}"
-  region   = "${element(var.regions, count.index)}"
-  ssh_keys = "${var.key_ids}"
-  count    = "${var.servers}"
-  tags     = ["${digitalocean_tag.cluster.id}"]
-
-  lifecycle = {
-    prevent_destroy = false
-  }
-
-  connection {
-    timeout = "30s"
-  }
-}
-
diff --git a/terraform-digitalocean/cluster/outputs.tf b/terraform-digitalocean/cluster/outputs.tf
deleted file mode 100644
index 90255cfcd..000000000
--- a/terraform-digitalocean/cluster/outputs.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-// The cluster name
-output "name" {
-  value = "${var.name}"
-}
-
-// The list of cluster instance IDs
-output "instances" {
-  value = ["${digitalocean_droplet.cluster.*.id}"]
-}
-
-// The list of cluster instance private IPs
-output "private_ips" {
-  value = ["${digitalocean_droplet.cluster.*.ipv4_address_private}"]
-}
-
-// The list of cluster instance public IPs
-output "public_ips" {
-  value = ["${digitalocean_droplet.cluster.*.ipv4_address}"]
-}
-
-#// The list of cluster floating IPs
-#output "floating_ips" {
-#  value = ["${digitalocean_floating_ip.cluster.*.ip_address}"]
-#}
-
diff --git a/terraform-digitalocean/cluster/security.tf b/terraform-digitalocean/cluster/security.tf
deleted file mode 100644
index 3da56395c..000000000
--- a/terraform-digitalocean/cluster/security.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-resource "null_resource" "cluster" {
-  count = "${ var.noroot ? var.servers : 0 }"
-  connection {
-    host = "${element(digitalocean_droplet.cluster.*.ipv4_address,count.index)}"
-  }
-  provisioner "remote-exec" {
-    inline = [
-      "useradd -m -s /bin/bash ec2-user",
-      "echo 'ec2-user ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/ec2-user",
-      "cp -r /root/.ssh /home/ec2-user/.ssh",
-      "chown -R ec2-user:ec2-user /home/ec2-user/.ssh",
-      "chmod -R 700 /home/ec2-user/.ssh",
-      "rm -rf /root/.ssh"
-    ]
-  }
-}
-
diff --git a/terraform-digitalocean/cluster/variables.tf b/terraform-digitalocean/cluster/variables.tf
deleted file mode 100644
index 8f2f4d241..000000000
--- a/terraform-digitalocean/cluster/variables.tf
+++ /dev/null
@@ -1,35 +0,0 @@
-variable "name" {
-  description = "The cluster name, e.g. cdn"
-}
-
-variable "image_id" {
-  description = "Image ID"
-  default     = "ubuntu-16-04-x64"
-}
-
-variable "regions" {
-  description = "Regions to launch in"
-  type        = "list"
-  default     = ["AMS2", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"]
-}
-
-variable "key_ids" {
-  description = "SSH keys to use on the nodes"
-  type        = "list"
-}
-
-variable "instance_size" {
-  description = "The instance size to use"
-  default     = "2gb"
-}
-
-variable "servers" {
-  description = "Desired instance count"
-  default     = 4
-}
-
-variable "noroot" {
-  description = "Set this variable to true if you want SSH keys set for ec2-user instead of root."
-  default     = false
-}
-
diff --git a/terraform-digitalocean/main.tf b/terraform-digitalocean/main.tf
deleted file mode 100644
index e2e262797..000000000
--- a/terraform-digitalocean/main.tf
+++ /dev/null
@@ -1,64 +0,0 @@
-#Terraform Configuration
-
-variable "DO_API_TOKEN" {
-  description = "DigitalOcean Access Token"
-}
-
-variable "TESTNET_NAME" {
-  description = "Name of the cluster/testnet"
-  default     = "tf-testnet1"
-}
-
-variable "ssh_keys" {
-  description = "SSH keys provided in DigitalOcean to be used on the nodes"
-  # curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_API_TOKEN" "https://api.digitalocean.com/v2/account/keys"
-  default = [
-    "6259615",
-    "7658963",
-    "7668263",
-    "7668264",
-    "8036767",
-    "8163311",
-    "9495227",
-    "10318834",
-    "11435493"
-  ]
-}
-
-variable "servers" {
-  description = "Number of nodes in cluster"
-  default     = "4"
-}
-
-variable "image" {
-  description = "DigitalOcean image name"
-  default     = "ubuntu-16-04-x64"
-}
-
-variable "noroot" {
-  description = "Set this variable to true if you want SSH keys set for ec2-user instead of root."
-  default     = false
-}
-
-provider "digitalocean" {
-  token = "${var.DO_API_TOKEN}"
-}
-
-module "cluster" {
-  source   = "./cluster"
-  name     = "${var.TESTNET_NAME}"
-  key_ids  = "${var.ssh_keys}"
-  servers  = "${var.servers}"
-  noroot   = "${var.noroot}"
-  image_id = "${var.image}"
-}
-
-
-output "public_ips" {
-  value = "${module.cluster.public_ips}"
-}
-
-#output "floating_ips" {
-#  value = "${module.cluster.floating_ips}"
-#}
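Every variable declared in this root configuration can be overridden either with ``-var`` flags or with ``TF_VAR_``-prefixed environment variables, as described in the README above. A sketch with placeholder values:

::

    export TF_VAR_DO_API_TOKEN=""
    export TF_VAR_TESTNET_NAME="testnet-servers"
    export TF_VAR_servers=8
    export TF_VAR_noroot=true
    terraform apply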