see https://github.com/tendermint/tendermint/tree/master/networks
@@ -1,2 +0,0 @@
.vagrant/
*.retry
@@ -1,192 +0,0 @@
Copyright (C) 2017 Tendermint
(deleted: LICENSE — Apache License, Version 2.0, January 2004,
https://www.apache.org/licenses/; full standard license text omitted)
@@ -1,291 +0,0 @@
Using Ansible
=============

.. figure:: assets/a_plus_t.png
   :alt: Ansible plus Tendermint

   Ansible plus Tendermint

The playbooks in `our ansible directory
<https://github.com/tendermint/tools/tree/master/ansible>`__ run ansible
`roles <http://www.ansible.com/>`__ which:

- install and configure basecoind or ethermint
- start/stop basecoind or ethermint and reset their configuration
Prerequisites
-------------

- Ansible 2.0 or higher
- SSH key to the servers

Optional for DigitalOcean droplets:

- DigitalOcean API Token
- python dopy package

For a description of how to get a DigitalOcean API Token, see the
explanation in the `using terraform tutorial <./terraform-digitalocean.html>`__.

Optional for Amazon AWS instances:

- Amazon AWS API access key ID and secret access key.

The cloud inventory scripts come from the ansible team at their
`GitHub <https://github.com/ansible/ansible>`__ page. You can get the
latest version from the ``contrib/inventory`` folder.
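For example, the DigitalOcean inventory script can be fetched directly from
that repository (the exact raw-file path is an assumption; check the
repository if the ``contrib/inventory`` folder has moved):

::

    curl -LO https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/digital_ocean.py
    chmod +x digital_ocean.py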
Setup
-----

Ansible requires a "command machine" or "local machine" or "orchestrator
machine" to run on. This can be your laptop or any machine that can run
ansible. (It does not have to be part of the cloud network that hosts
your servers.)

Use the official `Ansible installation
guide <http://docs.ansible.com/ansible/intro_installation.html>`__ to
install Ansible. Here are a few examples of basic installation commands:

Ubuntu/Debian:

::

    sudo apt-get install ansible

CentOS/RedHat:

::

    sudo yum install epel-release
    sudo yum install ansible

Mac OSX: If you have `Homebrew <https://brew.sh>`__ installed, then it's:

::

    brew install ansible

If not, you can install it using ``pip``:

::

    sudo easy_install pip
    sudo pip install ansible

To make life easier, you can start an SSH Agent and load your SSH
key(s). This way ansible will have an uninterrupted way of connecting to
your servers.

::

    ssh-agent > ~/.ssh/ssh.env
    source ~/.ssh/ssh.env
    ssh-add private.key

Subsequently, as long as the agent is running, you can use
``source ~/.ssh/ssh.env`` to load the keys into the current session. Note:
on Mac OSX, you can add the ``-K`` option to ssh-add to store the
passphrase in your keychain. The security of this feature is debated but
it is convenient.
Optional cloud dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you are using a cloud provider to host your servers, you need the
below dependencies installed on your local machine.

DigitalOcean inventory dependencies:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Ubuntu/Debian:

::

    sudo apt-get install python-pip
    sudo pip install dopy

CentOS/RedHat:

::

    sudo yum install python-pip
    sudo pip install dopy

Mac OSX:

::

    sudo pip install dopy

Amazon AWS inventory dependencies:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Ubuntu/Debian:

::

    sudo apt-get install python-boto

CentOS/RedHat:

::

    sudo yum install python-boto

Mac OSX:

::

    sudo pip install boto
Refreshing the DigitalOcean inventory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you just finished creating droplets, the local DigitalOcean inventory
cache is not up-to-date. To refresh it, run (the token must be exported so
the inventory script can see it):

::

    export DO_API_TOKEN="<The API token received from DigitalOcean>"
    python -u inventory/digital_ocean.py --refresh-cache 1> /dev/null
Refreshing the Amazon AWS inventory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you just finished creating Amazon AWS EC2 instances, the local AWS
inventory cache is not up-to-date. To refresh it, run (the keys must be
exported so the inventory script can see them):

::

    export AWS_ACCESS_KEY_ID='<The API access key ID received from Amazon>'
    export AWS_SECRET_ACCESS_KEY='<The API secret access key received from Amazon>'
    python -u inventory/ec2.py --refresh-cache 1> /dev/null

Note: you don't need the access key and secret key set if you are
running ansible on an Amazon EC2 instance with the proper IAM
permissions set.
Running the playbooks
---------------------

The playbooks are locked down to only run if the environment variable
``TF_VAR_TESTNET_NAME`` is populated. This is a precaution so you don't
accidentally run the playbook on all your servers.

The variable ``TF_VAR_TESTNET_NAME`` contains the testnet name, which
ansible translates into an ansible group. If you used Terraform to
create the servers, it is the testnet name used there.

If the playbook cannot connect to the servers because of public key
denial, your SSH Agent is not set up properly. Alternatively you can add
the SSH key to ansible using the ``--private-key`` option.

If you need to connect to the nodes as root but your local username is
different, use the ansible option ``-u root`` to tell ansible to connect
to the servers and authenticate as the root user.

If you secured your server and you need to ``sudo`` for root access, use
the ``-b`` or ``--become`` option to tell ansible to sudo to root
after connecting to the server. In the Terraform-DigitalOcean example,
if you created the ec2-user by adding the ``noroot=true`` option (or if
you are simply on Amazon AWS), you need to add the options
``-u ec2-user -b`` to ansible to tell it to connect as the ec2-user and
then sudo to root to run the playbook.
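A minimal sketch of such an invocation, using the AWS inventory and the
``service`` parameter as in the examples below:

::

    ansible-playbook -i inventory/ec2.py install.yml -e service=basecoind -u ec2-user -b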
DigitalOcean
~~~~~~~~~~~~

::

    export DO_API_TOKEN="<The API token received from DigitalOcean>"
    export TF_VAR_TESTNET_NAME="testnet-servers"
    ansible-playbook -i inventory/digital_ocean.py install.yml -e service=basecoind
Amazon AWS
~~~~~~~~~~

::

    export AWS_ACCESS_KEY_ID='<The API access key ID received from Amazon>'
    export AWS_SECRET_ACCESS_KEY='<The API secret access key received from Amazon>'
    export TF_VAR_TESTNET_NAME="testnet-servers"
    ansible-playbook -i inventory/ec2.py install.yml -e service=basecoind
Installing custom versions
~~~~~~~~~~~~~~~~~~~~~~~~~~

By default ansible installs the tendermint, basecoind or ethermint binary
versions from the latest release in the repository. If you build your
own version of the binaries, you can tell ansible to install that
instead.

::

    export GOPATH="<your go path>"
    go get -u github.com/tendermint/basecoin/cmd/basecoind

    export DO_API_TOKEN="<The API token received from DigitalOcean>"
    export TF_VAR_TESTNET_NAME="testnet-servers"
    ansible-playbook -i inventory/digital_ocean.py install.yml -e service=basecoind -e release_install=false

Alternatively you can change the variable settings in
``group_vars/all``.
Other commands and roles
------------------------

There are a few extra playbooks to make life easier managing your
servers; an example invocation follows the list.

- install.yml - Install the basecoind or ethermint application. (Tendermint
  gets installed automatically.) Use the ``service`` parameter to
  define which application to install. Defaults to ``basecoind``.
- reset.yml - Stop the application, reset the configuration and data,
  then start the application again. You need to pass
  ``-e service=<servicename>``, like ``-e service=basecoind``. It will
  restart the underlying tendermint application too.
- restart.yml - Restart a service on all nodes. You need to pass
  ``-e service=<servicename>``, like ``-e service=basecoind``. It will
  restart the underlying tendermint application too.
- stop.yml - Stop the application. You need to pass
  ``-e service=<servicename>``.
- status.yml - Check the service status and print it. You need to pass
  ``-e service=<servicename>``.
- start.yml - Start the application. You need to pass
  ``-e service=<servicename>``.
- ubuntu16-patch.yml - Ubuntu 16.04 does not have the minimum required
  python package installed to be able to run ansible. If you are using
  ubuntu, run this playbook first on the target machines. This will
  install the python package that is required for ansible to work
  correctly on the remote nodes.
- upgrade.yml - Upgrade the ``service`` on your testnet. It will stop
  the service and restart it at the end. It will only work if the
  upgraded version is backward compatible with the installed version.
- upgrade-reset.yml - Upgrade the ``service`` on your testnet and reset
  the database. It will stop the service and restart it at the end. It
  will work for upgrades where the new version is not
  backward-compatible with the installed version - however it will
  reset the testnet to its default state.
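For example, to check the status of basecoind on every node of a
DigitalOcean-hosted testnet:

::

    export DO_API_TOKEN="<The API token received from DigitalOcean>"
    export TF_VAR_TESTNET_NAME="testnet-servers"
    ansible-playbook -i inventory/digital_ocean.py status.yml -e service=basecoind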
The roles are self-sufficient under the ``roles/`` folder.

- install - install the application defined in the ``service``
  parameter. It can install release packages and update them with
  custom-compiled binaries.
- unsafe\_reset - delete the database for a service, including the
  tendermint database.
- config - configure the application defined in ``service``. It also
  configures the underlying tendermint service. Check
  ``group_vars/all`` for options.
- stop - stop an application. Requires the ``service`` parameter set.
- status - check the status of an application. Requires the ``service``
  parameter set.
- start - start an application. Requires the ``service`` parameter set.
Default variables
-----------------

Default variables are documented under ``group_vars/all``. You can use the
parameters there to deploy a previously created genesis.json file
(instead of dynamically creating it) or to deploy custom-built
binaries instead of a released version.
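For example, instead of editing ``group_vars/all`` you can override such a
variable on the command line, since extra vars passed with ``-e`` take
precedence over group variables. A sketch, using the
``tendermint_genesis_file`` variable documented in ``group_vars/all`` (the
file path here is a placeholder):

::

    export DO_API_TOKEN="<The API token received from DigitalOcean>"
    export TF_VAR_TESTNET_NAME="testnet-servers"
    ansible-playbook -i inventory/digital_ocean.py install.yml \
      -e service=basecoind -e tendermint_genesis_file=/path/to/genesis.json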
@@ -1,18 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "ubuntu/trusty64"

  config.vm.provision :ansible do |ansible|
    ansible.playbook = "install.yml"
  end
end
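A quick sketch of using this Vagrantfile (assuming Vagrant and a local
ansible installation are present): ``vagrant up`` boots the box and runs the
``install.yml`` playbook against it, and ``vagrant provision`` re-runs the
playbook on a running VM.

::

    vagrant up
    vagrant provision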
@@ -1,4 +0,0 @@
[defaults]
retry_files_enabled = False
host_key_checking = False
@ -1,16 +0,0 @@ | |||
"accounts": [{ | |||
"pub_key": { | |||
"type": "ed25519", | |||
"data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" | |||
}, | |||
"coins": [ | |||
{ | |||
"denom": "mycoin", | |||
"amount": 9007199254740992 | |||
} | |||
] | |||
}], | |||
"plugin_options": [ | |||
"coin/issuer", {"app": "sigs", "addr": "1B1BE55F969F54064628A63B9559E7C21C925165"} | |||
] | |||
@@ -1 +0,0 @@
@@ -1,75 +0,0 @@
"accounts": [
  {
    "name": "greg",
    "address": "42960119BC3D724F6FA0E2883C0DCF550C59D1B2",
    "coins": [
      {
        "denom": "fermion",
        "amount": 1000000
      },
      {
        "denom": "gregcoin",
        "amount": 1000
      }
    ]
  },
  {
    "name": "bucky",
    "address": "5CAFE3CD0FEE7A5DD98B366B19A201D428A79FB6",
    "coins": [
      {
        "denom": "fermion",
        "amount": 10000
      },
      {
        "denom": "buckycoin",
        "amount": 1000
      }
    ]
  },
  {
    "name": "fabo",
    "address": "9C145AAAE1E7AD8735BC1B2173B092CEF6FD8557",
    "coins": [
      {
        "denom": "fermion",
        "amount": 100
      },
      {
        "denom": "fabocoin",
        "amount": 1000
      }
    ]
  },
  {
    "name": "mattbell",
    "address": "C2BA52AC0E98907ED7DC7FBFE85FCF3D4BD4D018",
    "coins": [
      {
        "denom": "fermion",
        "amount": 100
      },
      {
        "denom": "tokenmatt",
        "amount": 1000
      }
    ]
  },
  {
    "name": "fabo",
    "address": "527E2333EF0B6E5FFB6E62FFA68B3707E08F2286",
    "coins": [
      {
        "denom": "fermion",
        "amount": 100
      },
      {
        "denom": "tokenfabo",
        "amount": 1000
      }
    ]
  }
],
"plugin_options": [
  "coin/issuer", {"app": "sigs", "addr": "B01C264BFE9CBD45458256E613A6F07061A3A6B6"}
]
@@ -1,20 +0,0 @@
"accounts": [
  {
    "name": "relay",
    "address": "1B1BE55F969F54064628A63B9559E7C21C925165",
    "pub_key": {
      "type": "ed25519",
      "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279"
    },
    "coins": [
      {
        "denom": "mycoin",
        "amount": 9007199254740992
      }
    ]
  }
],
"plugin_options": [
  "coin/issuer", {"app": "sigs", "addr": "1B1BE55F969F54064628A63B9559E7C21C925165"}
]
@@ -1,8 +0,0 @@
---
#variable "service" is required
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
  roles:
    - getconfigtoml
@@ -1,8 +0,0 @@
---
#variable "source" is required
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
  roles:
    - getfile
@@ -1,41 +0,0 @@
---
###
### Tendermint installation
###

## This file shows and sets the global defaults for the role variables.

##
## install
##

## The service variable defines which service is going to be managed. It can be set to basecoind or ethermint.
service: basecoind

## release_install indicates if the install role should install the service from a release package. If set to false, the privately built binary from the GOPATH will override the binary on the target systems.
#release_install: true

## binary stores the path to the privately built service binary, if there is any. By default it uses the GOPATH environment variable.
#binary: "{{ lookup('env','GOPATH') | default('') }}/bin/{{ service }}"

##
## config
##

## tendermint_genesis_file contains the path and filename of a previously generated genesis.json for the underlying tendermint service. If undefined, the json file is dynamically generated.
#tendermint_genesis_file: "<undefined>"

## service_genesis_file contains the path and filename of a previously generated genesis.json for the service. If undefined, the json file is dynamically generated.
#service_genesis_file: "<undefined>"

## testnet_name is used to find seed IPs and public keys and to set the chain_id in genesis.json and config.toml.
#testnet_name: testnet1

## app_options_file contains a path and filename which will be included in the generated service genesis.json file on all nodes. The content will be dumped into the app_options dictionary in the service genesis.json.
#app_options_file: "app_options_files/dev_money"

## Internal use only. validators indicates if the nodes are validator nodes. The tendermint genesis.json will contain their public keys.
#validators: true

## Internal use only. seeds contains the list of servers (with ports) that are validators in a testnet. Only effective if validators == false. If validators == true, then all nodes will be automatically included here.
#seeds: ""
@@ -1,12 +0,0 @@
---
#variable "service" is required
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}"
  any_errors_fatal: "{{ validators | default(true) | bool }}"
  roles:
    - install
    - { role: generic-service, when: service == 'tendermint' }
    - { role: config, testnet_name: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}", tags: reconfig }
    - start
@ -1,675 +0,0 @@ | |||
GNU GENERAL PUBLIC LICENSE | |||
Version 3, 29 June 2007 | |||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | |||
Everyone is permitted to copy and distribute verbatim copies | |||
of this license document, but changing it is not allowed. | |||
Preamble | |||
The GNU General Public License is a free, copyleft license for | |||
software and other kinds of works. | |||
The licenses for most software and other practical works are designed | |||
to take away your freedom to share and change the works. By contrast, | |||
the GNU General Public License is intended to guarantee your freedom to | |||
share and change all versions of a program--to make sure it remains free | |||
software for all its users. We, the Free Software Foundation, use the | |||
GNU General Public License for most of our software; it applies also to | |||
any other work released this way by its authors. You can apply it to | |||
your programs, too. | |||
When we speak of free software, we are referring to freedom, not | |||
price. Our General Public Licenses are designed to make sure that you | |||
have the freedom to distribute copies of free software (and charge for | |||
them if you wish), that you receive source code or can get it if you | |||
want it, that you can change the software or use pieces of it in new | |||
free programs, and that you know you can do these things. | |||
To protect your rights, we need to prevent others from denying you | |||
these rights or asking you to surrender the rights. Therefore, you have | |||
certain responsibilities if you distribute copies of the software, or if | |||
you modify it: responsibilities to respect the freedom of others. | |||
For example, if you distribute copies of such a program, whether | |||
gratis or for a fee, you must pass on to the recipients the same | |||
freedoms that you received. You must make sure that they, too, receive | |||
or can get the source code. And you must show them these terms so they | |||
know their rights. | |||
Developers that use the GNU GPL protect your rights with two steps: | |||
(1) assert copyright on the software, and (2) offer you this License | |||
giving you legal permission to copy, distribute and/or modify it. | |||
For the developers' and authors' protection, the GPL clearly explains | |||
that there is no warranty for this free software. For both users' and | |||
authors' sake, the GPL requires that modified versions be marked as | |||
changed, so that their problems will not be attributed erroneously to | |||
authors of previous versions. | |||
Some devices are designed to deny users access to install or run | |||
modified versions of the software inside them, although the manufacturer | |||
can do so. This is fundamentally incompatible with the aim of | |||
protecting users' freedom to change the software. The systematic | |||
pattern of such abuse occurs in the area of products for individuals to | |||
use, which is precisely where it is most unacceptable. Therefore, we | |||
have designed this version of the GPL to prohibit the practice for those | |||
products. If such problems arise substantially in other domains, we | |||
stand ready to extend this provision to those domains in future versions | |||
of the GPL, as needed to protect the freedom of users. | |||
Finally, every program is threatened constantly by software patents. | |||
States should not allow patents to restrict development and use of | |||
software on general-purpose computers, but in those that do, we wish to | |||
avoid the special danger that patents applied to a free program could | |||
make it effectively proprietary. To prevent this, the GPL assures that | |||
patents cannot be used to render the program non-free. | |||
The precise terms and conditions for copying, distribution and | |||
modification follow. | |||
TERMS AND CONDITIONS | |||
0. Definitions. | |||
"This License" refers to version 3 of the GNU General Public License. | |||
"Copyright" also means copyright-like laws that apply to other kinds of | |||
works, such as semiconductor masks. | |||
"The Program" refers to any copyrightable work licensed under this | |||
License. Each licensee is addressed as "you". "Licensees" and | |||
"recipients" may be individuals or organizations. | |||
To "modify" a work means to copy from or adapt all or part of the work | |||
in a fashion requiring copyright permission, other than the making of an | |||
exact copy. The resulting work is called a "modified version" of the | |||
earlier work or a work "based on" the earlier work. | |||
A "covered work" means either the unmodified Program or a work based | |||
on the Program. | |||
To "propagate" a work means to do anything with it that, without | |||
permission, would make you directly or secondarily liable for | |||
infringement under applicable copyright law, except executing it on a | |||
computer or modifying a private copy. Propagation includes copying, | |||
distribution (with or without modification), making available to the | |||
public, and in some countries other activities as well. | |||
To "convey" a work means any kind of propagation that enables other | |||
parties to make or receive copies. Mere interaction with a user through | |||
a computer network, with no transfer of a copy, is not conveying. | |||
An interactive user interface displays "Appropriate Legal Notices" | |||
to the extent that it includes a convenient and prominently visible | |||
feature that (1) displays an appropriate copyright notice, and (2) | |||
tells the user that there is no warranty for the work (except to the | |||
extent that warranties are provided), that licensees may convey the | |||
work under this License, and how to view a copy of this License. If | |||
the interface presents a list of user commands or options, such as a | |||
menu, a prominent item in the list meets this criterion. | |||
1. Source Code. | |||
The "source code" for a work means the preferred form of the work | |||
for making modifications to it. "Object code" means any non-source | |||
form of a work. | |||
A "Standard Interface" means an interface that either is an official | |||
standard defined by a recognized standards body, or, in the case of | |||
interfaces specified for a particular programming language, one that | |||
is widely used among developers working in that language. | |||
The "System Libraries" of an executable work include anything, other | |||
than the work as a whole, that (a) is included in the normal form of | |||
packaging a Major Component, but which is not part of that Major | |||
Component, and (b) serves only to enable use of the work with that | |||
Major Component, or to implement a Standard Interface for which an | |||
implementation is available to the public in source code form. A | |||
"Major Component", in this context, means a major essential component | |||
(kernel, window system, and so on) of the specific operating system | |||
(if any) on which the executable work runs, or a compiler used to | |||
produce the work, or an object code interpreter used to run it. | |||
The "Corresponding Source" for a work in object code form means all | |||
the source code needed to generate, install, and (for an executable | |||
work) run the object code and to modify the work, including scripts to | |||
control those activities. However, it does not include the work's | |||
System Libraries, or general-purpose tools or generally available free | |||
programs which are used unmodified in performing those activities but | |||
which are not part of the work. For example, Corresponding Source | |||
includes interface definition files associated with source files for | |||
the work, and the source code for shared libraries and dynamically | |||
linked subprograms that the work is specifically designed to require, | |||
such as by intimate data communication or control flow between those | |||
subprograms and other parts of the work. | |||
The Corresponding Source need not include anything that users | |||
can regenerate automatically from other parts of the Corresponding | |||
Source. | |||
The Corresponding Source for a work in source code form is that | |||
same work. | |||
2. Basic Permissions. | |||
All rights granted under this License are granted for the term of | |||
copyright on the Program, and are irrevocable provided the stated | |||
conditions are met. This License explicitly affirms your unlimited | |||
permission to run the unmodified Program. The output from running a | |||
covered work is covered by this License only if the output, given its | |||
content, constitutes a covered work. This License acknowledges your | |||
rights of fair use or other equivalent, as provided by copyright law. | |||
You may make, run and propagate covered works that you do not | |||
convey, without conditions so long as your license otherwise remains | |||
in force. You may convey covered works to others for the sole purpose | |||
of having them make modifications exclusively for you, or provide you | |||
with facilities for running those works, provided that you comply with | |||
the terms of this License in conveying all material for which you do | |||
not control copyright. Those thus making or running the covered works | |||
for you must do so exclusively on your behalf, under your direction | |||
and control, on terms that prohibit them from making any copies of | |||
your copyrighted material outside their relationship with you. | |||
Conveying under any other circumstances is permitted solely under | |||
the conditions stated below. Sublicensing is not allowed; section 10 | |||
makes it unnecessary. | |||
3. Protecting Users' Legal Rights From Anti-Circumvention Law. | |||
No covered work shall be deemed part of an effective technological | |||
measure under any applicable law fulfilling obligations under article | |||
11 of the WIPO copyright treaty adopted on 20 December 1996, or | |||
similar laws prohibiting or restricting circumvention of such | |||
measures. | |||
When you convey a covered work, you waive any legal power to forbid | |||
circumvention of technological measures to the extent such circumvention | |||
is effected by exercising rights under this License with respect to | |||
the covered work, and you disclaim any intention to limit operation or | |||
modification of the work as a means of enforcing, against the work's | |||
users, your or third parties' legal rights to forbid circumvention of | |||
technological measures. | |||
4. Conveying Verbatim Copies. | |||
You may convey verbatim copies of the Program's source code as you | |||
receive it, in any medium, provided that you conspicuously and | |||
appropriately publish on each copy an appropriate copyright notice; | |||
keep intact all notices stating that this License and any | |||
non-permissive terms added in accord with section 7 apply to the code; | |||
keep intact all notices of the absence of any warranty; and give all | |||
recipients a copy of this License along with the Program. | |||
You may charge any price or no price for each copy that you convey, | |||
and you may offer support or warranty protection for a fee. | |||
5. Conveying Modified Source Versions. | |||
You may convey a work based on the Program, or the modifications to | |||
produce it from the Program, in the form of source code under the | |||
terms of section 4, provided that you also meet all of these conditions: | |||
a) The work must carry prominent notices stating that you modified | |||
it, and giving a relevant date. | |||
b) The work must carry prominent notices stating that it is | |||
released under this License and any conditions added under section | |||
7. This requirement modifies the requirement in section 4 to | |||
"keep intact all notices". | |||
c) You must license the entire work, as a whole, under this | |||
License to anyone who comes into possession of a copy. This | |||
License will therefore apply, along with any applicable section 7 | |||
additional terms, to the whole of the work, and all its parts, | |||
regardless of how they are packaged. This License gives no | |||
permission to license the work in any other way, but it does not | |||
invalidate such permission if you have separately received it. | |||
d) If the work has interactive user interfaces, each must display | |||
Appropriate Legal Notices; however, if the Program has interactive | |||
interfaces that do not display Appropriate Legal Notices, your | |||
work need not make them do so. | |||
A compilation of a covered work with other separate and independent | |||
works, which are not by their nature extensions of the covered work, | |||
and which are not combined with it such as to form a larger program, | |||
in or on a volume of a storage or distribution medium, is called an | |||
"aggregate" if the compilation and its resulting copyright are not | |||
used to limit the access or legal rights of the compilation's users | |||
beyond what the individual works permit. Inclusion of a covered work | |||
in an aggregate does not cause this License to apply to the other | |||
parts of the aggregate. | |||
6. Conveying Non-Source Forms. | |||
You may convey a covered work in object code form under the terms | |||
of sections 4 and 5, provided that you also convey the | |||
machine-readable Corresponding Source under the terms of this License, | |||
in one of these ways: | |||
a) Convey the object code in, or embodied in, a physical product | |||
(including a physical distribution medium), accompanied by the | |||
Corresponding Source fixed on a durable physical medium | |||
customarily used for software interchange. | |||
b) Convey the object code in, or embodied in, a physical product | |||
(including a physical distribution medium), accompanied by a | |||
written offer, valid for at least three years and valid for as | |||
long as you offer spare parts or customer support for that product | |||
model, to give anyone who possesses the object code either (1) a | |||
copy of the Corresponding Source for all the software in the | |||
product that is covered by this License, on a durable physical | |||
medium customarily used for software interchange, for a price no | |||
more than your reasonable cost of physically performing this | |||
conveying of source, or (2) access to copy the | |||
Corresponding Source from a network server at no charge. | |||
c) Convey individual copies of the object code with a copy of the | |||
written offer to provide the Corresponding Source. This | |||
alternative is allowed only occasionally and noncommercially, and | |||
only if you received the object code with such an offer, in accord | |||
with subsection 6b. | |||
d) Convey the object code by offering access from a designated | |||
place (gratis or for a charge), and offer equivalent access to the | |||
Corresponding Source in the same way through the same place at no | |||
further charge. You need not require recipients to copy the | |||
Corresponding Source along with the object code. If the place to | |||
copy the object code is a network server, the Corresponding Source | |||
may be on a different server (operated by you or a third party) | |||
that supports equivalent copying facilities, provided you maintain | |||
clear directions next to the object code saying where to find the | |||
Corresponding Source. Regardless of what server hosts the | |||
Corresponding Source, you remain obligated to ensure that it is | |||
available for as long as needed to satisfy these requirements. | |||
e) Convey the object code using peer-to-peer transmission, provided | |||
you inform other peers where the object code and Corresponding | |||
Source of the work are being offered to the general public at no | |||
charge under subsection 6d. | |||
A separable portion of the object code, whose source code is excluded | |||
from the Corresponding Source as a System Library, need not be | |||
included in conveying the object code work. | |||
A "User Product" is either (1) a "consumer product", which means any | |||
tangible personal property which is normally used for personal, family, | |||
or household purposes, or (2) anything designed or sold for incorporation | |||
into a dwelling. In determining whether a product is a consumer product, | |||
doubtful cases shall be resolved in favor of coverage. For a particular | |||
product received by a particular user, "normally used" refers to a | |||
typical or common use of that class of product, regardless of the status | |||
of the particular user or of the way in which the particular user | |||
actually uses, or expects or is expected to use, the product. A product | |||
is a consumer product regardless of whether the product has substantial | |||
commercial, industrial or non-consumer uses, unless such uses represent | |||
the only significant mode of use of the product. | |||
"Installation Information" for a User Product means any methods, | |||
procedures, authorization keys, or other information required to install | |||
and execute modified versions of a covered work in that User Product from | |||
a modified version of its Corresponding Source. The information must | |||
suffice to ensure that the continued functioning of the modified object | |||
code is in no case prevented or interfered with solely because | |||
modification has been made. | |||
If you convey an object code work under this section in, or with, or | |||
specifically for use in, a User Product, and the conveying occurs as | |||
part of a transaction in which the right of possession and use of the | |||
User Product is transferred to the recipient in perpetuity or for a | |||
fixed term (regardless of how the transaction is characterized), the | |||
Corresponding Source conveyed under this section must be accompanied | |||
by the Installation Information. But this requirement does not apply | |||
if neither you nor any third party retains the ability to install | |||
modified object code on the User Product (for example, the work has | |||
been installed in ROM). | |||
The requirement to provide Installation Information does not include a | |||
requirement to continue to provide support service, warranty, or updates | |||
for a work that has been modified or installed by the recipient, or for | |||
the User Product in which it has been modified or installed. Access to a | |||
network may be denied when the modification itself materially and | |||
adversely affects the operation of the network or violates the rules and | |||
protocols for communication across the network. | |||
Corresponding Source conveyed, and Installation Information provided, | |||
in accord with this section must be in a format that is publicly | |||
documented (and with an implementation available to the public in | |||
source code form), and must require no special password or key for | |||
unpacking, reading or copying. | |||
7. Additional Terms. | |||
"Additional permissions" are terms that supplement the terms of this | |||
License by making exceptions from one or more of its conditions. | |||
Additional permissions that are applicable to the entire Program shall | |||
be treated as though they were included in this License, to the extent | |||
that they are valid under applicable law. If additional permissions | |||
apply only to part of the Program, that part may be used separately | |||
under those permissions, but the entire Program remains governed by | |||
this License without regard to the additional permissions. | |||
When you convey a copy of a covered work, you may at your option | |||
remove any additional permissions from that copy, or from any part of | |||
it. (Additional permissions may be written to require their own | |||
removal in certain cases when you modify the work.) You may place | |||
additional permissions on material, added by you to a covered work, | |||
for which you have or can give appropriate copyright permission. | |||
Notwithstanding any other provision of this License, for material you | |||
add to a covered work, you may (if authorized by the copyright holders of | |||
that material) supplement the terms of this License with terms: | |||
a) Disclaiming warranty or limiting liability differently from the | |||
terms of sections 15 and 16 of this License; or | |||
b) Requiring preservation of specified reasonable legal notices or | |||
author attributions in that material or in the Appropriate Legal | |||
Notices displayed by works containing it; or | |||
c) Prohibiting misrepresentation of the origin of that material, or | |||
requiring that modified versions of such material be marked in | |||
reasonable ways as different from the original version; or | |||
d) Limiting the use for publicity purposes of names of licensors or | |||
authors of the material; or | |||
e) Declining to grant rights under trademark law for use of some | |||
trade names, trademarks, or service marks; or | |||
f) Requiring indemnification of licensors and authors of that | |||
material by anyone who conveys the material (or modified versions of | |||
it) with contractual assumptions of liability to the recipient, for | |||
any liability that these contractual assumptions directly impose on | |||
those licensors and authors. | |||
All other non-permissive additional terms are considered "further | |||
restrictions" within the meaning of section 10. If the Program as you | |||
received it, or any part of it, contains a notice stating that it is | |||
governed by this License along with a term that is a further | |||
restriction, you may remove that term. If a license document contains | |||
a further restriction but permits relicensing or conveying under this | |||
License, you may add to a covered work material governed by the terms | |||
of that license document, provided that the further restriction does | |||
not survive such relicensing or conveying. | |||
If you add terms to a covered work in accord with this section, you | |||
must place, in the relevant source files, a statement of the | |||
additional terms that apply to those files, or a notice indicating | |||
where to find the applicable terms. | |||
Additional terms, permissive or non-permissive, may be stated in the | |||
form of a separately written license, or stated as exceptions; | |||
the above requirements apply either way. | |||
8. Termination. | |||
You may not propagate or modify a covered work except as expressly | |||
provided under this License. Any attempt otherwise to propagate or | |||
modify it is void, and will automatically terminate your rights under | |||
this License (including any patent licenses granted under the third | |||
paragraph of section 11). | |||
However, if you cease all violation of this License, then your | |||
license from a particular copyright holder is reinstated (a) | |||
provisionally, unless and until the copyright holder explicitly and | |||
finally terminates your license, and (b) permanently, if the copyright | |||
holder fails to notify you of the violation by some reasonable means | |||
prior to 60 days after the cessation. | |||
Moreover, your license from a particular copyright holder is | |||
reinstated permanently if the copyright holder notifies you of the | |||
violation by some reasonable means, this is the first time you have | |||
received notice of violation of this License (for any work) from that | |||
copyright holder, and you cure the violation prior to 30 days after | |||
your receipt of the notice. | |||
Termination of your rights under this section does not terminate the | |||
licenses of parties who have received copies or rights from you under | |||
this License. If your rights have been terminated and not permanently | |||
reinstated, you do not qualify to receive new licenses for the same | |||
material under section 10. | |||
9. Acceptance Not Required for Having Copies. | |||
You are not required to accept this License in order to receive or | |||
run a copy of the Program. Ancillary propagation of a covered work | |||
occurring solely as a consequence of using peer-to-peer transmission | |||
to receive a copy likewise does not require acceptance. However, | |||
nothing other than this License grants you permission to propagate or | |||
modify any covered work. These actions infringe copyright if you do | |||
not accept this License. Therefore, by modifying or propagating a | |||
covered work, you indicate your acceptance of this License to do so. | |||
10. Automatic Licensing of Downstream Recipients. | |||
Each time you convey a covered work, the recipient automatically | |||
receives a license from the original licensors, to run, modify and | |||
propagate that work, subject to this License. You are not responsible | |||
for enforcing compliance by third parties with this License. | |||
An "entity transaction" is a transaction transferring control of an | |||
organization, or substantially all assets of one, or subdividing an | |||
organization, or merging organizations. If propagation of a covered | |||
work results from an entity transaction, each party to that | |||
transaction who receives a copy of the work also receives whatever | |||
licenses to the work the party's predecessor in interest had or could | |||
give under the previous paragraph, plus a right to possession of the | |||
Corresponding Source of the work from the predecessor in interest, if | |||
the predecessor has it or can get it with reasonable efforts. | |||
You may not impose any further restrictions on the exercise of the | |||
rights granted or affirmed under this License. For example, you may | |||
not impose a license fee, royalty, or other charge for exercise of | |||
rights granted under this License, and you may not initiate litigation | |||
(including a cross-claim or counterclaim in a lawsuit) alleging that | |||
any patent claim is infringed by making, using, selling, offering for | |||
sale, or importing the Program or any portion of it. | |||
11. Patents. | |||
A "contributor" is a copyright holder who authorizes use under this | |||
License of the Program or a work on which the Program is based. The | |||
work thus licensed is called the contributor's "contributor version". | |||
A contributor's "essential patent claims" are all patent claims | |||
owned or controlled by the contributor, whether already acquired or | |||
hereafter acquired, that would be infringed by some manner, permitted | |||
by this License, of making, using, or selling its contributor version, | |||
but do not include claims that would be infringed only as a | |||
consequence of further modification of the contributor version. For | |||
purposes of this definition, "control" includes the right to grant | |||
patent sublicenses in a manner consistent with the requirements of | |||
this License. | |||
Each contributor grants you a non-exclusive, worldwide, royalty-free | |||
patent license under the contributor's essential patent claims, to | |||
make, use, sell, offer for sale, import and otherwise run, modify and | |||
propagate the contents of its contributor version. | |||
In the following three paragraphs, a "patent license" is any express | |||
agreement or commitment, however denominated, not to enforce a patent | |||
(such as an express permission to practice a patent or covenant not to | |||
sue for patent infringement). To "grant" such a patent license to a | |||
party means to make such an agreement or commitment not to enforce a | |||
patent against the party. | |||
If you convey a covered work, knowingly relying on a patent license, | |||
and the Corresponding Source of the work is not available for anyone | |||
to copy, free of charge and under the terms of this License, through a | |||
publicly available network server or other readily accessible means, | |||
then you must either (1) cause the Corresponding Source to be so | |||
available, or (2) arrange to deprive yourself of the benefit of the | |||
patent license for this particular work, or (3) arrange, in a manner | |||
consistent with the requirements of this License, to extend the patent | |||
license to downstream recipients. "Knowingly relying" means you have | |||
actual knowledge that, but for the patent license, your conveying the | |||
covered work in a country, or your recipient's use of the covered work | |||
in a country, would infringe one or more identifiable patents in that | |||
country that you have reason to believe are valid. | |||
If, pursuant to or in connection with a single transaction or | |||
arrangement, you convey, or propagate by procuring conveyance of, a | |||
covered work, and grant a patent license to some of the parties | |||
receiving the covered work authorizing them to use, propagate, modify | |||
or convey a specific copy of the covered work, then the patent license | |||
you grant is automatically extended to all recipients of the covered | |||
work and works based on it. | |||
A patent license is "discriminatory" if it does not include within | |||
the scope of its coverage, prohibits the exercise of, or is | |||
conditioned on the non-exercise of one or more of the rights that are | |||
specifically granted under this License. You may not convey a covered | |||
work if you are a party to an arrangement with a third party that is | |||
in the business of distributing software, under which you make payment | |||
to the third party based on the extent of your activity of conveying | |||
the work, and under which the third party grants, to any of the | |||
parties who would receive the covered work from you, a discriminatory | |||
patent license (a) in connection with copies of the covered work | |||
conveyed by you (or copies made from those copies), or (b) primarily | |||
for and in connection with specific products or compilations that | |||
contain the covered work, unless you entered into that arrangement, | |||
or that patent license was granted, prior to 28 March 2007. | |||
Nothing in this License shall be construed as excluding or limiting | |||
any implied license or other defenses to infringement that may | |||
otherwise be available to you under applicable patent law. | |||
12. No Surrender of Others' Freedom. | |||
If conditions are imposed on you (whether by court order, agreement or | |||
otherwise) that contradict the conditions of this License, they do not | |||
excuse you from the conditions of this License. If you cannot convey a | |||
covered work so as to satisfy simultaneously your obligations under this | |||
License and any other pertinent obligations, then as a consequence you may | |||
not convey it at all. For example, if you agree to terms that obligate you | |||
to collect a royalty for further conveying from those to whom you convey | |||
the Program, the only way you could satisfy both those terms and this | |||
License would be to refrain entirely from conveying the Program. | |||
13. Use with the GNU Affero General Public License. | |||
Notwithstanding any other provision of this License, you have | |||
permission to link or combine any covered work with a work licensed | |||
under version 3 of the GNU Affero General Public License into a single | |||
combined work, and to convey the resulting work. The terms of this | |||
License will continue to apply to the part which is the covered work, | |||
but the special requirements of the GNU Affero General Public License, | |||
section 13, concerning interaction through a network will apply to the | |||
combination as such. | |||
14. Revised Versions of this License. | |||
The Free Software Foundation may publish revised and/or new versions of | |||
the GNU General Public License from time to time. Such new versions will | |||
be similar in spirit to the present version, but may differ in detail to | |||
address new problems or concerns. | |||
Each version is given a distinguishing version number. If the | |||
Program specifies that a certain numbered version of the GNU General | |||
Public License "or any later version" applies to it, you have the | |||
option of following the terms and conditions either of that numbered | |||
version or of any later version published by the Free Software | |||
Foundation. If the Program does not specify a version number of the | |||
GNU General Public License, you may choose any version ever published | |||
by the Free Software Foundation. | |||
If the Program specifies that a proxy can decide which future | |||
versions of the GNU General Public License can be used, that proxy's | |||
public statement of acceptance of a version permanently authorizes you | |||
to choose that version for the Program. | |||
Later license versions may give you additional or different | |||
permissions. However, no additional obligations are imposed on any | |||
author or copyright holder as a result of your choosing to follow a | |||
later version. | |||
15. Disclaimer of Warranty. | |||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY | |||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT | |||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY | |||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, | |||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM | |||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF | |||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION. | |||
16. Limitation of Liability. | |||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING | |||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS | |||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY | |||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE | |||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF | |||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD | |||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), | |||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF | |||
SUCH DAMAGES. | |||
17. Interpretation of Sections 15 and 16. | |||
If the disclaimer of warranty and limitation of liability provided | |||
above cannot be given local legal effect according to their terms, | |||
reviewing courts shall apply local law that most closely approximates | |||
an absolute waiver of all civil liability in connection with the | |||
Program, unless a warranty or assumption of liability accompanies a | |||
copy of the Program in return for a fee. | |||
END OF TERMS AND CONDITIONS | |||
How to Apply These Terms to Your New Programs | |||
If you develop a new program, and you want it to be of the greatest | |||
possible use to the public, the best way to achieve this is to make it | |||
free software which everyone can redistribute and change under these terms. | |||
To do so, attach the following notices to the program. It is safest | |||
to attach them to the start of each source file to most effectively | |||
state the exclusion of warranty; and each file should have at least | |||
the "copyright" line and a pointer to where the full notice is found. | |||
<one line to give the program's name and a brief idea of what it does.> | |||
Copyright (C) <year> <name of author> | |||
This program is free software: you can redistribute it and/or modify | |||
it under the terms of the GNU General Public License as published by | |||
the Free Software Foundation, either version 3 of the License, or | |||
(at your option) any later version. | |||
This program is distributed in the hope that it will be useful, | |||
but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
GNU General Public License for more details. | |||
You should have received a copy of the GNU General Public License | |||
along with this program. If not, see <http://www.gnu.org/licenses/>. | |||
Also add information on how to contact you by electronic and paper mail. | |||
If the program does terminal interaction, make it output a short | |||
notice like this when it starts in an interactive mode: | |||
<program> Copyright (C) <year> <name of author> | |||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. | |||
This is free software, and you are welcome to redistribute it | |||
under certain conditions; type `show c' for details. | |||
The hypothetical commands `show w' and `show c' should show the appropriate | |||
parts of the General Public License. Of course, your program's commands | |||
might be different; for a GUI interface, you would use an "about box". | |||
You should also get your employer (if you work as a programmer) or school, | |||
if any, to sign a "copyright disclaimer" for the program, if necessary. | |||
For more information on this, and how to apply and follow the GNU GPL, see | |||
<http://www.gnu.org/licenses/>. | |||
The GNU General Public License does not permit incorporating your program | |||
into proprietary programs. If your program is a subroutine library, you | |||
may consider it more useful to permit linking proprietary applications with | |||
the library. If this is what you want to do, use the GNU Lesser General | |||
Public License instead of this License. But first, please read | |||
<http://www.gnu.org/philosophy/why-not-lgpl.html>. | |||
@ -1,34 +0,0 @@ | |||
# Ansible DigitalOcean external inventory script settings | |||
# | |||
[digital_ocean] | |||
# The module needs your DigitalOcean API Token. | |||
# It may also be specified on the command line via --api-token | |||
# or via the environment variables DO_API_TOKEN or DO_API_KEY | |||
# | |||
#api_token = 123456abcdefg | |||
# API calls to DigitalOcean may be slow. For this reason, we cache the results | |||
# of an API call. Set this to the path you want cache files to be written to. | |||
# One file will be written to this directory: | |||
# - ansible-digital_ocean.cache | |||
# | |||
cache_path = /tmp | |||
# The number of seconds a cache file is considered valid. After this many | |||
# seconds, a new API call will be made, and the cache file will be updated. | |||
# | |||
cache_max_age = 300 | |||
# Use the private network IP address instead of the public when available. | |||
# | |||
use_private_network = False | |||
# Pass variables to every group, e.g.: | |||
# | |||
# group_variables = { 'ansible_user': 'root' } | |||
# | |||
group_variables = {} |
@ -1,471 +0,0 @@ | |||
#!/usr/bin/env python | |||
''' | |||
DigitalOcean external inventory script | |||
====================================== | |||
Generates Ansible inventory of DigitalOcean Droplets. | |||
In addition to the --list and --host options used by Ansible, there are options | |||
for generating JSON of other DigitalOcean data. This is useful when creating | |||
droplets. For example, --regions will return all the DigitalOcean Regions. | |||
This information can also be easily found in the cache file, whose default | |||
location is /tmp/ansible-digital_ocean.cache.
The --pretty (-p) option pretty-prints the output for better human readability. | |||
---- | |||
Although the cache stores all the information received from DigitalOcean, | |||
the cache is not used for current droplet information (in --list, --host, | |||
--all, and --droplets). This is so that accurate droplet information is always | |||
found. You can force this script to use the cache with --force-cache. | |||
---- | |||
Configuration is read from `digital_ocean.ini`, then from environment variables, | |||
then from command-line arguments.
Most notably, the DigitalOcean API Token must be specified. It can be specified | |||
in the INI file or with the following environment variables: | |||
export DO_API_TOKEN='abc123' or | |||
export DO_API_KEY='abc123' | |||
Alternatively, it can be passed on the command-line with --api-token. | |||
If you specify DigitalOcean credentials in the INI file, a handy way to | |||
get them into your environment (e.g., to use the digital_ocean module) | |||
is to use the output of the --env option with export: | |||
export $(digital_ocean.py --env) | |||
---- | |||
The following groups are generated from --list: | |||
- ID (droplet ID) | |||
- NAME (droplet NAME) | |||
- image_ID | |||
- image_NAME | |||
- distro_NAME (distribution NAME from image) | |||
- region_NAME | |||
- size_NAME | |||
- status_STATUS | |||
For each host, the following variables are registered: | |||
- do_backup_ids | |||
- do_created_at | |||
- do_disk | |||
- do_features - list | |||
- do_id | |||
- do_image - object | |||
- do_ip_address | |||
- do_private_ip_address | |||
- do_kernel - object | |||
- do_locked | |||
- do_memory | |||
- do_name | |||
- do_networks - object | |||
- do_next_backup_window | |||
- do_region - object | |||
- do_size - object | |||
- do_size_slug | |||
- do_snapshot_ids - list | |||
- do_status | |||
- do_tags | |||
- do_vcpus | |||
- do_volume_ids | |||
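For illustration, a trimmed --list response for a single hypothetical droplet
(name web-01, ID 3089745, public IP 203.0.113.10) might look roughly like:
```
{
  "all": {"hosts": ["203.0.113.10"], "vars": {}},
  "3089745": ["203.0.113.10"],
  "web-01": ["203.0.113.10"],
  "region_nyc1": {"hosts": ["203.0.113.10"], "vars": {}},
  "status_active": {"hosts": ["203.0.113.10"], "vars": {}},
  "_meta": {"hostvars": {"203.0.113.10": {"do_id": 3089745, "do_name": "web-01"}}}
}
```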
----- | |||
``` | |||
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] | |||
[--droplets] [--regions] [--images] [--sizes] | |||
[--ssh-keys] [--domains] [--pretty] | |||
[--cache-path CACHE_PATH] | |||
[--cache-max_age CACHE_MAX_AGE] | |||
[--force-cache] | |||
[--refresh-cache] | |||
[--api-token API_TOKEN] | |||
Produce an Ansible Inventory file based on DigitalOcean credentials | |||
optional arguments: | |||
-h, --help show this help message and exit | |||
--list List all active Droplets as Ansible inventory | |||
(default: True) | |||
--host HOST Get all Ansible inventory variables about a specific | |||
Droplet | |||
--all List all DigitalOcean information as JSON | |||
--droplets List Droplets as JSON | |||
--regions List Regions as JSON | |||
--images List Images as JSON | |||
--sizes List Sizes as JSON | |||
--ssh-keys List SSH keys as JSON | |||
--domains List Domains as JSON | |||
--pretty, -p Pretty-print results | |||
--cache-path CACHE_PATH | |||
Path to the cache files (default: .) | |||
--cache-max_age CACHE_MAX_AGE | |||
Maximum age of the cached items (default: 0) | |||
--force-cache Only use data from the cache | |||
--refresh-cache Force refresh of cache by making API requests to | |||
DigitalOcean (default: False - use cache files) | |||
--api-token API_TOKEN, -a API_TOKEN | |||
DigitalOcean API Token | |||
``` | |||
''' | |||
# (c) 2013, Evan Wies <evan@neomantra.net> | |||
# | |||
# Inspired by the EC2 inventory plugin: | |||
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py | |||
# | |||
# This file is part of Ansible, | |||
# | |||
# Ansible is free software: you can redistribute it and/or modify | |||
# it under the terms of the GNU General Public License as published by | |||
# the Free Software Foundation, either version 3 of the License, or | |||
# (at your option) any later version. | |||
# | |||
# Ansible is distributed in the hope that it will be useful, | |||
# but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
# GNU General Public License for more details. | |||
# | |||
# You should have received a copy of the GNU General Public License | |||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. | |||
###################################################################### | |||
import os | |||
import sys | |||
import re | |||
import argparse | |||
from time import time | |||
import ConfigParser | |||
import ast | |||
try: | |||
import json | |||
except ImportError: | |||
import simplejson as json | |||
try: | |||
from dopy.manager import DoManager | |||
except ImportError as e: | |||
sys.exit("failed=True msg='`dopy` library required for this script'") | |||
class DigitalOceanInventory(object): | |||
########################################################################### | |||
# Main execution path | |||
########################################################################### | |||
def __init__(self): | |||
''' Main execution path ''' | |||
# DigitalOceanInventory data | |||
self.data = {} # All DigitalOcean data | |||
self.inventory = {} # Ansible Inventory | |||
# Define defaults | |||
self.cache_path = '.' | |||
self.cache_max_age = 0 | |||
self.use_private_network = False | |||
self.group_variables = {} | |||
# Read settings, environment variables, and CLI arguments | |||
self.read_settings() | |||
self.read_environment() | |||
self.read_cli_args() | |||
# Verify credentials were set | |||
if not hasattr(self, 'api_token'): | |||
sys.stderr.write('''Could not find values for DigitalOcean api_token.
It must be specified via the ini file, a command line argument (--api-token),
or an environment variable (DO_API_TOKEN or DO_API_KEY)\n''')
sys.exit(-1) | |||
# env command, show DigitalOcean credentials | |||
if self.args.env: | |||
print("DO_API_TOKEN=%s" % self.api_token) | |||
sys.exit(0) | |||
# Manage cache | |||
self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" | |||
self.cache_refreshed = False | |||
if self.is_cache_valid(): | |||
self.load_from_cache() | |||
if len(self.data) == 0: | |||
if self.args.force_cache: | |||
sys.stderr.write('''Cache is empty and --force-cache was specified\n''') | |||
sys.exit(-1) | |||
self.manager = DoManager(None, self.api_token, api_version=2) | |||
# Pick the json_data to print based on the CLI command | |||
if self.args.droplets: | |||
self.load_from_digital_ocean('droplets') | |||
json_data = {'droplets': self.data['droplets']} | |||
elif self.args.regions: | |||
self.load_from_digital_ocean('regions') | |||
json_data = {'regions': self.data['regions']} | |||
elif self.args.images: | |||
self.load_from_digital_ocean('images') | |||
json_data = {'images': self.data['images']} | |||
elif self.args.sizes: | |||
self.load_from_digital_ocean('sizes') | |||
json_data = {'sizes': self.data['sizes']} | |||
elif self.args.ssh_keys: | |||
self.load_from_digital_ocean('ssh_keys') | |||
json_data = {'ssh_keys': self.data['ssh_keys']} | |||
elif self.args.domains: | |||
self.load_from_digital_ocean('domains') | |||
json_data = {'domains': self.data['domains']} | |||
elif self.args.all: | |||
self.load_from_digital_ocean() | |||
json_data = self.data | |||
elif self.args.host: | |||
json_data = self.load_droplet_variables_for_host() | |||
else: # '--list' comes last so it acts as the default
self.load_from_digital_ocean('droplets') | |||
self.build_inventory() | |||
json_data = self.inventory | |||
if self.cache_refreshed: | |||
self.write_to_cache() | |||
if self.args.pretty: | |||
print(json.dumps(json_data, sort_keys=True, indent=2)) | |||
else: | |||
print(json.dumps(json_data)) | |||
# That's all she wrote... | |||
########################################################################### | |||
# Script configuration | |||
########################################################################### | |||
def read_settings(self): | |||
''' Reads the settings from the digital_ocean.ini file ''' | |||
config = ConfigParser.SafeConfigParser() | |||
config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') | |||
# Credentials | |||
if config.has_option('digital_ocean', 'api_token'): | |||
self.api_token = config.get('digital_ocean', 'api_token') | |||
# Cache related | |||
if config.has_option('digital_ocean', 'cache_path'): | |||
self.cache_path = config.get('digital_ocean', 'cache_path') | |||
if config.has_option('digital_ocean', 'cache_max_age'): | |||
self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') | |||
# Private IP Address | |||
if config.has_option('digital_ocean', 'use_private_network'): | |||
self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') | |||
# Group variables | |||
if config.has_option('digital_ocean', 'group_variables'): | |||
self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) | |||
def read_environment(self): | |||
''' Reads the settings from environment variables ''' | |||
# Setup credentials | |||
if os.getenv("DO_API_TOKEN"): | |||
self.api_token = os.getenv("DO_API_TOKEN") | |||
if os.getenv("DO_API_KEY"): | |||
self.api_token = os.getenv("DO_API_KEY") | |||
def read_cli_args(self): | |||
''' Command line argument processing ''' | |||
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') | |||
parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') | |||
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') | |||
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') | |||
parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') | |||
parser.add_argument('--regions', action='store_true', help='List Regions as JSON') | |||
parser.add_argument('--images', action='store_true', help='List Images as JSON') | |||
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') | |||
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') | |||
parser.add_argument('--domains', action='store_true', help='List Domains as JSON') | |||
parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') | |||
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') | |||
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') | |||
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') | |||
parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, | |||
help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') | |||
parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') | |||
parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') | |||
self.args = parser.parse_args() | |||
if self.args.api_token: | |||
self.api_token = self.args.api_token | |||
# Make --list default if none of the other commands are specified | |||
if (not self.args.droplets and not self.args.regions and | |||
not self.args.images and not self.args.sizes and | |||
not self.args.ssh_keys and not self.args.domains and | |||
not self.args.all and not self.args.host): | |||
self.args.list = True | |||
########################################################################### | |||
# Data Management | |||
########################################################################### | |||
def load_from_digital_ocean(self, resource=None): | |||
'''Get JSON from DigitalOcean API''' | |||
if self.args.force_cache and os.path.isfile(self.cache_filename): | |||
return | |||
# We always get fresh droplets | |||
if self.is_cache_valid() and not (resource == 'droplets' or resource is None): | |||
return | |||
if self.args.refresh_cache: | |||
resource = None | |||
if resource == 'droplets' or resource is None: | |||
self.data['droplets'] = self.manager.all_active_droplets() | |||
self.cache_refreshed = True | |||
if resource == 'regions' or resource is None: | |||
self.data['regions'] = self.manager.all_regions() | |||
self.cache_refreshed = True | |||
if resource == 'images' or resource is None: | |||
self.data['images'] = self.manager.all_images(filter=None) | |||
self.cache_refreshed = True | |||
if resource == 'sizes' or resource is None: | |||
self.data['sizes'] = self.manager.sizes() | |||
self.cache_refreshed = True | |||
if resource == 'ssh_keys' or resource is None: | |||
self.data['ssh_keys'] = self.manager.all_ssh_keys() | |||
self.cache_refreshed = True | |||
if resource == 'domains' or resource is None: | |||
self.data['domains'] = self.manager.all_domains() | |||
self.cache_refreshed = True | |||
def build_inventory(self): | |||
'''Build Ansible inventory of droplets''' | |||
self.inventory = { | |||
'all': { | |||
'hosts': [], | |||
'vars': self.group_variables | |||
}, | |||
'_meta': {'hostvars': {}} | |||
} | |||
# add all droplets by id and name | |||
for droplet in self.data['droplets']: | |||
# when using private_networking, the API reports the private one in "ip_address". | |||
if 'private_networking' in droplet['features'] and not self.use_private_network: | |||
for net in droplet['networks']['v4']: | |||
if net['type'] == 'public':
dest = net['ip_address']
break
else: | |||
dest = droplet['ip_address'] | |||
self.inventory['all']['hosts'].append(dest) | |||
self.inventory[droplet['id']] = [dest] | |||
self.inventory[droplet['name']] = [dest] | |||
# groups that are always present | |||
for group in ('region_' + droplet['region']['slug'], | |||
'image_' + str(droplet['image']['id']), | |||
'size_' + droplet['size']['slug'], | |||
'distro_' + self.to_safe(droplet['image']['distribution']), | |||
'status_' + droplet['status']): | |||
if group not in self.inventory: | |||
self.inventory[group] = {'hosts': [], 'vars': {}} | |||
self.inventory[group]['hosts'].append(dest) | |||
# groups that are not always present | |||
for group in (droplet['image']['slug'], | |||
droplet['image']['name']): | |||
if group: | |||
image = 'image_' + self.to_safe(group) | |||
if image not in self.inventory: | |||
self.inventory[image] = {'hosts': [], 'vars': {}} | |||
self.inventory[image]['hosts'].append(dest) | |||
if droplet['tags']: | |||
for tag in droplet['tags']: | |||
if tag not in self.inventory: | |||
self.inventory[tag] = {'hosts': [], 'vars': {}} | |||
self.inventory[tag]['hosts'].append(dest) | |||
# hostvars | |||
info = self.do_namespace(droplet) | |||
self.inventory['_meta']['hostvars'][dest] = info | |||
def load_droplet_variables_for_host(self): | |||
'''Generate a JSON response to a --host call''' | |||
host = int(self.args.host) | |||
droplet = self.manager.show_droplet(host) | |||
info = self.do_namespace(droplet) | |||
return {'droplet': info} | |||
########################################################################### | |||
# Cache Management | |||
########################################################################### | |||
def is_cache_valid(self): | |||
''' Determines whether the cache file is still valid or has expired '''
if os.path.isfile(self.cache_filename): | |||
mod_time = os.path.getmtime(self.cache_filename) | |||
current_time = time() | |||
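# e.g. with cache_max_age = 300, a cache file written 200 seconds ago is still valid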
if (mod_time + self.cache_max_age) > current_time: | |||
return True | |||
return False | |||
def load_from_cache(self): | |||
''' Reads the data from the cache file and assigns it to member variables as Python Objects''' | |||
try: | |||
cache = open(self.cache_filename, 'r') | |||
json_data = cache.read() | |||
cache.close() | |||
data = json.loads(json_data) | |||
except IOError: | |||
data = {'data': {}, 'inventory': {}} | |||
self.data = data['data'] | |||
self.inventory = data['inventory'] | |||
def write_to_cache(self): | |||
''' Writes data in JSON format to a file ''' | |||
data = {'data': self.data, 'inventory': self.inventory} | |||
json_data = json.dumps(data, sort_keys=True, indent=2) | |||
cache = open(self.cache_filename, 'w') | |||
cache.write(json_data) | |||
cache.close() | |||
########################################################################### | |||
# Utilities | |||
########################################################################### | |||
def push(self, my_dict, key, element): | |||
''' Pushes an element onto a list that may not yet exist in the dict '''
if key in my_dict: | |||
my_dict[key].append(element) | |||
else: | |||
my_dict[key] = [element] | |||
def to_safe(self, word): | |||
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' | |||
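# e.g. 'Ubuntu 16.04.3 x64' becomes 'Ubuntu_16.04.3_x64'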
return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
def do_namespace(self, data): | |||
''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' | |||
info = {} | |||
for k, v in data.items(): | |||
info['do_' + k] = v | |||
return info | |||
########################################################################### | |||
# Run the script | |||
DigitalOceanInventory() |
@ -1,209 +0,0 @@ | |||
# Ansible EC2 external inventory script settings | |||
# | |||
[ec2] | |||
# to talk to a private eucalyptus instance uncomment these lines | |||
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True | |||
#eucalyptus_host = clc.cloud.domain.org | |||
# AWS regions to make calls to. Set this to 'all' to make request to all regions | |||
# in AWS and merge the results together. Alternatively, set this to a comma | |||
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not | |||
# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or | |||
# AWS_DEFAULT_REGION environment variable will be read to determine the region. | |||
regions = all | |||
regions_exclude = us-gov-west-1, cn-north-1 | |||
# When generating inventory, Ansible needs to know how to address a server. | |||
# Each EC2 instance has a lot of variables associated with it. Here is the list: | |||
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance | |||
# Below are 2 variables that are used as the address of a server: | |||
# - destination_variable | |||
# - vpc_destination_variable | |||
# This is the normal destination variable to use. If you are running Ansible | |||
# from outside EC2, then 'public_dns_name' makes the most sense. If you are | |||
# running Ansible from within EC2, then perhaps you want to use the internal | |||
# address, and should set this to 'private_dns_name'. The key of an EC2 tag | |||
# may optionally be used; however the boto instance variables hold precedence | |||
# in the event of a collision. | |||
destination_variable = public_dns_name | |||
# This allows you to override the inventory_name with an ec2 variable, instead | |||
# of using the destination_variable above. Addressing (aka ansible_ssh_host) | |||
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. | |||
#hostname_variable = tag_Name | |||
# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting | |||
# this to 'ip_address' will return the public IP address. For instances in a | |||
# private subnet, this should be set to 'private_ip_address', and Ansible must | |||
# be run from within EC2. The key of an EC2 tag may optionally be used; however | |||
# the boto instance variables hold precedence in the event of a collision. | |||
# WARNING: instances in a private VPC _without_ a public IP address
# will not be listed in the inventory until you set:
# vpc_destination_variable = private_ip_address | |||
vpc_destination_variable = ip_address | |||
# The following two settings allow flexible ansible host naming based on a | |||
# python format string and a comma-separated list of ec2 tags. Note that: | |||
# | |||
# 1) If the tags referenced are not present for some instances, empty strings | |||
# will be substituted in the format string. | |||
# 2) This overrides both destination_variable and vpc_destination_variable. | |||
# | |||
#destination_format = {0}.{1}.example.com | |||
#destination_format_tags = Name,environment | |||
# To tag instances on EC2 with the resource records that point to them from | |||
# Route53, set 'route53' to True. | |||
route53 = False | |||
# To use Route53 records as the inventory hostnames, uncomment and set | |||
# to equal the domain name you wish to use. You must also have 'route53' (above) | |||
# set to True. | |||
# route53_hostnames = .example.com | |||
# To exclude RDS instances from the inventory, uncomment and set to False. | |||
#rds = False | |||
# To exclude ElastiCache instances from the inventory, uncomment and set to False. | |||
#elasticache = False | |||
# Additionally, you can specify the list of zones to exclude from lookup in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com | |||
# By default, only EC2 instances in the 'running' state are returned. Set | |||
# 'all_instances' to True to return all instances regardless of state. | |||
all_instances = False | |||
# By default, only EC2 instances in the 'running' state are returned. Specify | |||
# EC2 instance states to return as a comma-separated list. This | |||
# option is overridden when 'all_instances' is True. | |||
# instance_states = pending, running, shutting-down, terminated, stopping, stopped | |||
# By default, only RDS instances in the 'available' state are returned. Set | |||
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False | |||
# Include RDS cluster information (Aurora etc.) | |||
include_rds_clusters = False | |||
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no nodes will be returned from
# unavailable clusters, regardless of their state and of what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False | |||
all_elasticache_clusters = False | |||
all_elasticache_nodes = False | |||
# API calls to EC2 are slow. For this reason, we cache the results of an API | |||
# call. Set this to the path you want cache files to be written to. Two files | |||
# will be written to this directory: | |||
# - ansible-ec2.cache | |||
# - ansible-ec2.index | |||
cache_path = ~/.ansible/tmp | |||
# The number of seconds a cache file is considered valid. After this many | |||
# seconds, a new API call will be made, and the cache file will be updated. | |||
# To disable the cache, set this value to 0 | |||
cache_max_age = 300 | |||
# Organize groups into a nested hierarchy instead of a flat namespace.
nested_groups = False | |||
# Replace dashes ('-') in tags when creating groups to avoid issues with Ansible
replace_dash_in_groups = True | |||
# If set to true, any tag of the form "a,b,c" is expanded into a list | |||
# and the results are used to create additional tag_* inventory groups. | |||
expand_csv_tags = False | |||
# The EC2 inventory output can become very large. To manage its size, | |||
# configure which groups should be created. | |||
group_by_instance_id = True | |||
group_by_region = True | |||
group_by_availability_zone = True | |||
group_by_aws_account = False | |||
group_by_ami_id = True | |||
group_by_instance_type = True | |||
group_by_instance_state = False | |||
group_by_key_pair = True | |||
group_by_vpc_id = True | |||
group_by_security_group = True | |||
group_by_tag_keys = True | |||
group_by_tag_none = True | |||
group_by_route53_names = True | |||
group_by_rds_engine = True | |||
group_by_rds_parameter_group = True | |||
group_by_elasticache_engine = True | |||
group_by_elasticache_cluster = True | |||
group_by_elasticache_parameter_group = True | |||
group_by_elasticache_replication_group = True | |||
# If you only want to include hosts that match a certain regular expression | |||
# pattern_include = staging-* | |||
# If you want to exclude any hosts that match a certain regular expression | |||
# pattern_exclude = staging-* | |||
# Instance filters can be used to control which instances are retrieved for | |||
# inventory. For the full list of possible filters, please read the EC2 API | |||
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters | |||
# Filters are key/value pairs separated by '=', to list multiple filters use | |||
# a list separated by commas. See examples below. | |||
# If you want to apply multiple filters simultaneously (AND), set stack_filters
# to True. The default behaviour is to combine (OR) the results of all filters.
# Stacking allows the use of multiple conditions to filter down, for example by
# environment and type of host.
stack_filters = False | |||
# Retrieve only instances with (key=value) env=staging tag | |||
# instance_filters = tag:env=staging | |||
# Retrieve only instances with role=webservers OR role=dbservers tag | |||
# instance_filters = tag:role=webservers,tag:role=dbservers | |||
# Retrieve only t1.micro instances OR instances with tag env=staging | |||
# instance_filters = instance-type=t1.micro,tag:env=staging | |||
# You can also use wildcards in filter values. The filter below will list
# instances whose Name tag value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc) | |||
# instance_filters = tag:Name=webservers1* | |||
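# Hypothetical stacking example: with stack_filters = True the two conditions
# below are ANDed, returning only t1.micro instances tagged env=staging
# stack_filters = True
# instance_filters = instance-type=t1.micro,tag:env=staging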
# An IAM role can be assumed, so all requests are run as that role. | |||
# This can be useful for connecting across different accounts, or to limit user | |||
# access | |||
# iam_role = role-arn | |||
# A boto configuration profile may be used to separate out credentials | |||
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html | |||
# boto_profile = some-boto-profile-name | |||
[credentials] | |||
# The AWS credentials can optionally be specified here. Credentials specified | |||
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or | |||
# AWS_PROFILE is set, or if the boto_profile property above is set. | |||
# | |||
# Supplying AWS credentials here is not recommended, as it introduces | |||
# non-trivial security concerns. When going down this route, please make sure | |||
# to set access permissions for this file correctly, e.g. handle it the same | |||
# way as you would a private SSH key. | |||
# | |||
# Unlike the boto and AWS configure files, this section does not support | |||
# profiles. | |||
# | |||
# aws_access_key_id = AXXXXXXXXXXXXXX | |||
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX | |||
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX |
@ -1,8 +0,0 @@ | |||
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- jsonconfig | |||
@ -1,9 +0,0 @@ | |||
--- | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
become: yes | |||
roles: | |||
- stop | |||
- unsafe_reset | |||
- start | |||
@ -1,8 +0,0 @@ | |||
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- stop | |||
- start |
@ -1,7 +0,0 @@ | |||
--- | |||
#genesis_file: "<undefined>" | |||
app_options_file: "app_options_files/public_testnet" | |||
seeds: "" | |||
testnet_name: testnet1 | |||
validators: true | |||
@ -1,71 +0,0 @@ | |||
--- | |||
- name: gather tendermint public keys | |||
when: (validators == true or validators == 'true') and genesis_file is not defined | |||
tags: reconfig-toml,reconfig-genesis | |||
command: "/usr/bin/tendermint show_validator --home /etc/{{service}} --log_level error" | |||
register: pubkeys | |||
changed_when: false | |||
- name: gather tendermint peer IDs | |||
when: genesis_file is not defined | |||
tags: reconfig-toml | |||
command: "/usr/bin/tendermint show_node_id --home /etc/{{service}} --log_level error" | |||
register: nodeids | |||
changed_when: false | |||
- name: reset permissions from root after gathering public keys
tags: reconfig-toml,reconfig-genesis | |||
file: "path=/etc/{{service}} owner={{service}} group={{service}} recurse=yes" | |||
- name: register tendermint public keys as host facts | |||
when: (validators == true or validators == 'true') and genesis_file is not defined | |||
tags: reconfig-toml,reconfig-genesis | |||
set_fact: "pubkey='{{pubkeys.stdout}}'" | |||
connection: local | |||
- name: register node ids as host facts | |||
when: genesis_file is not defined | |||
tags: reconfig-toml | |||
set_fact: "nodeid='{{nodeids.stdout}}'" | |||
connection: local | |||
- name: copy generated genesis.json - genesis_time will be updated | |||
when: (validators == true or validators == 'true') and (genesis_file is not defined) | |||
tags: reconfig-genesis | |||
template: | |||
src: genesis.json.j2 | |||
dest: "/etc/{{service}}/config/genesis.json" | |||
owner: "{{service}}" | |||
group: "{{service}}" | |||
- name: copy pre-created genesis.json | |||
when: genesis_file is defined | |||
tags: reconfig-genesis | |||
copy: "src={{genesis_file}} dest=/etc/{{service}}/config/genesis.json owner={{service}} group={{service}}" | |||
- name: copy tendermint config.toml | |||
tags: reconfig-toml | |||
when: validators == true or validators == 'true' | |||
template: | |||
src: config.toml.j2 | |||
dest: "/etc/{{service}}/config/config.toml" | |||
owner: "{{service}}" | |||
group: "{{service}}" | |||
- name: Copy validator network files for non-validators | |||
when: validators == false or validators == 'false' | |||
tags: reconfig-toml,reconfig-genesis | |||
get_url: "url={{item['src']}} dest={{item['dst']}} force=yes" | |||
with_items: | |||
- { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/{{service}}/genesis.json" , dst: "/etc/{{service}}/config/genesis.json" } | |||
- { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/config.toml" , dst: "/etc/{{service}}/config/config.toml" } | |||
- name: Set validator network files permissions for non-validators | |||
when: validators == false or validators == 'false' | |||
tags: reconfig-toml,reconfig-genesis | |||
file: "path={{item}} owner={{service}} group={{service}}" | |||
with_items: | |||
- "/etc/{{service}}/config/genesis.json" | |||
- "/etc/{{service}}/config/config.toml" | |||
@ -1,221 +0,0 @@ | |||
# This is a TOML config file. | |||
# For more information, see https://github.com/toml-lang/toml | |||
##### main base config options ##### | |||
# TCP or UNIX socket address of the ABCI application, | |||
# or the name of an ABCI application compiled in with the Tendermint binary | |||
proxy_app = "tcp://127.0.0.1:46658" | |||
# A custom human readable name for this node | |||
moniker = "{{inventory_hostname}}" | |||
# If this node is many blocks behind the tip of the chain, FastSync | |||
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits | |||
fast_sync = true | |||
{% if service == 'tendermint' %} | |||
# Database backend: leveldb | memdb | |||
db_backend = "memdb" | |||
# Database directory | |||
db_path = "data" | |||
# Output level for logging, including package level options | |||
log_level = "mempool:error,*:debug" | |||
{% else %} | |||
# Database backend: leveldb | memdb | |||
db_backend = "leveldb" | |||
# Database directory | |||
db_path = "data" | |||
# Output level for logging, including package level options | |||
log_level = "main:info,state:info,*:error" | |||
#log_level = "mempool:error,*:debug" | |||
{% endif %} | |||
##### additional base config options ##### | |||
# Path to the JSON file containing the initial validator set and other meta data | |||
genesis_file = "config/genesis.json" | |||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol | |||
priv_validator_file = "config/priv_validator.json" | |||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol | |||
node_key_file = "config/node_key.json" | |||
# Mechanism to connect to the ABCI application: socket | grpc | |||
abci = "socket" | |||
# TCP or UNIX socket address for the profiling server to listen on | |||
prof_laddr = "" | |||
# If true, query the ABCI app on connecting to a new peer | |||
# so the app can decide if we should keep the connection or not | |||
filter_peers = false | |||
##### advanced configuration options ##### | |||
##### rpc server configuration options ##### | |||
[rpc] | |||
# TCP or UNIX socket address for the RPC server to listen on | |||
laddr = "tcp://0.0.0.0:46657" | |||
# TCP or UNIX socket address for the gRPC server to listen on | |||
# NOTE: This server only supports /broadcast_tx_commit | |||
grpc_laddr = "" | |||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool | |||
unsafe = false | |||
##### peer to peer configuration options ##### | |||
[p2p] | |||
# Address to listen for incoming connections | |||
laddr = "tcp://0.0.0.0:46656" | |||
# Comma separated list of seed nodes to connect to | |||
seeds = "{{ seeds | default() }}" | |||
# Comma separated list of nodes to keep persistent connections to | |||
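# (the Jinja expression on the next line is rendered at deploy time; it joins
# every other host in the testnet's inventory groups as <node_id>@<hostname>:46656)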
{% set comma = joiner(",") %}persistent_peers = "{% for host in ((groups[testnet_name]|default([]))+(groups['tag_Environment_'~(testnet_name|regex_replace('-','_'))]|default([])))|difference(inventory_hostname) %}{{ comma() }}{{hostvars[host]["nodeid"]}}@{{hostvars[host]["inventory_hostname"]}}:46656{% endfor %}" | |||
# Path to address book | |||
addr_book_file = "config/addrbook.json" | |||
# Set true for strict address routability rules | |||
addr_book_strict = true | |||
# Time to wait before flushing messages out on the connection, in ms | |||
flush_throttle_timeout = 100 | |||
# Maximum number of peers to connect to | |||
#max_num_peers = 50 | |||
max_num_peers = 300 | |||
# Maximum size of a message packet payload, in bytes | |||
{% if service == 'tendermint' %} | |||
max_msg_packet_payload_size = 65536 | |||
{% else %} | |||
max_msg_packet_payload_size = 1024 | |||
{% endif %} | |||
# Rate at which packets can be sent, in bytes/second | |||
{% if service == 'tendermint' %} | |||
send_rate = 51200000 # 50 MB/s | |||
{% else %} | |||
send_rate = 512000 | |||
{% endif %} | |||
# Rate at which packets can be received, in bytes/second | |||
{% if service == 'tendermint' %} | |||
recv_rate = 51200000 # 50 MB/s | |||
{% else %} | |||
recv_rate = 512000 | |||
{% endif %} | |||
# Set true to enable the peer-exchange reactor | |||
pex = true | |||
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects. | |||
# | |||
# Does not work if the peer-exchange reactor is disabled. | |||
seed_mode = false | |||
##### mempool configuration options ##### | |||
[mempool] | |||
{% if service == 'tendermint' %} | |||
recheck = false | |||
{% else %} | |||
recheck = true | |||
{% endif %} | |||
recheck_empty = true | |||
broadcast = true | |||
{% if service == 'tendermint' %} | |||
wal_dir = "" | |||
{% else %} | |||
wal_dir = "data/mempool.wal" | |||
{% endif %} | |||
##### consensus configuration options ##### | |||
[consensus] | |||
wal_file = "data/cs.wal/wal" | |||
{% if service == 'tendermint' %} | |||
wal_light = true | |||
{% else %} | |||
wal_light = false | |||
{% endif %} | |||
# All timeouts are in milliseconds | |||
{% if service == 'tendermint' %} | |||
timeout_propose = 10000 | |||
{% else %} | |||
timeout_propose = 3000 | |||
{% endif %} | |||
timeout_propose_delta = 500 | |||
timeout_prevote = 1000 | |||
timeout_prevote_delta = 500 | |||
timeout_precommit = 1000 | |||
timeout_precommit_delta = 500 | |||
{% if service == 'tendermint' %} | |||
timeout_commit = 1 | |||
{% else %} | |||
timeout_commit = 1000 | |||
{% endif %} | |||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) | |||
{% if service == 'tendermint' %} | |||
skip_timeout_commit = true | |||
{% else %} | |||
skip_timeout_commit = false | |||
{% endif %} | |||
# BlockSize | |||
max_block_size_txs = 10000 | |||
max_block_size_bytes = 1 | |||
# EmptyBlocks mode and possible interval between empty blocks in seconds | |||
{% if service == 'tendermint' %} | |||
create_empty_blocks = false | |||
{% else %} | |||
create_empty_blocks = true | |||
create_empty_blocks_interval = 60 | |||
{% endif %} | |||
# Reactor sleep duration parameters are in milliseconds | |||
peer_gossip_sleep_duration = 100 | |||
peer_query_maj23_sleep_duration = 2000 | |||
##### transactions indexer configuration options ##### | |||
[tx_index] | |||
# What indexer to use for transactions | |||
# | |||
# Options: | |||
# 1) "null" (default) | |||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). | |||
indexer = "kv" | |||
# Comma-separated list of tags to index (by default the only tag is tx hash) | |||
# | |||
# It's recommended to index only a subset of tags due to possible memory | |||
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions. | |||
index_tags = "" | |||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags has precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
index_all_tags = false | |||
@ -1,50 +0,0 @@ | |||
{ | |||
"genesis_time":"{{ansible_date_time.iso8601}}", | |||
"chain_id":"{{testnet_name}}", | |||
"validators": | |||
[ | |||
{% if (validators == true) or (validators == 'true') %} | |||
{% set comma = joiner(",") %} | |||
{% for host in (groups[testnet_name]|default([]))+(groups['tag_Environment_'~(testnet_name|regex_replace('-','_'))]|default([])) %} | |||
{{ comma() }} | |||
{ | |||
"pub_key": { | |||
"data": "{{hostvars[host]["pubkey"]["data"]}}", | |||
"type": "{{hostvars[host]["pubkey"]["type"]}}" | |||
}, | |||
"power":1000, | |||
"name":"{{hostvars[host]["inventory_hostname"]}}" | |||
} | |||
{% endfor %} | |||
{% endif %} | |||
], | |||
"app_hash":"", | |||
{% if service == 'basecoind' %} | |||
"app_state": { | |||
{% else %} | |||
"app_options": { | |||
{% endif %} | |||
{% if app_options_file is defined %} | |||
{% include app_options_file %} | |||
{% endif %} | |||
} | |||
{% if service == 'ethermint' %} | |||
, | |||
"config": { | |||
"chainId": 15, | |||
"homesteadBlock": 0, | |||
"eip155Block": 0, | |||
"eip158Block": 0 | |||
}, | |||
"nonce": "0xdeadbeefdeadbeef", | |||
"timestamp": "0x00", | |||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", | |||
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", | |||
"difficulty": "0x40", | |||
"gasLimit": "0x8000000", | |||
"alloc": { | |||
"0x7eff122b94897ea5b0e2a9abf47b86337fafebdc": { "balance": "10000000000000000000000000000000000" }, | |||
"0xc6713982649D9284ff56c32655a9ECcCDA78422A": { "balance": "10000000000000000000000000000000000" } | |||
} | |||
{% endif %} | |||
} |
@ -1,22 +0,0 @@ | |||
--- | |||
- name: Create service group | |||
group: "name={{service}}" | |||
- name: Create service user | |||
user: "name={{service}} group={{service}} home=/etc/{{service}}" | |||
- name: Change user folder to be more permissive
file: "path=/etc/{{service}} mode=0755" | |||
- name: Create tendermint service | |||
template: "src=systemd.service.j2 dest=/etc/systemd/system/{{service}}.service" | |||
- name: Reload systemd services | |||
systemd: "name={{service}} daemon_reload=yes enabled=no" | |||
- name: Initialize tendermint | |||
command: "/usr/bin/tendermint init --home /etc/{{service}}" | |||
become: yes | |||
become_user: "{{service}}" | |||
@ -1,18 +0,0 @@ | |||
[Unit] | |||
Description={{service}} server | |||
Requires=network-online.target | |||
After=network-online.target | |||
[Service] | |||
Environment="TMHOME=/etc/{{service}}" | |||
Restart=on-failure | |||
User={{service}} | |||
Group={{service}} | |||
PermissionsStartOnly=true | |||
ExecStart=/usr/bin/tendermint node{{(service=='tendermint')|ternary(' --proxy_app=dummy','')}} | |||
ExecReload=/bin/kill -HUP $MAINPID | |||
KillSignal=SIGTERM | |||
[Install] | |||
WantedBy=multi-user.target | |||
@ -1,6 +0,0 @@ | |||
--- | |||
- name: Get config.toml from node | |||
fetch: "dest={{ destination | default('.') }}/config.toml flat=yes src=/etc/{{service}}/config/config.toml" | |||
run_once: yes | |||
@ -1,6 +0,0 @@ | |||
--- | |||
- name: Get file from node | |||
fetch: "dest={{ destination | default('.') }}/{{ source | basename }} flat=yes src='{{source}}'" | |||
run_once: yes | |||
@ -1,5 +0,0 @@ | |||
--- | |||
release_install: true | |||
binary: "{{ lookup('env','GOPATH') | default('') }}/bin/{{service}}" | |||
devops_path: false | |||
@ -1,55 +0,0 @@ | |||
--- | |||
#Three commands to install a service on CentOS/RedHat | |||
#wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | rpm --import - | |||
#wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo | |||
#yum update && yum install basecoin | |||
#This has a bug in Ansible 2.3: https://github.com/ansible/ansible/issues/20711 | |||
#- name: Add repository key on CentOS/RedHat | |||
# when: ansible_os_family == "RedHat" | |||
# rpm_key: key=https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | |||
#Workaround | |||
- name: Download repository key for CentOS/RedHat | |||
when: ansible_os_family == "RedHat" | |||
get_url: "url=https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64/RPM-GPG-KEY-Tendermint dest=/root/RPM-GPG-KEY-Tendermint force=yes checksum=sha256:a8c61d4061697d2595562c703dbafbdfdcfa7f0c75a523ac84d5609d1b444abe" | |||
- name: Import repository key for CentOS/RedHat | |||
when: ansible_os_family == "RedHat" | |||
command: "rpm --import /root/RPM-GPG-KEY-Tendermint" | |||
- name: Install tendermint repository on CentOS/RedHat | |||
when: ansible_os_family == "RedHat" | |||
yum_repository: | |||
name: tendermint | |||
baseurl: https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64 | |||
description: "Tendermint repo" | |||
gpgcheck: yes | |||
gpgkey: https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | |||
# repo_gpgcheck: yes | |||
- name: Install package on CentOS/RedHat | |||
when: ansible_os_family == "RedHat" | |||
yum: "pkg={{service}} update_cache=yes state=latest" | |||
# The below commands are required so that the tomlconfig playbook can run. | |||
- name: Install epel-release on CentOS/RedHat | |||
when: ansible_os_family == "RedHat" | |||
yum: "pkg=epel-release update_cache=yes state=latest" | |||
- name: Install pip on CentOS/RedHat | |||
when: ansible_os_family == "RedHat" | |||
yum: "pkg={{item}} state=latest" | |||
with_items: | |||
- python2-pip | |||
- python-virtualenv | |||
- unzip | |||
- tar | |||
#For show_validator command: | |||
- tendermint | |||
- name: Install toml | |||
when: ansible_os_family == "RedHat" | |||
pip: name=toml | |||
@ -1,34 +0,0 @@ | |||
--- | |||
#Three commands to install a service on Debian/Ubuntu | |||
#wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add - | |||
#wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list | |||
#apt-get update && apt-get install basecoin | |||
- name: Add repository key on Debian/Ubuntu | |||
when: ansible_os_family == "Debian" | |||
apt_key: | |||
url: https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | |||
id: 2122CBE9 | |||
- name: Install tendermint repository on Debian/Ubuntu | |||
when: ansible_os_family == "Debian" | |||
apt_repository: | |||
repo: deb https://tendermint-packages.interblock.io/{{ (devops_path | default(false) | bool) | ternary('devops/','') }}debian stable main | |||
- name: Install package on Debian/Ubuntu | |||
when: ansible_os_family == "Debian" | |||
apt: "pkg={{service}} update_cache=yes state=latest" | |||
# The below command is required to use the tomlconfig playbook. | |||
- name: Install helper packages on Debian/Ubuntu
when: ansible_os_family == "Debian" | |||
apt: "pkg={{item}} state=latest" | |||
with_items: | |||
- python-toml | |||
- unzip | |||
- tar | |||
#For show_validator command: | |||
- tendermint | |||
@ -1,40 +0,0 @@ | |||
--- | |||
- name: Set timezone | |||
when: timezone is defined | |||
file: path=/etc/localtime state=link src=/usr/share/zoneinfo/{{timezone}} force=yes | |||
- name: Disable journald rate-limiting | |||
lineinfile: "dest=/etc/systemd/journald.conf regexp={{item.regexp}} line='{{item.line}}'" | |||
with_items: | |||
- { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" } | |||
- { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" } | |||
- name: Create journal directory for permanent logs | |||
file: path=/var/log/journal state=directory | |||
- name: Set journal folder with systemd-tmpfiles | |||
command: "systemd-tmpfiles --create --prefix /var/log/journal" | |||
- name: Restart journald | |||
service: name=systemd-journald state=restarted | |||
- name: Allow core dumps on SIGABRT
shell: "ulimit -c unlimited" | |||
#TODO include is deprecated in Ansible 2.4.0 and will be removed in 2.8.0 | |||
#Replace it with include_tasks | |||
- include: debian.yml | |||
when: ansible_os_family == "Debian" | |||
- include: centos.yml | |||
when: ansible_os_family == "RedHat" | |||
- name: copy compiled binary | |||
when: not release_install|bool | |||
copy: | |||
src: "{{binary}}" | |||
dest: /usr/local/bin | |||
mode: 0755 | |||
@ -1,360 +0,0 @@ | |||
#!/usr/bin/python | |||
ANSIBLE_METADATA = { | |||
'metadata_version': '1.1', | |||
'status': ['preview'], | |||
'supported_by': 'community' | |||
} | |||
DOCUMENTATION = ''' | |||
--- | |||
module: jsonconfig | |||
short_description: Ensure a particular configuration is added to a json-formatted configuration file | |||
version_added: "2.4" | |||
description: | |||
- This module will add configuration to a json-formatted configuration file. | |||
options: | |||
dest: | |||
description: | |||
- The file to modify. | |||
required: true | |||
aliases: [ name, destfile ] | |||
json: | |||
description: | |||
- The configuration in json format to apply. | |||
    required: true | |||
merge: | |||
description: | |||
      - Used with C(state=present). If specified, it will merge the configuration. Otherwise | |||
        the configuration will be overwritten. | |||
required: false | |||
choices: [ "yes", "no" ] | |||
default: "yes" | |||
state: | |||
description: | |||
- Whether the configuration should be there or not. | |||
required: false | |||
choices: [ present, absent ] | |||
default: "present" | |||
create: | |||
description: | |||
- Used with C(state=present). If specified, the file will be created | |||
if it does not already exist. By default it will fail if the file | |||
is missing. | |||
required: false | |||
choices: [ "yes", "no" ] | |||
default: "no" | |||
backup: | |||
description: | |||
- Create a backup file including the timestamp information so you can | |||
get the original file back if you somehow clobbered it incorrectly. | |||
required: false | |||
choices: [ "yes", "no" ] | |||
default: "no" | |||
others: | |||
description: | |||
- All arguments accepted by the M(file) module also work here. | |||
required: false | |||
extends_documentation_fragment: | |||
- files | |||
- validate | |||
author: | |||
- "Greg Szabo (@greg-szabo)" | |||
''' | |||
EXAMPLES = ''' | |||
# Add a new section to a json file | |||
- name: Add comment section | |||
jsonconfig: | |||
dest: /etc/something.json | |||
json: '{ "comment": { "comment1": "mycomment" } }' | |||
# Rewrite a json file with the configuration | |||
- name: Create or overwrite config.json | |||
jsonconfig: | |||
dest: /etc/config.json | |||
json: '{ "regedit": { "freshfile": true } }' | |||
merge: no | |||
create: yes | |||
''' | |||
RETURN = ''' | |||
changed: | |||
description: True if the configuration changed. | |||
type: bool | |||
msg: | |||
description: Description of the change | |||
type: str | |||
''' | |||
from ansible.module_utils.basic import AnsibleModule | |||
from ansible.module_utils.six import b | |||
from ansible.module_utils._text import to_bytes, to_native | |||
import tempfile | |||
import json | |||
import copy | |||
import os | |||
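# write_changes: dump the new content to a temp file, optionally run the
# user-supplied 'validate' command against it (the command must contain a
# '%s' placeholder for the temp file path, e.g. validate="python -m json.tool %s"
# as a hypothetical check), then atomically move it over the destination
# only if validation passes.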
def write_changes(module, b_lines, dest): | |||
tmpfd, tmpfile = tempfile.mkstemp() | |||
f = os.fdopen(tmpfd, 'wb') | |||
f.writelines(b_lines) | |||
f.close() | |||
validate = module.params.get('validate', None) | |||
valid = not validate | |||
if validate: | |||
if "%s" not in validate: | |||
module.fail_json(msg="validate must contain %%s: %s" % (validate)) | |||
(rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict')) | |||
valid = rc == 0 | |||
if rc != 0: | |||
module.fail_json(msg='failed to validate: ' | |||
'rc:%s error:%s' % (rc, err)) | |||
if valid: | |||
module.atomic_move(tmpfile, | |||
to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'), | |||
unsafe_writes=module.params['unsafe_writes']) | |||
def check_file_attrs(module, changed, message, diff): | |||
file_args = module.load_file_common_arguments(module.params) | |||
if module.set_fs_attributes_if_different(file_args, False, diff=diff): | |||
if changed: | |||
message += " and " | |||
changed = True | |||
message += "ownership, perms or SE linux context changed" | |||
return message, changed | |||
#Merge dict d2 into dict d1 and return a new object | |||
def deepmerge(d1, d2): | |||
if d1 is None: | |||
return copy.deepcopy(d2) | |||
if d2 is None: | |||
return copy.deepcopy(d1) | |||
if d1 == d2: | |||
return copy.deepcopy(d1) | |||
if isinstance(d1, dict) and isinstance(d2, dict): | |||
        result = {}
        # set union works on both Python 2 and 3; d1.keys() + d2.keys()
        # fails on Python 3, where keys() returns a view.
        for key in set(d1) | set(d2):
da = db = None | |||
if key in d1: | |||
da = d1[key] | |||
if key in d2: | |||
db = d2[key] | |||
result[key] = deepmerge(da, db) | |||
return result | |||
else: | |||
return copy.deepcopy(d2) | |||
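# A quick sketch of the merge semantics (illustrative values, not from the repo):
#   deepmerge({'a': {'x': 1}}, {'a': {'y': 2}})  ->  {'a': {'x': 1, 'y': 2}}
#   deepmerge({'a': 1}, {'a': 2})                ->  {'a': 2}   (d2 wins on scalar conflicts)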
#Remove dict d2 from dict d1 and return a new object | |||
def deepdiff(d1, d2): | |||
if d1 is None or d2 is None: | |||
return None | |||
if d1 == d2: | |||
return None | |||
if isinstance(d1, dict) and isinstance(d2, dict): | |||
result = {} | |||
for key in d1.keys(): | |||
if key in d2: | |||
dd = deepdiff(d1[key],d2[key]) | |||
if dd is not None: | |||
result[key] = dd | |||
else: | |||
result[key] = d1[key] | |||
return result | |||
else: | |||
return None | |||
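# And the removal semantics (illustrative values): keys listed in d2 are
# pruned from d1, recursively; a fully-removed tree yields None.
#   deepdiff({'a': 1, 'b': 2}, {'a': 1})  ->  {'b': 2}
#   deepdiff({'a': 1}, {'a': 1})          ->  None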
def present(module, dest, conf, merge, create, backup): | |||
diff = {'before': '', | |||
'after': '', | |||
'before_header': '%s (content)' % dest, | |||
'after_header': '%s (content)' % dest} | |||
b_dest = to_bytes(dest, errors='surrogate_or_strict') | |||
if not os.path.exists(b_dest): | |||
if not create: | |||
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) | |||
b_destpath = os.path.dirname(b_dest) | |||
if not os.path.exists(b_destpath) and not module.check_mode: | |||
os.makedirs(b_destpath) | |||
b_lines = [] | |||
else: | |||
f = open(b_dest, 'rb') | |||
b_lines = f.readlines() | |||
f.close() | |||
lines = to_native(b('').join(b_lines)) | |||
if module._diff: | |||
diff['before'] = lines | |||
    # An empty or newly-created file parses as an empty config. Parse the
    # parameter with json.loads rather than eval: eval cannot handle JSON
    # literals such as 'true' or 'null', and it would evaluate arbitrary
    # expressions from the caller.
    jsonconfig = json.loads(lines) if lines else {}
    config = json.loads(conf)
if not isinstance(config, dict): | |||
module.fail_json(msg="Invalid value in json parameter: {0}".format(config)) | |||
b_lines_new = b_lines | |||
msg = '' | |||
changed = False | |||
if not merge: | |||
if jsonconfig != config: | |||
b_lines_new = to_bytes(json.dumps(config, sort_keys=True, indent=4, separators=(',', ': '))) | |||
msg = 'config overwritten' | |||
changed = True | |||
else: | |||
mergedconfig = deepmerge(jsonconfig,config) | |||
if jsonconfig != mergedconfig: | |||
b_lines_new = to_bytes(json.dumps(mergedconfig, sort_keys=True, indent=4, separators=(',', ': '))) | |||
msg = 'config merged' | |||
changed = True | |||
if module._diff: | |||
diff['after'] = to_native(b('').join(b_lines_new)) | |||
backupdest = "" | |||
if changed and not module.check_mode: | |||
if backup and os.path.exists(b_dest): | |||
backupdest = module.backup_local(dest) | |||
write_changes(module, b_lines_new, dest) | |||
if module.check_mode and not os.path.exists(b_dest): | |||
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) | |||
attr_diff = {} | |||
msg, changed = check_file_attrs(module, changed, msg, attr_diff) | |||
attr_diff['before_header'] = '%s (file attributes)' % dest | |||
attr_diff['after_header'] = '%s (file attributes)' % dest | |||
difflist = [diff, attr_diff] | |||
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist) | |||
def absent(module, dest, conf, backup): | |||
b_dest = to_bytes(dest, errors='surrogate_or_strict') | |||
if not os.path.exists(b_dest): | |||
module.exit_json(changed=False, msg="file not present") | |||
msg = '' | |||
diff = {'before': '', | |||
'after': '', | |||
'before_header': '%s (content)' % dest, | |||
'after_header': '%s (content)' % dest} | |||
f = open(b_dest, 'rb') | |||
b_lines = f.readlines() | |||
f.close() | |||
    lines = to_native(b('').join(b_lines))
    jsonconfig = json.loads(lines) if lines else {}
    config = json.loads(conf)
if not isinstance(config, dict): | |||
module.fail_json(msg="Invalid value in json parameter: {0}".format(config)) | |||
if module._diff: | |||
diff['before'] = to_native(b('').join(b_lines)) | |||
b_lines_new = b_lines | |||
msg = '' | |||
changed = False | |||
diffconfig = deepdiff(jsonconfig,config) | |||
if diffconfig is None: | |||
diffconfig = {} | |||
if jsonconfig != diffconfig: | |||
b_lines_new = to_bytes(json.dumps(diffconfig, sort_keys=True, indent=4, separators=(',', ': '))) | |||
msg = 'config removed' | |||
changed = True | |||
if module._diff: | |||
diff['after'] = to_native(b('').join(b_lines_new)) | |||
backupdest = "" | |||
if changed and not module.check_mode: | |||
if backup: | |||
backupdest = module.backup_local(dest) | |||
write_changes(module, b_lines_new, dest) | |||
attr_diff = {} | |||
msg, changed = check_file_attrs(module, changed, msg, attr_diff) | |||
attr_diff['before_header'] = '%s (file attributes)' % dest | |||
attr_diff['after_header'] = '%s (file attributes)' % dest | |||
difflist = [diff, attr_diff] | |||
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist) | |||
def main(): | |||
# define the available arguments/parameters that a user can pass to | |||
# the module | |||
module_args = dict( | |||
dest=dict(type='str', required=True), | |||
        json=dict(type='str', required=True),
merge=dict(type='bool', default=True), | |||
state=dict(default='present', choices=['absent', 'present']), | |||
create=dict(type='bool', default=False), | |||
backup=dict(type='bool', default=False), | |||
validate=dict(default=None, type='str') | |||
) | |||
# the AnsibleModule object will be our abstraction working with Ansible | |||
# this includes instantiation, a couple of common attr would be the | |||
# args/params passed to the execution, as well as if the module | |||
# supports check mode | |||
module = AnsibleModule( | |||
argument_spec=module_args, | |||
add_file_common_args=True, | |||
supports_check_mode=True | |||
) | |||
params = module.params | |||
create = params['create'] | |||
merge = params['merge'] | |||
backup = params['backup'] | |||
dest = params['dest'] | |||
b_dest = to_bytes(dest, errors='surrogate_or_strict') | |||
if os.path.isdir(b_dest): | |||
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) | |||
conf = params['json'] | |||
if params['state'] == 'present': | |||
present(module, dest, conf, merge, create, backup) | |||
else: | |||
absent(module, dest, conf, backup) | |||
if __name__ == '__main__': | |||
main() | |||
@ -1,5 +0,0 @@
--- | |||
- name: Update | |||
jsonconfig: "dest='{{destination}}' json='{{jsonconfig}}' state={{(remove | default(false) | bool) | ternary('absent','present')}}" | |||
@ -1,27 +0,0 @@
--- | |||
- name: Download file if necessary | |||
when: source | regex_search('^https?://') | |||
get_url: "url={{source}} dest={{localdir}}/{{source | basename | regex_replace('\\?.*$','')}}" | |||
register: downloaded | |||
connection: local | |||
run_once: yes | |||
become: no | |||
- name: Figure out file source | |||
set_fact: | |||
compiledsource: "{{ (downloaded.skipped is defined) | ternary(source, downloaded.dest) }}" | |||
connection: local | |||
become: no | |||
- name: Extract file to destination | |||
when: compiledsource | regex_search('\\.(zip|tar|tar\\.gz|tgz|tb2|tbz|tbz2|tar\\.bz2|txz|tar\\.xz)$') | |||
register: extractcopy | |||
unarchive: | |||
src: "{{compiledsource}}" | |||
dest: "{{destination}}" | |||
- name: Copy non-zipped file to destination | |||
when: extractcopy.skipped is defined | |||
copy: "src='{{compiledsource}}' dest='{{destination}}'" | |||
@ -1,5 +0,0 @@
--- | |||
- name: start service | |||
service: "name={{service}} state=started" | |||
@ -1,20 +0,0 @@
--- | |||
- name: application service status | |||
command: "service {{service}} status" | |||
changed_when: false | |||
register: status | |||
- name: Result | |||
debug: var=status.stdout_lines | |||
#- name: tendermint service status | |||
# when: service != 'tendermint' | |||
# command: "service {{service}}-server status" | |||
# changed_when: false | |||
# register: tendermintstatus | |||
#- name: Result | |||
# when: service != 'tendermint' | |||
# debug: var=tendermintstatus.stdout_lines | |||
@ -1,5 +0,0 @@
--- | |||
- name: stop service | |||
service: "name={{service}} state=stopped" | |||
@ -1,3 +0,0 @@
--- | |||
destination: /etc/{{service}}/config.toml | |||
@ -1,386 +0,0 @@
#!/usr/bin/python | |||
ANSIBLE_METADATA = { | |||
'metadata_version': '1.1', | |||
'status': ['preview'], | |||
'supported_by': 'community' | |||
} | |||
DOCUMENTATION = ''' | |||
--- | |||
module: tomlconfig | |||
short_description: Ensure a particular configuration is added to a toml-formatted configuration file | |||
version_added: "2.4" | |||
description: | |||
- This module will add configuration to a toml-formatted configuration file. | |||
options: | |||
dest: | |||
description: | |||
- The file to modify. | |||
required: true | |||
aliases: [ name, destfile ] | |||
json: | |||
description: | |||
- The configuration in json format to apply. Either C(json) or C(toml) has to be present. | |||
required: false | |||
default: '{}' | |||
toml: | |||
description: | |||
- The configuration in toml format to apply. Either C(json) or C(toml) has to be present. | |||
default: '' | |||
merge: | |||
description: | |||
      - Used with C(state=present). If specified, it will merge the configuration. Otherwise | |||
        the configuration will be overwritten. | |||
required: false | |||
choices: [ "yes", "no" ] | |||
default: "yes" | |||
state: | |||
description: | |||
- Whether the configuration should be there or not. | |||
required: false | |||
choices: [ present, absent ] | |||
default: "present" | |||
create: | |||
description: | |||
- Used with C(state=present). If specified, the file will be created | |||
if it does not already exist. By default it will fail if the file | |||
is missing. | |||
required: false | |||
choices: [ "yes", "no" ] | |||
default: "no" | |||
backup: | |||
description: | |||
- Create a backup file including the timestamp information so you can | |||
get the original file back if you somehow clobbered it incorrectly. | |||
required: false | |||
choices: [ "yes", "no" ] | |||
default: "no" | |||
others: | |||
description: | |||
- All arguments accepted by the M(file) module also work here. | |||
required: false | |||
extends_documentation_fragment: | |||
- files | |||
- validate | |||
author: | |||
- "Greg Szabo (@greg-szabo)" | |||
''' | |||
EXAMPLES = ''' | |||
# Add a new section to a toml file | |||
- name: Add comment section | |||
tomlconfig: | |||
dest: /etc/config.toml | |||
json: '{ "comment": { "comment1": "mycomment" } }' | |||
# Rewrite a toml file with the configuration | |||
- name: Create or overwrite config.toml | |||
tomlconfig: | |||
dest: /etc/config.toml | |||
json: '{ "regedit": { "freshfile": true } }' | |||
merge: no | |||
create: yes | |||
''' | |||
RETURN = ''' | |||
changed: | |||
description: True if the configuration changed. | |||
type: bool | |||
msg: | |||
description: Description of the change | |||
type: str | |||
''' | |||
from ansible.module_utils.basic import AnsibleModule | |||
from ansible.module_utils.six import b | |||
from ansible.module_utils._text import to_bytes, to_native | |||
import tempfile | |||
import toml as pytoml | |||
import json | |||
import copy | |||
import os | |||
def write_changes(module, b_lines, dest): | |||
tmpfd, tmpfile = tempfile.mkstemp() | |||
f = os.fdopen(tmpfd, 'wb') | |||
f.writelines(b_lines) | |||
f.close() | |||
validate = module.params.get('validate', None) | |||
valid = not validate | |||
if validate: | |||
if "%s" not in validate: | |||
module.fail_json(msg="validate must contain %%s: %s" % (validate)) | |||
(rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict')) | |||
valid = rc == 0 | |||
if rc != 0: | |||
module.fail_json(msg='failed to validate: ' | |||
'rc:%s error:%s' % (rc, err)) | |||
if valid: | |||
module.atomic_move(tmpfile, | |||
to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'), | |||
unsafe_writes=module.params['unsafe_writes']) | |||
def check_file_attrs(module, changed, message, diff): | |||
file_args = module.load_file_common_arguments(module.params) | |||
if module.set_fs_attributes_if_different(file_args, False, diff=diff): | |||
if changed: | |||
message += " and " | |||
changed = True | |||
message += "ownership, perms or SE linux context changed" | |||
return message, changed | |||
#Merge dict d2 into dict d1 and return a new object | |||
def deepmerge(d1, d2): | |||
if d1 is None: | |||
return copy.deepcopy(d2) | |||
if d2 is None: | |||
return copy.deepcopy(d1) | |||
if d1 == d2: | |||
return copy.deepcopy(d1) | |||
if isinstance(d1, dict) and isinstance(d2, dict): | |||
        result = {}
        # set union works on both Python 2 and 3; d1.keys() + d2.keys()
        # fails on Python 3, where keys() returns a view.
        for key in set(d1) | set(d2):
da = db = None | |||
if key in d1: | |||
da = d1[key] | |||
if key in d2: | |||
db = d2[key] | |||
result[key] = deepmerge(da, db) | |||
return result | |||
else: | |||
return copy.deepcopy(d2) | |||
#Remove dict d2 from dict d1 and return a new object | |||
def deepdiff(d1, d2): | |||
if d1 is None or d2 is None: | |||
return None | |||
if d1 == d2: | |||
return None | |||
if isinstance(d1, dict) and isinstance(d2, dict): | |||
result = {} | |||
for key in d1.keys(): | |||
if key in d2: | |||
dd = deepdiff(d1[key],d2[key]) | |||
if dd is not None: | |||
result[key] = dd | |||
else: | |||
result[key] = d1[key] | |||
return result | |||
else: | |||
return None | |||
def present(module, dest, conf, jsonbool, merge, create, backup): | |||
diff = {'before': '', | |||
'after': '', | |||
'before_header': '%s (content)' % dest, | |||
'after_header': '%s (content)' % dest} | |||
b_dest = to_bytes(dest, errors='surrogate_or_strict') | |||
if not os.path.exists(b_dest): | |||
if not create: | |||
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) | |||
b_destpath = os.path.dirname(b_dest) | |||
if not os.path.exists(b_destpath) and not module.check_mode: | |||
os.makedirs(b_destpath) | |||
b_lines = [] | |||
else: | |||
f = open(b_dest, 'rb') | |||
b_lines = f.readlines() | |||
f.close() | |||
lines = to_native(b('').join(b_lines)) | |||
if module._diff: | |||
diff['before'] = lines | |||
    tomlconfig = pytoml.loads(lines)
    # Parse the json parameter with json.loads rather than eval: eval cannot
    # handle JSON literals such as 'true' or 'null', and it would evaluate
    # arbitrary expressions from the caller.
    if jsonbool:
        config = json.loads(conf)
    else:
        config = pytoml.loads(conf)
if not isinstance(config, dict): | |||
if jsonbool: | |||
module.fail_json(msg="Invalid value in json parameter: {0}".format(config)) | |||
else: | |||
module.fail_json(msg="Invalid value in toml parameter: {0}".format(config)) | |||
b_lines_new = b_lines | |||
msg = '' | |||
changed = False | |||
if not merge: | |||
if tomlconfig != config: | |||
b_lines_new = to_bytes(pytoml.dumps(config)) | |||
msg = 'config overwritten' | |||
changed = True | |||
else: | |||
mergedconfig = deepmerge(tomlconfig,config) | |||
if tomlconfig != mergedconfig: | |||
b_lines_new = to_bytes(pytoml.dumps(mergedconfig)) | |||
msg = 'config merged' | |||
changed = True | |||
if module._diff: | |||
diff['after'] = to_native(b('').join(b_lines_new)) | |||
backupdest = "" | |||
if changed and not module.check_mode: | |||
if backup and os.path.exists(b_dest): | |||
backupdest = module.backup_local(dest) | |||
write_changes(module, b_lines_new, dest) | |||
if module.check_mode and not os.path.exists(b_dest): | |||
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) | |||
attr_diff = {} | |||
msg, changed = check_file_attrs(module, changed, msg, attr_diff) | |||
attr_diff['before_header'] = '%s (file attributes)' % dest | |||
attr_diff['after_header'] = '%s (file attributes)' % dest | |||
difflist = [diff, attr_diff] | |||
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist) | |||
def absent(module, dest, conf, jsonbool, backup): | |||
b_dest = to_bytes(dest, errors='surrogate_or_strict') | |||
if not os.path.exists(b_dest): | |||
module.exit_json(changed=False, msg="file not present") | |||
msg = '' | |||
diff = {'before': '', | |||
'after': '', | |||
'before_header': '%s (content)' % dest, | |||
'after_header': '%s (content)' % dest} | |||
f = open(b_dest, 'rb') | |||
b_lines = f.readlines() | |||
f.close() | |||
    lines = to_native(b('').join(b_lines))
    tomlconfig = pytoml.loads(lines)
    if jsonbool:
        config = json.loads(conf)
    else:
        config = pytoml.loads(conf)
if not isinstance(config, dict): | |||
if jsonbool: | |||
module.fail_json(msg="Invalid value in json parameter: {0}".format(config)) | |||
else: | |||
module.fail_json(msg="Invalid value in toml parameter: {0}".format(config)) | |||
if module._diff: | |||
diff['before'] = to_native(b('').join(b_lines)) | |||
b_lines_new = b_lines | |||
msg = '' | |||
changed = False | |||
diffconfig = deepdiff(tomlconfig,config) | |||
if diffconfig is None: | |||
diffconfig = {} | |||
if tomlconfig != diffconfig: | |||
b_lines_new = to_bytes(pytoml.dumps(diffconfig)) | |||
msg = 'config removed' | |||
changed = True | |||
if module._diff: | |||
diff['after'] = to_native(b('').join(b_lines_new)) | |||
backupdest = "" | |||
if changed and not module.check_mode: | |||
if backup: | |||
backupdest = module.backup_local(dest) | |||
write_changes(module, b_lines_new, dest) | |||
attr_diff = {} | |||
msg, changed = check_file_attrs(module, changed, msg, attr_diff) | |||
attr_diff['before_header'] = '%s (file attributes)' % dest | |||
attr_diff['after_header'] = '%s (file attributes)' % dest | |||
difflist = [diff, attr_diff] | |||
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist) | |||
def main(): | |||
# define the available arguments/parameters that a user can pass to | |||
# the module | |||
module_args = dict( | |||
dest=dict(type='str', required=True), | |||
json=dict(default=None), | |||
toml=dict(default=None), | |||
merge=dict(type='bool', default=True), | |||
state=dict(default='present', choices=['absent', 'present']), | |||
create=dict(type='bool', default=False), | |||
backup=dict(type='bool', default=False), | |||
validate=dict(default=None, type='str') | |||
) | |||
# the AnsibleModule object will be our abstraction working with Ansible | |||
# this includes instantiation, a couple of common attr would be the | |||
# args/params passed to the execution, as well as if the module | |||
# supports check mode | |||
    module = AnsibleModule(
        argument_spec=module_args,
        mutually_exclusive=[['json', 'toml']],
        # Fail early if neither json nor toml was provided; the docs require
        # one of them.
        required_one_of=[['json', 'toml']],
        add_file_common_args=True,
        supports_check_mode=True
    )
params = module.params | |||
create = params['create'] | |||
merge = params['merge'] | |||
backup = params['backup'] | |||
dest = params['dest'] | |||
b_dest = to_bytes(dest, errors='surrogate_or_strict') | |||
if os.path.isdir(b_dest): | |||
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) | |||
par_json, par_toml, jsonbool = params['json'], params['toml'], False | |||
if par_json is None: | |||
conf = par_toml | |||
else: | |||
conf = par_json | |||
jsonbool = True | |||
if params['state'] == 'present': | |||
present(module, dest, conf, jsonbool, merge, create, backup) | |||
else: | |||
absent(module, dest, conf, jsonbool, backup) | |||
if __name__ == '__main__': | |||
main() | |||
@ -1,10 +0,0 @@
--- | |||
- name: Update config.toml with json | |||
when: jsonconfig is defined | |||
tomlconfig: "dest='{{destination}}' json='{{jsonconfig}}' state={{(remove | default(false) | bool) | ternary('absent','present')}}" | |||
- name: Update config.toml with toml | |||
when: tomlconfig is defined | |||
tomlconfig: "dest='{{destination}}' toml='{{tomlconfig}}' state={{(remove | default(false) | bool) | ternary('absent','present')}}" | |||
@ -1,10 +0,0 @@
- shell: "ethermint --datadir /etc/ethermint unsafe_reset_all" | |||
when: "service == 'ethermint'" | |||
become_user: ethermint | |||
- command: "{{service}} node unsafe_reset_all --home=/etc/{{service}}" | |||
become_user: "{{service}}" | |||
- file: "path=/etc/{{service}}/config/addrbook.json state=absent" | |||
@ -1,10 +0,0 @@
--- | |||
#variable "source" is required | |||
#variable "destination" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
gather_facts: no | |||
roles: | |||
- setfile | |||
@ -1,8 +0,0 @@
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- start | |||
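#Example run (inventory and playbook filename are hypothetical):
#  TF_VAR_TESTNET_NAME=testnet-servers ansible-playbook -i inventory start.yml -e service=basecoin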
@ -1,8 +0,0 @@
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- status | |||
@ -1,8 +0,0 @@
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- stop | |||
@ -1,8 +0,0 @@
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- tomlconfig | |||
@ -1,8 +0,0 @@
--- | |||
#Ubuntu 16.04 does not include Python 2 in the standard DigitalOcean image. This "patch" installs it so the rest of the Ansible playbooks can work properly.
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
gather_facts: no | |||
tasks: | |||
- raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) | |||
@ -1,11 +0,0 @@
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" | |||
roles: | |||
- stop | |||
- install | |||
- unsafe_reset | |||
- start | |||
@ -1,10 +0,0 @@
--- | |||
#variable "service" is required | |||
- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}" | |||
roles: | |||
- stop | |||
- install | |||
- start | |||
@ -1,3 +0,0 @@
cluster/networking.tf | |||
networking-output.tf | |||
@ -1,111 +0,0 @@
Using Terraform | |||
=============== | |||
This is a generic `Terraform <https://www.terraform.io/>`__ | |||
configuration that sets up DigitalOcean droplets. See the | |||
`terraform-digitalocean <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__ | |||
repository for the required files.
Prerequisites | |||
------------- | |||
- Install `HashiCorp Terraform <https://www.terraform.io>`__ on a Linux
  machine.
- Create a `DigitalOcean API | |||
token <https://cloud.digitalocean.com/settings/api/tokens>`__ with | |||
read and write capability. | |||
- Create a private/public key pair for SSH. This is needed to log onto | |||
your droplets as well as by Ansible to connect for configuration | |||
changes. | |||
- Set up the public SSH key at the `DigitalOcean security | |||
page <https://cloud.digitalocean.com/settings/security>`__. | |||
  See `this tutorial <https://www.digitalocean.com/community/tutorials/how-to-use-ssh-keys-with-digitalocean-droplets>`__.
- Find out your SSH key ID at DigitalOcean by running the command below
  on your Linux machine:
:: | |||
DO_API_TOKEN="<The API token received from DigitalOcean>" | |||
curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_API_TOKEN" "https://api.digitalocean.com/v2/account/keys" | |||
Initialization | |||
-------------- | |||
If this is your first time using Terraform, you have to initialize it by
running the command below. (Note: initialization can safely be run
multiple times.)
:: | |||
terraform init | |||
After initialization, it's good practice to create a new Terraform
environment for the droplets so they are always managed together.
:: | |||
TESTNET_NAME="testnet-servers" | |||
terraform env new "$TESTNET_NAME" | |||
Note that the ``terraform env`` command is only available in Terraform
``v0.9`` and up.
Execution | |||
--------- | |||
The command below will create 4 nodes in DigitalOcean. They will be
named ``testnet-servers-node0`` to ``testnet-servers-node3`` and they | |||
will be tagged as ``testnet-servers``. | |||
:: | |||
DO_API_TOKEN="<The API token received from DigitalOcean>" | |||
SSH_IDS="[ \"<The SSH ID received from the curl call above.>\" ]" | |||
terraform apply -var TESTNET_NAME="testnet-servers" -var servers=4 -var DO_API_TOKEN="$DO_API_TOKEN" -var ssh_keys="$SSH_IDS" | |||
Note: ``ssh_keys`` is a list of strings. You can add multiple keys. For | |||
example: ``["1234567","9876543"]``. | |||
Alternatively you can use the default settings. The default number of
servers is 4 and the testnet name is ``tf-testnet1``. Variables can also
be defined as environment variables instead of on the command line.
Environment variables that start with ``TF_VAR_`` will be translated
into the Terraform configuration. For example, the number of servers can
be overridden by setting the ``TF_VAR_servers`` variable.
:: | |||
TF_VAR_DO_API_TOKEN="<The API token received from DigitalOcean>" | |||
TF_VAR_TESTNET_NAME="testnet-servers" | |||
terraform apply
Security | |||
-------- | |||
DigitalOcean uses the root user by default on its droplets. This is fine
as long as SSH keys are used. However, some people still prefer to
disable root and use an alternative user to connect to the droplets,
then ``sudo`` from there. Terraform can do this, but it requires an SSH
agent running on the machine where Terraform is run, with one of the SSH
keys of the droplets added to the agent. (This will be needed for Ansible
too, so it's worth setting it up here. Check out the
`ansible <https://github.com/tendermint/tools/tree/master/ansible>`__
page for more information.) After setting up the SSH key, run
``terraform apply`` with ``-var noroot=true`` to create your droplets.
Terraform will create a user called ``ec2-user`` and move the SSH keys
over, disabling SSH login for root in the process. It also adds
``ec2-user`` to the sudoers file, so after logging in as ec2-user you
can ``sudo`` to ``root``.
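A minimal run with root login disabled looks like this (the token value
is a placeholder):

::

    DO_API_TOKEN="<The API token received from DigitalOcean>"
    terraform apply -var noroot=true -var DO_API_TOKEN="$DO_API_TOKEN"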
DigitalOcean announced firewalls, but the current version of Terraform
(0.9.8 as of this writing) does not support them yet. Fortunately it is
quite easy to set them up through the web interface (and not that bad
through the `RESTful
API <https://developers.digitalocean.com/documentation/v2/#firewalls>`__
either). When adding droplets to a firewall rule, you can use tags. All
droplets in a testnet are tagged with the testnet name, so it's enough
to reference the testnet name in the firewall rule; it is not necessary
to add the nodes one-by-one. Also, the firewall rule "remembers" the
testnet name tag, so if you change the servers but keep the name, the
firewall rules will still apply.
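As an untested sketch (the firewall name and rule values are
placeholders), a tag-based firewall allowing only SSH could be created
through the API like this:

::

    DO_API_TOKEN="<The API token received from DigitalOcean>"
    curl -X POST -H "Content-Type: application/json" \
        -H "Authorization: Bearer $DO_API_TOKEN" \
        -d '{"name":"testnet-servers-fw","tags":["testnet-servers"],"inbound_rules":[{"protocol":"tcp","ports":"22","sources":{"addresses":["0.0.0.0/0","::/0"]}}]}' \
        "https://api.digitalocean.com/v2/firewalls"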
@ -1,23 +0,0 @@
resource "digitalocean_tag" "cluster" { | |||
name = "${var.name}" | |||
} | |||
resource "digitalocean_droplet" "cluster" { | |||
name = "${var.name}-node${count.index}" | |||
image = "${var.image_id}" | |||
size = "${var.instance_size}" | |||
region = "${element(var.regions, count.index)}" | |||
ssh_keys = "${var.key_ids}" | |||
count = "${var.servers}" | |||
tags = ["${digitalocean_tag.cluster.id}"] | |||
  lifecycle {
prevent_destroy = false | |||
} | |||
connection { | |||
timeout = "30s" | |||
} | |||
} | |||
@ -1,25 +0,0 @@
// The cluster name | |||
output "name" { | |||
value = "${var.name}" | |||
} | |||
// The list of cluster instance IDs | |||
output "instances" { | |||
value = ["${digitalocean_droplet.cluster.*.id}"] | |||
} | |||
// The list of cluster instance private IPs | |||
output "private_ips" { | |||
value = ["${digitalocean_droplet.cluster.*.ipv4_address_private}"] | |||
} | |||
// The list of cluster instance public IPs | |||
output "public_ips" { | |||
value = ["${digitalocean_droplet.cluster.*.ipv4_address}"] | |||
} | |||
#// The list of cluster floating IPs | |||
#output "floating_ips" { | |||
# value = ["${digitalocean_floating_ip.cluster.*.ip_address}"] | |||
#} | |||
@ -1,17 +0,0 @@
resource "null_resource" "cluster" { | |||
count = "${ var.noroot ? var.servers : 0 }" | |||
connection { | |||
host = "${element(digitalocean_droplet.cluster.*.ipv4_address,count.index)}" | |||
} | |||
provisioner "remote-exec" { | |||
inline = [ | |||
"useradd -m -s /bin/bash ec2-user", | |||
"echo 'ec2-user ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/ec2-user", | |||
"cp -r /root/.ssh /home/ec2-user/.ssh", | |||
"chown -R ec2-user.ec2-user /home/ec2-user/.ssh", | |||
"chmod -R 700 /home/ec2-user/.ssh", | |||
"rm -rf /root/.ssh" | |||
] | |||
} | |||
} | |||
@ -1,35 +0,0 @@
variable "name" { | |||
description = "The cluster name, e.g cdn" | |||
} | |||
variable "image_id" { | |||
description = "Image ID" | |||
default = "ubuntu-16-04-x64" | |||
} | |||
variable "regions" { | |||
description = "Regions to launch in" | |||
type = "list" | |||
default = ["AMS2", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] | |||
} | |||
variable "key_ids" { | |||
description = "SSH keys to use on the nodes" | |||
type = "list" | |||
} | |||
variable "instance_size" { | |||
description = "The instance size to use" | |||
default = "2gb" | |||
} | |||
variable "servers" { | |||
description = "Desired instance count" | |||
default = 4 | |||
} | |||
variable "noroot" { | |||
description = "Set this variable to true, if you want SSH keys set for ec2-user instead of root." | |||
default = false | |||
} | |||
@ -1,64 +0,0 @@
#Terraform Configuration | |||
variable "DO_API_TOKEN" { | |||
description = "DigitalOcean Access Token" | |||
} | |||
variable "TESTNET_NAME" { | |||
description = "Name of the cluster/testnet" | |||
default = "tf-testnet1" | |||
} | |||
variable "ssh_keys" { | |||
description = "SSH keys provided in DigitalOcean to be used on the nodes" | |||
# curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_API_TOKEN" "https://api.digitalocean.com/v2/account/keys" | |||
default = [ | |||
"6259615", | |||
"7658963", | |||
"7668263", | |||
"7668264", | |||
"8036767", | |||
"8163311", | |||
"9495227", | |||
"10318834", | |||
"11435493" | |||
] | |||
} | |||
variable "servers" { | |||
description = "Number of nodes in cluster" | |||
default = "4" | |||
} | |||
variable "image" { | |||
description = "DigitalOcean image name" | |||
default = "ubuntu-16-04-x64" | |||
} | |||
variable "noroot" { | |||
description = "Set this variable to true, if you want SSH keys set for ec2-user instead of root." | |||
default = false | |||
} | |||
provider "digitalocean" { | |||
token = "${var.DO_API_TOKEN}" | |||
} | |||
module "cluster" { | |||
source = "./cluster" | |||
name = "${var.TESTNET_NAME}" | |||
key_ids = "${var.ssh_keys}" | |||
servers = "${var.servers}" | |||
noroot = "${var.noroot}" | |||
image_id = "${var.image}" | |||
} | |||
output "public_ips" { | |||
value = "${module.cluster.public_ips}" | |||
} | |||
#output "floating_ips" { | |||
# value = "${module.cluster.floating_ips}" | |||
#} | |||