diff --git a/.gitignore b/.gitignore index 71e4bd648..62495be0a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ */vendor */.glide +.terraform +terraform.tfstate +terraform.tfstate.backup +terraform.tfstate.d \ No newline at end of file diff --git a/ansible-tendermint/README.md b/ansible-tendermint/README.md deleted file mode 100644 index 569834982..000000000 --- a/ansible-tendermint/README.md +++ /dev/null @@ -1,151 +0,0 @@ -# Ansible role for Tendermint - -![Ansible plus Tendermint](img/a_plus_t.png) - -* [Requirements](#requirements) -* [Variables](#variables) -* [Handlers](#handlers) -* [Example playbook that configures a Tendermint on Ubuntu](#example-playbook-that-configures-a-tendermint-on-ubuntu) - -`ansible-tendermint` is an [ansible](http://www.ansible.com/) role which: - -* installs tendermint -* configures tendermint -* configures tendermint service - -## Requirements - -This role requires Ansible 2.0 or higher. - -## Variables - -Here is a list of all the default variables for this role, which are also -available in `defaults/main.yml`. - -``` -tendermint_version: 0.9.0 -tendermint_archive: "tendermint_{{tendermint_version}}_linux_amd64.zip" -tendermint_download: "https://s3-us-west-2.amazonaws.com/tendermint/{{tendermint_version}}/{{tendermint_archive}}" -tendermint_download_folder: /tmp - -tendermint_user: tendermint -tendermint_group: tendermint - -# Upstart start/stop conditions can vary by distribution and environment -tendermint_upstart_start_on: start on runlevel [345] -tendermint_upstart_stop_on: stop on runlevel [!345] -tendermint_manage_service: true -tendermint_use_upstart: true -tendermint_use_systemd: false -tendermint_upstart_template: "tendermint.conf.j2" -tendermint_systemd_template: "tendermint.systemd.j2" - -tendermint_home: /opt/tendermint -tendermint_node_port: 46656 -tendermint_rpc_port: 46657 -tendermint_proxy_app: "tcp://127.0.0.1:46658" -tendermint_node_laddr: "tcp://0.0.0.0:{{tendermint_node_port}}" -tendermint_rpc_laddr: "tcp://0.0.0.0:{{tendermint_rpc_port}}" -tendermint_seeds: "" -tendermint_fast_sync: true -tendermint_db_backend: leveldb -tendermint_log_level: notice -tendermint_genesis_file: "{{tendermint_home}}/genesis.json" -tendermint_abci: socket -tendermint_skip_upnp: false -tendermint_addrbook_file: "{{tendermint_home}}/addrbook.json" -tendermint_addrbook_strict: true -tendermint_pex_reactor: false -tendermint_priv_validator_file: "{{tendermint_home}}/priv_validator.json" -tendermint_db_dir: "{{tendermint_home}}/data" -tendermint_grpc_laddr: "" -tendermint_prof_laddr: "" -tendermint_cs_wal_file: "{{tendermint_db_dir}}/cs.wal/wal" -tendermint_cs_wal_light: false -tendermint_filter_peers: false -tendermint_block_size: 10000 -tendermint_block_part_size: 65536 -tendermint_disable_data_hash: false -# all timeouts are in milliseconds -tendermint_timeout_propose: 3000 -tendermint_timeout_propose_delta: 500 -tendermint_timeout_prevote: 1000 -tendermint_timeout_prevote_delta: 500 -tendermint_timeout_precommit: 1000 -tendermint_timeout_precommit_delta: 500 -tendermint_timeout_commit: 1000 -tendermint_skip_timeout_commit: false -tendermint_mempool_recheck: true -tendermint_mempool_recheck_empty: true -tendermint_mempool_broadcast: true -tendermint_mempool_wal_dir: "{{tendermint_db_dir}}/mempool.wal" - -tendermint_log_file: /var/log/tendermint.log - -tendermint_chain_id: mychain -tendermint_genesis_time: "{{ansible_date_time.iso8601_micro}}" -tendermint_validators: [] -``` - -## Handlers - -These are the handlers that are defined in 
`handlers/main.yml`. - -* `restart tendermint` -* `reload systemd` - -## Example playbook that configures a Tendermint on Ubuntu 14.04 - -``` ---- - -- hosts: all - vars: - tendermint_chain_id: MyAwesomeChain - tendermint_seeds: "172.13.0.1:46656,172.13.0.2:46656,172.13.0.3:46656" - roles: - - ansible-tendermint -``` - -This playbook will install Tendermint and will create all the -required directories. But **it won't start the Tendermint if no -validators were given**. - -You will need to collect validators public keys manually or using -`collect_public_keys.yml` given you have SSH access to all the nodes and set `tendermint_validators` variable: - -``` ---- - -- hosts: all - vars: - tendermint_chain_id: MyAwesomeChain - tendermint_seeds: "172.13.0.1:46656,172.13.0.2:46656,172.13.0.3:46656" - tendermint_validators: - - pub_key: - - 1 - - 1F017E488A6327FAFBBE092193B427912E117733DE6AF72150BF09AA58411E7F - amount: 10 - name: paris - roles: - - ansible-tendermint -``` - -### Example playbook that configures a Tendermint with in-proc dummy app - -``` ---- - -- hosts: all - vars: - tendermint_chain_id: MyAwesomeChain - tendermint_proxy_app: dummy - roles: - - ansible-tendermint -``` - -## Testing - -``` -vagrant up -``` diff --git a/ansible-tendermint/ansible.cfg b/ansible-tendermint/ansible.cfg deleted file mode 100644 index 613d83b12..000000000 --- a/ansible-tendermint/ansible.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[defaults] -roles_path = ../ diff --git a/ansible-tendermint/collect_public_keys.yml b/ansible-tendermint/collect_public_keys.yml deleted file mode 100644 index c60fbc0c1..000000000 --- a/ansible-tendermint/collect_public_keys.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Outputs validator public keys from all the nodes to help you form genesis file - -- hosts: all - vars: - tendermint_home: /opt/tendermint - tasks: - - shell: cat "{{tendermint_home}}/priv_validator.json" | jq ".pub_key[1]" - register: pk - - - debug: var=pk.stdout_lines diff --git a/ansible-tendermint/defaults/main.yml b/ansible-tendermint/defaults/main.yml deleted file mode 100644 index 7dd7e281e..000000000 --- a/ansible-tendermint/defaults/main.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -tendermint_version: 0.9.0 -tendermint_archive: "tendermint_{{tendermint_version}}_linux_amd64.zip" -tendermint_download: "https://s3-us-west-2.amazonaws.com/tendermint/{{tendermint_version}}/{{tendermint_archive}}" -tendermint_download_folder: /tmp - -tendermint_user: tendermint -tendermint_group: tendermint - -# Upstart start/stop conditions can vary by distribution and environment -tendermint_upstart_start_on: start on runlevel [345] -tendermint_upstart_stop_on: stop on runlevel [!345] -tendermint_manage_service: true -tendermint_use_upstart: true -tendermint_use_systemd: false -tendermint_upstart_template: "tendermint.conf.j2" -tendermint_systemd_template: "tendermint.systemd.j2" - -tendermint_home: /opt/tendermint -tendermint_node_port: 46656 -tendermint_rpc_port: 46657 -tendermint_proxy_app: "tcp://127.0.0.1:46658" -tendermint_node_laddr: "tcp://0.0.0.0:{{tendermint_node_port}}" -tendermint_rpc_laddr: "tcp://0.0.0.0:{{tendermint_rpc_port}}" -tendermint_seeds: "" -tendermint_fast_sync: true -tendermint_db_backend: leveldb -tendermint_log_level: notice -tendermint_genesis_file: "{{tendermint_home}}/genesis.json" -tendermint_abci: socket -tendermint_skip_upnp: false -tendermint_addrbook_file: "{{tendermint_home}}/addrbook.json" -tendermint_addrbook_strict: true -tendermint_pex_reactor: false -tendermint_priv_validator_file: 
"{{tendermint_home}}/priv_validator.json" -tendermint_db_dir: "{{tendermint_home}}/data" -tendermint_grpc_laddr: "" -tendermint_prof_laddr: "" -tendermint_cs_wal_file: "{{tendermint_db_dir}}/cs.wal/wal" -tendermint_cs_wal_light: false -tendermint_filter_peers: false -tendermint_block_size: 10000 -tendermint_block_part_size: 65536 -tendermint_disable_data_hash: false -# all timeouts are in milliseconds -tendermint_timeout_propose: 3000 -tendermint_timeout_propose_delta: 500 -tendermint_timeout_prevote: 1000 -tendermint_timeout_prevote_delta: 500 -tendermint_timeout_precommit: 1000 -tendermint_timeout_precommit_delta: 500 -tendermint_timeout_commit: 1000 -tendermint_skip_timeout_commit: false -tendermint_mempool_recheck: true -tendermint_mempool_recheck_empty: true -tendermint_mempool_broadcast: true -tendermint_mempool_wal_dir: "{{tendermint_db_dir}}/mempool.wal" - -tendermint_log_file: /var/log/tendermint.log - -tendermint_chain_id: mychain -# FIXME https://github.com/tendermint/tools/issues/22 -tendermint_genesis_time: "0001-01-01T00:00:00.000Z" -tendermint_validators: [] diff --git a/ansible-tendermint/handlers/main.yml b/ansible-tendermint/handlers/main.yml deleted file mode 100644 index dab7e901e..000000000 --- a/ansible-tendermint/handlers/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- name: reload systemd - become: true - command: systemctl daemon-reload - -- name: restart tendermint - action: service name=tendermint state=restarted enabled=yes - when: tendermint_manage_service diff --git a/ansible-tendermint/tasks/install.yml b/ansible-tendermint/tasks/install.yml deleted file mode 100644 index 49fe6d0d7..000000000 --- a/ansible-tendermint/tasks/install.yml +++ /dev/null @@ -1,123 +0,0 @@ ---- -- name: update apt - apt: > - update_cache=yes - cache_valid_time=3600 - when: ansible_os_family == "Debian" - -- name: install deps (Ubuntu) - apt: > - pkg={{item}} - state=installed - with_items: - - unzip - - jq - when: ansible_os_family == "Debian" - -- name: install deps (RHEL) - yum: > - pkg={{item}} - state=installed - with_items: - - unzip - - jq - when: ansible_os_family == "RedHat" - -- name: create group - group: > - name={{tendermint_group}} - state=present - system=yes - register: tendermint_group_created - -- name: create user - user: > - home={{tendermint_home}} - name={{tendermint_user}} - group={{tendermint_group}} - system=yes - when: tendermint_group_created|changed - -- name: create directory - file: > - path={{tendermint_home}} - state=directory - owner={{tendermint_user}} - group={{tendermint_group}} - mode=0755 - -# Check before creating log dir to prevent aggressively overwriting permissions -- name: check for log directory - stat: > - path={{ tendermint_log_file|dirname }} - register: logdir - -- name: create log directory if it does not exist - file: > - state=directory - path={{ tendermint_log_file|dirname }} - owner={{ tendermint_user }} - group={{ tendermint_group }} - when: not logdir.stat.exists - -- name: touch the log file - file: > - state=touch - path={{ tendermint_log_file }} - owner={{ tendermint_user }} - group={{ tendermint_group }} - changed_when: false - -- name: copy and unpack binary - unarchive: > - src={{tendermint_download}} - dest=/usr/local/bin - remote_src=true - mode=0755 - creates=/usr/local/bin/tendermint - -- name: generate private validator - shell: tendermint gen_validator >> priv_validator.json - args: - chdir: "{{tendermint_home}}" - creates: priv_validator.json - -- name: copy config - template: > - src=config.toml.j2 - 
dest={{tendermint_home}}/config.toml - owner={{tendermint_user}} - group={{tendermint_group}} - mode=0644 - register: config_was_copied - # notify: restart tendermint - -- name: set ownership - file: > - state=directory - path={{tendermint_home}} - owner={{tendermint_user}} - group={{tendermint_group}} - recurse=yes - when: config_was_copied|changed - -- name: copy upstart script - template: > - src={{tendermint_upstart_template}} - dest=/etc/init/tendermint.conf - owner=root - group=root - mode=0644 - when: tendermint_use_upstart - # notify: restart tendermint - -- name: copy systemd script - template: > - src={{tendermint_systemd_template}} - dest=/etc/systemd/system/tendermint.service - owner=root - group=root - mode=0644 - when: tendermint_use_systemd - notify: - - reload systemd diff --git a/ansible-tendermint/tasks/main.yml b/ansible-tendermint/tasks/main.yml deleted file mode 100644 index cabf64bd4..000000000 --- a/ansible-tendermint/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: gather OS specific variables - include_vars: "{{ item }}" - with_first_found: - - "{{ ansible_os_family }}-{{ ansible_distribution_major_version}}.yml" - - "{{ ansible_os_family }}.yml" -- include: install.yml -- include: start.yml diff --git a/ansible-tendermint/tasks/start.yml b/ansible-tendermint/tasks/start.yml deleted file mode 100644 index 5bbbc04a1..000000000 --- a/ansible-tendermint/tasks/start.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- - -- name: copy genesis file - template: > - src=genesis.json.j2 - dest={{tendermint_home}}/genesis.json - owner={{tendermint_user}} - group={{tendermint_group}} - mode=0644 - register: genesis_file_copied - -- name: get validators number from genesis file - shell: cat {{tendermint_home}}/genesis.json | jq ".validators | length" - register: num_validators - when: genesis_file_copied|changed - -- name: restart tendermint if number of validators greater than 0 - shell: echo "restarting ..." 
- when: genesis_file_copied|changed and num_validators.stdout|int > 0 - notify: restart tendermint - -- service: > - name=tendermint - state=started - enabled=yes - when: tendermint_manage_service diff --git a/ansible-tendermint/templates/config.toml.j2 b/ansible-tendermint/templates/config.toml.j2 deleted file mode 100644 index 3743a195b..000000000 --- a/ansible-tendermint/templates/config.toml.j2 +++ /dev/null @@ -1,38 +0,0 @@ -proxy_app = "{{tendermint_proxy_app}}" -node_laddr = "{{tendermint_node_laddr}}" -seeds = "{{tendermint_seeds}}" -fast_sync = {{tendermint_fast_sync | bool | lower}} -db_backend = "{{tendermint_db_backend}}" -log_level = "{{tendermint_log_level}}" -rpc_laddr = "{{tendermint_rpc_laddr}}" - -genesis_file = "{{tendermint_genesis_file}}" -abci = "{{tendermint_abci}}" -skip_upnp = {{tendermint_skip_upnp | bool | lower}} -addrbook_file = "{{tendermint_addrbook_file}}" -addrbook_strict = {{tendermint_addrbook_strict | bool | lower}} -pex_reactor = {{tendermint_pex_reactor | bool | lower}} -priv_validator_file = "{{tendermint_priv_validator_file}}" - -db_dir = "{{tendermint_db_dir}}" -grpc_laddr = "{{tendermint_grpc_laddr}}" -prof_laddr = "{{tendermint_prof_laddr}}" -cs_wal_file = "{{tendermint_cs_wal_file}}" -cs_wal_light = {{tendermint_cs_wal_light | bool | lower}} -filter_peers = {{tendermint_filter_peers | bool | lower}} - -block_size = {{tendermint_block_size}} -block_part_size = {{tendermint_block_part_size}} -disable_data_hash = {{tendermint_disable_data_hash | bool | lower}} -timeout_propose = {{tendermint_timeout_propose}} -timeout_propose_delta = {{tendermint_timeout_propose_delta}} -timeout_prevote = {{tendermint_timeout_prevote}} -timeout_prevote_delta = {{tendermint_timeout_prevote_delta}} -timeout_precommit = {{tendermint_timeout_precommit}} -timeout_precommit_delta = {{tendermint_timeout_precommit_delta}} -timeout_commit = {{tendermint_timeout_commit}} -skip_timeout_commit = {{tendermint_skip_timeout_commit | bool | lower}} -mempool_recheck = {{tendermint_mempool_recheck | bool | lower}} -mempool_recheck_empty = {{tendermint_mempool_recheck_empty | bool | lower}} -mempool_broadcast = {{tendermint_mempool_broadcast | bool | lower}} -mempool_wal_dir = "{{tendermint_mempool_wal_dir}}" diff --git a/ansible-tendermint/templates/genesis.json.j2 b/ansible-tendermint/templates/genesis.json.j2 deleted file mode 100644 index ea438833e..000000000 --- a/ansible-tendermint/templates/genesis.json.j2 +++ /dev/null @@ -1,6 +0,0 @@ -{ - "app_hash": "", - "chain_id": "{{tendermint_chain_id}}", - "genesis_time": "{{tendermint_genesis_time}}", - "validators": {{tendermint_validators | to_nice_json}} -} diff --git a/ansible-tendermint/templates/tendermint.conf.j2 b/ansible-tendermint/templates/tendermint.conf.j2 deleted file mode 100644 index 673f0246a..000000000 --- a/ansible-tendermint/templates/tendermint.conf.j2 +++ /dev/null @@ -1,16 +0,0 @@ -# Tendermint (Upstart unit) -description "Tendermint" - -start on {{ tendermint_upstart_start_on }} -stop on {{ tendermint_upstart_stop_on }} - -script - # Use su to become tendermint user non-interactively on old Upstart versions (see http://superuser.com/a/234541/76168) - exec su -s /bin/sh -c 'TMROOT={{tendermint_home}} exec "$0" "$@" >> {{ tendermint_log_file }} 2>&1' tendermint -- /usr/local/bin/tendermint node \ - --moniker={{inventory_hostname}} -end script - -respawn -respawn limit 10 10 - -kill timeout 10 diff --git a/ansible-tendermint/templates/tendermint.systemd.j2 b/ansible-tendermint/templates/tendermint.systemd.j2 
deleted file mode 100644 index e15803505..000000000 --- a/ansible-tendermint/templates/tendermint.systemd.j2 +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Tendermint -Requires=network-online.target -After=network-online.target - -[Service] -Environment="TMROOT={{tendermint_home}}" -Restart=on-failure -User={{ tendermint_user }} -Group={{ tendermint_group }} -PermissionsStartOnly=true -ExecStart=/usr/local/bin/tendermint node --moniker={{inventory_hostname}} -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGINT - -[Install] -WantedBy=multi-user.target diff --git a/ansible-tendermint/test.yml b/ansible-tendermint/test.yml deleted file mode 100644 index 4b11dd24d..000000000 --- a/ansible-tendermint/test.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- - -- hosts: all - roles: - - ansible-tendermint - vars: - tendermint_proxy_app: dummy - # tendermint_validators: - # - pub_key: - # - 1 - # - 1F017E488A6327FAFBBE092193B427912E117733DE6AF72150BF09AA58411E7F - # amount: 10 - # name: paris diff --git a/ansible-tendermint/vars/Debian.yml b/ansible-tendermint/vars/Debian.yml deleted file mode 100644 index 642304b92..000000000 --- a/ansible-tendermint/vars/Debian.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -consul_upstart_start_on: (local-filesystems and net-device-up IFACE!=lo) -consul_upstart_stop_on: runlevel [016] diff --git a/ansible-tendermint/vars/RedHat.yml b/ansible-tendermint/vars/RedHat.yml deleted file mode 100644 index 4c2bb72b2..000000000 --- a/ansible-tendermint/vars/RedHat.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -consul_upstart_start_on: (runlevel [345] and started network) -consul_upstart_stop_on: (runlevel [!345] or stopping network) diff --git a/ansible-tendermint/.gitignore b/ansible/.gitignore similarity index 100% rename from ansible-tendermint/.gitignore rename to ansible/.gitignore diff --git a/ansible-tendermint/LICENSE b/ansible/LICENSE similarity index 100% rename from ansible-tendermint/LICENSE rename to ansible/LICENSE diff --git a/ansible/README.rst b/ansible/README.rst new file mode 100644 index 000000000..24ca6f878 --- /dev/null +++ b/ansible/README.rst @@ -0,0 +1,291 @@ +Using Ansible +============= + +.. figure:: assets/a_plus_t.png + :alt: Ansible plus Tendermint + + Ansible plus Tendermint + +The playbooks in `our ansible directory `__ +run ansible `roles `__ which: + +- install and configure basecoin or ethermint +- start/stop basecoin or ethermint and reset their configuration + +Prerequisites +------------- + +- Ansible 2.0 or higher +- SSH key to the servers + +Optional for DigitalOcean droplets: + +- DigitalOcean API Token +- python dopy package + +For a description on how to get a DigitalOcean API Token, see the explanation +in the `using terraform tutorial <./terraform-digitalocean.html>`__. + +Optional for Amazon AWS instances: + +- Amazon AWS API access key ID and secret access key. + +The cloud inventory scripts come from the ansible team at their +`GitHub `__ page. You can get the +latest version from the ``contrib/inventory`` folder. + +Setup +----- + +Ansible requires a "command machine" or "local machine" or "orchestrator +machine" to run on. This can be your laptop or any machine that can run +ansible. (It does not have to be part of the cloud network that hosts +your servers.) + +Use the official `Ansible installation +guide `__ to +install Ansible. 
Here are a few examples of basic installation commands: + +Ubuntu/Debian: + +:: + + sudo apt-get install ansible + +CentOS/RedHat: + +:: + + sudo yum install epel-release + sudo yum install ansible + +Mac OSX: If you have `Homebrew `__ installed, then it's: + +:: + + brew install ansible + +If not, you can install it using ``pip``: + +:: + + sudo easy_install pip + sudo pip install ansible + +To make life easier, you can start an SSH Agent and load your SSH +key(s). This way ansible will have an uninterrupted way of connecting to +your servers. + +:: + + ssh-agent > ~/.ssh/ssh.env + source ~/.ssh/ssh.env + + ssh-add private.key + +Subsequently, as long as the agent is running, you can use +``source ~/.ssh/ssh.env`` to load the keys to the current session. Note: +On Mac OSX, you can add the ``-K`` option to ssh-add to store the +passphrase in your keychain. The security of this feature is debated but +it is convenient. + +Optional cloud dependencies +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you are using a cloud provider to host your servers, you need the +below dependencies installed on your local machine. + +DigitalOcean inventory dependencies: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ubuntu/Debian: + +:: + + sudo apt-get install python-pip + sudo pip install dopy + +CentOS/RedHat: + +:: + + sudo yum install python-pip + sudo pip install dopy + +Mac OSX: + +:: + + sudo pip install dopy + +Amazon AWS inventory dependencies: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ubuntu/Debian: + +:: + + sudo apt-get install python-boto + +CentOS/RedHat: + +:: + + sudo yum install python-boto + +Mac OSX: + +:: + + sudo pip install boto + +Refreshing the DigitalOcean inventory +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you just finished creating droplets, the local DigitalOcean inventory +cache is not up-to-date. To refresh it, run: + +:: + + DO_API_TOKEN="" + python -u inventory/digital_ocean.py --refresh-cache 1> /dev/null + +Refreshing the Amazon AWS inventory +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you just finished creating Amazon AWS EC2 instances, the local AWS +inventory cache is not up-to-date. To refresh it, run: + +:: + + AWS_ACCESS_KEY_ID='' + AWS_SECRET_ACCESS_KEY='' + python -u inventory/ec2.py --refresh-cache 1> /dev/null + +Note: you don't need the access key and secret key set, if you are +running ansible on an Amazon AMI instance with the proper IAM +permissions set. + +Running the playbooks +--------------------- + +The playbooks are locked down to only run if the environment variable +``TF_VAR_TESTNET_NAME`` is populated. This is a precaution so you don't +accidentally run the playbook on all your servers. + +The variable ``TF_VAR_TESTNET_NAME`` contains the testnet name which +ansible translates into an ansible group. If you used Terraform to +create the servers, it was the testnet name used there. + +If the playbook cannot connect to the servers because of public key +denial, your SSH Agent is not set up properly. Alternatively you can add +the SSH key to ansible using the ``--private-key`` option. + +If you need to connect to the nodes as root but your local username is +different, use the ansible option ``-u root`` to tell ansible to connect +to the servers and authenticate as the root user. + +If you secured your server and you need to ``sudo`` for root access, use +the ``-b`` or ``--become`` option to tell ansible to sudo to root +after connecting to the server.
In the Terraform-DigitalOcean example, +if you created the ec2-user by adding the ``noroot=true`` option (or if +you are simply on Amazon AWS), you need to add the options +``-u ec2-user -b`` to ansible to tell it to connect as the ec2-user and +then sudo to root to run the playbook. + +DigitalOcean +~~~~~~~~~~~~ + +:: + + DO_API_TOKEN="" + TF_VAR_TESTNET_NAME="testnet-servers" + ansible-playbook -i inventory/digital_ocean.py install.yml -e service=basecoin + +Amazon AWS +~~~~~~~~~~ + +:: + + AWS_ACCESS_KEY_ID='' + AWS_SECRET_ACCESS_KEY='' + TF_VAR_TESTNET_NAME="testnet-servers" + ansible-playbook -i inventory/ec2.py install.yml -e service=basecoin + +Installing custom versions +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default ansible installs the tendermint, basecoin or ethermint binary +versions from the latest release in the repository. If you build your +own version of the binaries, you can tell ansible to install that +instead. + +:: + + GOPATH="" + go get -u github.com/tendermint/basecoin/cmd/basecoin + + DO_API_TOKEN="" + TF_VAR_TESTNET_NAME="testnet-servers" + ansible-playbook -i inventory/digital_ocean.py install.yml -e service=basecoin -e release_install=false + +Alternatively you can change the variable settings in +``group_vars/all``. + +Other commands and roles +------------------------ + +There are a few extra playbooks to make life easier managing your servers. + +- install.yml - Install basecoin or ethermint applications. (Tendermint + gets installed automatically.) Use the ``service`` parameter to + define which application to install. Defaults to ``basecoin``. +- reset.yml - Stop the application, reset the configuration and data, + then start the application again. You need to pass + ``-e service=``, like ``-e service=basecoin``. It will + restart the underlying tendermint application too. +- restart.yml - Restart a service on all nodes. You need to pass + ``-e service=``, like ``-e service=basecoin``. It will + restart the underlying tendermint application too. +- stop.yml - Stop the application. You need to pass + ``-e service=``. +- status.yml - Check the service status and print it. You need to pass + ``-e service=``. +- start.yml - Start the application. You need to pass + ``-e service=``. +- ubuntu16-patch.yml - Ubuntu 16.04 does not have the minimum required + python package installed to be able to run ansible. If you are using + ubuntu, run this playbook first on the target machines. This will + install the python package that is required for ansible to work + correctly on the remote nodes. +- upgrade.yml - Upgrade the ``service`` on your testnet. It will stop + the service and restart it at the end. It will only work if the + upgraded version is backward compatible with the installed version. +- upgrade-reset.yml - Upgrade the ``service`` on your testnet and reset + the database. It will stop the service and restart it at the end. It + will work for upgrades where the new version is not + backward-compatible with the installed version - however it will + reset the testnet to its default. + +The roles are self-sufficient under the ``roles/`` folder. + +- install - install the application defined in the ``service`` + parameter. It can install release packages and update them with + custom-compiled binaries. +- unsafe\_reset - delete the database for a service, including the + tendermint database. +- config - configure the application defined in ``service``. It also + configures the underlying tendermint service. Check + ``group_vars/all`` for options.
+- stop - stop an application. Requires the ``service`` parameter set. +- status - check the status of an application. Requires the ``service`` + parameter set. +- start - start an application. Requires the ``service`` parameter set. + +Default variables +----------------- + +Default variables are documented under ``group_vars/all``. You can the +parameters there to deploy a previously created genesis.json file +(instead of dynamically creating it) or if you want to deploy custom +built binaries instead of deploying a released version. diff --git a/ansible-tendermint/Vagrantfile b/ansible/Vagrantfile similarity index 87% rename from ansible-tendermint/Vagrantfile rename to ansible/Vagrantfile index bc6f9fde0..117d7e18d 100644 --- a/ansible-tendermint/Vagrantfile +++ b/ansible/Vagrantfile @@ -13,8 +13,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.box = "ubuntu/trusty64" config.vm.provision :ansible do |ansible| - ansible.playbook = "test.yml" - ansible.verbose = 'vv' - ansible.sudo = true + ansible.playbook = "install.yml" end end diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 000000000..045c1ea60 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,4 @@ +[defaults] +retry_files_enabled = False +host_key_checking = False + diff --git a/ansible/app_options_files/dev_money b/ansible/app_options_files/dev_money new file mode 100644 index 000000000..2b204dc1d --- /dev/null +++ b/ansible/app_options_files/dev_money @@ -0,0 +1,13 @@ + "accounts": [{ + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "coins": [ + { + "denom": "mycoin", + "amount": 9007199254740992 + } + ] + }] + diff --git a/ansible/app_options_files/empty b/ansible/app_options_files/empty new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/ansible/app_options_files/empty @@ -0,0 +1 @@ + diff --git a/ansible/app_options_files/kingethan b/ansible/app_options_files/kingethan new file mode 100644 index 000000000..2245ffd4d --- /dev/null +++ b/ansible/app_options_files/kingethan @@ -0,0 +1,12 @@ + "accounts": [{ + "address": "C13B2A17030E416D0C83B7FF7CDFFB2E2353FA11", + "coins": [ + { + "denom": "mycoin", + "amount": 9007199254740992 + } + ] + }], + "plugin_options": [ + "coin/issuer", {"app": "sigs", "addr": "C13B2A17030E416D0C83B7FF7CDFFB2E2353FA11"} + ] diff --git a/ansible/app_options_files/public_testnet b/ansible/app_options_files/public_testnet new file mode 100644 index 000000000..8a6732fe0 --- /dev/null +++ b/ansible/app_options_files/public_testnet @@ -0,0 +1,136 @@ + "accounts": [ + { + "name": "relay", + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "coins": [ + { + "denom": "mycoin", + "amount": 9007199254740992 + } + ] + }, + { + "name": "anton", + "address": "40CC622438D3E42148A1FFD3A27C07C100F8FA3D", + "pub_key": { + "type": "ed25519", + "data": "97BD389257763747488803DC686A8819C685936A3CD275D54EABEE51E0117EE6" + }, + "coins": [ + { + "denom": "anton", + "amount": 9007199254740992 + }, + { + "denom": "tank", + "amount": 99 + } + ] + }, + { + "name": "adrian", + "address": "98F28277FA8C512968BBDE443F5DB27AC743F814", + "pub_key": { + "type": "ed25519", + "data": "2E7EAB9E4C93D3657A63E063D6ABB851596BA97ED9F4EB9A2FC714043DC9685D" + }, + "coins": [ + { + "denom": "axi", + "amount": 1928936473812 + }, + { + "denom": "bear", + "amount": 42 + } + ] + }, + { + 
"name": "bucky", + "address": "2B24DEE2364762300168DF19B6C18BCE2D399EA2", + "pub_key": { + "type": "ed25519", + "data": "37BF9D6F8D66DC3FAECD330291CF08DF52E2F646B01F7E43BC0A746AED927A62" + }, + "coins": [ + { + "denom": "buckyball", + "amount": 20000000000 + }, + { + "denom": "bumblebee", + "amount": 100 + }, + { + "denom": "french", + "amount": 10 + }, + { + "denom": "frey", + "amount": 4838271727204 + } + ] + }, + { + "name": "rigel", + "address": "3A8BE0EEE474C8D197AA14312B59ACB2D9DFECDE", + "pub_key": { + "type": "ed25519", + "data": "C7CAC198E3999C4DD9F25C575C4C10587C083F73B1E985AC65658992DE6B552E" + }, + "coins": [ + { + "denom": "radio", + "amount": 50 + }, + { + "denom": "tv", + "amount": 3478765434568 + } + ] + }, + { + "name": "gregkey", + "address": "B01C264BFE9CBD45458256E613A6F07061A3A6B6", + "pubkey": { + "type": "ed25519", + "data": "E1FFBD187FA2A922CE1B367532CEAC1AD8E606D576AB0D2E2CAA7EC6B7DAC10F" + }, + "coins": [ + { + "denom": "mycoin", + "amount": 3478765434568 + }, + { + "denom": "buckyball", + "amount": 8367251830291 + }, + { + "denom": "playmoney", + "amount": 9999999999999 + } + ] + }, + { + "name": "shadow", + "address": "B140EFAAE6D5CA1C8E98814C557AF7112E3B9EAE", + "pub_key": { + "type": "ed25519", + "data": "F492282705DF29ACC3BB803D543B7BF98C8080FA28AE85B62B45827EA9DA8167" + }, + "coins": [ + { + "denom": "shadow", + "amount": 53712836452781 + }, + { + "denom": "unit", + "amount": 1 + } + ] + } + ] diff --git a/ansible/app_options_files/relay b/ansible/app_options_files/relay new file mode 100644 index 000000000..d7322adce --- /dev/null +++ b/ansible/app_options_files/relay @@ -0,0 +1,16 @@ + "accounts": [ + { + "name": "relay", + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "coins": [ + { + "denom": "mycoin", + "amount": 9007199254740992 + } + ] + } + ] diff --git a/ansible-tendermint/img/a_plus_t.png b/ansible/assets/a_plus_t.png similarity index 100% rename from ansible-tendermint/img/a_plus_t.png rename to ansible/assets/a_plus_t.png diff --git a/ansible/group_vars/all b/ansible/group_vars/all new file mode 100644 index 000000000..ea8ff4857 --- /dev/null +++ b/ansible/group_vars/all @@ -0,0 +1,41 @@ +--- +### +### Tendermint installation +### + +## This file shows and sets the global defaults for the role variables. + +## +## install +## + +## service variable defines which service is going to be managed. It can be set to basecoin or ethermint. +service: basecoin + +## release_install indicates if the install role should look for a privately built binary after installing the service package. If set to false, the privately built binary in the GOPATH is going to override the binary on the target systems. +#release_install: true + +## binary stores the path to the privately built service binary, if there is any. By default it uses the GOPATH environment variable. +#binary: "{{ lookup('env','GOPATH') | default('') }}/bin/{{service}}" + +## +## config +## + +## tendermint_genesis_file contains the path and filename to a previously generated genesis.json for the underlying tendermint service. If undefined, the json file is dynamically generated. +#tendermint_genesis_file: "" + +## service_genesis_file contains the path and filename to a previously generated genesis.json for the service. If undefined, the json file is dynamically generated. 
+#service_genesis_file: "" + +## testnet_name is used to find seed IPs and public keys and set the chain_id in genesis.json and config.toml +#testnet_name: testnet1 + +## app_options_file contains a path and filename which will be included in a generated service genesis.json file on all nodes. The content will be dumped into the app_options dictionary in the service genesis.json.. +#app_options_file: "app_options_files/dev_money" + +## Internal use only. validators indicates if the nodes are validator nodes. The tendermint genesis.json will contain their public keys. +#validators: true + +## Internal use only. seeds contain the list of servers (with ports) that are validators in a testnet. Only effective if validators == false. If validators == true, then all nodes will be automatically included here. +#seeds: "" diff --git a/ansible/install.yml b/ansible/install.yml new file mode 100644 index 000000000..e46bd6398 --- /dev/null +++ b/ansible/install.yml @@ -0,0 +1,12 @@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + any_errors_fatal: "{{validators | default(true) | bool}}" + roles: + - install + - {role: generic-service, when: service == 'tendermint'} + - {role: config, testnet_name: "{{lookup('env','TF_VAR_TESTNET_NAME')}}"} + - start + diff --git a/ansible/inventory/COPYING b/ansible/inventory/COPYING new file mode 100644 index 000000000..10926e87f --- /dev/null +++ b/ansible/inventory/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. + diff --git a/ansible/inventory/digital_ocean.ini b/ansible/inventory/digital_ocean.ini new file mode 100644 index 000000000..b809554b2 --- /dev/null +++ b/ansible/inventory/digital_ocean.ini @@ -0,0 +1,34 @@ +# Ansible DigitalOcean external inventory script settings +# + +[digital_ocean] + +# The module needs your DigitalOcean API Token. +# It may also be specified on the command line via --api-token +# or via the environment variables DO_API_TOKEN or DO_API_KEY +# +#api_token = 123456abcdefg + + +# API calls to DigitalOcean may be slow. For this reason, we cache the results +# of an API call. Set this to the path you want cache files to be written to. +# One file will be written to this directory: +# - ansible-digital_ocean.cache +# +cache_path = /tmp + + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# +cache_max_age = 300 + +# Use the private network IP address instead of the public when available. 
+# +use_private_network = False + +# Pass variables to every group, e.g.: +# +# group_variables = { 'ansible_user': 'root' } +# +group_variables = {} diff --git a/ansible/inventory/digital_ocean.py b/ansible/inventory/digital_ocean.py new file mode 100755 index 000000000..24ba64370 --- /dev/null +++ b/ansible/inventory/digital_ocean.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python + +''' +DigitalOcean external inventory script +====================================== + +Generates Ansible inventory of DigitalOcean Droplets. + +In addition to the --list and --host options used by Ansible, there are options +for generating JSON of other DigitalOcean data. This is useful when creating +droplets. For example, --regions will return all the DigitalOcean Regions. +This information can also be easily found in the cache file, whose default +location is /tmp/ansible-digital_ocean.cache). + +The --pretty (-p) option pretty-prints the output for better human readability. + +---- +Although the cache stores all the information received from DigitalOcean, +the cache is not used for current droplet information (in --list, --host, +--all, and --droplets). This is so that accurate droplet information is always +found. You can force this script to use the cache with --force-cache. + +---- +Configuration is read from `digital_ocean.ini`, then from environment variables, +then and command-line arguments. + +Most notably, the DigitalOcean API Token must be specified. It can be specified +in the INI file or with the following environment variables: + export DO_API_TOKEN='abc123' or + export DO_API_KEY='abc123' + +Alternatively, it can be passed on the command-line with --api-token. + +If you specify DigitalOcean credentials in the INI file, a handy way to +get them into your environment (e.g., to use the digital_ocean module) +is to use the output of the --env option with export: + export $(digital_ocean.py --env) + +---- +The following groups are generated from --list: + - ID (droplet ID) + - NAME (droplet NAME) + - image_ID + - image_NAME + - distro_NAME (distribution NAME from image) + - region_NAME + - size_NAME + - status_STATUS + +For each host, the following variables are registered: + - do_backup_ids + - do_created_at + - do_disk + - do_features - list + - do_id + - do_image - object + - do_ip_address + - do_private_ip_address + - do_kernel - object + - do_locked + - do_memory + - do_name + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list + - do_status + - do_tags + - do_vcpus + - do_volume_ids + +----- +``` +usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] + [--droplets] [--regions] [--images] [--sizes] + [--ssh-keys] [--domains] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] + [--force-cache] + [--refresh-cache] + [--api-token API_TOKEN] + +Produce an Ansible Inventory file based on DigitalOcean credentials + +optional arguments: + -h, --help show this help message and exit + --list List all active Droplets as Ansible inventory + (default: True) + --host HOST Get all Ansible inventory variables about a specific + Droplet + --all List all DigitalOcean information as JSON + --droplets List Droplets as JSON + --regions List Regions as JSON + --images List Images as JSON + --sizes List Sizes as JSON + --ssh-keys List SSH keys as JSON + --domains List Domains as JSON + --pretty, -p Pretty-print results + --cache-path CACHE_PATH + Path to the cache files (default: .) 
+ --cache-max_age CACHE_MAX_AGE + Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache + --refresh-cache Force refresh of cache by making API requests to + DigitalOcean (default: False - use cache files) + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token +``` + +''' + +# (c) 2013, Evan Wies +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +import os +import sys +import re +import argparse +from time import time +import ConfigParser +import ast + +try: + import json +except ImportError: + import simplejson as json + +try: + from dopy.manager import DoManager +except ImportError as e: + sys.exit("failed=True msg='`dopy` library required for this script'") + + +class DigitalOceanInventory(object): + + ########################################################################### + # Main execution path + ########################################################################### + + def __init__(self): + ''' Main execution path ''' + + # DigitalOceanInventory data + self.data = {} # All DigitalOcean data + self.inventory = {} # Ansible Inventory + + # Define defaults + self.cache_path = '.' + self.cache_max_age = 0 + self.use_private_network = False + self.group_variables = {} + + # Read settings, environment variables, and CLI arguments + self.read_settings() + self.read_environment() + self.read_cli_args() + + # Verify credentials were set + if not hasattr(self, 'api_token'): + sys.stderr.write('''Could not find values for DigitalOcean api_token. 
+They must be specified via either ini file, command line argument (--api-token), +or environment variables (DO_API_TOKEN)\n''') + sys.exit(-1) + + # env command, show DigitalOcean credentials + if self.args.env: + print("DO_API_TOKEN=%s" % self.api_token) + sys.exit(0) + + # Manage cache + self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" + self.cache_refreshed = False + + if self.is_cache_valid(): + self.load_from_cache() + if len(self.data) == 0: + if self.args.force_cache: + sys.stderr.write('''Cache is empty and --force-cache was specified\n''') + sys.exit(-1) + + self.manager = DoManager(None, self.api_token, api_version=2) + + # Pick the json_data to print based on the CLI command + if self.args.droplets: + self.load_from_digital_ocean('droplets') + json_data = {'droplets': self.data['droplets']} + elif self.args.regions: + self.load_from_digital_ocean('regions') + json_data = {'regions': self.data['regions']} + elif self.args.images: + self.load_from_digital_ocean('images') + json_data = {'images': self.data['images']} + elif self.args.sizes: + self.load_from_digital_ocean('sizes') + json_data = {'sizes': self.data['sizes']} + elif self.args.ssh_keys: + self.load_from_digital_ocean('ssh_keys') + json_data = {'ssh_keys': self.data['ssh_keys']} + elif self.args.domains: + self.load_from_digital_ocean('domains') + json_data = {'domains': self.data['domains']} + elif self.args.all: + self.load_from_digital_ocean() + json_data = self.data + elif self.args.host: + json_data = self.load_droplet_variables_for_host() + else: # '--list' this is last to make it default + self.load_from_digital_ocean('droplets') + self.build_inventory() + json_data = self.inventory + + if self.cache_refreshed: + self.write_to_cache() + + if self.args.pretty: + print(json.dumps(json_data, sort_keys=True, indent=2)) + else: + print(json.dumps(json_data)) + # That's all she wrote... 
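+
+    # A minimal usage sketch (the token, droplet ID, name, region and IP
+    # address below are placeholders, not real data): with a token exported,
+    #
+    #   DO_API_TOKEN='abc123' ./digital_ocean.py --list --pretty
+    #
+    # the --list inventory assembled by build_inventory() below has roughly
+    # this shape:
+    #
+    #   {
+    #     "all": {"hosts": ["203.0.113.10"], "vars": {}},
+    #     "12345678": ["203.0.113.10"],
+    #     "node0": ["203.0.113.10"],
+    #     "region_nyc1": {"hosts": ["203.0.113.10"], "vars": {}},
+    #     "_meta": {"hostvars": {"203.0.113.10": {"do_id": 12345678, "do_name": "node0"}}}
+    #   }
+    #
+    # Each hostvars entry carries the full do_* variable set listed in the
+    # module docstring.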
+ + ########################################################################### + # Script configuration + ########################################################################### + + def read_settings(self): + ''' Reads the settings from the digital_ocean.ini file ''' + config = ConfigParser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') + + # Credentials + if config.has_option('digital_ocean', 'api_token'): + self.api_token = config.get('digital_ocean', 'api_token') + + # Cache related + if config.has_option('digital_ocean', 'cache_path'): + self.cache_path = config.get('digital_ocean', 'cache_path') + if config.has_option('digital_ocean', 'cache_max_age'): + self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') + + # Private IP Address + if config.has_option('digital_ocean', 'use_private_network'): + self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') + + # Group variables + if config.has_option('digital_ocean', 'group_variables'): + self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) + + def read_environment(self): + ''' Reads the settings from environment variables ''' + # Setup credentials + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") + + def read_cli_args(self): + ''' Command line argument processing ''' + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') + + parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') + parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') + + parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') + parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') + parser.add_argument('--regions', action='store_true', help='List Regions as JSON') + parser.add_argument('--images', action='store_true', help='List Images as JSON') + parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') + parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') + parser.add_argument('--domains', action='store_true', help='List Domains as JSON') + + parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') + + parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') + parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') + parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') + parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, + help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') + + parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') + parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') + + self.args = parser.parse_args() + + if self.args.api_token: + self.api_token = self.args.api_token + + # Make --list default if none of the other commands are specified + if (not self.args.droplets and not self.args.regions and + not self.args.images and not self.args.sizes and + not self.args.ssh_keys and not 
self.args.domains and + not self.args.all and not self.args.host): + self.args.list = True + + ########################################################################### + # Data Management + ########################################################################### + + def load_from_digital_ocean(self, resource=None): + '''Get JSON from DigitalOcean API''' + if self.args.force_cache and os.path.isfile(self.cache_filename): + return + # We always get fresh droplets + if self.is_cache_valid() and not (resource == 'droplets' or resource is None): + return + if self.args.refresh_cache: + resource = None + + if resource == 'droplets' or resource is None: + self.data['droplets'] = self.manager.all_active_droplets() + self.cache_refreshed = True + if resource == 'regions' or resource is None: + self.data['regions'] = self.manager.all_regions() + self.cache_refreshed = True + if resource == 'images' or resource is None: + self.data['images'] = self.manager.all_images(filter=None) + self.cache_refreshed = True + if resource == 'sizes' or resource is None: + self.data['sizes'] = self.manager.sizes() + self.cache_refreshed = True + if resource == 'ssh_keys' or resource is None: + self.data['ssh_keys'] = self.manager.all_ssh_keys() + self.cache_refreshed = True + if resource == 'domains' or resource is None: + self.data['domains'] = self.manager.all_domains() + self.cache_refreshed = True + + def build_inventory(self): + '''Build Ansible inventory of droplets''' + self.inventory = { + 'all': { + 'hosts': [], + 'vars': self.group_variables + }, + '_meta': {'hostvars': {}} + } + + # add all droplets by id and name + for droplet in self.data['droplets']: + # when using private_networking, the API reports the private one in "ip_address". + if 'private_networking' in droplet['features'] and not self.use_private_network: + for net in droplet['networks']['v4']: + if net['type'] == 'public': + dest = net['ip_address'] + else: + continue + else: + dest = droplet['ip_address'] + + self.inventory['all']['hosts'].append(dest) + + self.inventory[droplet['id']] = [dest] + self.inventory[droplet['name']] = [dest] + + # groups that are always present + for group in ('region_' + droplet['region']['slug'], + 'image_' + str(droplet['image']['id']), + 'size_' + droplet['size']['slug'], + 'distro_' + self.to_safe(droplet['image']['distribution']), + 'status_' + droplet['status']): + if group not in self.inventory: + self.inventory[group] = {'hosts': [], 'vars': {}} + self.inventory[group]['hosts'].append(dest) + + # groups that are not always present + for group in (droplet['image']['slug'], + droplet['image']['name']): + if group: + image = 'image_' + self.to_safe(group) + if image not in self.inventory: + self.inventory[image] = {'hosts': [], 'vars': {}} + self.inventory[image]['hosts'].append(dest) + + if droplet['tags']: + for tag in droplet['tags']: + if tag not in self.inventory: + self.inventory[tag] = {'hosts': [], 'vars': {}} + self.inventory[tag]['hosts'].append(dest) + + # hostvars + info = self.do_namespace(droplet) + self.inventory['_meta']['hostvars'][dest] = info + + def load_droplet_variables_for_host(self): + '''Generate a JSON response to a --host call''' + host = int(self.args.host) + droplet = self.manager.show_droplet(host) + info = self.do_namespace(droplet) + return {'droplet': info} + + ########################################################################### + # Cache Management + ########################################################################### + + def is_cache_valid(self): + ''' 
Determines if the cache files have expired, or if it is still valid ''' + if os.path.isfile(self.cache_filename): + mod_time = os.path.getmtime(self.cache_filename) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + return False + + def load_from_cache(self): + ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' + try: + cache = open(self.cache_filename, 'r') + json_data = cache.read() + cache.close() + data = json.loads(json_data) + except IOError: + data = {'data': {}, 'inventory': {}} + + self.data = data['data'] + self.inventory = data['inventory'] + + def write_to_cache(self): + ''' Writes data in JSON format to a file ''' + data = {'data': self.data, 'inventory': self.inventory} + json_data = json.dumps(data, sort_keys=True, indent=2) + + cache = open(self.cache_filename, 'w') + cache.write(json_data) + cache.close() + + ########################################################################### + # Utilities + ########################################################################### + + def push(self, my_dict, key, element): + ''' Pushed an element onto an array that may not have been defined in the dict ''' + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + return re.sub("[^A-Za-z0-9\-\.]", "_", word) + + def do_namespace(self, data): + ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' + info = {} + for k, v in data.items(): + info['do_' + k] = v + return info + + +########################################################################### +# Run the script +DigitalOceanInventory() diff --git a/ansible/inventory/ec2.ini b/ansible/inventory/ec2.ini new file mode 100644 index 000000000..e11a69cc1 --- /dev/null +++ b/ansible/inventory/ec2.ini @@ -0,0 +1,209 @@ +# Ansible EC2 external inventory script settings +# + +[ec2] + +# to talk to a private eucalyptus instance uncomment these lines +# and edit edit eucalyptus_host to be the host name of your cloud controller +#eucalyptus = True +#eucalyptus_host = clc.cloud.domain.org + +# AWS regions to make calls to. Set this to 'all' to make request to all regions +# in AWS and merge the results together. Alternatively, set this to a comma +# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not +# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or +# AWS_DEFAULT_REGION environment variable will be read to determine the region. +regions = all +regions_exclude = us-gov-west-1, cn-north-1 + +# When generating inventory, Ansible needs to know how to address a server. +# Each EC2 instance has a lot of variables associated with it. Here is the list: +# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance +# Below are 2 variables that are used as the address of a server: +# - destination_variable +# - vpc_destination_variable + +# This is the normal destination variable to use. If you are running Ansible +# from outside EC2, then 'public_dns_name' makes the most sense. If you are +# running Ansible from within EC2, then perhaps you want to use the internal +# address, and should set this to 'private_dns_name'. The key of an EC2 tag +# may optionally be used; however the boto instance variables hold precedence +# in the event of a collision. 
+destination_variable = public_dns_name + +# This allows you to override the inventory_name with an ec2 variable, instead +# of using the destination_variable above. Addressing (aka ansible_ssh_host) +# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. +#hostname_variable = tag_Name + +# For server inside a VPC, using DNS names may not make sense. When an instance +# has 'subnet_id' set, this variable is used. If the subnet is public, setting +# this to 'ip_address' will return the public IP address. For instances in a +# private subnet, this should be set to 'private_ip_address', and Ansible must +# be run from within EC2. The key of an EC2 tag may optionally be used; however +# the boto instance variables hold precedence in the event of a collision. +# WARNING: - instances that are in the private vpc, _without_ public ip address +# will not be listed in the inventory until You set: +# vpc_destination_variable = private_ip_address +vpc_destination_variable = ip_address + +# The following two settings allow flexible ansible host naming based on a +# python format string and a comma-separated list of ec2 tags. Note that: +# +# 1) If the tags referenced are not present for some instances, empty strings +# will be substituted in the format string. +# 2) This overrides both destination_variable and vpc_destination_variable. +# +#destination_format = {0}.{1}.example.com +#destination_format_tags = Name,environment + +# To tag instances on EC2 with the resource records that point to them from +# Route53, set 'route53' to True. +route53 = False + +# To use Route53 records as the inventory hostnames, uncomment and set +# to equal the domain name you wish to use. You must also have 'route53' (above) +# set to True. +# route53_hostnames = .example.com + +# To exclude RDS instances from the inventory, uncomment and set to False. +#rds = False + +# To exclude ElastiCache instances from the inventory, uncomment and set to False. +#elasticache = False + +# Additionally, you can specify the list of zones to exclude looking up in +# 'route53_excluded_zones' as a comma-separated list. +# route53_excluded_zones = samplezone1.com, samplezone2.com + +# By default, only EC2 instances in the 'running' state are returned. Set +# 'all_instances' to True to return all instances regardless of state. +all_instances = False + +# By default, only EC2 instances in the 'running' state are returned. Specify +# EC2 instance states to return as a comma-separated list. This +# option is overridden when 'all_instances' is True. +# instance_states = pending, running, shutting-down, terminated, stopping, stopped + +# By default, only RDS instances in the 'available' state are returned. Set +# 'all_rds_instances' to True return all RDS instances regardless of state. +all_rds_instances = False + +# Include RDS cluster information (Aurora etc.) +include_rds_clusters = False + +# By default, only ElastiCache clusters and nodes in the 'available' state +# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' +# to True return all ElastiCache clusters and nodes, regardless of state. +# +# Note that all_elasticache_nodes only applies to listed clusters. That means +# if you set all_elastic_clusters to false, no node will be return from +# unavailable clusters, regardless of the state and to what you set for +# all_elasticache_nodes. +all_elasticache_replication_groups = False +all_elasticache_clusters = False +all_elasticache_nodes = False + +# API calls to EC2 are slow. 
For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-ec2.cache +# - ansible-ec2.index +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# To disable the cache, set this value to 0 +cache_max_age = 300 + +# Organize groups into a nested/hierarchy instead of a flat namespace. +nested_groups = False + +# Replace - tags when creating groups to avoid issues with ansible +replace_dash_in_groups = True + +# If set to true, any tag of the form "a,b,c" is expanded into a list +# and the results are used to create additional tag_* inventory groups. +expand_csv_tags = False + +# The EC2 inventory output can become very large. To manage its size, +# configure which groups should be created. +group_by_instance_id = True +group_by_region = True +group_by_availability_zone = True +group_by_aws_account = False +group_by_ami_id = True +group_by_instance_type = True +group_by_instance_state = False +group_by_key_pair = True +group_by_vpc_id = True +group_by_security_group = True +group_by_tag_keys = True +group_by_tag_none = True +group_by_route53_names = True +group_by_rds_engine = True +group_by_rds_parameter_group = True +group_by_elasticache_engine = True +group_by_elasticache_cluster = True +group_by_elasticache_parameter_group = True +group_by_elasticache_replication_group = True + +# If you only want to include hosts that match a certain regular expression +# pattern_include = staging-* + +# If you want to exclude any hosts that match a certain regular expression +# pattern_exclude = staging-* + +# Instance filters can be used to control which instances are retrieved for +# inventory. For the full list of possible filters, please read the EC2 API +# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters +# Filters are key/value pairs separated by '=', to list multiple filters use +# a list separated by commas. See examples below. + +# If you want to apply multiple filters simultaneously, set stack_filters to +# True. Default behaviour is to combine the results of all filters. Stacking +# allows the use of multiple conditions to filter down, for example by +# environment and type of host. +stack_filters = False + +# Retrieve only instances with (key=value) env=staging tag +# instance_filters = tag:env=staging + +# Retrieve only instances with role=webservers OR role=dbservers tag +# instance_filters = tag:role=webservers,tag:role=dbservers + +# Retrieve only t1.micro instances OR instances with tag env=staging +# instance_filters = instance-type=t1.micro,tag:env=staging + +# You can use wildcards in filter values also. Below will list instances which +# tag Name value matches webservers1* +# (ex. webservers15, webservers1a, webservers123 etc) +# instance_filters = tag:Name=webservers1* + +# An IAM role can be assumed, so all requests are run as that role. +# This can be useful for connecting across different accounts, or to limit user +# access +# iam_role = role-arn + +# A boto configuration profile may be used to separate out credentials +# see http://boto.readthedocs.org/en/latest/boto_config_tut.html +# boto_profile = some-boto-profile-name + + +[credentials] + +# The AWS credentials can optionally be specified here. 
Credentials specified +# here are ignored if the environment variable AWS_ACCESS_KEY_ID or +# AWS_PROFILE is set, or if the boto_profile property above is set. +# +# Supplying AWS credentials here is not recommended, as it introduces +# non-trivial security concerns. When going down this route, please make sure +# to set access permissions for this file correctly, e.g. handle it the same +# way as you would a private SSH key. +# +# Unlike the boto and AWS configure files, this section does not support +# profiles. +# +# aws_access_key_id = AXXXXXXXXXXXXXX +# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX +# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/ansible/inventory/ec2.py b/ansible/inventory/ec2.py new file mode 100755 index 000000000..9614c5fe9 --- /dev/null +++ b/ansible/inventory/ec2.py @@ -0,0 +1,1595 @@ +#!/usr/bin/env python + +''' +EC2 external inventory script +================================= + +Generates inventory that Ansible can understand by making API request to +AWS EC2 using the Boto library. + +NOTE: This script assumes Ansible is being executed where the environment +variables needed for Boto have already been set: + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +optional region environement variable if region is 'auto' + +This script also assumes there is an ec2.ini file alongside it. To specify a +different path to ec2.ini, define the EC2_INI_PATH environment variable: + + export EC2_INI_PATH=/path/to/my_ec2.ini + +If you're using eucalyptus you need to set the above variables and +you need to define: + + export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus + +If you're using boto profiles (requires boto>=2.24.0) you can choose a profile +using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using +the AWS_PROFILE variable: + + AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml + +For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html + +When run against a specific host, this script returns the following variables: + - ec2_ami_launch_index + - ec2_architecture + - ec2_association + - ec2_attachTime + - ec2_attachment + - ec2_attachmentId + - ec2_block_devices + - ec2_client_token + - ec2_deleteOnTermination + - ec2_description + - ec2_deviceIndex + - ec2_dns_name + - ec2_eventsSet + - ec2_group_name + - ec2_hypervisor + - ec2_id + - ec2_image_id + - ec2_instanceState + - ec2_instance_type + - ec2_ipOwnerId + - ec2_ip_address + - ec2_item + - ec2_kernel + - ec2_key_name + - ec2_launch_time + - ec2_monitored + - ec2_monitoring + - ec2_networkInterfaceId + - ec2_ownerId + - ec2_persistent + - ec2_placement + - ec2_platform + - ec2_previous_state + - ec2_private_dns_name + - ec2_private_ip_address + - ec2_publicIp + - ec2_public_dns_name + - ec2_ramdisk + - ec2_reason + - ec2_region + - ec2_requester_id + - ec2_root_device_name + - ec2_root_device_type + - ec2_security_group_ids + - ec2_security_group_names + - ec2_shutdown_state + - ec2_sourceDestCheck + - ec2_spot_instance_request_id + - ec2_state + - ec2_state_code + - ec2_state_reason + - ec2_status + - ec2_subnet_id + - ec2_tenancy + - ec2_virtualization_type + - ec2_vpc_id + +These variables are pulled out of a boto.ec2.instance object. There is a lack of +consistency with variable spellings (camelCase and underscores) since this +just loops through all variables the object exposes. It is preferred to use the +ones with underscores when multiple exist. 
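+
+As an illustration only (the hostname and all values below are placeholders,
+not real AWS data), querying a single inventory host prints these variables
+as one flat JSON object:
+
+  ./ec2.py --host ec2-203-0-113-10.compute-1.amazonaws.com
+  {"ec2_id": "i-0123456789abcdef0", "ec2_ip_address": "203.0.113.10",
+   "ec2_region": "us-east-1", "ec2_placement": "us-east-1a", ...}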
+ +In addition, if an instance has AWS Tags associated with it, each tag is a new +variable named: + - ec2_tag_[Key] = [Value] + +Security groups are comma-separated in 'ec2_security_group_ids' and +'ec2_security_group_names'. +''' + +# (c) 2012, Peter Sankauskas +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +import sys +import os +import argparse +import re +from time import time +import boto +from boto import ec2 +from boto import rds +from boto import elasticache +from boto import route53 +from boto import sts +import six + +from ansible.module_utils import ec2 as ec2_utils + +HAS_BOTO3 = False +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + pass + +from six.moves import configparser +from collections import defaultdict + +try: + import json +except ImportError: + import simplejson as json + + +class Ec2Inventory(object): + + def _empty_inventory(self): + return {"_meta": {"hostvars": {}}} + + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = self._empty_inventory() + + self.aws_account_id = None + + # Index of hostname (address) to instance ID + self.index = {} + + # Boto profile to use (if any) + self.boto_profile = None + + # AWS credentials. 
+ self.credentials = {} + + # Read settings and parse CLI arguments + self.parse_cli_args() + self.read_settings() + + # Make sure that profile_name is not passed at all if not set + # as pre 2.24 boto will fall over otherwise + if self.boto_profile: + if not hasattr(boto.ec2.EC2Connection, 'profile_name'): + self.fail_with_error("boto version must be >= 2.24 to use profile") + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + if self.inventory == self._empty_inventory(): + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print(data_to_print) + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + + return False + + def read_settings(self): + ''' Reads the settings from the ec2.ini file ''' + + scriptbasename = __file__ + scriptbasename = os.path.basename(scriptbasename) + scriptbasename = scriptbasename.replace('.py', '') + + defaults = { + 'ec2': { + 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) + } + } + + if six.PY3: + config = configparser.ConfigParser() + else: + config = configparser.SafeConfigParser() + ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) + ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) + config.read(ec2_ini_path) + + # is eucalyptus? 
+ self.eucalyptus_host = None + self.eucalyptus = False + if config.has_option('ec2', 'eucalyptus'): + self.eucalyptus = config.getboolean('ec2', 'eucalyptus') + if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): + self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') + + # Regions + self.regions = [] + configRegions = config.get('ec2', 'regions') + if (configRegions == 'all'): + if self.eucalyptus_host: + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) + else: + configRegions_exclude = config.get('ec2', 'regions_exclude') + for regionInfo in ec2.regions(): + if regionInfo.name not in configRegions_exclude: + self.regions.append(regionInfo.name) + else: + self.regions = configRegions.split(",") + if 'auto' in self.regions: + env_region = os.environ.get('AWS_REGION') + if env_region is None: + env_region = os.environ.get('AWS_DEFAULT_REGION') + self.regions = [env_region] + + # Destination addresses + self.destination_variable = config.get('ec2', 'destination_variable') + self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + + if config.has_option('ec2', 'hostname_variable'): + self.hostname_variable = config.get('ec2', 'hostname_variable') + else: + self.hostname_variable = None + + if config.has_option('ec2', 'destination_format') and \ + config.has_option('ec2', 'destination_format_tags'): + self.destination_format = config.get('ec2', 'destination_format') + self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') + else: + self.destination_format = None + self.destination_format_tags = None + + # Route53 + self.route53_enabled = config.getboolean('ec2', 'route53') + if config.has_option('ec2', 'route53_hostnames'): + self.route53_hostnames = config.get('ec2', 'route53_hostnames') + else: + self.route53_hostnames = None + self.route53_excluded_zones = [] + if config.has_option('ec2', 'route53_excluded_zones'): + self.route53_excluded_zones.extend( + config.get('ec2', 'route53_excluded_zones', '').split(',')) + + # Include RDS instances? + self.rds_enabled = True + if config.has_option('ec2', 'rds'): + self.rds_enabled = config.getboolean('ec2', 'rds') + + # Include RDS cluster instances? + if config.has_option('ec2', 'include_rds_clusters'): + self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') + else: + self.include_rds_clusters = False + + # Include ElastiCache instances? + self.elasticache_enabled = True + if config.has_option('ec2', 'elasticache'): + self.elasticache_enabled = config.getboolean('ec2', 'elasticache') + + # Return all EC2 instances? + if config.has_option('ec2', 'all_instances'): + self.all_instances = config.getboolean('ec2', 'all_instances') + else: + self.all_instances = False + + # Instance states to be gathered in inventory. Default is 'running'. + # Setting 'all_instances' to 'yes' overrides this option. + ec2_valid_instance_states = [ + 'pending', + 'running', + 'shutting-down', + 'terminated', + 'stopping', + 'stopped' + ] + self.ec2_instance_states = [] + if self.all_instances: + self.ec2_instance_states = ec2_valid_instance_states + elif config.has_option('ec2', 'instance_states'): + for instance_state in config.get('ec2', 'instance_states').split(','): + instance_state = instance_state.strip() + if instance_state not in ec2_valid_instance_states: + continue + self.ec2_instance_states.append(instance_state) + else: + self.ec2_instance_states = ['running'] + + # Return all RDS instances? 
(if RDS is enabled) + if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: + self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') + else: + self.all_rds_instances = False + + # Return all ElastiCache replication groups? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: + self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') + else: + self.all_elasticache_replication_groups = False + + # Return all ElastiCache clusters? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: + self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') + else: + self.all_elasticache_clusters = False + + # Return all ElastiCache nodes? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: + self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') + else: + self.all_elasticache_nodes = False + + # boto configuration profile (prefer CLI argument then environment variables then config file) + self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE') + if config.has_option('ec2', 'boto_profile') and not self.boto_profile: + self.boto_profile = config.get('ec2', 'boto_profile') + + # AWS credentials (prefer environment variables) + if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or + os.environ.get('AWS_PROFILE')): + if config.has_option('credentials', 'aws_access_key_id'): + aws_access_key_id = config.get('credentials', 'aws_access_key_id') + else: + aws_access_key_id = None + if config.has_option('credentials', 'aws_secret_access_key'): + aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') + else: + aws_secret_access_key = None + if config.has_option('credentials', 'aws_security_token'): + aws_security_token = config.get('credentials', 'aws_security_token') + else: + aws_security_token = None + if aws_access_key_id: + self.credentials = { + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key + } + if aws_security_token: + self.credentials['security_token'] = aws_security_token + + # Cache related + cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if self.boto_profile: + cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + cache_name = 'ansible-ec2' + cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) + if cache_id: + cache_name = '%s-%s' % (cache_name, cache_id) + self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) + self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + if config.has_option('ec2', 'expand_csv_tags'): + self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') + else: + self.expand_csv_tags = False + + # Configure nested groups instead of flat namespace. 
+ if config.has_option('ec2', 'nested_groups'): + self.nested_groups = config.getboolean('ec2', 'nested_groups') + else: + self.nested_groups = False + + # Replace dash or not in group names + if config.has_option('ec2', 'replace_dash_in_groups'): + self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') + else: + self.replace_dash_in_groups = True + + # IAM role to assume for connection + if config.has_option('ec2', 'iam_role'): + self.iam_role = config.get('ec2', 'iam_role') + else: + self.iam_role = None + + # Configure which groups should be created. + group_by_options = [ + 'group_by_instance_id', + 'group_by_region', + 'group_by_availability_zone', + 'group_by_ami_id', + 'group_by_instance_type', + 'group_by_instance_state', + 'group_by_key_pair', + 'group_by_vpc_id', + 'group_by_security_group', + 'group_by_tag_keys', + 'group_by_tag_none', + 'group_by_route53_names', + 'group_by_rds_engine', + 'group_by_rds_parameter_group', + 'group_by_elasticache_engine', + 'group_by_elasticache_cluster', + 'group_by_elasticache_parameter_group', + 'group_by_elasticache_replication_group', + 'group_by_aws_account', + ] + for option in group_by_options: + if config.has_option('ec2', option): + setattr(self, option, config.getboolean('ec2', option)) + else: + setattr(self, option, True) + + # Do we need to just include hosts that match a pattern? + try: + pattern_include = config.get('ec2', 'pattern_include') + if pattern_include and len(pattern_include) > 0: + self.pattern_include = re.compile(pattern_include) + else: + self.pattern_include = None + except configparser.NoOptionError: + self.pattern_include = None + + # Do we need to exclude hosts that match a pattern? + try: + pattern_exclude = config.get('ec2', 'pattern_exclude') + if pattern_exclude and len(pattern_exclude) > 0: + self.pattern_exclude = re.compile(pattern_exclude) + else: + self.pattern_exclude = None + except configparser.NoOptionError: + self.pattern_exclude = None + + # Do we want to stack multiple filters? + if config.has_option('ec2', 'stack_filters'): + self.stack_filters = config.getboolean('ec2', 'stack_filters') + else: + self.stack_filters = False + + # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
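+        # For example, the sample value from ec2.ini,
+        #   instance_filters = instance-type=t1.micro,tag:env=staging
+        # is parsed below into {'instance-type': ['t1.micro'], 'tag:env': ['staging']}.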
+ self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + + filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] + + for instance_filter in filters: + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + self.ec2_instance_filters[filter_key].append(filter_value) + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', + help='Use boto profile for connections to EC2') + self.args = parser.parse_args() + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + if self.rds_enabled: + self.get_rds_instances_by_region(region) + if self.elasticache_enabled: + self.get_elasticache_clusters_by_region(region) + self.get_elasticache_replication_groups_by_region(region) + if self.include_rds_clusters: + self.include_rds_clusters_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + def connect(self, region): + ''' create connection to api server''' + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) + conn.APIVersion = '2010-08-31' + else: + conn = self.connect_to_aws(ec2, region) + return conn + + def boto_fix_security_token_in_profile(self, connect_args): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + self.boto_profile + if boto.config.has_option(profile, 'aws_security_token'): + connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') + return connect_args + + def connect_to_aws(self, module, region): + connect_args = self.credentials + + # only pass the profile name if it's set (as it is not supported by older boto versions) + if self.boto_profile: + connect_args['profile_name'] = self.boto_profile + self.boto_fix_security_token_in_profile(connect_args) + + if self.iam_role: + sts_conn = sts.connect_to_region(region, **connect_args) + role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') + connect_args['aws_access_key_id'] = role.credentials.access_key + connect_args['aws_secret_access_key'] = role.credentials.secret_key + connect_args['security_token'] = role.credentials.session_token + + conn = module.connect_to_region(region, **connect_args) + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) + return conn + + def get_instances_by_region(self, region): + ''' Makes an AWS EC2 API call to the list of instances in a particular + region ''' + + try: + conn = self.connect(region) + reservations = [] + if self.ec2_instance_filters: + if self.stack_filters: + filters_dict = {} + for filter_key, filter_values in self.ec2_instance_filters.items(): + filters_dict[filter_key] = filter_values + reservations.extend(conn.get_all_instances(filters=filters_dict)) + else: + for filter_key, filter_values in self.ec2_instance_filters.items(): + reservations.extend(conn.get_all_instances(filters={filter_key: filter_values})) + else: + reservations = conn.get_all_instances() + + # Pull the tags back in a second step + # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not + # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` + instance_ids = [] + for reservation in reservations: + instance_ids.extend([instance.id for instance in reservation.instances]) + + max_filter_value = 199 + tags = [] + for i in range(0, len(instance_ids), max_filter_value): + tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]})) + + tags_by_instance_id = defaultdict(dict) + for tag in tags: + tags_by_instance_id[tag.res_id][tag.name] = tag.value + + if (not self.aws_account_id) and reservations: + self.aws_account_id = reservations[0].owner_id + + for reservation in reservations: + for instance in reservation.instances: + instance.tags = tags_by_instance_id[instance.id] + self.add_instance(instance, region) + + except boto.exception.BotoServerError as e: + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + else: + backend = 'Eucalyptus' if self.eucalyptus else 'AWS' + error = "Error connecting to %s backend.\n%s" % (backend, e.message) + self.fail_with_error(error, 'getting EC2 instances') + + def get_rds_instances_by_region(self, region): + ''' Makes an AWS API call to the list of RDS instances in a particular + region ''' + + if not HAS_BOTO3: + self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again", + "getting RDS instances") + + client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) + db_instances = client.describe_db_instances() + + try: + conn = self.connect_to_aws(rds, region) + if conn: + marker = None + while True: + instances = conn.get_all_dbinstances(marker=marker) + marker = instances.marker + for index, instance in enumerate(instances): + # Add tags to instances. 
+ instance.arn = db_instances['DBInstances'][index]['DBInstanceArn'] + tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList'] + instance.tags = {} + for tag in tags: + instance.tags[tag['Key']] = tag['Value'] + + self.add_rds_instance(instance, region) + if not marker: + break + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error, 'getting RDS instances') + + def include_rds_clusters_by_region(self, region): + if not HAS_BOTO3: + self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", + "getting RDS clusters") + + client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) + + marker, clusters = '', [] + while marker is not None: + resp = client.describe_db_clusters(Marker=marker) + clusters.extend(resp["DBClusters"]) + marker = resp.get('Marker', None) + + account_id = boto.connect_iam().get_user().arn.split(':')[4] + c_dict = {} + for c in clusters: + # remove these datetime objects as there is no serialisation to json + # currently in place and we don't need the data yet + if 'EarliestRestorableTime' in c: + del c['EarliestRestorableTime'] + if 'LatestRestorableTime' in c: + del c['LatestRestorableTime'] + + if self.ec2_instance_filters == {}: + matches_filter = True + else: + matches_filter = False + + try: + # arn:aws:rds:::: + tags = client.list_tags_for_resource( + ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) + c['Tags'] = tags['TagList'] + + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.items(): + # get AWS tag key e.g. tag:env will be 'env' + tag_name = filter_key.split(":", 1)[1] + # Filter values is a list (if you put multiple values for the same tag name) + matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) + + if matches_filter: + # it matches a filter, so stop looking for further matches + break + + except Exception as e: + if e.message.find('DBInstanceNotFound') >= 0: + # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster. + # Ignore errors when trying to find tags for these + pass + + # ignore empty clusters caused by AWS bug + if len(c['DBClusterMembers']) == 0: + continue + elif matches_filter: + c_dict[c['DBClusterIdentifier']] = c + + self.inventory['db_clusters'] = c_dict + + def get_elasticache_clusters_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache clusters (with + nodes' info) in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) 
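The `describe_db_clusters` loop above pages through results using the `Marker` token that the RDS API returns while more data is available. A minimal, stand-alone sketch of that pagination pattern with boto3 (region name and credential handling are assumed to be configured elsewhere):

```
# Sketch of Marker-based pagination for RDS clusters; assumes boto3 is installed and credentials are configured.
import boto3

def list_all_db_clusters(region_name):
    client = boto3.client('rds', region_name=region_name)
    clusters = []
    marker = None
    while True:
        kwargs = {'Marker': marker} if marker else {}
        resp = client.describe_db_clusters(**kwargs)
        clusters.extend(resp.get('DBClusters', []))
        marker = resp.get('Marker')  # absent on the last page
        if not marker:
            break
    return clusters
```

Where available, boto3 paginators (`client.get_paginator(...)`) can encapsulate the same loop, but the explicit form matches what the inventory script does.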
+ try: + conn = self.connect_to_aws(elasticache, region) + if conn: + # show_cache_node_info = True + # because we also want nodes' information + response = conn.describe_cache_clusters(None, None, None, True) + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to CacheClusters or + # CacheNodes. Because of that we can't make use of the get_list + # method in the AWSQueryConnection. Let's do the work manually + clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] + + except KeyError as e: + error = "ElastiCache query to AWS failed (unexpected format)." + self.fail_with_error(error, 'getting ElastiCache clusters') + + for cluster in clusters: + self.add_elasticache_cluster(cluster, region) + + def get_elasticache_replication_groups_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache replication groups + in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = self.connect_to_aws(elasticache, region) + if conn: + response = conn.describe_replication_groups() + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to ReplicationGroups + # Because of that we can't make use of the get_list method in the + # AWSQueryConnection. Let's do the work manually + replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] + + except KeyError as e: + error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." 
+ self.fail_with_error(error, 'getting ElastiCache clusters') + + for replication_group in replication_groups: + self.add_elasticache_replication_group(replication_group, region) + + def get_auth_error_message(self): + ''' create an informative error message if there is an issue authenticating''' + errors = ["Authentication error retrieving ec2 inventory."] + if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: + errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') + else: + errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') + + boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] + boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) + if len(boto_config_found) > 0: + errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) + else: + errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) + + return '\n'.join(errors) + + def fail_with_error(self, err_msg, err_operation=None): + '''log an error to std err for ansible-playbook to consume and exit''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def get_instance(self, region, instance_id): + conn = self.connect(region) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only return instances with desired instance states + if instance.state not in self.ec2_instance_states: + return + + # Select the best destination address + if self.destination_format and self.destination_format_tags: + dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags]) + elif instance.subnet_id: + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) + else: + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # set the hostname from route53 + if self.route53_enabled and self.route53_hostnames: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + if name.endswith(self.route53_hostnames): + hostname = name + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + # to_safe strips hostname characters like dots, so don't strip route53 hostnames + elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): + hostname = hostname.lower() + else: + hostname = self.to_safe(hostname).lower() + + # if we only want to include hosts that match a pattern, skip those that don't + if self.pattern_include and not self.pattern_include.match(hostname): + return + + # if we need to exclude hosts that match a pattern, skip those + if self.pattern_exclude and self.pattern_exclude.match(hostname): + return + + # Add to index + self.index[hostname] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [hostname] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, hostname) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by instance state + if self.group_by_instance_state: + state_name = self.to_safe('instance_state_' + instance.state) + self.push(self.inventory, state_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'instance_states', state_name) + + # Inventory: Group by key pair + if self.group_by_key_pair and instance.key_name: + key_name = self.to_safe('key_' + instance.key_name) + self.push(self.inventory, key_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'keys', key_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, hostname) + if self.nested_groups: + 
self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + # Inventory: Group by AWS account ID + if self.group_by_aws_account: + self.push(self.inventory, self.aws_account_id, dest) + if self.nested_groups: + self.push_group(self.inventory, 'accounts', self.aws_account_id) + + # Inventory: Group by tag keys + if self.group_by_tag_keys: + for k, v in instance.tags.items(): + if self.expand_csv_tags and v and ',' in v: + values = map(lambda x: x.strip(), v.split(',')) + else: + values = [v] + + for v in values: + if v: + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled and self.group_by_route53_names: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'route53', name) + + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', hostname) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all EC2 instances + self.push(self.inventory, 'ec2', hostname) + + self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances unless all_rds_instances is True + if not self.all_rds_instances and instance.status != 'available': + return + + # Select the best destination address + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + + hostname = self.to_safe(hostname).lower() + + # Add to index + self.index[hostname] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [hostname] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, hostname) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + # Inventory: Group by engine + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + + # Inventory: Group by parameter group + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + + # Global Tag: all RDS instances + self.push(self.inventory, 'rds', hostname) + + self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest + + def add_elasticache_cluster(self, cluster, region): + ''' Adds an ElastiCache cluster to the inventory and index, as long as + it's nodes are addressable ''' + + # Only want available clusters unless all_elasticache_clusters is True + if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': + return + + # Select the best destination address + if 'ConfigurationEndpoint' in cluster and 
cluster['ConfigurationEndpoint']: + # Memcached cluster + dest = cluster['ConfigurationEndpoint']['Address'] + is_redis = False + else: + # Redis single node cluster + # Because all Redis clusters are single nodes, we'll merge the + # info from the cluster with info about the node + dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + is_redis = True + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, cluster['CacheClusterId']] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[cluster['CacheClusterId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) + + # Inventory: Group by region + if self.group_by_region and not is_redis: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone and not is_redis: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type and not is_redis: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group and not is_redis: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error.
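The address selection at the top of `add_elasticache_cluster` distinguishes Memcached clusters, which expose a cluster-wide `ConfigurationEndpoint`, from single-node Redis clusters, whose only address lives on the node itself. A toy illustration of the same branch (the describe output below is fabricated):

```
# Illustrative only: the dictionaries are fabricated examples of describe_cache_clusters output.
def pick_elasticache_dest(cluster):
    if cluster.get('ConfigurationEndpoint'):
        # Memcached: one endpoint for the whole cluster
        return cluster['ConfigurationEndpoint']['Address'], False
    # Redis single-node cluster: use the endpoint of its only node
    return cluster['CacheNodes'][0]['Endpoint']['Address'], True

memcached = {'ConfigurationEndpoint': {'Address': 'demo-mc.cfg.use1.cache.amazonaws.com', 'Port': 11211}}
redis = {'ConfigurationEndpoint': None,
         'CacheNodes': [{'Endpoint': {'Address': 'demo-redis.0001.use1.cache.amazonaws.com', 'Port': 6379}}]}

print(pick_elasticache_dest(memcached))  # ('demo-mc.cfg.use1.cache.amazonaws.com', False)
print(pick_elasticache_dest(redis))      # ('demo-redis.0001.use1.cache.amazonaws.com', True)
```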
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine and not is_redis: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. 
private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group (done at cluster level) + + # Inventory: Group by replication group (done at cluster level) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + + def add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Skip clusters we cannot address (e.g. 
private VPC subnet or clustered redis) + if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \ + replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None: + return + + # Select the best destination address (PrimaryEndpoint) + dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] + + # Add to index + self.index[dest] = [region, replication_group['ReplicationGroupId']] + + # Inventory: Group by ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[replication_group['ReplicationGroupId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone (doesn't apply to replication groups) + + # Inventory: Group by node type (doesn't apply to replication groups) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for replication groups + + # Inventory: Group by security group (doesn't apply to replication groups) + # Check this value in cluster level + + # Inventory: Group by engine (replication groups are always Redis) + if self.group_by_elasticache_engine: + self.push(self.inventory, 'elasticache_redis', dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', 'redis') + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) + + host_info = self.get_host_info_dict_from_describe_dict(replication_group) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + def get_route53_records(self): + ''' Get and store the map of resource records to domain names that + point to them. ''' + + if self.boto_profile: + r53_conn = route53.Route53Connection(profile_name=self.boto_profile) + else: + r53_conn = route53.Route53Connection() + all_zones = r53_conn.get_zones() + + route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones] + + self.route53_records = {} + + for zone in route53_zones: + rrsets = r53_conn.get_all_rrsets(zone.id) + + for record_set in rrsets: + record_name = record_set.name + + if record_name.endswith('.'): + record_name = record_name[:-1] + + for resource in record_set.resource_records: + self.route53_records.setdefault(resource, set()) + self.route53_records[resource].add(record_name) + + def get_instance_route53_names(self, instance): + ''' Check if an instance is referenced in the records we have from + Route53. If it is, return the list of domain names pointing to said + instance. If nothing points to it, return an empty list. 
''' + + instance_attributes = ['public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address'] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + def get_host_info_dict_from_instance(self, instance): + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 + if key == 'ec2__state': + instance_vars['ec2_state'] = instance.state or '' + instance_vars['ec2_state_code'] = instance.state_code + elif key == 'ec2__previous_state': + instance_vars['ec2_previous_state'] = instance.previous_state or '' + instance_vars['ec2_previous_state_code'] = instance.previous_state_code + elif isinstance(value, (int, bool)): + instance_vars[key] = value + elif isinstance(value, six.string_types): + instance_vars[key] = value.strip() + elif value is None: + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2__placement': + instance_vars['ec2_placement'] = value.zone + elif key == 'ec2_tags': + for k, v in value.items(): + if self.expand_csv_tags and ',' in v: + v = list(map(lambda x: x.strip(), v.split(','))) + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + elif key == 'ec2_block_device_mapping': + instance_vars["ec2_block_devices"] = {} + for k, v in value.items(): + instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id + else: + pass + # TODO Product codes if someone finds them useful + # print key + # print type(value) + # print value + + instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id + + return instance_vars + + def get_host_info_dict_from_describe_dict(self, describe_dict): + ''' Parses the dictionary returned by the API call into a flat list + of parameters. This method should be used only when 'describe' is + used directly because Boto doesn't provide specific classes. ''' + + # I really don't agree with prefixing everything with 'ec2' + # because EC2, RDS and ElastiCache are different services. + # I'm just following the pattern used until now to not break any + # compatibility. 
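`get_host_info_dict_from_instance` is what turns each boto instance object into the flat `ec2_*` hostvars that Ansible sees: tags become `ec2_tag_<key>` variables and security groups are joined into comma-separated ID and name strings. A fabricated example of that flattening (all values are invented, and `to_safe` is simplified from the helper defined later in the script):

```
# Fabricated example of how tags and security groups end up in hostvars
# (mirrors the ec2_tags / ec2_groups handling above; all values are invented).
import re

def to_safe(word):
    # Simplified version of the script's to_safe: anything unsafe becomes '_'
    return re.sub(r"[^A-Za-z0-9_]", "_", word)

tags = {'Name': 'web-01', 'Environment': 'staging'}
groups = [('sg-0123456789abcdef0', 'web-servers'), ('sg-0fedcba9876543210', 'default')]

hostvars = {}
for k, v in tags.items():
    hostvars[to_safe('ec2_tag_' + k)] = v
hostvars['ec2_security_group_ids'] = ','.join(gid for gid, _ in groups)
hostvars['ec2_security_group_names'] = ','.join(name for _, name in groups)

print(hostvars)
# {'ec2_tag_Name': 'web-01', 'ec2_tag_Environment': 'staging',
#  'ec2_security_group_ids': 'sg-0123456789abcdef0,sg-0fedcba9876543210',
#  'ec2_security_group_names': 'web-servers,default'}
```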
+ + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + self.uncammelize(key)) + + # Handle complex types + + # Target: Memcached Cache Clusters + if key == 'ec2_configuration_endpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) + if key == 'ec2_endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] + host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] + replica_count += 1 + + # Target: Redis Replication Groups + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # Target: All Cache Clusters + elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything + elif key == 'ec2_security_groups': + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). 
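Keys coming back from the raw `describe_*` calls are CamelCase, so they are converted to the usual `ec2_*` snake_case form before being exposed as hostvars. A stand-alone sketch of that conversion, using the same two regular expressions as the `uncammelize` helper defined further down:

```
# Sketch of the CamelCase -> snake_case conversion applied to describe_* keys.
import re

def uncammelize(key):
    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

for k in ('CacheClusterId', 'PreferredAvailabilityZone', 'ConfigurationEndpoint'):
    print('ec2_' + uncammelize(k))
# ec2_cache_cluster_id
# ec2_preferred_availability_zone
# ec2_configuration_endpoint
```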
+ if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers + elif isinstance(value, (int, bool)): + host_info[key] = value + + # Target: Everything + # Sanitize string values + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string + elif value is None: + host_info[key] = '' + + else: + # Remove non-processed complex types + pass + + return host_info + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if self.args.host not in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if self.args.host not in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. ''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + with open(self.cache_path_cache, 'r') as f: + json_inventory = f.read() + return json_inventory + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + with open(self.cache_path_index, 'rb') as f: + self.index = json.load(f) + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + with open(filename, 'w') as f: + f.write(json_data) + + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + regex = "[^A-Za-z0-9\_" + if not self.replace_dash_in_groups: + regex += "\-" + return re.sub(regex + "]", "_", word) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +if __name__ == '__main__': + # Run the script + Ec2Inventory() diff --git a/ansible/reset.yml b/ansible/reset.yml new file mode 100644 index 000000000..d18075062 --- /dev/null +++ b/ansible/reset.yml @@ -0,0 +1,9 @@ +--- + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + become: yes + roles: + - stop + - unsafe_reset + - start + diff --git a/ansible/restart.yml 
b/ansible/restart.yml new file mode 100644 index 000000000..6690a41f6 --- /dev/null +++ b/ansible/restart.yml @@ -0,0 +1,8 @@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + roles: + - stop + - start diff --git a/ansible/roles/config/defaults/main.yml b/ansible/roles/config/defaults/main.yml new file mode 100644 index 000000000..34a5ab120 --- /dev/null +++ b/ansible/roles/config/defaults/main.yml @@ -0,0 +1,8 @@ +--- +#tendermint_genesis_file: "" +#service_genesis_file: "" +app_options_file: "app_options_files/dev_money" +seeds: "" +testnet_name: testnet1 +validators: true + diff --git a/ansible/roles/config/tasks/main.yml b/ansible/roles/config/tasks/main.yml new file mode 100644 index 000000000..9a9fe3e5f --- /dev/null +++ b/ansible/roles/config/tasks/main.yml @@ -0,0 +1,65 @@ +--- + +- name: gather tendermint public keys + when: (validators == true or validators == 'true') and tendermint_genesis_file is not defined + command: "/usr/bin/tendermint show_validator --home /etc/{{service}}/tendermint --log_level error" + register: pubkeys + changed_when: false + +- name: resetting permissions from root after gathering public keys + file: "path=/etc/{{service}}/tendermint owner={{service}} group={{service}} recurse=yes" + +- name: register tendermint public keys as host facts + when: (validators == true or validators == 'true') and tendermint_genesis_file is not defined + set_fact: "pubkey='{{pubkeys.stdout}}'" + connection: local + +- name: copy generated tendermint genesis.json - genesis_time will be updated + when: (validators == true or validators == 'true') and tendermint_genesis_file is not defined + template: + src: genesis-server.json.j2 + dest: "/etc/{{service}}/tendermint/genesis.json" + owner: "{{service}}" + group: "{{service}}" + +- name: copy generated service genesis.json - genesis_time will be updated + when: (validators == true or validators == 'true') and (service_genesis_file is not defined) and (service != 'ethermint') + template: + src: genesis-service.json.j2 + dest: "/etc/{{service}}/genesis.json" + owner: "{{service}}" + group: "{{service}}" + +- name: copy pre-created tendermint genesis.json + when: tendermint_genesis_file is defined + copy: "src={{tendermint_genesis_file}} dest=/etc/{{service}}/tendermint/genesis.json owner={{service}} group={{service}}" + +- name: copy pre-created service genesis.json + when: service_genesis_file is defined + copy: "src={{service_genesis_file}} dest=/etc/{{service}}/genesis.json owner={{service}} group={{service}}" + +- name: copy tendermint config.toml + tags: reconfig + when: validators == true or validators == 'true' + template: + src: config.toml.j2 + dest: "/etc/{{service}}/tendermint/config.toml" + owner: "{{service}}" + group: "{{service}}" + +- name: Copy validator network files for non-validators + when: validators == false or validators == 'false' + get_url: "url={{item['src']}} dest={{item['dst']}} force=yes" + with_items: + - { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/{{service}}/genesis.json" , dst: "/etc/{{service}}/genesis.json" } + - { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/tendermint/genesis.json" , dst: "/etc/{{service}}/tendermint/genesis.json" } + - { src: "https://raw.githubusercontent.com/tendermint/testnets/master/{{validator_network}}/tendermint/config.toml" , dst: 
"/etc/{{service}}/tendermint/config.toml" } + +- name: Set validator network files permissions for non-validators + when: validators == false or validators == 'false' + file: "path={{item}} owner={{service}} group={{service}}" + with_items: + - "/etc/{{service}}/genesis.json" + - "/etc/{{service}}/tendermint/genesis.json" + - "/etc/{{service}}/tendermint/config.toml" + diff --git a/ansible/roles/config/templates/config.toml.j2 b/ansible/roles/config/templates/config.toml.j2 new file mode 100644 index 000000000..79e39c39f --- /dev/null +++ b/ansible/roles/config/templates/config.toml.j2 @@ -0,0 +1,37 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +proxy_app = "tcp://127.0.0.1:46658" +moniker = "{{inventory_hostname}}" +fast_sync = true +db_backend = "memdb" +#log_level = "mempool:error,*:debug" +log_level = "state:info,*:error" + +[rpc] +laddr = "tcp://0.0.0.0:46657" + +[mempool] +recheck = false +broadcast = false +wal_dir = "" + +[consensus] +max_block_size_txs = 10000 +create_empty_blocks = false +timeout_propose = 10000 +skip_timeout_commit = true +timeout_commit = 1 +wal_light = true +block_part_size = 262144 + +[p2p] +max_msg_packet_payload_size=65536 +send_rate=51200000 # 50 MB/s +recv_rate=51200000 # 50 MB/s +laddr = "tcp://0.0.0.0:46656" +{% if validators == true or validators == 'true' %} +{% set comma = joiner(",") %}seeds = "{% for host in ((groups[testnet_name]|default([]))+(groups['tag_Environment_'~(testnet_name|regex_replace('-','_'))]|default([])))|difference(inventory_hostname) %}{{ comma() }}{{hostvars[host]["inventory_hostname"]}}:46656{% endfor %}" +{% else %} +seeds = "{{ seeds | default() }}" +{% endif %} diff --git a/ansible/roles/config/templates/genesis-server.json.j2 b/ansible/roles/config/templates/genesis-server.json.j2 new file mode 100644 index 000000000..e41c98a0c --- /dev/null +++ b/ansible/roles/config/templates/genesis-server.json.j2 @@ -0,0 +1,24 @@ +{ + "genesis_time":"{{ansible_date_time.iso8601}}", + "chain_id":"{{testnet_name}}", + "validators": + [ +{% if (validators == true) or (validators == 'true') %} +{% set comma = joiner(",") %} +{% for host in (groups[testnet_name]|default([]))+(groups['tag_Environment_'~(testnet_name|regex_replace('-','_'))]|default([])) %} + {{ comma() }} + { + "pub_key": { + "data": "{{hostvars[host]["pubkey"]["data"]}}", + "type": "{{hostvars[host]["pubkey"]["type"]}}" + }, + "amount":10, + "name":"{{hostvars[host]["inventory_hostname"]}}" + } +{% endfor %} +{% endif %} + ], + "app_hash":"", + "app_options": {} +} + diff --git a/ansible/roles/config/templates/genesis-service.json.j2 b/ansible/roles/config/templates/genesis-service.json.j2 new file mode 100644 index 000000000..7f9a48316 --- /dev/null +++ b/ansible/roles/config/templates/genesis-service.json.j2 @@ -0,0 +1,12 @@ +{ + "genesis_time":"{{ansible_date_time.iso8601}}", + "chain_id":"{{testnet_name}}", + "validators": [], + "app_hash":"", + "app_options": { +{% if app_options_file is defined %} +{% include app_options_file %} +{% endif %} + } +} + diff --git a/ansible/roles/generic-service/tasks/main.yml b/ansible/roles/generic-service/tasks/main.yml new file mode 100644 index 000000000..d7a2e015f --- /dev/null +++ b/ansible/roles/generic-service/tasks/main.yml @@ -0,0 +1,25 @@ +--- + +- name: Create service group + group: "name={{service}}" + +- name: Create service user + user: "name={{service}} group={{service}} home=/etc/{{service}}" + +- name: Change user folder to more permissive + file: 
"path=/etc/{{service}} mode=0755" + +- name: Create tendermint service + template: "src=systemd.service.j2 dest=/etc/systemd/system/{{service}}.service" + +- name: Reload systemd services + systemd: "name={{service}} daemon_reload=yes enabled=no" + +- name: Create tendermint directory + file: "path=/etc/{{service}}/tendermint state=directory mode=0755 owner={{service}} group={{service}}" + +- name: Initialize tendermint + command: "/usr/bin/tendermint init --home /etc/{{service}}/tendermint" + become: yes + become_user: "{{service}}" + diff --git a/ansible/roles/generic-service/templates/systemd.service.j2 b/ansible/roles/generic-service/templates/systemd.service.j2 new file mode 100644 index 000000000..201b7942d --- /dev/null +++ b/ansible/roles/generic-service/templates/systemd.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description={{service}} server +Requires=network-online.target +After=network-online.target + +[Service] +Environment="TMHOME=/etc/{{service}}/tendermint" +Restart=on-failure +User={{service}} +Group={{service}} +PermissionsStartOnly=true +ExecStart=/usr/bin/tendermint node{{(service=='tendermint')|ternary(' --proxy_app=dummy','')}} +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target + diff --git a/ansible/roles/install/defaults/main.yml b/ansible/roles/install/defaults/main.yml new file mode 100644 index 000000000..10dad07d4 --- /dev/null +++ b/ansible/roles/install/defaults/main.yml @@ -0,0 +1,4 @@ +--- +release_install: true +binary: "{{ lookup('env','GOPATH') | default('') }}/bin/{{service}}" + diff --git a/ansible/roles/install/tasks/centos.yml b/ansible/roles/install/tasks/centos.yml new file mode 100644 index 000000000..b6396d7d4 --- /dev/null +++ b/ansible/roles/install/tasks/centos.yml @@ -0,0 +1,34 @@ +--- + +#Three commands to install a service on CentOS/RedHat +#wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | rpm --import - +#wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo +#yum update && yum install basecoin + +#This has a bug in Ansible 2.3: https://github.com/ansible/ansible/issues/20711 +#- name: Add repository key on CentOS/RedHat +# when: ansible_os_family == "RedHat" +# rpm_key: key=https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint + +#Workaround +- name: Download repository key for CentOS/RedHat + when: ansible_os_family == "RedHat" + get_url: "url=https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint dest=/root/RPM-GPG-KEY-Tendermint force=yes checksum=sha256:a8c61d4061697d2595562c703dbafbdfdcfa7f0c75a523ac84d5609d1b444abe" +- name: Import repository key for CentOS/RedHat + when: ansible_os_family == "RedHat" + command: "rpm --import /root/RPM-GPG-KEY-Tendermint" + +- name: Install tendermint repository on CentOS/RedHat + when: ansible_os_family == "RedHat" + yum_repository: + name: tendermint + baseurl: https://tendermint-packages.interblock.io/centos/7/os/x86_64 + description: "Tendermint repo" + gpgcheck: yes + gpgkey: https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint +# repo_gpgcheck: yes + +- name: Install package on CentOS/RedHat + when: ansible_os_family == "RedHat" + yum: "pkg={{service}} update_cache=yes state=latest" + diff --git a/ansible/roles/install/tasks/debian.yml b/ansible/roles/install/tasks/debian.yml new file mode 100644 index 000000000..8b955659f --- /dev/null +++ 
b/ansible/roles/install/tasks/debian.yml @@ -0,0 +1,22 @@ +--- + +#Three commands to install a service on Debian/Ubuntu +#wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add - +#wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list +#apt-get update && apt-get install basecoin + +- name: Add repository key on Debian/Ubuntu + when: ansible_os_family == "Debian" + apt_key: + url: https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint + id: 2122CBE9 + +- name: Install tendermint repository on Debian/Ubuntu + when: ansible_os_family == "Debian" + apt_repository: + repo: deb https://tendermint-packages.interblock.io/debian stable main + +- name: Install package on Debian/Ubuntu + when: ansible_os_family == "Debian" + apt: "pkg={{service}} update_cache=yes state=latest" + diff --git a/ansible/roles/install/tasks/main.yml b/ansible/roles/install/tasks/main.yml new file mode 100644 index 000000000..b9261382a --- /dev/null +++ b/ansible/roles/install/tasks/main.yml @@ -0,0 +1,28 @@ +--- + +- name: Set timezone + when: timezone is defined + file: path=/etc/localtime state=link src=/usr/share/zoneinfo/{{timezone}} force=yes + +- name: Disable journald rate-limiting + lineinfile: "path=/etc/systemd/journald.conf regexp={{item.regexp}} line='{{item.line}}'" + with_items: + - { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" } + - { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" } + +- name: Restart journald + service: name=systemd-journald state=restarted + +- include: debian.yml + when: ansible_os_family == "Debian" + +- include: centos.yml + when: ansible_os_family == "RedHat" + +- name: copy compiled binary + when: not release_install|bool + copy: + src: "{{binary}}" + dest: /usr/local/bin + mode: 0755 + diff --git a/ansible/roles/start/tasks/main.yml b/ansible/roles/start/tasks/main.yml new file mode 100644 index 000000000..6bc611c91 --- /dev/null +++ b/ansible/roles/start/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +- name: start service + service: "name={{service}} state=started" + diff --git a/ansible/roles/status/tasks/main.yml b/ansible/roles/status/tasks/main.yml new file mode 100644 index 000000000..e47d634e7 --- /dev/null +++ b/ansible/roles/status/tasks/main.yml @@ -0,0 +1,10 @@ +--- + +- name: start service + command: "service {{service}} status" + changed_when: false + register: status + +- name: Result + debug: var=status.stdout_lines + diff --git a/ansible/roles/stop/tasks/main.yml b/ansible/roles/stop/tasks/main.yml new file mode 100644 index 000000000..7db356f22 --- /dev/null +++ b/ansible/roles/stop/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +- name: stop service + service: "name={{service}} state=stopped" + diff --git a/ansible/roles/unsafe_reset/tasks/main.yml b/ansible/roles/unsafe_reset/tasks/main.yml new file mode 100644 index 000000000..99c536f57 --- /dev/null +++ b/ansible/roles/unsafe_reset/tasks/main.yml @@ -0,0 +1,11 @@ +- shell: "ethermint --datadir /etc/ethermint unsafe_reset_all" + when: "service == 'ethermint'" + become_user: ethermint + +- shell: "export BCHOME=/etc/basecoin ; basecoin unsafe_reset_all" + when: "service == 'basecoin'" + become_user: basecoin + +- shell: "export TMHOME=/etc/{{service}}/tendermint ; tendermint unsafe_reset_all" + become_user: "{{service}}" + diff --git a/ansible/start.yml b/ansible/start.yml new file mode 100644 index 000000000..699c7052b --- /dev/null +++ b/ansible/start.yml @@ -0,0 +1,8 
@@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + roles: + - start + diff --git a/ansible/status.yml b/ansible/status.yml new file mode 100644 index 000000000..2839f0563 --- /dev/null +++ b/ansible/status.yml @@ -0,0 +1,8 @@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + roles: + - status + diff --git a/ansible/stop.yml b/ansible/stop.yml new file mode 100644 index 000000000..f89e3cf22 --- /dev/null +++ b/ansible/stop.yml @@ -0,0 +1,8 @@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + roles: + - stop + diff --git a/ansible/ubuntu16-patch.yml b/ansible/ubuntu16-patch.yml new file mode 100644 index 000000000..89c8864d0 --- /dev/null +++ b/ansible/ubuntu16-patch.yml @@ -0,0 +1,8 @@ +--- +#Ubuntu 16.04 is not installing the python package in the standard installation on DigitalOcean. This "patch" will install it so the rest of the ansible playbooks can work properly. + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + gather_facts: no + tasks: + - raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) + diff --git a/ansible/upgrade-reset.yml b/ansible/upgrade-reset.yml new file mode 100644 index 000000000..58688f7ce --- /dev/null +++ b/ansible/upgrade-reset.yml @@ -0,0 +1,11 @@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}:tag_Environment_{{ lookup('env','TF_VAR_TESTNET_NAME') | regex_replace('-','_') }}" + roles: + - stop + - install + - unsafe_reset + - start + diff --git a/ansible/upgrade.yml b/ansible/upgrade.yml new file mode 100644 index 000000000..42ebad009 --- /dev/null +++ b/ansible/upgrade.yml @@ -0,0 +1,10 @@ +--- + +#variable "service" is required + +- hosts: "{{ lookup('env','TF_VAR_TESTNET_NAME') }}" + roles: + - stop + - install + - start + diff --git a/build/.gitignore b/build/.gitignore new file mode 100644 index 000000000..9974388f1 --- /dev/null +++ b/build/.gitignore @@ -0,0 +1,4 @@ +BUILD +RPMS +SPECS +tmp diff --git a/build/LICENSE b/build/LICENSE new file mode 100644 index 000000000..bb66bb350 --- /dev/null +++ b/build/LICENSE @@ -0,0 +1,204 @@ +Tendermint Core +License: Apache2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 All in Bits, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/build/Makefile b/build/Makefile new file mode 100644 index 000000000..519db9a4f --- /dev/null +++ b/build/Makefile @@ -0,0 +1,288 @@ +## +# Extra checks, because we do not use autoconf. Set extra_check to false if it is bothering you. 
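Before the variable definitions below, it is worth sketching how this Makefile is meant to be driven end to end. The values in the sketch are placeholders; only the variable names (GOPATH, GPG_PASSPHRASE, BUILD_NUMBER, GIT_BRANCH) and the targets come from the Makefile itself.

```
# Run from the repository root; all values below are illustrative.
export GOPATH="$HOME/go"
export GPG_PASSPHRASE='change-me'     # passphrase for the 2122CBE9 signing key
export BUILD_NUMBER=1                 # required by the version-% targets
export GIT_BRANCH=master              # branch to check out; defaults to master

make -C build build                   # build every binary into $GOPATH/bin
make -C build tendermint              # build + package (RPM and DEB) a single project
make -C build install                 # upload the packages to the S3-backed yum/apt repos
```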
+## + +extra_check = true +go_min_version = 1.8.3 +gpg_key = 2122CBE9 + +ifeq ($(extra_check),true) +ifndef GOPATH +$(error GOPATH not set) +else +go_version := $(shell go version | sed "s/^.* go\([0-9\.]*\) .*$$/\1/" ) +$(info Found go version $(go_version)) +go_version_check := $(shell echo -e "$(go_min_version)\n$(go_version)" | sort -V | head -1) +ifneq ($(go_min_version),$(go_version_check)) +$(error go version go_min_version or above is required) +endif +endif +gpg_check := $(shell gpg -K | grep '/$(gpg_key) ' | sed 's,^.*/\($(gpg_key)\) .*$$,\1,') +ifneq ($(gpg_check),$(gpg_key)) +$(error GPG key $(gpg_key) not found.) +else +$(info GPG key $(gpg_key) found) +endif +ifndef GPG_PASSPHRASE +$(error GPG_PASSPHRASE not set) +endif +endif + +### +# Here comes the real deal +### + +binaries = tendermint basecoin ethermint trackomatron gaia +build-binaries = build-tendermint build-basecoin build-ethermint build-trackomatron build-gaia +package-rpm = package-rpm-tendermint package-rpm-basecoin package-rpm-ethermint package-rpm-trackomatron package-rpm-gaia +install-rpm = install-rpm-tendermint install-rpm-basecoin install-rpm-ethermint install-rpm-trackomatron install-rpm-gaia +package-deb = package-deb-tendermint package-deb-basecoin package-deb-ethermint package-deb-trackomatron package-deb-gaia +install-deb = install-deb-tendermint install-deb-basecoin install-deb-ethermint install-deb-trackomatron install-deb-gaia + +all: $(binaries) +build: $(build-binaries) +package: $(package-rpm) $(package-deb) +install: $(install-rpm) $(install-deb) +$(binaries): %: build-% package-rpm-% package-deb-% + +### +# Build the binaries +### + +git-branch: + $(eval GIT_BRANCH=$(shell echo $${GIT_BRANCH:-master})) + +build-tendermint: git-branch + @echo "*** Building tendermint" + go get -d -u github.com/tendermint/tendermint/cmd/tendermint + cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull + $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_vendor_deps build + cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin + @echo "*** Built tendermint" + +build-basecoin: git-branch + @echo "*** Building basecoin" + go get -d -u github.com/tendermint/basecoin/cmd/basecoin + cd $(GOPATH)/src/github.com/tendermint/basecoin && git checkout "$(GIT_BRANCH)" && git pull + $(MAKE) -C $(GOPATH)/src/github.com/tendermint/basecoin get_vendor_deps install + @echo "*** Built basecoin" + +build-ethermint: git-branch + @echo "*** Building ethermint" + go get -d -u github.com/tendermint/ethermint/cmd/ethermint + cd $(GOPATH)/src/github.com/tendermint/ethermint && git checkout "$(GIT_BRANCH)" && git pull + $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint get_vendor_deps build + cp $(GOPATH)/src/github.com/tendermint/ethermint/build/ethermint $(GOPATH)/bin + @echo "*** Built ethermint" + +build-trackomatron: git-branch + @echo "*** Building trackomatron" + go get -d -u go github.com/tendermint/trackomatron/cmd/tracko + cd $(GOPATH)/src/github.com/tendermint/trackomatron && git checkout "$(GIT_BRANCH)" && git pull + $(MAKE) -C $(GOPATH)/src/github.com/tendermint/trackomatron get_vendor_deps install + @echo "Workaround: trackomatron package has tracko as the binary - trackomatron needed for proper packaging" && rm -rf $(GOPATH)/bin/trackomatron && ln -s $(GOPATH)/bin/tracko $(GOPATH)/bin/trackomatron + @echo "*** Built trackomatron" + +build-gaia: git-branch + @echo "*** Building gaia" + go get -d -u go github.com/cosmos/gaia || echo 
"Workaround for go downloads." + cd $(GOPATH)/src/github.com/cosmos/gaia && git checkout "$(GIT_BRANCH)" && git pull + $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia get_vendor_deps install + @echo "*** Built gaia" + +### +# Prepare package files +### + +# set app_version +version-%: $(GOPATH)/bin/% + if [ -z "$(BUILD_NUMBER)" ]; then echo "BUILD_NUMBER not set" ; false ; fi + $(eval $*_version=$(shell $< version | cut -d- -f1 )) + +# set build_folder +folder-%: version-% + $(eval build_folder=BUILD/$*-$($*_version)-$(BUILD_NUMBER)) + +# clean up folder structure for package files +prepare-files = rm -rf $(build_folder) && mkdir -p $(build_folder) && cp -r ./$(1)/* $(build_folder) && mkdir -p $(build_folder)/usr/bin && cp $(GOPATH)/bin/$(1) $(build_folder)/usr/bin + +## +## Package customizations for the different applications +## + +prepare-tendermint = +prepare-basecoin = cp $(GOPATH)/bin/basecli $(build_folder)/usr/bin +prepare-ethermint = mkdir -p $(build_folder)/etc/ethermint && \ + cp $(GOPATH)/src/github.com/tendermint/ethermint/setup/genesis.json $(build_folder)/etc/ethermint/genesis.json && \ + cp -r $(GOPATH)/src/github.com/tendermint/ethermint/setup/keystore $(build_folder)/etc/ethermint +prepare-trackomatron = rm -f $(build_folder)/usr/bin/trackomatron && cp $(GOPATH)/bin/tracko $(GOPATH)/bin/trackocli $(build_folder)/usr/bin +prepare-gaia = + +### +# Package the binary for CentOS/RedHat (RPM) and Debian/Ubuntu (DEB) +### + +# Depends on rpmbuild, sorry, this can only be built on CentOS/RedHat machines. +package-rpm-%: folder-% + @echo "*** Packaging RPM $* version $($*_version)" + + $(call prepare-files,$*) + $(call prepare-$*) + + rm -rf $(build_folder)/DEBIAN + mkdir -p $(build_folder)/usr/share/licenses/$* + cp ./LICENSE $(build_folder)/usr/share/licenses/$*/LICENSE + chmod -Rf a+rX,u+w,g-w,o-w $(build_folder) + + mkdir -p {SPECS,tmp} + + cp ./spectemplates/$*.spec SPECS + sed -i "s/@VERSION@/$($*_version)/" SPECS/$*.spec + sed -i "s/@BUILD_NUMBER@/$(BUILD_NUMBER)/" SPECS/$*.spec + + rpmbuild -bb SPECS/$*.spec --define "_topdir `pwd`" --define "_tmppath `pwd`/tmp" + ./sign RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm "$(gpg_key)" "`which gpg`" + rpm -Kv RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm || echo "rpm returns non-zero exist for some reason. 
($?)" + @echo "*** Packaged RPM $* version $($*_version)" + +package-deb-%: folder-% + @echo "*** Packaging DEB $* version $($*_version)-$(BUILD_NUMBER)" + + $(call prepare-files,$*) + $(call prepare-$*) + + mkdir -p $(build_folder)/usr/share/doc/$* + cp $(build_folder)/DEBIAN/copyright $(build_folder)/usr/share/doc/$* + chmod -Rf a+rX,u+w,g-w,o-w $(build_folder) + + sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/changelog + sed -i "s/@STABILITY@/stable/" $(build_folder)/DEBIAN/changelog + sed -i "s/@DATETIMESTAMP@/`date +%a,\ %d\ %b\ %Y\ %T\ %z`/" $(build_folder)/DEBIAN/changelog + sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/control + + gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.gz + gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.amd64.gz + sed -i "s/@INSTALLEDSIZE@/`du -ks $(build_folder) | cut -f 1`/" $(build_folder)/DEBIAN/control + + cd $(build_folder) && tar --owner=root --group=root -cvJf ../../tmp/data.tar.xz --exclude DEBIAN * + cd $(build_folder)/DEBIAN && tar --owner=root --group=root -cvzf ../../../tmp/control.tar.gz * + echo "2.0" > tmp/debian-binary + + cp ./_gpg tmp/ + cd tmp && sed -i "s/@DATETIMESTAMP@/`date +%a\ %b\ %d\ %T\ %Y`/" _gpg + cd tmp && sed -i "s/@BINMD5@/`md5sum debian-binary | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@BINSHA1@/`sha1sum debian-binary | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@BINSIZE@/`stat -c %s debian-binary | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@CONMD5@/`md5sum control.tar.gz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@CONSHA1@/`sha1sum control.tar.gz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@CONSIZE@/`stat -c %s control.tar.gz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@DATMD5@/`md5sum data.tar.xz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@DATSHA1@/`sha1sum data.tar.xz | cut -d\ -f1`/" _gpg + cd tmp && sed -i "s/@DATSIZE@/`stat -c %s data.tar.xz | cut -d\ -f1`/" _gpg + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --clearsign tmp/_gpg + mv tmp/_gpg.asc tmp/_gpgbuilder + ar r tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder + mv tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb RPMS/ + rm tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder tmp/_gpg + @echo "*** Packaged DEB $* version $($*_version)-$(BUILD_NUMBER)" + +install-rpm-%: version-% +#Make sure your host has the IAM role to read/write the S3 bucket OR that you set up ~/.boto + @echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm to AWS CentOS repository" + aws s3 sync s3://tendermint-packages/centos/ tmp/s3/ --delete + mkdir -p tmp/s3/7/os/x86_64/Packages + cp RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm tmp/s3/7/os/x86_64/Packages + cp ./RPM-GPG-KEY-Tendermint tmp/s3/7/os/x86_64/ + cp ./tendermint.repo tmp/s3/7/os/x86_64/ + rm -f tmp/s3/7/os/x86_64/repodata/*.bz2 tmp/s3/7/os/x86_64/repodata/*.gz tmp/s3/7/os/x86_64/repodata/repomd.xml.asc + createrepo tmp/s3/7/os/x86_64/Packages -u https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64/Packages -o tmp/s3/7/os/x86_64 --update -S --repo Tendermint --content tendermint --content basecoin --content ethermint + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --detach-sign -a tmp/s3/7/os/x86_64/repodata/repomd.xml + aws s3 sync tmp/s3/ s3://tendermint-packages/centos/ --delete --acl public-read + @echo "*** Uploaded $* to AWS CentOS repository" + +install-deb-%: 
version-% + @echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS Debian repository" + @echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded" + test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb + aws s3 sync s3://tendermint-packages/debian/ tmp/debian-s3/ --delete + @echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded" + test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb + cp ./tendermint.list tmp/debian-s3/ + mkdir -p tmp/debian-s3/pool tmp/debian-s3/dists/stable/main/binary-amd64 + cp RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-s3/pool + cp ./Release_amd64 tmp/debian-s3/dists/stable/main/binary-amd64/Release + + #Packages / Packages.gz + + echo > tmp/Package + echo "Filename: pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb" >> tmp/Package + echo "MD5sum: `md5sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + echo "SHA1: `sha1sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + echo "SHA256: `sha256sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + echo "Size: `stat -c %s RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package + cat BUILD/$*-$($*_version)-$(BUILD_NUMBER)/DEBIAN/control >> tmp/Package + + cat tmp/Package >> tmp/debian-s3/dists/stable/main/binary-amd64/Packages + rm -f tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz + gzip -c tmp/debian-s3/dists/stable/main/binary-amd64/Packages > tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz + rm -f tmp/Package + + #main / Release / InRelease / Release.gpg + + cp ./Release tmp/debian-s3/dists/stable/main/Release + rm -f tmp/debian-s3/dists/stable/main/InRelease + rm -f tmp/debian-s3/dists/stable/main/Release.gpg + + echo "MD5Sum:" >> tmp/debian-s3/dists/stable/main/Release + cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA1:" >> tmp/debian-s3/dists/stable/main/Release + cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA256:" >> tmp/debian-s3/dists/stable/main/Release + cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/main/Release + mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/Release.gpg + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/main/Release + mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/InRelease + + #stable / Release / InRelease / Release.gpg + + cp ./Release tmp/debian-s3/dists/stable/Release + rm -f tmp/debian-s3/dists/stable/InRelease + rm -f tmp/debian-s3/dists/stable/Release.gpg + + echo "MD5Sum:" >> tmp/debian-s3/dists/stable/Release + cd tmp/debian-s3/dists/stable && for f in `find . 
-type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA1:" >> tmp/debian-s3/dists/stable/Release + cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + echo "SHA256:" >> tmp/debian-s3/dists/stable/Release + cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release + + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/Release + mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/Release.gpg + gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/Release + mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/InRelease + + aws s3 sync tmp/debian-s3/ s3://tendermint-packages/debian/ --delete --acl public-read + @echo "*** Uploaded $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS Debian repository" + +mostlyclean: + rm -rf {BUILDROOT,SOURCES,SPECS,SRPMS,tmp} + +clean: mostlyclean + rm -rf {BUILD,RPMS} + +distclean: clean + rm -rf $(GOPATH)/src/github.com/tendermint/tendermint + rm -rf $(GOPATH)/src/github.com/tendermint/basecoin + rm -rf $(GOPATH)/src/github.com/tendermint/ethermint + rm -rf $(GOPATH)/src/github.com/tendermint/trackomatron + rm -rf $(GOPATH)/bin/tendermint + rm -rf $(GOPATH)/bin/basecoin + rm -rf $(GOPATH)/bin/basecli + rm -rf $(GOPATH)/bin/ethermint + rm -rf $(GOPATH)/bin/tracko + rm -rf $(GOPATH)/bin/trackocli + +.PHONY : clean + diff --git a/build/RPM-GPG-KEY-Tendermint b/build/RPM-GPG-KEY-Tendermint new file mode 100644 index 000000000..e6f200d87 --- /dev/null +++ b/build/RPM-GPG-KEY-Tendermint @@ -0,0 +1,19 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) + +mQENBFk97ngBCADaiPQFKJI7zWYdUKqC490DzY9g9LatsWoJErK5LuMXwEnF5i+a +UkygueukA4C5U7L71l5EeOB9rtb6AbkF4IEZsmmp93APec/3Vfbac9xvK4dBdiht +F8SrazPdHeR6AKcZH8ZpG/+mdONvGb/gEgtxVjaeIJFpCbjKLlKEXazh2zamhhth +q+Nn/17QmI3KBiaGqQK5w4kGZ4mZPy6fXMQhW5dDMq9f4anlGIAYi9O53dVxsx2S +5d+NHuGer5Ps0u6WMJi/e+UT2EGwzP6ygOxkIjyhMFuVftabOtSSrRHHetw8UAaI +N/RPn2gSbQtOQ7unzHDXp3/o6/r2nDEErPyJABEBAAG0LkdyZWcgU3phYm8gKFRl +bmRlcm1pbnQpIDxncmVnQHBoaWxvc29iZWFyLmNvbT6JATkEEwECACMFAlk97ngC +GwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRDIkIHIISLL6bX/CACXTKmO +u5XgvJICH0pHNeVS5/4Om1Rsg1xNmEkGFBP8N2fqn576exbOLgWLSyNHTEyrJNoc +iTeUtod2qqbVGwRgWm1zeiP8NBYiQ9SUbqskIqcPavJNGWIxsCB0p/odoZah8xSj +tGrkoyoxrc+7z2JgKYK8SVSkJXQkzuc5/ZlY85ci5gPKQhlo5YDqGo+4U9n/Ieo5 +nkF8LBalFC2j7A7sQNroEicpulpGhIq3jyUHtadX01z3pNzuX+wfHX9futoet0YS +tG2007WoPGV0whGnoKxmk0JhwzhscC2XNtJl1GZcwqOOlPU9eGtZuPKj/HBAlRtz +4xTOAcklpg8soqRA +=jNDW +-----END PGP PUBLIC KEY BLOCK----- diff --git a/build/Release b/build/Release new file mode 100644 index 000000000..9003d1320 --- /dev/null +++ b/build/Release @@ -0,0 +1,7 @@ +Origin: Tendermint +Label: Tendermint +Suite: stable +Date: Fri, 16 Jun 2017 19:44:00 UTC +Architectures: amd64 +Components: main +Description: Tendermint repository diff --git a/build/Release_amd64 b/build/Release_amd64 new file mode 100644 index 000000000..1f2ecbfe2 --- /dev/null +++ b/build/Release_amd64 @@ -0,0 +1,5 @@ +Archive: stable +Component: main +Origin: Tendermint +Label: Tendermint +Architecture: amd64 diff --git a/build/_gpg 
b/build/_gpg new file mode 100644 index 000000000..73742b5d8 --- /dev/null +++ b/build/_gpg @@ -0,0 +1,8 @@ +Version: 4 +Signer: +Date: @DATETIMESTAMP@ +Role: builder +Files: + @BINMD5@ @BINSHA1@ @BINSIZE@ debian-binary + @CONMD5@ @CONSHA1@ @CONSIZE@ control.tar.gz + @DATMD5@ @DATSHA1@ @DATSIZE@ data.tar.xz diff --git a/build/basecoin/DEBIAN/changelog b/build/basecoin/DEBIAN/changelog new file mode 100644 index 000000000..e316bcd35 --- /dev/null +++ b/build/basecoin/DEBIAN/changelog @@ -0,0 +1,6 @@ +basecoin (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/basecoin for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/build/basecoin/DEBIAN/compat b/build/basecoin/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/build/basecoin/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/build/basecoin/DEBIAN/control b/build/basecoin/DEBIAN/control new file mode 100644 index 000000000..7d86a71b5 --- /dev/null +++ b/build/basecoin/DEBIAN/control @@ -0,0 +1,15 @@ +Source: basecoin +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Depends: tendermint (>=0.10.0) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: basecoin +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: basecoin is a Proof-of-Stake cryptocurrency and framework + Basecoin is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins. + diff --git a/build/basecoin/DEBIAN/copyright b/build/basecoin/DEBIAN/copyright new file mode 100644 index 000000000..22340c79a --- /dev/null +++ b/build/basecoin/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: basecoin +Source: https://github.com/tendermint/basecoin + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/build/basecoin/DEBIAN/postinst b/build/basecoin/DEBIAN/postinst new file mode 100644 index 000000000..9a06d6e3f --- /dev/null +++ b/build/basecoin/DEBIAN/postinst @@ -0,0 +1,46 @@ +#!/bin/sh +# postinst script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown basecoin.basecoin /etc/basecoin + sudo -Hu basecoin basecoin init --home /etc/basecoin 2B24DEE2364762300168DF19B6C18BCE2D399EA2 + #The above command generates a genesis.json file that contains validators. 
This is wrong, the validator part should be empty. https://github.com/tendermint/basecoin/issues/124 + sudo -Hu basecoin tendermint init --home /etc/basecoin/tendermint + #The above command might need some kind of additional option in the future. https://github.com/tendermint/tendermint/issues/542 + chmod 755 /etc/basecoin/tendermint + chown basecoin.basecoin /etc/basecoin/tendermint + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/basecoin/DEBIAN/postrm b/build/basecoin/DEBIAN/postrm new file mode 100644 index 000000000..b84c9f2a4 --- /dev/null +++ b/build/basecoin/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/basecoin/DEBIAN/preinst b/build/basecoin/DEBIAN/preinst new file mode 100644 index 000000000..8fc1ec82a --- /dev/null +++ b/build/basecoin/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^basecoin:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc basecoin + chmod 755 /etc/basecoin + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/basecoin/DEBIAN/prerm b/build/basecoin/DEBIAN/prerm new file mode 100644 index 000000000..5cc57e154 --- /dev/null +++ b/build/basecoin/DEBIAN/prerm @@ -0,0 +1,39 @@ +#!/bin/sh +# prerm script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop basecoin 2> /dev/null || : + systemctl stop basecoin-service 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
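Taken together, the basecoin maintainer scripts above (preinst, postinst, prerm, postrm) follow the standard dpkg lifecycle. The sketch below summarizes when each one fires; the package filename is illustrative, and the actions listed are the ones the scripts actually perform.

```
# Illustrative filename - the real artifact comes out of the package-deb-basecoin target.
sudo dpkg -i basecoin-0.9.0-1_amd64.deb
#  preinst  install    -> creates the 'basecoin' system user with home /etc/basecoin
#  (files unpacked)
#  postinst configure  -> basecoin init and tendermint init under /etc/basecoin,
#                         then systemctl daemon-reload
sudo apt-get remove basecoin
#  prerm    remove     -> systemctl stop basecoin / basecoin-service (failures ignored)
#  postrm   remove     -> systemctl daemon-reload
```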
+ +#DEBHELPER# + +exit 0 diff --git a/build/basecoin/etc/systemd/system-preset/50-basecoin.preset b/build/basecoin/etc/systemd/system-preset/50-basecoin.preset new file mode 100644 index 000000000..c97097b13 --- /dev/null +++ b/build/basecoin/etc/systemd/system-preset/50-basecoin.preset @@ -0,0 +1,3 @@ +disable basecoin.service +disable basecoin-server.service + diff --git a/build/basecoin/etc/systemd/system/basecoin-server.service b/build/basecoin/etc/systemd/system/basecoin-server.service new file mode 100644 index 000000000..62392d52a --- /dev/null +++ b/build/basecoin/etc/systemd/system/basecoin-server.service @@ -0,0 +1,23 @@ +[Unit] +Description=Basecoin server +Requires=network-online.target +BindTo=basecoin.service +PartOf=basecoin.service +After=network-online.target basecoin.service +PropagatesReloadTo=basecoin.service +ReloadPropagatedFrom=basecoin.service + +[Service] +Environment="TMHOME=/etc/basecoin/tendermint" +Restart=on-failure +User=basecoin +Group=basecoin +PermissionsStartOnly=true +ExecStart=/usr/bin/tendermint node +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target basecoin.service +Also=basecoin.service + diff --git a/build/basecoin/etc/systemd/system/basecoin.service b/build/basecoin/etc/systemd/system/basecoin.service new file mode 100644 index 000000000..f806ef481 --- /dev/null +++ b/build/basecoin/etc/systemd/system/basecoin.service @@ -0,0 +1,29 @@ +[Unit] +Description=Basecoin +#propagates activation, deactivation and activation fails. +Requires=network-online.target +#propagates activation, deactivation, activation fails and stops +BindTo=basecoin-server.service +#propagates stop and restart (one-way) +PartOf=basecoin-server.service +#order +Before=basecoin-server.service +After=network-online.target +#propagates reload +PropagatesReloadTo=basecoin-server.service +ReloadPropagatedFrom=basecoin-server.service + +[Service] +Environment="BCHOME=/etc/basecoin" +Restart=on-failure +User=basecoin +Group=basecoin +PermissionsStartOnly=true +ExecStart=/usr/bin/basecoin start --without-tendermint +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target +Also=basecoin-server.service + diff --git a/build/basecoin/usr/share/basecoin/key.json b/build/basecoin/usr/share/basecoin/key.json new file mode 100644 index 000000000..bdefe8fd4 --- /dev/null +++ b/build/basecoin/usr/share/basecoin/key.json @@ -0,0 +1,12 @@ +{ + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "priv_key": { + "type": "ed25519", + "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + } +} + diff --git a/build/basecoin/usr/share/basecoin/key2.json b/build/basecoin/usr/share/basecoin/key2.json new file mode 100644 index 000000000..ddfc6809b --- /dev/null +++ b/build/basecoin/usr/share/basecoin/key2.json @@ -0,0 +1,12 @@ +{ + "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", + "priv_key": { + "type": "ed25519", + "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + }, + "pub_key": { + "type": "ed25519", + "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + } +} + diff --git a/build/ethermint/DEBIAN/changelog b/build/ethermint/DEBIAN/changelog new file mode 100644 index 000000000..76a1fb154 --- 
/dev/null +++ b/build/ethermint/DEBIAN/changelog @@ -0,0 +1,6 @@ +ethermint (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/tendermint for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/build/ethermint/DEBIAN/compat b/build/ethermint/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/build/ethermint/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/build/ethermint/DEBIAN/control b/build/ethermint/DEBIAN/control new file mode 100644 index 000000000..0904eb4e8 --- /dev/null +++ b/build/ethermint/DEBIAN/control @@ -0,0 +1,15 @@ +Source: ethermint +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Depends: tendermint (>=0.10.0) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: ethermint +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub + Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners. + diff --git a/build/ethermint/DEBIAN/copyright b/build/ethermint/DEBIAN/copyright new file mode 100644 index 000000000..6d1bab01b --- /dev/null +++ b/build/ethermint/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: ethermint +Source: https://github.com/tendermint/ethermint + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. 
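The systemd units shipped with basecoin above (and the analogous ethermint and gaia units further down) split each application into two processes: <name>.service runs the ABCI application and <name>-server.service runs tendermint, with BindTo/PartOf keeping the pair in lockstep. Both are disabled by the 50-*.preset files, so a node has to be enabled explicitly after the package is installed; a hedged sketch, using basecoin as the example:

```
# Enabling basecoin.service also enables basecoin-server.service via its Also= directive.
sudo systemctl enable basecoin.service

# Start the pair; basecoin.service runs `basecoin start --without-tendermint` (BCHOME=/etc/basecoin),
# basecoin-server.service runs `tendermint node` (TMHOME=/etc/basecoin/tendermint).
sudo systemctl start basecoin.service basecoin-server.service

systemctl status basecoin.service basecoin-server.service
```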
diff --git a/build/ethermint/DEBIAN/postinst b/build/ethermint/DEBIAN/postinst new file mode 100644 index 000000000..90739f818 --- /dev/null +++ b/build/ethermint/DEBIAN/postinst @@ -0,0 +1,50 @@ +#!/bin/sh +# postinst script for basecoin +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown ethermint.ethermint /etc/ethermint + chown ethermint.ethermint /etc/ethermint/genesis.json + chown ethermint.ethermint /etc/ethermint/keystore + chown ethermint.ethermint /etc/ethermint/keystore/UTC--2016-10-21T22-30-03.071787745Z--7eff122b94897ea5b0e2a9abf47b86337fafebdc + + sudo -Hu ethermint /usr/bin/ethermint --datadir /etc/ethermint init /etc/ethermint/genesis.json + sudo -Hu ethermint tendermint init --home /etc/ethermint/tendermint + + chown ethermint.ethermint /etc/ethermint/tendermint + chmod 755 /etc/ethermint/tendermint + + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/ethermint/DEBIAN/postrm b/build/ethermint/DEBIAN/postrm new file mode 100644 index 000000000..f1d9d6afc --- /dev/null +++ b/build/ethermint/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/ethermint/DEBIAN/preinst b/build/ethermint/DEBIAN/preinst new file mode 100644 index 000000000..829112e6b --- /dev/null +++ b/build/ethermint/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^ethermint:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc ethermint + chmod 755 /etc/ethermint + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/build/ethermint/DEBIAN/prerm b/build/ethermint/DEBIAN/prerm new file mode 100644 index 000000000..5af626fdf --- /dev/null +++ b/build/ethermint/DEBIAN/prerm @@ -0,0 +1,39 @@ +#!/bin/sh +# prerm script for ethermint +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop ethermint 2> /dev/null || : + systemctl stop ethermint-service 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/ethermint/etc/systemd/system-preset/50-ethermint.preset b/build/ethermint/etc/systemd/system-preset/50-ethermint.preset new file mode 100644 index 000000000..f181a0bcc --- /dev/null +++ b/build/ethermint/etc/systemd/system-preset/50-ethermint.preset @@ -0,0 +1,3 @@ +disable ethermint.service +disable ethermint-server.service + diff --git a/build/ethermint/etc/systemd/system/ethermint-server.service b/build/ethermint/etc/systemd/system/ethermint-server.service new file mode 100644 index 000000000..78b277e6d --- /dev/null +++ b/build/ethermint/etc/systemd/system/ethermint-server.service @@ -0,0 +1,23 @@ +[Unit] +Description=Ethermint server +Requires=network-online.target +BindTo=ethermint.service +PartOf=ethermint.service +After=network-online.target ethermint.service +PropagatesReloadTo=ethermint.service +ReloadPropagatedFrom=ethermint.service + +[Service] +Environment="TMHOME=/etc/ethermint/tendermint" +Restart=on-failure +User=ethermint +Group=ethermint +PermissionsStartOnly=true +ExecStart=/usr/bin/tendermint node +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target ethermint.service +Also=ethermint.service + diff --git a/build/ethermint/etc/systemd/system/ethermint.service b/build/ethermint/etc/systemd/system/ethermint.service new file mode 100644 index 000000000..2fa46d4d8 --- /dev/null +++ b/build/ethermint/etc/systemd/system/ethermint.service @@ -0,0 +1,28 @@ +[Unit] +Description=Ethermint +#propagates activation, deactivation and activation fails. +Requires=network-online.target +#propagates activation, deactivation, activation fails and stops +BindTo=ethermint-server.service +#propagates stop and restart (one-way) +PartOf=ethermint-server.service +#order +Before=ethermint-server.service +After=network-online.target +#propagates reload +PropagatesReloadTo=ethermint-server.service +ReloadPropagatedFrom=ethermint-server.service + +[Service] +Restart=on-failure +User=ethermint +Group=ethermint +PermissionsStartOnly=true +ExecStart=/usr/bin/ethermint --datadir /etc/ethermint +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target +Also=ethermint-server.service + diff --git a/build/gaia/DEBIAN/changelog b/build/gaia/DEBIAN/changelog new file mode 100644 index 000000000..eca5fbc3d --- /dev/null +++ b/build/gaia/DEBIAN/changelog @@ -0,0 +1,6 @@ +gaia (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/basecoin for more information. 
+ + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/build/gaia/DEBIAN/compat b/build/gaia/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/build/gaia/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/build/gaia/DEBIAN/control b/build/gaia/DEBIAN/control new file mode 100644 index 000000000..8b51d285b --- /dev/null +++ b/build/gaia/DEBIAN/control @@ -0,0 +1,15 @@ +Source: gaia +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Depends: tendermint (>=0.10.0) +Standards-Version: 3.9.6 +Homepage: https://cosmos.network +Package: gaia +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: gaia - Tendermint Cosmos delegation game chain + Gaia description comes later. + diff --git a/build/gaia/DEBIAN/copyright b/build/gaia/DEBIAN/copyright new file mode 100644 index 000000000..ffc230134 --- /dev/null +++ b/build/gaia/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: gaia +Source: https://github.com/cosmos/gaia + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/build/gaia/DEBIAN/postinst b/build/gaia/DEBIAN/postinst new file mode 100644 index 000000000..e1b606cb8 --- /dev/null +++ b/build/gaia/DEBIAN/postinst @@ -0,0 +1,48 @@ +#!/bin/sh +# postinst script for gaia +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown gaia.gaia /etc/gaia + sudo -Hu gaia gaia init --home /etc/gaia 2B24DEE2364762300168DF19B6C18BCE2D399EA2 + #The above command generates a genesis.json file that contains validators. This is wrong, the validator part should be empty. https://github.com/tendermint/basecoin/issues/124 + sudo -Hu gaia tendermint init --home /etc/gaia/tendermint + #The above command might need some kind of additional option in the future. https://github.com/tendermint/tendermint/issues/542 + + chmod 755 /etc/gaia/tendermint + chown gaia.gaia /etc/gaia/tendermint + + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/build/gaia/DEBIAN/postrm b/build/gaia/DEBIAN/postrm new file mode 100644 index 000000000..da526ec30 --- /dev/null +++ b/build/gaia/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for gaia +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/gaia/DEBIAN/preinst b/build/gaia/DEBIAN/preinst new file mode 100644 index 000000000..382fa419f --- /dev/null +++ b/build/gaia/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for gaia +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^gaia:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc gaia + chmod 755 /etc/gaia + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/gaia/DEBIAN/prerm b/build/gaia/DEBIAN/prerm new file mode 100644 index 000000000..8a0522a41 --- /dev/null +++ b/build/gaia/DEBIAN/prerm @@ -0,0 +1,39 @@ +#!/bin/sh +# prerm script for gaia +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop gaia 2> /dev/null || : + systemctl stop gaia-service 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/build/gaia/etc/systemd/system-preset/50-gaia.preset b/build/gaia/etc/systemd/system-preset/50-gaia.preset new file mode 100644 index 000000000..a9fe09116 --- /dev/null +++ b/build/gaia/etc/systemd/system-preset/50-gaia.preset @@ -0,0 +1,3 @@ +disable gaia.service +disable gaia-server.service + diff --git a/build/gaia/etc/systemd/system/gaia-server.service b/build/gaia/etc/systemd/system/gaia-server.service new file mode 100644 index 000000000..821070de6 --- /dev/null +++ b/build/gaia/etc/systemd/system/gaia-server.service @@ -0,0 +1,23 @@ +[Unit] +Description=Gaia server +Requires=network-online.target +BindTo=gaia.service +PartOf=gaia.service +After=network-online.target gaia.service +PropagatesReloadTo=gaia.service +ReloadPropagatedFrom=gaia.service + +[Service] +Environment="TMHOME=/etc/gaia/tendermint" +Restart=on-failure +User=gaia +Group=gaia +PermissionsStartOnly=true +ExecStart=/usr/bin/tendermint node +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target gaia.service +Also=gaia.service + diff --git a/build/gaia/etc/systemd/system/gaia.service b/build/gaia/etc/systemd/system/gaia.service new file mode 100644 index 000000000..1e4b2778f --- /dev/null +++ b/build/gaia/etc/systemd/system/gaia.service @@ -0,0 +1,28 @@ +[Unit] +Description=Gaia +#propagates activation, deactivation and activation fails. +Requires=network-online.target +#propagates activation, deactivation, activation fails and stops +BindTo=gaia-server.service +#propagates stop and restart (one-way) +PartOf=gaia-server.service +#order +Before=gaia-server.service +After=network-online.target +#propagates reload +PropagatesReloadTo=gaia-server.service +ReloadPropagatedFrom=gaia-server.service + +[Service] +Restart=on-failure +User=gaia +Group=gaia +PermissionsStartOnly=true +ExecStart=/usr/bin/gaia start --without-tendermint --home=/etc/gaia +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target +Also=gaia-server.service + diff --git a/build/gaia/usr/share/gaia/key.json b/build/gaia/usr/share/gaia/key.json new file mode 100644 index 000000000..bdefe8fd4 --- /dev/null +++ b/build/gaia/usr/share/gaia/key.json @@ -0,0 +1,12 @@ +{ + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "priv_key": { + "type": "ed25519", + "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + } +} + diff --git a/build/gaia/usr/share/gaia/key2.json b/build/gaia/usr/share/gaia/key2.json new file mode 100644 index 000000000..ddfc6809b --- /dev/null +++ b/build/gaia/usr/share/gaia/key2.json @@ -0,0 +1,12 @@ +{ + "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", + "priv_key": { + "type": "ed25519", + "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + }, + "pub_key": { + "type": "ed25519", + "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + } +} + diff --git a/build/sign b/build/sign new file mode 100755 index 000000000..0371b5d4b --- /dev/null +++ b/build/sign @@ -0,0 +1,26 @@ +#!/usr/bin/expect -f +set timeout 3 +set PACKAGE [lindex $argv 0] +set GPG_NAME [lindex $argv 1] +set GPG_PATH [lindex $argv 2] +set GPG_PASSPHRASE $env(GPG_PASSPHRASE) + +if {[llength $argv] == 0} { + send_user "Usage: ./sign \n" 
+ exit 1 +} + +send_user "\nSigning $PACKAGE\n" +spawn rpmsign --resign $PACKAGE --define "_signature gpg" --define "_gpg_name $GPG_NAME" --define "_gpgbin $GPG_PATH" +expect { + timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } + "Enter pass phrase:" +} +send "$GPG_PASSPHRASE\r" +expect { + timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } + "Pass phrase is good." +} +interact +sleep 3 + diff --git a/build/spectemplates/basecoin.spec b/build/spectemplates/basecoin.spec new file mode 100644 index 000000000..9553962bf --- /dev/null +++ b/build/spectemplates/basecoin.spec @@ -0,0 +1,67 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: basecoin +Summary: basecoin is a Proof-of-Stake cryptocurrency and framework +License: Apache 2.0 +URL: https://tendermint.com/ +Packager: Greg Szabo +Requires: tendermint >= 0.10.0 +Provides: basecli +#Requires(pre): useradd + +%description +Basecoin is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins. + +%pre +if ! %{__grep} -q '^%{name}:' /etc/passwd ; then + useradd -k /dev/null -r -m -b %{_sysconfdir} %{name} + chmod 755 %{_sysconfdir}/%{name} +fi + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%post +sudo -Hu %{name} basecoin init --home %{_sysconfdir}/%{name} 2B24DEE2364762300168DF19B6C18BCE2D399EA2 +#The above command generates a genesis.json file that contains validators. This is wrong, the validator part should be empty. https://github.com/tendermint/basecoin/issues/124 +sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name}/tendermint +#The above command might need some kind of additional option in the future. https://github.com/tendermint/tendermint/issues/542 + +chmod 755 %{_sysconfdir}/%{name}/tendermint + +#Temporary until https://github.com/tendermint/basecoin/issues/123 +rm -f %{_sysconfdir}/%{name}/key.json +rm -f %{_sysconfdir}/%{name}/key2.json + +systemctl daemon-reload + +%preun +systemctl stop %{name} 2> /dev/null || : +systemctl stop %{name}-service 2> /dev/null || : + +%postun +systemctl daemon-reload + +%files +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/tendermint +%{_bindir}/* +%{_sysconfdir}/systemd/system/* +%{_sysconfdir}/systemd/system-preset/* +%dir %{_datadir}/%{name} +%{_datadir}/%{name}/* +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/build/spectemplates/ethermint.spec b/build/spectemplates/ethermint.spec new file mode 100644 index 000000000..72fbe2b4b --- /dev/null +++ b/build/spectemplates/ethermint.spec @@ -0,0 +1,61 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: ethermint +Summary: ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub +License: Apache 2.0 +URL: https://tendermint.com/ +Packager: Greg Szabo +Requires: tendermint >= 0.10.0 +#Requires(pre): useradd + +%description +Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. 
This application allows you to get all the benefits of ethereum without having to run your own miners. + +%pre +if ! %{__grep} -q '^%{name}:' /etc/passwd ; then + useradd -k /dev/null -r -m -b %{_sysconfdir} %{name} + chmod 755 %{_sysconfdir}/%{name} +fi + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%post +sudo -Hu %{name} %{_bindir}/%{name} --datadir %{_sysconfdir}/%{name} init %{_sysconfdir}/%{name}/genesis.json +sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name}/tendermint + +chmod 755 %{_sysconfdir}/%{name}/tendermint + +systemctl daemon-reload + +%preun +systemctl stop %{name} 2> /dev/null || : +systemctl stop %{name}-service 2> /dev/null || : + +%postun +systemctl daemon-reload + +%files +%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} +%config(noreplace) %attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/genesis.json +%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/keystore +%attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/keystore/* +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/tendermint +%{_bindir}/* +%{_sysconfdir}/systemd/system/* +%{_sysconfdir}/systemd/system-preset/* +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/build/spectemplates/gaia.spec b/build/spectemplates/gaia.spec new file mode 100644 index 000000000..a09e05cb7 --- /dev/null +++ b/build/spectemplates/gaia.spec @@ -0,0 +1,66 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: gaia +Summary: gaia - Tendermint Cosmos delegation game chain +License: Apache 2.0 +URL: https://cosmos.network/ +Packager: Greg Szabo +Requires: tendermint >= 0.10.0 +#Requires(pre): useradd + +%description +Gaia description comes later. + +%pre +if ! %{__grep} -q '^%{name}:' /etc/passwd ; then + useradd -k /dev/null -r -m -b %{_sysconfdir} %{name} + chmod 755 %{_sysconfdir}/%{name} +fi + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%post +sudo -Hu %{name} gaia init --home %{_sysconfdir}/%{name} 2B24DEE2364762300168DF19B6C18BCE2D399EA2 +#The above command generates a genesis.json file that contains validators. This is wrong, the validator part should be empty. https://github.com/tendermint/basecoin/issues/124 +sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name}/tendermint +#The above command might need some kind of additional option in the future. 
https://github.com/tendermint/tendermint/issues/542 + +chmod 755 %{_sysconfdir}/%{name}/tendermint + +#Temporary until https://github.com/tendermint/basecoin/issues/123 +rm -f %{_sysconfdir}/%{name}/key.json +rm -f %{_sysconfdir}/%{name}/key2.json + +systemctl daemon-reload + +%preun +systemctl stop %{name} 2> /dev/null || : +systemctl stop %{name}-service 2> /dev/null || : + +%postun +systemctl daemon-reload + +%files +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/tendermint +%{_bindir}/* +%{_sysconfdir}/systemd/system/* +%{_sysconfdir}/systemd/system-preset/* +%dir %{_datadir}/%{name} +%{_datadir}/%{name}/* +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/build/spectemplates/tendermint.spec b/build/spectemplates/tendermint.spec new file mode 100644 index 000000000..68902a170 --- /dev/null +++ b/build/spectemplates/tendermint.spec @@ -0,0 +1,31 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: tendermint +Summary: securely and consistently replicate an application on many machines +License: Apache 2.0 +URL: https://tendermint.com/ +Packager: Greg Szabo + +%description +Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%files +%{_bindir}/tendermint +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/build/spectemplates/trackomatron.spec b/build/spectemplates/trackomatron.spec new file mode 100644 index 000000000..5034658ea --- /dev/null +++ b/build/spectemplates/trackomatron.spec @@ -0,0 +1,66 @@ +Version: @VERSION@ +Release: @BUILD_NUMBER@ + +%define __spec_install_post %{nil} +%define debug_package %{nil} +%define __os_install_post %{nil} + +Name: trackomatron +Summary: Trackomatron - Track invoices on the blockchain +License: Apache 2.0 +URL: https://tendermint.com/ +Packager: Greg Szabo +Requires: tendermint >= 0.10.0 +#Requires(pre): useradd + +%description +This software is intended to create a space to easily send invoices between and within institutions. The commands of trackomatron are separated into two broad categories: submitting information to the blockchain (transactions), and retrieving information from the blockchain (query). + +%pre +if ! %{__grep} -q '^%{name}:' /etc/passwd ; then + useradd -k /dev/null -r -m -b %{_sysconfdir} %{name} + chmod 755 %{_sysconfdir}/%{name} +fi + +%prep +# Nothing to do here. - It is done in the Makefile. + +%build +# Nothing to do here. + +%install +cd %{name}-%{version}-%{release} +%{__cp} -a * %{buildroot} + +%post +sudo -Hu %{name} tracko init --home %{_sysconfdir}/%{name} 2B24DEE2364762300168DF19B6C18BCE2D399EA2 +#The above command generates a genesis.json file that contains validators. This is wrong, the validator part should be empty.
https://github.com/tendermint/basecoin/issues/124 +sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name}/tendermint +#The above command might need some kind of additional option in the future. https://github.com/tendermint/tendermint/issues/542 + +chmod 755 %{_sysconfdir}/%{name}/tendermint + +#Temporary until https://github.com/tendermint/basecoin/issues/123 +rm -f %{_sysconfdir}/%{name}/key.json +rm -f %{_sysconfdir}/%{name}/key2.json + +systemctl daemon-reload + +%preun +systemctl stop %{name} 2> /dev/null || : +systemctl stop %{name}-service 2> /dev/null || : + +%postun +systemctl daemon-reload + +%files +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} +%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/tendermint +%{_bindir}/* +%{_sysconfdir}/systemd/system/* +%{_sysconfdir}/systemd/system-preset/* +%dir %{_datadir}/%{name} +%{_datadir}/%{name}/* +%dir %{_defaultlicensedir}/%{name} +%doc %{_defaultlicensedir}/%{name}/LICENSE + diff --git a/build/tendermint.list b/build/tendermint.list new file mode 100644 index 000000000..bba521af5 --- /dev/null +++ b/build/tendermint.list @@ -0,0 +1 @@ +deb http://tendermint-packages.s3-website-us-west-1.amazonaws.com/debian stable main diff --git a/build/tendermint.repo b/build/tendermint.repo new file mode 100644 index 000000000..439f98ecb --- /dev/null +++ b/build/tendermint.repo @@ -0,0 +1,12 @@ +#This is the .repo file for the Tendermint CentOS repositories. +#Although it has only been tested under CentOS 7, it should work under Fedora and RedHat 7 too. +#Currently only 64-bit packages are built. + +[tendermint] +name=Tendermint stable releases repository +baseurl=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64 +gpgcheck=1 +gpgkey=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint +enabled=1 +#sslverify = 1 + diff --git a/build/tendermint/DEBIAN/changelog b/build/tendermint/DEBIAN/changelog new file mode 100644 index 000000000..4b016f845 --- /dev/null +++ b/build/tendermint/DEBIAN/changelog @@ -0,0 +1,6 @@ +tendermint (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/tendermint for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/build/tendermint/DEBIAN/compat b/build/tendermint/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/build/tendermint/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/build/tendermint/DEBIAN/control b/build/tendermint/DEBIAN/control new file mode 100644 index 000000000..d9da17dd1 --- /dev/null +++ b/build/tendermint/DEBIAN/control @@ -0,0 +1,14 @@ +Source: tendermint +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: tendermint +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: securely and consistently replicate an application on many machines + Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. 
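The ``tendermint.list`` and ``tendermint.repo`` files above are the package sources an end user would install. A minimal sketch of consuming them, assuming the repository GPG key has already been imported and binaries are published for your architecture:

::

    # Debian/Ubuntu
    sudo cp tendermint.list /etc/apt/sources.list.d/tendermint.list
    sudo apt-get update && sudo apt-get install tendermint

    # CentOS 7
    sudo cp tendermint.repo /etc/yum.repos.d/tendermint.repo
    sudo yum install tendermint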
+ diff --git a/build/tendermint/DEBIAN/copyright b/build/tendermint/DEBIAN/copyright new file mode 100644 index 000000000..15ee960dd --- /dev/null +++ b/build/tendermint/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: tendermint +Source: https://github.com/tendermint/tendermint + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/build/trackomatron/DEBIAN/changelog b/build/trackomatron/DEBIAN/changelog new file mode 100644 index 000000000..aab92cc02 --- /dev/null +++ b/build/trackomatron/DEBIAN/changelog @@ -0,0 +1,6 @@ +trackomatron (@VERSION@) @STABILITY@; urgency=medium + + * Automatic build. See https://github.com/tendermint/trackomatron for more information. + + -- Greg Szabo @DATETIMESTAMP@ + diff --git a/build/trackomatron/DEBIAN/compat b/build/trackomatron/DEBIAN/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/build/trackomatron/DEBIAN/compat @@ -0,0 +1 @@ +9 diff --git a/build/trackomatron/DEBIAN/control b/build/trackomatron/DEBIAN/control new file mode 100644 index 000000000..72e6f9145 --- /dev/null +++ b/build/trackomatron/DEBIAN/control @@ -0,0 +1,15 @@ +Source: trackomatron +Section: net +Priority: optional +Maintainer: Greg Szabo +Build-Depends: debhelper (>=9) +Depends: tendermint (>=0.10.0) +Standards-Version: 3.9.6 +Homepage: https://tendermint.com +Package: trackomatron +Architecture: amd64 +Version: @VERSION@ +Installed-Size: @INSTALLEDSIZE@ +Description: Trackomatron - Track invoices on the blockchain + This software is intended to create a space to easily send invoices between and within institutions. The commands of trackomatron are separated into two broad categories: submitting information to the blockchain (transactions), and retrieving information from the blockchain (query). + diff --git a/build/trackomatron/DEBIAN/copyright b/build/trackomatron/DEBIAN/copyright new file mode 100644 index 000000000..32a1921d5 --- /dev/null +++ b/build/trackomatron/DEBIAN/copyright @@ -0,0 +1,21 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: trackomatron +Source: https://github.com/tendermint/trackomatron + +Files: * +Copyright: 2017 All In Bits, Inc. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + .
+ On Debian systems, the full text of the Apache License 2.0 can be found + in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/build/trackomatron/DEBIAN/postinst b/build/trackomatron/DEBIAN/postinst new file mode 100644 index 000000000..9aac7fc78 --- /dev/null +++ b/build/trackomatron/DEBIAN/postinst @@ -0,0 +1,48 @@ +#!/bin/sh +# postinst script for trackomatron +# + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + chown trackomatron.trackomatron /etc/trackomatron + sudo -Hu trackomatron tracko init --home /etc/trackomatron 2B24DEE2364762300168DF19B6C18BCE2D399EA2 + #The above command generates a genesis.json file that contains validators. This is wrong, the validator part should be empty. https://github.com/tendermint/basecoin/issues/124 + sudo -Hu trackomatron tendermint init --home /etc/trackomatron/tendermint + #The above command might need some kind of additional option in the future. https://github.com/tendermint/tendermint/issues/542 + + chown trackomatron.trackomatron /etc/trackomatron/tendermint + chmod 755 /etc/trackomatron/tendermint + + systemctl daemon-reload + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/trackomatron/DEBIAN/postrm b/build/trackomatron/DEBIAN/postrm new file mode 100644 index 000000000..445d4fbd8 --- /dev/null +++ b/build/trackomatron/DEBIAN/postrm @@ -0,0 +1,41 @@ +#!/bin/sh +# postrm script for trackomatron +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + upgrade|failed-upgrade|abort-upgrade) + systemctl daemon-reload + ;; + + purge|remove|abort-install|disappear) + systemctl daemon-reload + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/trackomatron/DEBIAN/preinst b/build/trackomatron/DEBIAN/preinst new file mode 100644 index 000000000..ac866587a --- /dev/null +++ b/build/trackomatron/DEBIAN/preinst @@ -0,0 +1,38 @@ +#!/bin/sh +# preinst script for trackomatron +# + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + if ! grep -q '^trackomatron:' /etc/passwd ; then + useradd -k /dev/null -r -m -b /etc trackomatron + chmod 755 /etc/trackomatron + fi + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 diff --git a/build/trackomatron/DEBIAN/prerm b/build/trackomatron/DEBIAN/prerm new file mode 100644 index 000000000..e18afec04 --- /dev/null +++ b/build/trackomatron/DEBIAN/prerm @@ -0,0 +1,39 @@ +#!/bin/sh +# prerm script for trackomatron +# + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + systemctl stop trackomatron 2> /dev/null || : + systemctl stop trackomatron-service 2> /dev/null || : + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 diff --git a/build/trackomatron/etc/systemd/system-preset/50-trackomatron.preset b/build/trackomatron/etc/systemd/system-preset/50-trackomatron.preset new file mode 100644 index 000000000..7ddca4019 --- /dev/null +++ b/build/trackomatron/etc/systemd/system-preset/50-trackomatron.preset @@ -0,0 +1,3 @@ +disable trackomatron.service +disable trackomatron-server.service + diff --git a/build/trackomatron/etc/systemd/system/trackomatron-server.service b/build/trackomatron/etc/systemd/system/trackomatron-server.service new file mode 100644 index 000000000..097789a0e --- /dev/null +++ b/build/trackomatron/etc/systemd/system/trackomatron-server.service @@ -0,0 +1,23 @@ +[Unit] +Description=Trackomatron server +Requires=network-online.target +BindTo=trackomatron.service +PartOf=trackomatron.service +After=network-online.target trackomatron.service +PropagatesReloadTo=trackomatron.service +ReloadPropagatedFrom=trackomatron.service + +[Service] +Environment="TMHOME=/etc/trackomatron/tendermint" +Restart=on-failure +User=trackomatron +Group=trackomatron +PermissionsStartOnly=true +ExecStart=/usr/bin/tendermint node +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target trackomatron.service +Also=trackomatron.service + diff --git a/build/trackomatron/etc/systemd/system/trackomatron.service b/build/trackomatron/etc/systemd/system/trackomatron.service new file mode 100644 index 000000000..796e99772 --- /dev/null +++ b/build/trackomatron/etc/systemd/system/trackomatron.service @@ -0,0 +1,28 @@ +[Unit] +Description=Trackomatron +#propagates activation, deactivation and activation fails. 
+Requires=network-online.target +#propagates activation, deactivation, activation fails and stops +BindTo=trackomatron-server.service +#propagates stop and restart (one-way) +PartOf=trackomatron-server.service +#order +Before=trackomatron-server.service +After=network-online.target +#propagates reload +PropagatesReloadTo=trackomatron-server.service +ReloadPropagatedFrom=trackomatron-server.service + +[Service] +Restart=on-failure +User=trackomatron +Group=trackomatron +PermissionsStartOnly=true +ExecStart=/usr/bin/tracko start --without-tendermint --home /etc/trackomatron +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target +Also=trackomatron-server.service + diff --git a/build/trackomatron/usr/share/trackomatron/key.json b/build/trackomatron/usr/share/trackomatron/key.json new file mode 100644 index 000000000..bdefe8fd4 --- /dev/null +++ b/build/trackomatron/usr/share/trackomatron/key.json @@ -0,0 +1,12 @@ +{ + "address": "1B1BE55F969F54064628A63B9559E7C21C925165", + "priv_key": { + "type": "ed25519", + "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + }, + "pub_key": { + "type": "ed25519", + "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" + } +} + diff --git a/build/trackomatron/usr/share/trackomatron/key2.json b/build/trackomatron/usr/share/trackomatron/key2.json new file mode 100644 index 000000000..ddfc6809b --- /dev/null +++ b/build/trackomatron/usr/share/trackomatron/key2.json @@ -0,0 +1,12 @@ +{ + "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", + "priv_key": { + "type": "ed25519", + "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + }, + "pub_key": { + "type": "ed25519", + "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" + } +} + diff --git a/docker/README.rst b/docker/README.rst new file mode 100644 index 000000000..b135c0072 --- /dev/null +++ b/docker/README.rst @@ -0,0 +1,120 @@ +Using Docker +============ + +`This folder `__ contains Docker container descriptions. Using this folder +you can build your own Docker images with the tendermint application. + +It is assumed that you have already setup docker. + +If you don't want to build the images yourself, you should be able to +download them from Docker Hub. + +Tendermint +---------- + +Build the container: Copy the ``tendermint`` binary to the +``tendermint`` folder. + +:: + + docker build -t tendermint tendermint + +The application configuration will be stored at ``/tendermint`` in the +container. The ports 46656 and 46657 will be open for ABCI applications +to connect. + +Initialize tendermint configuration and keep it after the container is +finished in a docker volume called ``data``: + +:: + + docker run --rm -v data:/tendermint tendermint init + +If you want the docker volume to be a physical directory on your +filesystem, you have to give an absolute path to docker and make sure +the permissions allow the application to write it. + +Get the public key of tendermint: + +:: + + docker run --rm -v data:/tendermint tendermint show_validator + +Run the docker tendermint application with: + +:: + + docker run --rm -d -v data:/tendermint tendermint node + +Basecoin +-------- + +Build the container: Copy the ``basecoin`` binary to the ``basecoin`` +folder. 
+ +:: + + docker build -t basecoin basecoin + +The application configuration will be stored at ``/basecoin``. + +Initialize basecoin configuration and keep it after the container is +finished: + +:: + + docker run --rm -v basecoindata:/basecoin basecoin init deadbeef + +Use your own basecoin account instead of ``deadbeef`` in the ``init`` +command. + +Get the public key of basecoin: We use a trick here: since the basecoin +and the tendermint configuration folders are similar, the ``tendermint`` +command can extract the public key for us if we feed the basecoin +configuration folder to tendermint. + +:: + + docker run --rm -v basecoindata:/tendermint tendermint show_validator + +Run the docker tendermint application with: This is a two-step process: +\* Run the basecoin container. \* Run the tendermint container and +expose the ports that allow clients to connect. The --proxy\_app should +contain the basecoin application's IP address and port. + +:: + + docker run --rm -d -v basecoindata:/basecoin basecoin start --without-tendermint + docker run --rm -d -v data:/tendermint -p 46656-46657:46656-46657 tendermint node --proxy_app tcp://172.17.0.2:46658 + +Ethermint +--------- + +Build the container: Copy the ``ethermint`` binary and the setup folder +to the ``ethermint`` folder. + +:: + + docker build -t ethermint ethermint + +The application configuration will be stored at ``/ethermint``. The +files required for initializing ethermint (the files in the source +``setup`` folder) are under ``/setup``. + +Initialize ethermint configuration: + +:: + + docker run --rm -v ethermintdata:/ethermint ethermint init /setup/genesis.json + +Start ethermint as a validator node: This is a two-step process: \* Run +the ethermint container. You will have to define where tendermint runs +as the ethermint binary connects to it explicitly. \* Run the tendermint +container and expose the ports that allow clients to connect. The +--proxy\_app should contain the ethermint application's IP address and +port. 
+ +:: + + docker run --rm -d -v ethermintdata:/ethermint ethermint --tendermint_addr tcp://172.17.0.3:46657 + docker run --rm -d -v data:/tendermint -p 46656-46657:46656-46657 tendermint node --proxy_app tcp://172.17.0.2:46658 diff --git a/docker/basecoin/Dockerfile b/docker/basecoin/Dockerfile new file mode 100644 index 000000000..42d01883f --- /dev/null +++ b/docker/basecoin/Dockerfile @@ -0,0 +1,14 @@ +FROM busybox +#Use --build-arg to change where the basecoin binary resides +ARG BASECOIN_BINARY=basecoin +ENV BCHOME /basecoin +COPY $BASECOIN_BINARY /usr/bin/basecoin +RUN adduser -h $BCHOME -D basecoin +VOLUME [ $BCHOME ] +EXPOSE 46658 +USER basecoin +ENTRYPOINT ["/usr/bin/basecoin"] +CMD ["start","--without-tendermint"] +WORKDIR $BCHOME +STOPSIGNAL SIGTERM + diff --git a/docker/ethermint/Dockerfile b/docker/ethermint/Dockerfile new file mode 100644 index 000000000..3f5a9ab94 --- /dev/null +++ b/docker/ethermint/Dockerfile @@ -0,0 +1,15 @@ +FROM busybox +#Use --build-arg to change where the ethermint binary and setup directory resides +ARG ETHERMINT_BINARY=ethermint +ARG SETUP_DIR=setup +ENV EMHOME /ethermint +COPY $ETHERMINT_BINARY /usr/bin/ethermint +COPY $SETUP_DIR /setup +RUN adduser -h $EMHOME -D ethermint +VOLUME [ $EMHOME ] +EXPOSE 46658 +USER ethermint +ENTRYPOINT ["/usr/bin/ethermint","--datadir","$EMHOME"] +WORKDIR $EMHOME +STOPSIGNAL SIGTERM + diff --git a/docker/tendermint/Dockerfile b/docker/tendermint/Dockerfile new file mode 100644 index 000000000..3344c1fd0 --- /dev/null +++ b/docker/tendermint/Dockerfile @@ -0,0 +1,14 @@ +FROM busybox +#Use --build-arg to change where the tendermint binary resides +ARG TENDERMINT_BINARY=tendermint +ENV TMHOME /tendermint +COPY $TENDERMINT_BINARY /usr/bin/tendermint +RUN adduser -h $TMHOME -D tendermint +VOLUME [ $TMHOME ] +EXPOSE 46656 46657 +USER tendermint +ENTRYPOINT ["/usr/bin/tendermint"] +CMD ["node"] +WORKDIR $TMHOME +STOPSIGNAL SIGTERM + diff --git a/mintnet-kubernetes/README.md b/mintnet-kubernetes/README.md deleted file mode 100644 index c2928a8e0..000000000 --- a/mintnet-kubernetes/README.md +++ /dev/null @@ -1,229 +0,0 @@ -# Tendermint network powered by Kubernetes - -![Tendermint plus Kubernetes](img/t_plus_k.png) - -* [QuickStart (MacOS)](#quickstart-macos) -* [QuickStart (Linux)](#quickstart-linux) -* [Usage](#usage) -* [Security](#security) -* [Fault tolerance](#fault-tolerance) -* [Starting process](#starting-process) - -This should primarily be used for testing purposes or for tightly-defined -chains operated by a single stakeholder (see [the security -precautions](#security)). If your desire is to launch an application with many -stakeholders, consider using our set of Ansible scripts. 
- -## QuickStart (MacOS) - -[Requirements](https://github.com/kubernetes/minikube#requirements) - -``` -curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl -curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.18.0/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ -minikube start - -git clone https://github.com/tendermint/tools.git && cd tools/mintnet-kubernetes/examples/basecoin && make create -``` - -## QuickStart (Linux) - -[Requirements](https://github.com/kubernetes/minikube#requirements) - -``` -curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl -curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.18.0/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ -minikube start - -git clone https://github.com/tendermint/tools.git && cd tools/mintnet-kubernetes/examples/basecoin && make create -``` - -### Verify everything works - -**Using a shell:** - -1. wait until all the pods are `Running`. - - ``` - kubectl get pods -w -o wide -L tm - ``` - -2. query the Tendermint app logs from the first pod. - - ``` - kubectl logs -c tm -f tm-0 - ``` - -3. use [Rest API](https://tendermint.com/docs/internals/rpc) to fetch the - status of the second pod's Tendermint app. Note we are using `kubectl exec` - because pods are not exposed (and should not be) to the outer network. - - ``` - kubectl exec -c tm tm-0 -- curl -s http://tm-1.basecoin:46657/status | json_pp - ``` - -**Using the dashboard:** - -``` -minikube dashboard -``` - -### Clean up - -``` -make destroy -``` - -## Usage - -### (1/4) Setup a Kubernetes cluster - -- locally using [Minikube](https://github.com/kubernetes/minikube) -- on GCE with a single click in the web UI -- on AWS using [Kubernetes Operations](https://github.com/kubernetes/kops/blob/master/docs/aws.md) -- on Linux machines (Digital Ocean) using [kubeadm](https://kubernetes.io/docs/getting-started-guides/kubeadm/) -- on AWS, Azure, GCE or bare metal using [Kargo (Ansible)](https://kubernetes.io/docs/getting-started-guides/kargo/) - -Please refer to [the official -documentation](https://kubernetes.io/docs/getting-started-guides/) for overview -and comparison of different options. See our guides for [Google Cloud -Engine](docs/SETUP_K8S_ON_GCE.md) or [Digital Ocean](docs/SETUP_K8S_ON_DO.md). - -**Make sure you have Kubernetes >= 1.5, because you will be using StatefulSets, -which is a beta feature in 1.5.** - -### (2/4) Create a configuration file - -Download a template: - -``` -curl -Lo app.yaml https://github.com/tendermint/tools/raw/master/mintnet-kubernetes/app.template.yaml -``` - -Open `app.yaml` in your favorite editor and configure your app container -(navigate to `- name: app`). Kubernetes DSL (Domain Specific Language) is very -simple, so it should be easy. You will need to set Docker image, command and/or -run arguments. Replace variables prefixed with `YOUR_APP` with corresponding -values. Set genesis time to now and preferable chain ID in ConfigMap. - -Please note if you are changing `replicas` number, do not forget to update -`validators` set in ConfigMap. 
You will be able to scale the cluster up or down -later, but new pods (nodes) won't become validators automatically. - -### (3/4) Deploy your application - -``` -kubectl create -f ./app.yaml -``` - -### (4/4) Observe your cluster - -**web UI** <-> https://github.com/kubernetes/dashboard - -The easiest way to access Dashboard is to use kubectl. Run the following command in your desktop environment: - -``` -kubectl proxy -``` - -kubectl will handle authentication with apiserver and make Dashboard available at [http://localhost:8001/ui](http://localhost:8001/ui) - -**shell** - -List all the pods: - -``` -kubectl get pods -o wide -L tm -``` - -StatefulSet details: - -``` -kubectl describe statefulsets tm -``` - -First pod details: - -``` -kubectl describe pod tm-0 -``` - -Tendermint app logs from the first pod: - -``` -kubectl logs tm-0 -c tm -f -``` - -App logs from the first pod: - -``` -kubectl logs tm-0 -c app -f -``` - -Status of the second pod's Tendermint app: - -``` -kubectl exec -c tm tm-0 -- curl -s http://tm-1.:46657/status | json_pp -``` - -## Security - -Due to the nature of Kubernetes, where you typically have a single master, the -master could be a SPOF (Single Point Of Failure). Therefore, you need to make -sure only authorized people can access it. And these people themselves had -taken basic measures in order not to get hacked. - -These are the best practices: - -- all access to the master is over TLS -- access to the API Server is X.509 certificate or token based -- etcd is not exposed directly to the cluster -- ensure that images are free of vulnerabilities ([1](https://github.com/coreos/clair)) -- ensure that only authorized images are used in your environment -- disable direct access to Kubernetes nodes (no SSH) -- define resource quota - -Resources: - -- https://kubernetes.io/docs/admin/accessing-the-api/ -- http://blog.kubernetes.io/2016/08/security-best-practices-kubernetes-deployment.html -- https://blog.openshift.com/securing-kubernetes/ - -## Fault tolerance - -Having a single master (API server) is a bad thing also because if something -happens to it, you risk being left without an access to the application. - -To avoid that you can [run Kubernetes in multiple -zones](https://kubernetes.io/docs/admin/multiple-zones/), each zone running an -[API server](https://kubernetes.io/docs/admin/high-availability/) and load -balance requests between them. Do not forget to make sure only one instance of -scheduler and controller-manager are running at once. - -Running in multiple zones is a lightweight version of a broader [Cluster -Federation feature](https://kubernetes.io/docs/admin/federation/). Federated -deployments could span across multiple regions (not zones). We haven't tried -this feature yet, so any feedback is highly appreciated! Especially, related to -additional latency and cost of exchanging data between the regions. - -Resources: - -- https://kubernetes.io/docs/admin/high-availability/ - -## Starting process - -![StatefulSet](img/statefulset.png) - -Init containers (`tm-gen-validator`) are run before all other containers, -creating public-private key pair for each pod. Every `tm` container then asks -other pods for their public keys, which are served with nginx (`pub-key` -container). When `tm` container have all the keys, it forms a genesis file and -starts Tendermint process. 
- -## TODO - -- [ ] run tendermint from tmuser - ``` - securityContext: - fsGroup: 999 - ``` diff --git a/mintnet-kubernetes/README.rst b/mintnet-kubernetes/README.rst new file mode 100644 index 000000000..1baca6798 --- /dev/null +++ b/mintnet-kubernetes/README.rst @@ -0,0 +1,290 @@ +Using Kubernetes +================ + +.. figure:: assets/t_plus_k.png + :alt: Tendermint plus Kubernetes + + Tendermint plus Kubernetes + +This should primarily be used for testing purposes or for +tightly-defined chains operated by a single stakeholder (see `the +security precautions <#security>`__). If your desire is to launch an +application with many stakeholders, consider using our set of Ansible +scripts. + +Quick Start +----------- + +For either platform, see the `requirements `__ + +MacOS +^^^^^ + +:: + + curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.18.0/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ + minikube start + + git clone https://github.com/tendermint/tools.git && cd tools/mintnet-kubernetes/examples/basecoin && make create + +Linux +^^^^^ + +:: + + curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.18.0/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ + minikube start + + git clone https://github.com/tendermint/tools.git && cd tools/mintnet-kubernetes/examples/basecoin && make create + +Verify it worked +~~~~~~~~~~~~~~~~ + +**Using a shell:** + +First wait until all the pods are ``Running``: + +``kubectl get pods -w -o wide -L tm`` + +then query the Tendermint app logs from the first pod: + +``kubectl logs -c tm -f tm-0`` + +finally, use our `Rest API <../specification/rpc.html>`__ to fetch the status of the second pod's Tendermint app. + +Note we are using ``kubectl exec`` because pods are not exposed (and should not be) to the +outer network: + +``kubectl exec -c tm tm-0 -- curl -s http://tm-1.basecoin:46657/status | json_pp`` + +**Using the dashboard:** + +:: + + minikube dashboard + +Clean up +~~~~~~~~ + +:: + + make destroy + +Usage +----- + +Setup a Kubernetes cluster +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- locally using `Minikube `__ +- on GCE with a single click in the web UI +- on AWS using `Kubernetes + Operations `__ +- on Linux machines (Digital Ocean) using + `kubeadm `__ +- on AWS, Azure, GCE or bare metal using `Kargo + (Ansible) `__ + +Please refer to `the official +documentation `__ +for overview and comparison of different options. + +Kubernetes on Digital Ocean +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Available options: + +- `kubeadm (alpha) `__ +- `kargo `__ +- `rancher `__ +- `terraform `__ + +As you can see, there is no single tool for creating a cluster on DO. +Therefore, choose the one you know and comfortable working with. If you know +and used `terraform `__ before, then choose it. If you +know Ansible, then pick kargo. If none of these seem familiar to you, go with +``kubeadm``. Rancher is a beautiful UI for deploying and managing containers in +production. 
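+
+If you go with ``kubeadm``, bootstrapping a cluster on fresh droplets can look
+roughly like the sketch below (this assumes Ubuntu droplets with Docker and the
+``kubeadm``, ``kubelet`` and ``kubectl`` packages already installed; the exact
+flags and the pod network manifest differ between Kubernetes versions):
+
+::
+
+    # on the master droplet
+    kubeadm init --pod-network-cidr=10.244.0.0/16
+
+    # make kubectl usable for the current user
+    mkdir -p $HOME/.kube && cp /etc/kubernetes/admin.conf $HOME/.kube/config
+
+    # install a pod network add-on (flannel is one option)
+    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+
+    # on each worker droplet, join with the token printed by kubeadm init
+    kubeadm join --token <token> <master-ip>:6443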
+ +Kubernetes on Google Cloud Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Review the `Official Documentation `__ for Kubernetes on Google Compute +Engine. + +**Create a cluster** + +The recommended way is to use `Google Container +Engine `__. You should be able +to create a fully fledged cluster with just a few clicks. + +**Connect to it** + +Install ``gcloud`` as a part of `Google Cloud SDK `__. + +Make sure you have credentials for GCloud by running ``gcloud auth login``. + +In order to make API calls against GCE, you must also run ``gcloud auth +application-default login``. + +Press ``Connect``: + +.. figure:: assets/gce1.png + +and execute the first command in your shell. Then start a proxy by +executing ``kubectl proxy``. + +.. figure:: assets/gce2.png + +Now you should be able to run ``kubectl`` commands to create resources, get +resource info, logs, etc. + +**Make sure you have Kubernetes >= 1.5, because you will be using +StatefulSets, which is a beta feature in 1.5.** + +Create a configuration file +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Download a template: + +:: + + curl -Lo app.yaml https://github.com/tendermint/tools/raw/master/mintnet-kubernetes/app.template.yaml + +Open ``app.yaml`` in your favorite editor and configure your app +container (navigate to ``- name: app``). Kubernetes DSL (Domain Specific +Language) is very simple, so it should be easy. You will need to set +Docker image, command and/or run arguments. Replace variables prefixed +with ``YOUR_APP`` with corresponding values. Set the genesis time to now and +your preferred chain ID in the ConfigMap. + +Please note if you are changing ``replicas`` number, do not forget to +update ``validators`` set in ConfigMap. You will be able to scale the +cluster up or down later, but new pods (nodes) won't become validators +automatically. + +Deploy your application +^^^^^^^^^^^^^^^^^^^^^^^ + +:: + + kubectl create -f ./app.yaml + +Observe your cluster +^^^^^^^^^^^^^^^^^^^^ + +`web UI `__ + +The easiest way to access Dashboard is to use ``kubectl``. Run the following +command in your desktop environment: + +:: + + kubectl proxy + +``kubectl`` will handle authentication with apiserver and make Dashboard +available at http://localhost:8001/ui + +**shell** + +List all the pods: + +:: + + kubectl get pods -o wide -L tm + +StatefulSet details: + +:: + + kubectl describe statefulsets tm + +First pod details: + +:: + + kubectl describe pod tm-0 + +Tendermint app logs from the first pod: + +:: + + kubectl logs tm-0 -c tm -f + +App logs from the first pod: + +:: + + kubectl logs tm-0 -c app -f + +Status of the second pod's Tendermint app: + +:: + + kubectl exec -c tm tm-0 -- curl -s http://tm-1.:46657/status | json_pp + +Security +-------- + +Due to the nature of Kubernetes, where you typically have a single +master, the master could be a SPOF (Single Point Of Failure). Therefore, +you need to make sure only authorized people can access it. And these +people themselves must take basic measures in order not to get hacked.
+ +These are the best practices: + +- all access to the master is over TLS +- access to the API Server is X.509 certificate or token based +- etcd is not exposed directly to the cluster +- ensure that images are free of vulnerabilities + (`1 `__) +- ensure that only authorized images are used in your environment +- disable direct access to Kubernetes nodes (no SSH) +- define resource quota + +Resources: + +- https://kubernetes.io/docs/admin/accessing-the-api/ +- http://blog.kubernetes.io/2016/08/security-best-practices-kubernetes-deployment.html +- https://blog.openshift.com/securing-kubernetes/ + +Fault tolerance +--------------- + +Having a single master (API server) is a bad thing also because if +something happens to it, you risk being left without an access to the +application. + +To avoid that you can `run Kubernetes in multiple +zones `__, each zone +running an `API +server `__ and load +balance requests between them. Do not forget to make sure only one +instance of scheduler and controller-manager are running at once. + +Running in multiple zones is a lightweight version of a broader `Cluster +Federation feature `__. +Federated deployments could span across multiple regions (not zones). We +haven't tried this feature yet, so any feedback is highly appreciated! +Especially, related to additional latency and cost of exchanging data +between the regions. + +Resources: + +- https://kubernetes.io/docs/admin/high-availability/ + +Starting process +---------------- + +.. figure:: assets/statefulset.png + :alt: StatefulSet + + StatefulSet + +Init containers (``tm-gen-validator``) are run before all other +containers, creating public-private key pair for each pod. Every ``tm`` +container then asks other pods for their public keys, which are served +with nginx (``pub-key`` container). When ``tm`` container have all the +keys, it forms a genesis file and starts the Tendermint process. diff --git a/mintnet-kubernetes/img/gce1.png b/mintnet-kubernetes/assets/gce1.png similarity index 100% rename from mintnet-kubernetes/img/gce1.png rename to mintnet-kubernetes/assets/gce1.png diff --git a/mintnet-kubernetes/img/gce2.png b/mintnet-kubernetes/assets/gce2.png similarity index 100% rename from mintnet-kubernetes/img/gce2.png rename to mintnet-kubernetes/assets/gce2.png diff --git a/mintnet-kubernetes/img/statefulset.png b/mintnet-kubernetes/assets/statefulset.png similarity index 100% rename from mintnet-kubernetes/img/statefulset.png rename to mintnet-kubernetes/assets/statefulset.png diff --git a/mintnet-kubernetes/img/t_plus_k.png b/mintnet-kubernetes/assets/t_plus_k.png similarity index 100% rename from mintnet-kubernetes/img/t_plus_k.png rename to mintnet-kubernetes/assets/t_plus_k.png diff --git a/mintnet-kubernetes/docs/SETUP_K8S_ON_DO.md b/mintnet-kubernetes/docs/SETUP_K8S_ON_DO.md deleted file mode 100644 index 5d00ad6d3..000000000 --- a/mintnet-kubernetes/docs/SETUP_K8S_ON_DO.md +++ /dev/null @@ -1,15 +0,0 @@ -# Setup a Kubernetes cluster on Digital Ocean (DO) - -Available options: - -1. [kubeadm (alpha)](https://kubernetes.io/docs/getting-started-guides/kubeadm/) -2. [kargo](https://kubernetes.io/docs/getting-started-guides/kargo/) -3. [rancher](http://rancher.com/) -4. [terraform](https://github.com/hermanjunge/kubernetes-digitalocean-terraform) - -As you can see, there is no single tool for creating a cluster on DO. -Therefore, choose the one you know and comfortable working with. If you know -and used [terraform](https://www.terraform.io/) before, then choose it. 
If you -know Ansible, then pick kargo. If none of these seem familiar to you, go with -kubeadm. Rancher is a beautiful UI for deploying and managing containers in -production. diff --git a/mintnet-kubernetes/docs/SETUP_K8S_ON_GCE.md b/mintnet-kubernetes/docs/SETUP_K8S_ON_GCE.md deleted file mode 100644 index 671d7ec7e..000000000 --- a/mintnet-kubernetes/docs/SETUP_K8S_ON_GCE.md +++ /dev/null @@ -1,31 +0,0 @@ -# Setup a Kubernetes cluster on Google Cloud Engine (GCE) - -Main article: [Running Kubernetes on Google Compute -Engine](https://kubernetes.io/docs/getting-started-guides/gce/) - -## 1. Create a cluster - -The recommended way is to use [Google Container -Engine](https://cloud.google.com/container-engine/) (GKE). You should be able -to create a fully fledged cluster with just a few clicks. - -## 2. Connect to it - -Install `gcloud` as a part of [Google Cloud SDK](https://cloud.google.com/sdk/). - -Make sure you have credentials for GCloud by running `gcloud auth login`. - -In order to make API calls against GCE, you must also run `gcloud auth -application-default login` - -Press `Connect` button: - -![Connect button](../img/gce1.png) - -![Connect pop-up](../img/gce2.png) - -and execute the first command in your shell. Then start a proxy by -executing `kubectl proxy`. - -Now you should be able to run `kubectl` command to create resources, get -resource info, logs, etc. diff --git a/terraforce/README.md b/terraforce/README.md deleted file mode 100644 index a91e21aa8..000000000 --- a/terraforce/README.md +++ /dev/null @@ -1,149 +0,0 @@ -# Stack - -This is a stripped down version of https://github.com/segmentio/stack -plus some shell scripts. - -It is responsible for the following: - - - spin up a cluster of nodes - - copy config files for a tendermint testnet to each node - - copy linux binaries for tendermint and the app to each node - - start tendermint on every node - -# How it Works - -To use, a user must only provide a directory containing two files: `bins` and `run.sh`. - -The `bins` file is a list of binaries, for instance: - -``` -$GOPATH/bin/tendermint -$GOPATH/bin/dummy -``` - -and the `run.sh` specifies how those binaries ought to be started: - -``` -#! /bin/bash - -if [[ "$SEEDS" != "" ]]; then - SEEDS_FLAG="--seeds=$SEEDS" -fi - -./dummy --persist .tendermint/data/dummy_data >> app.log 2>&1 & -./tendermint node --log_level=info $SEEDS_FLAG >> tendermint.log 2>&1 & -``` - -This let's you specify exactly which versions of Tendermint and the application are to be used, -and how they ought to be started. - -Note that these binaries *MUST* be compiled for Linux. -If you are not on Linux, you can compile binaries for linux using `go build` with the `GOOS` variable: - -``` -GOOS=linux go build -o $GOPATH/bin/tendermint-linux $GOPATH/src/github.com/tendermint/tendermint/cmd/tendermint -``` - -This cross-compilation must be done for each binary you want to copy over. - -If you want to use an application that requires more than just a few binaries, you may need to do more manual work, -for instance using `terraforce` to set up the development environment on every machine. - -# Dependencies - -We use `terraform` for spinning up the machines, -and a custom rolled tool, `terraforce`, -for running commands on many machines in parallel. -You can download terraform here: https://www.terraform.io/downloads.html -To download terraforce, run `go get github.com/ebuchman/terraforce` - -We use `tendermint` itself to generate files for a testnet. 
-You can install `tendermint` with - -``` -cd $GOPATH/src/github.com/tendermint/tendermint -glide install -go install ./cmd/tendermint -``` - -You also need to set the `DIGITALOCEAN_TOKEN` environment variables so that terraform can -spin up nodes on digital ocean. - -This stack is currently some terraform and a bunch of shell scripts, -so its helpful to work out of a directory containing everything. -Either change directory to `$GOPATH/src/github.com/tendermint/tendermint/test/net` -or make a copy of that directory and change to it. All commands are expected to be executed from there. - -For terraform to work, you must first run `terraform get` - -# Create - -To create a cluster with 4 nodes, run - -``` -terraform apply -``` - -To use a different number of nodes, change the `desired_capacity` parameter in the `main.tf`. - -Note that terraform keeps track of the current state of your infrastructure, -so if you change the `desired_capacity` and run `terraform apply` again, it will add or remove nodes as necessary. - -If you think that's amazing, so do we. - -To get some info about the cluster, run `terraform output`. - -See the [terraform docs](https://www.terraform.io/docs/index.html) for more details. - -To tear down the cluster, run `terraform destroy`. - -# Initialize - -Now that we have a cluster up and running, let's generate the necessary files for a Tendermint node and copy them over. -A Tendermint node needs, at the least, a `priv_validator.json` and a `genesis.json`. -To generate files for the nodes, run - -``` -tendermint testnet 4 mytestnet -``` - -This will create the directory `mytestnet`, containing one directory for each of the 4 nodes. -Each node directory contains a unique `priv_validator.json` and a `genesis.json`, -where the `genesis.json` contains the public keys of all `priv_validator.json` files. - -If you want to add more files to each node for your particular app, you'll have to add them to each of the node directories. - -Now we can copy everything over to the cluster. 
-If you are on Linux, run - -``` -bash scripts/init.sh 4 mytestnet examples/in-proc -``` - -Otherwise (if you are not on Linux), make sure you ran - -``` -GOOS=linux go build -o $GOPATH/bin/tendermint-linux $GOPATH/src/github.com/tendermint/tendermint/cmd/tendermint -``` - -and now run - -``` -bash scripts/init.sh 4 mytestnet examples/in-proc-linux -``` - -# Start - -Finally, to start Tendermint on all the nodes, run - -``` -bash scripts/start.sh 4 -``` - -# Check - -Query the status of all your nodes: - -``` -bash scripts/query.sh 4 status -``` diff --git a/terraforce/cluster/main.tf b/terraforce/cluster/main.tf deleted file mode 100644 index 99435a03e..000000000 --- a/terraforce/cluster/main.tf +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Cluster on DO - * - */ - -variable "name" { - description = "The cluster name, e.g cdn" -} - -variable "environment" { - description = "Environment tag, e.g prod" -} - -variable "image_id" { - description = "Image ID" -} - -variable "regions" { - description = "Regions to launch in" - type = "list" -} - -variable "key_ids" { - description = "SSH keys to use" - type = "list" -} - -variable "instance_size" { - description = "The instance size to use, e.g 2gb" -} - -variable "desired_capacity" { - description = "Desired instance count" - default = 3 -} - -#----------------------- -# Instances - -resource "digitalocean_droplet" "cluster" { - # set the image and instance type - name = "${var.name}${count.index}" - image = "${var.image_id}" - size = "${var.instance_size}" - - # the `element` function handles modulo - region = "${element(var.regions, count.index)}" - - ssh_keys = "${var.key_ids}" - - count = "${var.desired_capacity}" - lifecycle = { - prevent_destroy = false - } -} - -#----------------------- - -// The cluster name, e.g cdn -output "name" { - value = "${var.name}" -} - -// The list of cluster instance ids -output "instances" { - value = ["${digitalocean_droplet.cluster.*.id}"] -} - -// The list of cluster instance ips -output "private_ips" { - value = ["${digitalocean_droplet.cluster.*.ipv4_address_private}"] -} - -// The list of cluster instance ips -output "public_ips" { - value = ["${digitalocean_droplet.cluster.*.ipv4_address}"] -} diff --git a/terraforce/examples/dummy/bins b/terraforce/examples/dummy/bins deleted file mode 100644 index 0890780df..000000000 --- a/terraforce/examples/dummy/bins +++ /dev/null @@ -1,2 +0,0 @@ -$GOPATH/bin/tendermint -$GOPATH/bin/dummy diff --git a/terraforce/examples/dummy/run.sh b/terraforce/examples/dummy/run.sh deleted file mode 100644 index 469b260c6..000000000 --- a/terraforce/examples/dummy/run.sh +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -if [[ "$SEEDS" != "" ]]; then - SEEDS_FLAG="--seeds=$SEEDS" -fi - -./dummy --persist .tendermint/data/dummy_data >> app.log 2>&1 & -./tendermint node --log_level=info $SEEDS_FLAG >> tendermint.log 2>&1 & diff --git a/terraforce/examples/in-proc-linux/bins b/terraforce/examples/in-proc-linux/bins deleted file mode 100644 index fa0c1052d..000000000 --- a/terraforce/examples/in-proc-linux/bins +++ /dev/null @@ -1 +0,0 @@ -$GOPATH/bin/tendermint-linux diff --git a/terraforce/examples/in-proc-linux/run.sh b/terraforce/examples/in-proc-linux/run.sh deleted file mode 100644 index ff233950e..000000000 --- a/terraforce/examples/in-proc-linux/run.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! 
/bin/bash - -if [[ "$SEEDS" != "" ]]; then - SEEDS_FLAG="--seeds=$SEEDS" -fi - -./tendermint-linux node --proxy_app=dummy --log_level=note $SEEDS_FLAG >> tendermint.log 2>&1 & diff --git a/terraforce/examples/in-proc/bins b/terraforce/examples/in-proc/bins deleted file mode 100644 index f48f5e353..000000000 --- a/terraforce/examples/in-proc/bins +++ /dev/null @@ -1 +0,0 @@ -$GOPATH/bin/tendermint diff --git a/terraforce/examples/in-proc/run.sh b/terraforce/examples/in-proc/run.sh deleted file mode 100644 index 0627439b4..000000000 --- a/terraforce/examples/in-proc/run.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash - -if [[ "$SEEDS" != "" ]]; then - SEEDS_FLAG="--seeds=$SEEDS" -fi - -./tendermint node --proxy_app=dummy --log_level=note $SEEDS_FLAG >> tendermint.log 2>&1 & diff --git a/terraforce/main.tf b/terraforce/main.tf deleted file mode 100644 index f03a0be57..000000000 --- a/terraforce/main.tf +++ /dev/null @@ -1,34 +0,0 @@ -module "cluster" { - source = "./cluster" - environment = "test" - name = "tendermint-testnet" - - # curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" - key_ids = [8163311] - - image_id = "ubuntu-14-04-x64" - desired_capacity = 4 - instance_size = "2gb" - - regions = ["AMS2", "FRA1", "LON1", "NYC2", "SFO2", "SGP1", "TOR1"] -} - - -provider "digitalocean" { -} - -output "public_ips" { - value = "${module.cluster.public_ips}" -} - -output "private_ips" { - value = "${join(",",module.cluster.private_ips)}" -} - -output "seeds" { - value = "${join(":46656,",module.cluster.public_ips)}:46656" -} - -output "rpcs" { - value = "${join(":46657,",module.cluster.public_ips)}:46657" -} diff --git a/terraforce/scripts/copy_run.sh b/terraforce/scripts/copy_run.sh deleted file mode 100644 index 31c8eb117..000000000 --- a/terraforce/scripts/copy_run.sh +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes -RUN=$2 # path to run script - -N_=$((N-1)) - -# stop all tendermint -terraforce scp --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" $RUN run.sh diff --git a/terraforce/scripts/init.sh b/terraforce/scripts/init.sh deleted file mode 100644 index 15b8e3247..000000000 --- a/terraforce/scripts/init.sh +++ /dev/null @@ -1,43 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes -TESTNET=$2 # path to folder containing testnet info -CONFIG=$3 # path to folder containing `bins` and `run.sh` files - -if [[ ! -f $CONFIG/bins ]]; then - echo "config folder ($CONFIG) must contain bins file" - exit 1 -fi -if [[ ! -f $CONFIG/run.sh ]]; then - echo "config folder ($CONFIG) must contain run.sh file" - exit 1 -fi - -KEY=$HOME/.ssh/id_rsa - -FLAGS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - -N_=$((N-1)) # 0-based index - -MACH_ROOT="$TESTNET/mach?" 
- - -# mkdir -terraforce ssh --user root --ssh-key $KEY --machines "[0-$N_]" mkdir .tendermint - -# copy over genesis/priv_val -terraforce scp --user root --ssh-key $KEY --iterative --machines "[0-$N_]" "$MACH_ROOT/priv_validator.json" .tendermint/priv_validator.json -terraforce scp --user root --ssh-key $KEY --iterative --machines "[0-$N_]" "$MACH_ROOT/genesis.json" .tendermint/genesis.json - -# copy the run script -terraforce scp --user root --ssh-key $KEY --machines "[0-$N_]" $CONFIG/run.sh run.sh - -# copy the binaries -while read line; do - local_bin=$(eval echo $line) - remote_bin=$(basename $local_bin) - echo $local_bin - terraforce scp --user root --ssh-key $KEY --machines "[0-$N_]" $local_bin $remote_bin - terraforce ssh --user root --ssh-key $KEY --machines "[0-$N_]" chmod +x $remote_bin -done <$CONFIG/bins diff --git a/terraforce/scripts/query.sh b/terraforce/scripts/query.sh deleted file mode 100644 index c77cf61dc..000000000 --- a/terraforce/scripts/query.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes -QUERY=$2 - -N_=$((N-1)) - -# start all tendermint nodes -terraforce ssh --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" curl -s localhost:46657/$QUERY - diff --git a/terraforce/scripts/reset.sh b/terraforce/scripts/reset.sh deleted file mode 100644 index 2bef5324c..000000000 --- a/terraforce/scripts/reset.sh +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes - -N_=$((N-1)) - -# stop all tendermint -terraforce ssh --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" rm -rf .tendermint/data -terraforce ssh --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" ./tendermint unsafe_reset_priv_validator diff --git a/terraforce/scripts/restart.sh b/terraforce/scripts/restart.sh deleted file mode 100644 index 03ff1918e..000000000 --- a/terraforce/scripts/restart.sh +++ /dev/null @@ -1,9 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes - -N_=$((N-1)) - -# start -terraforce ssh --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" SEEDS=$(terraform output seeds) bash run.sh diff --git a/terraforce/scripts/start.sh b/terraforce/scripts/start.sh deleted file mode 100644 index e72a8da68..000000000 --- a/terraforce/scripts/start.sh +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes - -N_=$((N-1)) - -# start all tendermint nodes -terraforce ssh --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" SEEDS=$(terraform output seeds) bash run.sh - diff --git a/terraforce/scripts/stop.sh b/terraforce/scripts/stop.sh deleted file mode 100644 index bdb55869c..000000000 --- a/terraforce/scripts/stop.sh +++ /dev/null @@ -1,9 +0,0 @@ -#! /bin/bash -set -u - -N=$1 # number of nodes - -N_=$((N-1)) - -# stop all tendermint -terraforce ssh --user root --ssh-key $HOME/.ssh/id_rsa --machines "[0-$N_]" killall tendermint diff --git a/terraforce/test.sh b/terraforce/test.sh deleted file mode 100644 index d69dc9180..000000000 --- a/terraforce/test.sh +++ /dev/null @@ -1,30 +0,0 @@ -#! 
/bin/bash - -cd $GOPATH/src/github.com/tendermint/tendermint - -TEST_PATH=./test/net/new - -N=4 -TESTNET_DIR=mytestnet - -# install deps -# TODO: we should build a Docker image and -# really do everything that follows in the container -# bash setup.sh - - -# launch infra -terraform get -terraform apply - -# create testnet files -tendermint testnet -n $N -dir $TESTNET_DIR - -# expects a linux tendermint binary to be built already -bash scripts/init.sh $N $TESTNET_DIR test/net/examples/in-proc - -# testnet should now be running :) -bash scripts/start.sh 4 - - - diff --git a/terraform-digitalocean/.gitignore b/terraform-digitalocean/.gitignore new file mode 100644 index 000000000..2ded1561a --- /dev/null +++ b/terraform-digitalocean/.gitignore @@ -0,0 +1,3 @@ +cluster/networking.tf +networking-output.tf + diff --git a/terraform-digitalocean/README.rst b/terraform-digitalocean/README.rst new file mode 100644 index 000000000..95af507f0 --- /dev/null +++ b/terraform-digitalocean/README.rst @@ -0,0 +1,111 @@ +Using Terraform +=============== + +This is a generic `Terraform `__ +configuration that sets up DigitalOcean droplets. See the +`terraform-digitalocean `__ +for the required files. + +Prerequisites +------------- + +- Install `HashiCorp Terraform `__ on a linux + machine. +- Create a `DigitalOcean API + token `__ with + read and write capability. +- Create a private/public key pair for SSH. This is needed to log onto + your droplets as well as by Ansible to connect for configuration + changes. +- Set up the public SSH key at the `DigitalOcean security + page `__. + `Here `__'s + a tutorial. +- Find out your SSH key ID at DigitalOcean by querying the below + command on your linux box: + +:: + + DO_API_TOKEN="" + curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_API_TOKEN" "https://api.digitalocean.com/v2/account/keys" + +Initialization +-------------- + +If this is your first time using terraform, you have to initialize it by +running the below command. (Note: initialization can be run multiple +times) + +:: + + terraform init + +After initialization it's good practice to create a new Terraform +environment for the droplets so they are always managed together. + +:: + + TESTNET_NAME="testnet-servers" + terraform env new "$TESTNET_NAME" + +Note this ``terraform env`` command is only available in terraform +``v0.9`` and up. + +Execution +--------- + +The below command will create 4 nodes in DigitalOcean. They will be +named ``testnet-servers-node0`` to ``testnet-servers-node3`` and they +will be tagged as ``testnet-servers``. + +:: + + DO_API_TOKEN="" + SSH_IDS="[ \"\" ]" + terraform apply -var TESTNET_NAME="testnet-servers" -var servers=4 -var DO_API_TOKEN="$DO_API_TOKEN" -var ssh_keys="$SSH_IDS" + +Note: ``ssh_keys`` is a list of strings. You can add multiple keys. For +example: ``["1234567","9876543"]``. + +Alternatively you can use the default settings. The number of default +servers is 4 and the testnet name is ``tf-testnet1``. Variables can also +be defined as environment variables instead of the command-line. +Environment variables that start with ``TF_VAR_`` will be translated +into the Terraform configuration. For example the number of servers can +be overridden by setting the ``TF_VAR_servers`` variable. + +:: + + TF_VAR_DO_API_TOKEN="" + TF_VAR_TESTNET_NAME="testnet-servers" + terraform apply + +Security +-------- + +DigitalOcean uses the root user by default on its droplets. This is fine +as long as SSH keys are used.
+However, some people would still like to disable root and use an
+alternative user to connect to the droplets, then ``sudo`` from there.
+Terraform can do this, but it requires an SSH agent running on the
+machine where Terraform is run, with one of the SSH keys of the droplets
+added to the agent. (This will be needed for Ansible too, so it's worth
+setting it up here. Check out the
+`ansible `__
+page for more information.) After setting up the SSH key, run
+``terraform apply`` with ``-var noroot=true`` to create your droplets.
+Terraform will create a user called ``ec2-user`` and move the SSH keys
+over, thereby disabling SSH login for root. It also adds ``ec2-user``
+to the sudoers file, so after logging in as ``ec2-user`` you can
+``sudo`` to ``root``.
+
+DigitalOcean has announced firewalls, but the current version of
+Terraform (0.9.8 as of this writing) does not support them yet.
+Fortunately, it is quite easy to set one up through the web interface
+(and not that bad through the `RESTful
+API `__
+either). When adding droplets to a firewall rule, you can add tags. All
+droplets in a testnet are tagged with the testnet name, so it is enough
+to define the testnet name in the firewall rule; it is not necessary to
+add the nodes one by one. Also, the firewall rule "remembers" the
+testnet name tag, so if you change the servers but keep the name, the
+firewall rules will still apply.
diff --git a/terraform-digitalocean/cluster/main.tf b/terraform-digitalocean/cluster/main.tf
new file mode 100644
index 000000000..daab601c0
--- /dev/null
+++ b/terraform-digitalocean/cluster/main.tf
@@ -0,0 +1,23 @@
+resource "digitalocean_tag" "cluster" {
+  name = "${var.name}"
+}
+
+resource "digitalocean_droplet" "cluster" {
+  name = "${var.name}-node${count.index}"
+  image = "${var.image_id}"
+  size = "${var.instance_size}"
+  region = "${element(var.regions, count.index)}"
+  ssh_keys = "${var.key_ids}"
+  count = "${var.servers}"
+  tags = ["${digitalocean_tag.cluster.id}"]
+
+  lifecycle = {
+    prevent_destroy = false
+  }
+
+  connection {
+    timeout = "30s"
+  }
+
+}
+
diff --git a/terraform-digitalocean/cluster/outputs.tf b/terraform-digitalocean/cluster/outputs.tf
new file mode 100644
index 000000000..90255cfcd
--- /dev/null
+++ b/terraform-digitalocean/cluster/outputs.tf
@@ -0,0 +1,25 @@
+// The cluster name
+output "name" {
+  value = "${var.name}"
+}
+
+// The list of cluster instance IDs
+output "instances" {
+  value = ["${digitalocean_droplet.cluster.*.id}"]
+}
+
+// The list of cluster instance private IPs
+output "private_ips" {
+  value = ["${digitalocean_droplet.cluster.*.ipv4_address_private}"]
+}
+
+// The list of cluster instance public IPs
+output "public_ips" {
+  value = ["${digitalocean_droplet.cluster.*.ipv4_address}"]
+}
+
+#// The list of cluster floating IPs
+#output "floating_ips" {
+#  value = ["${digitalocean_floating_ip.cluster.*.ip_address}"]
+#}
+
diff --git a/terraform-digitalocean/cluster/security.tf b/terraform-digitalocean/cluster/security.tf
new file mode 100644
index 000000000..3da56395c
--- /dev/null
+++ b/terraform-digitalocean/cluster/security.tf
@@ -0,0 +1,17 @@
+resource "null_resource" "cluster" {
+  count = "${ var.noroot ?
var.servers : 0 }" + connection { + host = "${element(digitalocean_droplet.cluster.*.ipv4_address,count.index)}" + } + provisioner "remote-exec" { + inline = [ + "useradd -m -s /bin/bash ec2-user", + "echo 'ec2-user ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/ec2-user", + "cp -r /root/.ssh /home/ec2-user/.ssh", + "chown -R ec2-user.ec2-user /home/ec2-user/.ssh", + "chmod -R 700 /home/ec2-user/.ssh", + "rm -rf /root/.ssh" + ] + } +} + diff --git a/terraform-digitalocean/cluster/variables.tf b/terraform-digitalocean/cluster/variables.tf new file mode 100644 index 000000000..8f2f4d241 --- /dev/null +++ b/terraform-digitalocean/cluster/variables.tf @@ -0,0 +1,35 @@ +variable "name" { + description = "The cluster name, e.g cdn" +} + +variable "image_id" { + description = "Image ID" + default = "ubuntu-16-04-x64" +} + +variable "regions" { + description = "Regions to launch in" + type = "list" + default = ["AMS2", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] +} + +variable "key_ids" { + description = "SSH keys to use on the nodes" + type = "list" +} + +variable "instance_size" { + description = "The instance size to use" + default = "2gb" +} + +variable "servers" { + description = "Desired instance count" + default = 4 +} + +variable "noroot" { + description = "Set this variable to true, if you want SSH keys set for ec2-user instead of root." + default = false +} + diff --git a/terraform-digitalocean/main.tf b/terraform-digitalocean/main.tf new file mode 100644 index 000000000..e2e262797 --- /dev/null +++ b/terraform-digitalocean/main.tf @@ -0,0 +1,64 @@ +#Terraform Configuration + +variable "DO_API_TOKEN" { + description = "DigitalOcean Access Token" +} + +variable "TESTNET_NAME" { + description = "Name of the cluster/testnet" + default = "tf-testnet1" +} + +variable "ssh_keys" { + description = "SSH keys provided in DigitalOcean to be used on the nodes" + # curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_API_TOKEN" "https://api.digitalocean.com/v2/account/keys" + default = [ + "6259615", + "7658963", + "7668263", + "7668264", + "8036767", + "8163311", + "9495227", + "10318834", + "11435493" + ] +} + +variable "servers" { + description = "Number of nodes in cluster" + default = "4" +} + +variable "image" { + description = "DigitalOcean image name" + default = "ubuntu-16-04-x64" +} + +variable "noroot" { + description = "Set this variable to true, if you want SSH keys set for ec2-user instead of root." + default = false +} + +provider "digitalocean" { + token = "${var.DO_API_TOKEN}" +} + +module "cluster" { + source = "./cluster" + name = "${var.TESTNET_NAME}" + key_ids = "${var.ssh_keys}" + servers = "${var.servers}" + noroot = "${var.noroot}" + image_id = "${var.image}" +} + + +output "public_ips" { + value = "${module.cluster.public_ips}" +} + +#output "floating_ips" { +# value = "${module.cluster.floating_ips}" +#} + diff --git a/tm-bench/README.md b/tm-bench/README.md deleted file mode 100644 index 2476ca11d..000000000 --- a/tm-bench/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Tendermint blockchain benchmarking tool (tm-bench) - -`tm-bench` is a simple benchmarking tool for [Tendermint -core](https://github.com/tendermint/tendermint) nodes. 
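If you want the `noroot`/`ec2-user` provisioning shown in the security configuration above, the droplet key must be reachable through a local SSH agent. A minimal sketch of that flow, assuming the private key lives at `~/.ssh/id_rsa` (an assumption; adjust to your setup):

```bash
# Sketch: provisioning with the noroot/ec2-user flow from the Security section.
# The key path and the -var values are placeholders/assumptions.
eval "$(ssh-agent -s)"        # start an SSH agent in this shell
ssh-add ~/.ssh/id_rsa         # add the key that is registered with DigitalOcean

terraform apply \
  -var DO_API_TOKEN="<your DigitalOcean API token>" \
  -var TESTNET_NAME="testnet-servers" \
  -var noroot=true            # enables the null_resource remote-exec provisioner above
```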
-
-```
-λ tm-bench -T 10 -r 1000 localhost:46657
-Stats          Avg        Stdev      Max
-Block latency  6.18ms     3.19ms     14ms
-Blocks/sec     0.828      0.378      1
-Txs/sec        963        493        1811
-```
-
-* [QuickStart using Docker](#quickstart-using-docker)
-* [QuickStart using binaries](#quickstart-using-binaries)
-* [Usage](#usage)
-
-## QuickStart using Docker
-
-```
-docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
-docker run -it --rm -v "/tmp:/tendermint" -p "46657:46657" --name=tm tendermint/tendermint
-
-docker run -it --rm --link=tm tendermint/bench tm:46657
-```
-
-## QuickStart using binaries
-
-Linux:
-
-```
-curl -L https://s3-us-west-2.amazonaws.com/tendermint/0.8.0/tendermint_linux_amd64.zip && sudo unzip -d /usr/local/bin tendermint_linux_amd64.zip && sudo chmod +x tendermint
-tendermint init
-tendermint node --app_proxy=dummy
-
-tm-bench localhost:46657
-```
-
-Max OS:
-
-```
-curl -L https://s3-us-west-2.amazonaws.com/tendermint/0.8.0/tendermint_darwin_amd64.zip && sudo unzip -d /usr/local/bin tendermint_darwin_amd64.zip && sudo chmod +x tendermint
-tendermint init
-tendermint node --app_proxy=dummy
-
-tm-bench localhost:46657
-```
-
-## Usage
-
-```
-tm-bench [-c 1] [-T 10] [-r 1000] [endpoints]
-
-Examples:
-    tm-bench localhost:46657
-Flags:
-  -T int
-    Exit after the specified amount of time in seconds (default 10)
-  -c int
-    Connections to keep open per endpoint (default 1)
-  -r int
-    Txs per second to send in a connection (default 1000)
-  -v  Verbose output
-```
diff --git a/tm-bench/README.rst b/tm-bench/README.rst
new file mode 100644
index 000000000..676f2bbfc
--- /dev/null
+++ b/tm-bench/README.rst
@@ -0,0 +1,144 @@
+Benchmarking and Monitoring
+===========================
+
+tm-bench
+--------
+
+Tendermint blockchain benchmarking tool: https://github.com/tendermint/tools/tree/master/tm-bench
+
+For example, the following:
+
+::
+
+    tm-bench -T 10 -r 1000 localhost:46657
+
+will output:
+
+::
+
+    Stats          Avg        Stdev      Max
+    Block latency  6.18ms     3.19ms     14ms
+    Blocks/sec     0.828      0.378      1
+    Txs/sec        963        493        1811
+
+Quick Start
+^^^^^^^^^^^
+
+Docker
+~~~~~~
+
+::
+
+    docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+    docker run -it --rm -v "/tmp:/tendermint" -p "46657:46657" --name=tm tendermint/tendermint
+
+    docker run -it --rm --link=tm tendermint/bench tm:46657
+
+Binaries
+~~~~~~~~
+
+If **Linux**, start with:
+
+::
+
+    curl -L https://s3-us-west-2.amazonaws.com/tendermint/0.10.4/tendermint_linux_amd64.zip && sudo unzip -d /usr/local/bin tendermint_linux_amd64.zip && sudo chmod +x tendermint
+
+if **Mac OS**, start with:
+
+::
+
+    curl -L https://s3-us-west-2.amazonaws.com/tendermint/0.10.4/tendermint_darwin_amd64.zip && sudo unzip -d /usr/local/bin tendermint_darwin_amd64.zip && sudo chmod +x tendermint
+
+then run:
+
+::
+
+    tendermint init
+    tendermint node --app_proxy=dummy
+
+    tm-bench localhost:46657
+
+with the last command being in a separate window.
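For a quick smoke test, the binaries quick start above can be collapsed into a single script that runs the node in the background instead of a second window. This is only a sketch and assumes `tendermint` and `tm-bench` are already on your `PATH`:

```bash
# Sketch: run the node in the background, then benchmark it.
tendermint init
tendermint node --app_proxy=dummy &
NODE_PID=$!

sleep 5                                   # arbitrary wait for the RPC port to come up
tm-bench -T 10 -r 1000 localhost:46657    # same flags as the example at the top

kill "$NODE_PID"                          # stop the background node when finished
```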
+
+Usage
+^^^^^
+
+::
+
+    tm-bench [-c 1] [-T 10] [-r 1000] [endpoints]
+
+    Examples:
+        tm-bench localhost:46657
+    Flags:
+      -T int
+            Exit after the specified amount of time in seconds (default 10)
+      -c int
+            Connections to keep open per endpoint (default 1)
+      -r int
+            Txs per second to send in a connection (default 1000)
+      -v    Verbose output
+
+tm-monitor
+----------
+
+Tendermint blockchain monitoring tool; watches over one or more nodes, collecting and providing various statistics to the user: https://github.com/tendermint/tools/tree/master/tm-monitor
+
+Quick Start
+^^^^^^^^^^^
+
+Docker
+~~~~~~
+
+::
+
+    docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
+    docker run -it --rm -v "/tmp:/tendermint" -p "46657:46657" --name=tm tendermint/tendermint
+
+    docker run -it --rm --link=tm tendermint/monitor tm:46657
+
+Binaries
+~~~~~~~~
+
+The steps are the same as for ``tm-bench`` above, except that the last
+command should be:
+
+::
+
+    tm-monitor localhost:46657
+
+Usage
+^^^^^
+
+::
+
+    tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:46670"] [endpoints]
+
+    Examples:
+        # monitor single instance
+        tm-monitor localhost:46657
+
+        # monitor a few instances by providing comma-separated list of RPC endpoints
+        tm-monitor host1:46657,host2:46657
+    Flags:
+      -listen-addr string
+            HTTP and Websocket server listen address (default "tcp://0.0.0.0:46670")
+      -no-ton
+            Do not show ton (table of nodes)
+      -v    verbose logging
+
+RPC UI
+^^^^^^
+
+Run ``tm-monitor`` and visit http://localhost:46670.
+You should see the list of available RPC endpoints:
+
+::
+
+    http://localhost:46670/status
+    http://localhost:46670/status/network
+    http://localhost:46670/monitor?endpoint=_
+    http://localhost:46670/status/node?name=_
+    http://localhost:46670/unmonitor?endpoint=_
+
+The API is available as GET requests with URI encoded parameters, or as JSONRPC
+POST requests. The JSONRPC methods are also exposed over websocket.
+
diff --git a/tm-monitor/README.md b/tm-monitor/README.md
deleted file mode 100644
index 85aacdeed..000000000
--- a/tm-monitor/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# Tendermint monitor (tm-monitor)
-
-Tendermint monitor watches over one or more [Tendermint
-core](https://github.com/tendermint/tendermint) applications (nodes),
-collecting and providing various statistics to the user.
- -* [QuickStart using Docker](#quickstart-using-docker) -* [QuickStart using binaries](#quickstart-using-binaries) -* [Usage](#usage) -* [RPC UI](#rpc-ui) - -## QuickStart using Docker - -``` -docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init -docker run -it --rm -v "/tmp:/tendermint" -p "46657:46657" --name=tm tendermint/tendermint - -docker run -it --rm --link=tm tendermint/monitor tm:46657 -``` - -## QuickStart using binaries - -Linux: - -``` -curl -L https://s3-us-west-2.amazonaws.com/tendermint/0.8.0/tendermint_linux_amd64.zip && sudo unzip -d /usr/local/bin tendermint_linux_amd64.zip && sudo chmod +x tendermint -tendermint init -tendermint node --app_proxy=dummy - -tm-monitor localhost:46657 -``` - -Max OS: - -``` -curl -L https://s3-us-west-2.amazonaws.com/tendermint/0.8.0/tendermint_darwin_amd64.zip && sudo unzip -d /usr/local/bin tendermint_darwin_amd64.zip && sudo chmod +x tendermint -tendermint init -tendermint node --app_proxy=dummy - -tm-monitor localhost:46657 -``` - -## Usage - -``` -tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:46670"] [endpoints] - -Examples: - # monitor single instance - tm-monitor localhost:46657 - - # monitor a few instances by providing comma-separated list of RPC endpoints - tm-monitor host1:46657,host2:46657 -Flags: - -listen-addr string - HTTP and Websocket server listen address (default "tcp://0.0.0.0:46670") - -no-ton - Do not show ton (table of nodes) - -v verbose logging -``` - -[![asciicast](https://asciinema.org/a/105974.png)](https://asciinema.org/a/105974) - -### RPC UI - -Run `tm-monitor` and visit [http://localhost:46670](http://localhost:46670). -You should see the list of the available RPC endpoints: - -``` -http://localhost:46670/status -http://localhost:46670/status/network -http://localhost:46670/monitor?endpoint=_ -http://localhost:46670/status/node?name=_ -http://localhost:46670/unmonitor?endpoint=_ -``` - -The API is available as GET requests with URI encoded parameters, or as JSONRPC -POST requests. The JSONRPC methods are also exposed over websocket. - -### Ideas - -- currently we get IPs and dial, but should reverse so the nodes dial the - netmon, both for node privacy and easier reconfig (validators changing - ip/port). It would be good to have both. For testnets with others we def need - them to dial the monitor. But I want to be able to run the monitor from my - laptop without openning ports. - If we don't want to open all the ports, maybe something like this would be a - good fit for us: tm-monitor agent running on each node, collecting all the - metrics. Each tm-monitor agent monitors local TM node and sends stats to a - single master tm-monitor master. That way we'll only need to open a single - port for UI on the node with tm-monitor master. And I believe it could be - done with a single package with a few subcommands. 
- ``` - # agent collecting metrics from localhost (default) - tm-monitor agent --master="192.168.1.17:8888" - - # agent collecting metrics from another TM node (useful for testing, development) - tm-monitor agent --master="192.168.1.17:8888" --node="192.168.1.18:46657" - - # master accepting stats from agents - tm-monitor master [--ton] OR [--ui] (`--ui` mode by default) - - # display table of nodes in the terminal (useful for testing, development, playing with TM) - # --nodes="localhost:46657" by default - tm-monitor - - # display table of nodes in the terminal (useful for testing, development, playing with TM) - tm-monitor --nodes="192.168.1.18:46657,192.168.1.19:46657" - ``` -- uptime over last day, month, year. There are different meanings for uptime. - One is to constantly ping the nodes and make sure they respond to eg. - /status. A more fine-grained one is to check for votes in the block commits. -- show network size + auto discovery. You can get a list of connected peers at - /net_info. But no single one will be connected to the whole network, so need - to tease out all the unique peers from calling /net_info on all of them. - Unless you have some prior information about how many peers in the net ... - More: we could add `-auto-discovery` option and try to connect to every node. -- input plugin for https://github.com/influxdata/telegraf, so the user is able - to get the metrics and send them whenever he wants to (grafana, prometheus, - etc.). - -Feel free to vote on the ideas or add your own by saying hello on -[Slack](http://forum.tendermint.com:3000/) or by opening an issue. diff --git a/terraforce/transact/transact.go b/transact/transact.go similarity index 100% rename from terraforce/transact/transact.go rename to transact/transact.go
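Since the tm-monitor API is served as plain GET requests, the RPC endpoints listed in the README above can also be exercised with `curl`. A small sketch, assuming `tm-monitor` is running on the default listen address and that `localhost:46657` is just an example endpoint value:

```bash
# Sketch: exercising the tm-monitor RPC endpoints with plain GET requests.
curl -s http://localhost:46670/status
curl -s http://localhost:46670/status/network

# add / remove an endpoint from monitoring (the endpoint value is an example)
curl -s "http://localhost:46670/monitor?endpoint=localhost:46657"
curl -s "http://localhost:46670/unmonitor?endpoint=localhost:46657"
```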