added EC2 provisioning, optimised Docker for EC2, added Python and
cppjit clients (Python not yet working)
This commit is contained in:
		
parent 52e174b1f7
commit 58ec49a34d
							
								
								
									
2  .gitignore  vendored
							| @ -1,4 +1,6 @@ | ||||
| venv/ | ||||
| *~ | ||||
| *.swp | ||||
| *.swo | ||||
| .vagrant/ | ||||
| client-tests.pem | ||||
|  | ||||
							
								
								
									
9  ansible/ec2-setup.yml  Normal file
							| @ -0,0 +1,9 @@ | ||||
| --- | ||||
| - name: Provision EC2 instances | ||||
|   hosts: local | ||||
|   gather_facts: false | ||||
|   roles: | ||||
|     - ec2  | ||||
| 
 | ||||
|   tasks: | ||||
|     - include: roles/ec2/tasks/setup.yml | ||||
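
This play runs against a "local" group, so it is presumably driven from the control machine with a one-line static inventory and a local connection, with AWS credentials exported for boto. A minimal sketch, assuming a hypothetical inventory file name and placeholder credentials (neither is part of this commit):

    # inventory.local (hypothetical):
    #   [local]
    #   localhost ansible_connection=local
    export AWS_ACCESS_KEY_ID='AK123'          # placeholder value
    export AWS_SECRET_ACCESS_KEY='abc123'     # placeholder value
    ansible-playbook -i inventory.local ansible/ec2-setup.yml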
							
								
								
									
10  ansible/ec2-terminate.yml  Normal file
							| @ -0,0 +1,10 @@ | ||||
| --- | ||||
| - name: Terminate all ec2 instances  | ||||
|   hosts: security_group_client-tests | ||||
|   remote_user: ubuntu # private key defined via ansible.cfg  | ||||
|   gather_facts: false | ||||
|   roles: | ||||
|       - ec2 | ||||
| 
 | ||||
|   tasks: | ||||
|       - include: roles/ec2/tasks/terminate.yml | ||||
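
Termination addresses hosts through the dynamic-inventory group security_group_client-tests, so the play is presumably run with the ec2.py inventory script added in this commit. A hedged sketch of the invocation:

    ansible-playbook -i ansible/ec2.py ansible/ec2-terminate.yml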
							
								
								
									
95  ansible/ec2.ini  Normal file
							| @ -0,0 +1,95 @@ | ||||
| # Ansible EC2 external inventory script settings | ||||
| # | ||||
| 
 | ||||
| [ec2] | ||||
| 
 | ||||
| # to talk to a private eucalyptus instance uncomment these lines | ||||
| # and edit eucalyptus_host to be the host name of your cloud controller | ||||
| #eucalyptus = True | ||||
| #eucalyptus_host = clc.cloud.domain.org | ||||
| 
 | ||||
| # AWS regions to make calls to. Set this to 'all' to make request to all regions | ||||
| # in AWS and merge the results together. Alternatively, set this to a comma | ||||
| # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' | ||||
| regions = us-east-1 | ||||
| regions_exclude = us-gov-west-1,cn-north-1 | ||||
| 
 | ||||
| # When generating inventory, Ansible needs to know how to address a server. | ||||
| # Each EC2 instance has a lot of variables associated with it. Here is the list: | ||||
| #   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance | ||||
| # Below are 2 variables that are used as the address of a server: | ||||
| #   - destination_variable | ||||
| #   - vpc_destination_variable | ||||
| 
 | ||||
| # This is the normal destination variable to use. If you are running Ansible | ||||
| # from outside EC2, then 'public_dns_name' makes the most sense. If you are | ||||
| # running Ansible from within EC2, then perhaps you want to use the internal | ||||
| # address, and should set this to 'private_dns_name'. | ||||
| destination_variable = public_dns_name | ||||
| 
 | ||||
| # For servers inside a VPC, using DNS names may not make sense. When an instance | ||||
| # has 'subnet_id' set, this variable is used. If the subnet is public, setting | ||||
| # this to 'ip_address' will return the public IP address. For instances in a | ||||
| # private subnet, this should be set to 'private_ip_address', and Ansible must | ||||
| # be run from within EC2. | ||||
| vpc_destination_variable = ip_address | ||||
| 
 | ||||
| # To tag instances on EC2 with the resource records that point to them from | ||||
| # Route53, uncomment and set 'route53' to True. | ||||
| route53 = False | ||||
| 
 | ||||
| # To exclude RDS instances from the inventory, uncomment and set to False. | ||||
| #rds = False | ||||
| 
 | ||||
| # Additionally, you can specify the list of zones to exclude looking up in | ||||
| # 'route53_excluded_zones' as a comma-separated list. | ||||
| # route53_excluded_zones = samplezone1.com, samplezone2.com | ||||
| 
 | ||||
| # By default, only EC2 instances in the 'running' state are returned. Set | ||||
| # 'all_instances' to True to return all instances regardless of state. | ||||
| all_instances = False | ||||
| 
 | ||||
| # By default, only RDS instances in the 'available' state are returned.  Set | ||||
| # 'all_rds_instances' to True to return all RDS instances regardless of state. | ||||
| all_rds_instances = False | ||||
| 
 | ||||
| # API calls to EC2 are slow. For this reason, we cache the results of an API | ||||
| # call. Set this to the path you want cache files to be written to. Two files | ||||
| # will be written to this directory: | ||||
| #   - ansible-ec2.cache | ||||
| #   - ansible-ec2.index | ||||
| cache_path = ~/.ansible/tmp | ||||
| 
 | ||||
| # The number of seconds a cache file is considered valid. After this many | ||||
| # seconds, a new API call will be made, and the cache file will be updated. | ||||
| # To disable the cache, set this value to 0 | ||||
| cache_max_age = 300 | ||||
| 
 | ||||
| # Organize groups into a nested/hierarchy instead of a flat namespace. | ||||
| nested_groups = False | ||||
| 
 | ||||
| # If you only want to include hosts that match a certain regular expression | ||||
| # pattern_include = stage-* | ||||
| 
 | ||||
| # If you want to exclude any hosts that match a certain regular expression | ||||
| # pattern_exclude = stage-* | ||||
| 
 | ||||
| # Instance filters can be used to control which instances are retrieved for | ||||
| # inventory. For the full list of possible filters, please read the EC2 API | ||||
| # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters | ||||
| # Filters are key/value pairs separated by '=', to list multiple filters use | ||||
| # a list separated by commas. See examples below. | ||||
| 
 | ||||
| # Retrieve only instances with (key=value) env=stage tag | ||||
| # instance_filters = tag:env=stage | ||||
| 
 | ||||
| # Retrieve only instances with role=webservers OR role=dbservers tag | ||||
| # instance_filters = tag:role=webservers,tag:role=dbservers | ||||
| 
 | ||||
| # Retrieve only t1.micro instances OR instances with tag env=stage | ||||
| # instance_filters = instance-type=t1.micro,tag:env=stage | ||||
| 
 | ||||
| # You can use wildcards in filter values also. Below will list instances which | ||||
| # tag Name value matches webservers1* | ||||
| # (ex. webservers15, webservers1a, webservers123 etc)  | ||||
| # instance_filters = tag:Name=webservers1* | ||||
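
The inventory script below reads this file from its own directory by default; the path can be overridden with EC2_INI_PATH, and results are cached under cache_path for cache_max_age seconds. A short sketch of working with those settings:

    export EC2_INI_PATH=/path/to/my_ec2.ini               # optional override
    rm -f ~/.ansible/tmp/ansible-ec2.cache \
          ~/.ansible/tmp/ansible-ec2.index                # drop the stale cache by hand if needed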
							
								
								
									
727  ansible/ec2.py  Executable file
							| @ -0,0 +1,727 @@ | ||||
| #!/usr/bin/env python | ||||
| 
 | ||||
| ''' | ||||
| EC2 external inventory script | ||||
| ================================= | ||||
| 
 | ||||
| Generates inventory that Ansible can understand by making API request to | ||||
| AWS EC2 using the Boto library. | ||||
| 
 | ||||
| NOTE: This script assumes Ansible is being executed where the environment | ||||
| variables needed for Boto have already been set: | ||||
|     export AWS_ACCESS_KEY_ID='AK123' | ||||
|     export AWS_SECRET_ACCESS_KEY='abc123' | ||||
| 
 | ||||
| This script also assumes there is an ec2.ini file alongside it.  To specify a | ||||
| different path to ec2.ini, define the EC2_INI_PATH environment variable: | ||||
| 
 | ||||
|     export EC2_INI_PATH=/path/to/my_ec2.ini | ||||
| 
 | ||||
| If you're using eucalyptus you need to set the above variables and | ||||
| you need to define: | ||||
| 
 | ||||
|     export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus | ||||
| 
 | ||||
| For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html | ||||
| 
 | ||||
| When run against a specific host, this script returns the following variables: | ||||
|  - ec2_ami_launch_index | ||||
|  - ec2_architecture | ||||
|  - ec2_association | ||||
|  - ec2_attachTime | ||||
|  - ec2_attachment | ||||
|  - ec2_attachmentId | ||||
|  - ec2_client_token | ||||
|  - ec2_deleteOnTermination | ||||
|  - ec2_description | ||||
|  - ec2_deviceIndex | ||||
|  - ec2_dns_name | ||||
|  - ec2_eventsSet | ||||
|  - ec2_group_name | ||||
|  - ec2_hypervisor | ||||
|  - ec2_id | ||||
|  - ec2_image_id | ||||
|  - ec2_instanceState | ||||
|  - ec2_instance_type | ||||
|  - ec2_ipOwnerId | ||||
|  - ec2_ip_address | ||||
|  - ec2_item | ||||
|  - ec2_kernel | ||||
|  - ec2_key_name | ||||
|  - ec2_launch_time | ||||
|  - ec2_monitored | ||||
|  - ec2_monitoring | ||||
|  - ec2_networkInterfaceId | ||||
|  - ec2_ownerId | ||||
|  - ec2_persistent | ||||
|  - ec2_placement | ||||
|  - ec2_platform | ||||
|  - ec2_previous_state | ||||
|  - ec2_private_dns_name | ||||
|  - ec2_private_ip_address | ||||
|  - ec2_publicIp | ||||
|  - ec2_public_dns_name | ||||
|  - ec2_ramdisk | ||||
|  - ec2_reason | ||||
|  - ec2_region | ||||
|  - ec2_requester_id | ||||
|  - ec2_root_device_name | ||||
|  - ec2_root_device_type | ||||
|  - ec2_security_group_ids | ||||
|  - ec2_security_group_names | ||||
|  - ec2_shutdown_state | ||||
|  - ec2_sourceDestCheck | ||||
|  - ec2_spot_instance_request_id | ||||
|  - ec2_state | ||||
|  - ec2_state_code | ||||
|  - ec2_state_reason | ||||
|  - ec2_status | ||||
|  - ec2_subnet_id | ||||
|  - ec2_tenancy | ||||
|  - ec2_virtualization_type | ||||
|  - ec2_vpc_id | ||||
| 
 | ||||
| These variables are pulled out of a boto.ec2.instance object. There is a lack of | ||||
| consistency with variable spellings (camelCase and underscores) since this | ||||
| just loops through all variables the object exposes. It is preferred to use the | ||||
| ones with underscores when multiple exist. | ||||
| 
 | ||||
| In addition, if an instance has AWS Tags associated with it, each tag is a new | ||||
| variable named: | ||||
|  - ec2_tag_[Key] = [Value] | ||||
| 
 | ||||
| Security groups are comma-separated in 'ec2_security_group_ids' and | ||||
| 'ec2_security_group_names'. | ||||
| ''' | ||||
| 
 | ||||
| # (c) 2012, Peter Sankauskas | ||||
| # | ||||
| # This file is part of Ansible, | ||||
| # | ||||
| # Ansible is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| # | ||||
| # Ansible is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU General Public License for more details. | ||||
| # | ||||
| # You should have received a copy of the GNU General Public License | ||||
| # along with Ansible.  If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| ###################################################################### | ||||
| 
 | ||||
| import sys | ||||
| import os | ||||
| import argparse | ||||
| import re | ||||
| from time import time | ||||
| import boto | ||||
| from boto import ec2 | ||||
| from boto import rds | ||||
| from boto import route53 | ||||
| import ConfigParser | ||||
| from collections import defaultdict | ||||
| 
 | ||||
| try: | ||||
|     import json | ||||
| except ImportError: | ||||
|     import simplejson as json | ||||
| 
 | ||||
| 
 | ||||
| class Ec2Inventory(object): | ||||
|     def _empty_inventory(self): | ||||
|         return {"_meta" : {"hostvars" : {}}} | ||||
| 
 | ||||
|     def __init__(self): | ||||
|         ''' Main execution path ''' | ||||
| 
 | ||||
|         # Inventory grouped by instance IDs, tags, security groups, regions, | ||||
|         # and availability zones | ||||
|         self.inventory = self._empty_inventory() | ||||
| 
 | ||||
|         # Index of hostname (address) to instance ID | ||||
|         self.index = {} | ||||
| 
 | ||||
|         # Read settings and parse CLI arguments | ||||
|         self.read_settings() | ||||
|         self.parse_cli_args() | ||||
| 
 | ||||
|         # Cache | ||||
|         if self.args.refresh_cache: | ||||
|             self.do_api_calls_update_cache() | ||||
|         elif not self.is_cache_valid(): | ||||
|             self.do_api_calls_update_cache() | ||||
| 
 | ||||
|         # Data to print | ||||
|         if self.args.host: | ||||
|             data_to_print = self.get_host_info() | ||||
| 
 | ||||
|         elif self.args.list: | ||||
|             # Display list of instances for inventory | ||||
|             if self.inventory == self._empty_inventory(): | ||||
|                 data_to_print = self.get_inventory_from_cache() | ||||
|             else: | ||||
|                 data_to_print = self.json_format_dict(self.inventory, True) | ||||
| 
 | ||||
|         print data_to_print | ||||
| 
 | ||||
| 
 | ||||
|     def is_cache_valid(self): | ||||
|         ''' Determines if the cache files have expired, or if it is still valid ''' | ||||
| 
 | ||||
|         if os.path.isfile(self.cache_path_cache): | ||||
|             mod_time = os.path.getmtime(self.cache_path_cache) | ||||
|             current_time = time() | ||||
|             if (mod_time + self.cache_max_age) > current_time: | ||||
|                 if os.path.isfile(self.cache_path_index): | ||||
|                     return True | ||||
| 
 | ||||
|         return False | ||||
| 
 | ||||
| 
 | ||||
|     def read_settings(self): | ||||
|         ''' Reads the settings from the ec2.ini file ''' | ||||
| 
 | ||||
|         config = ConfigParser.SafeConfigParser() | ||||
|         ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') | ||||
|         ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) | ||||
|         config.read(ec2_ini_path) | ||||
| 
 | ||||
|         # is eucalyptus? | ||||
|         self.eucalyptus_host = None | ||||
|         self.eucalyptus = False | ||||
|         if config.has_option('ec2', 'eucalyptus'): | ||||
|             self.eucalyptus = config.getboolean('ec2', 'eucalyptus') | ||||
|         if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): | ||||
|             self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') | ||||
| 
 | ||||
|         # Regions | ||||
|         self.regions = [] | ||||
|         configRegions = config.get('ec2', 'regions') | ||||
|         configRegions_exclude = config.get('ec2', 'regions_exclude') | ||||
|         if (configRegions == 'all'): | ||||
|             if self.eucalyptus_host: | ||||
|                 self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) | ||||
|             else: | ||||
|                 for regionInfo in ec2.regions(): | ||||
|                     if regionInfo.name not in configRegions_exclude: | ||||
|                         self.regions.append(regionInfo.name) | ||||
|         else: | ||||
|             self.regions = configRegions.split(",") | ||||
| 
 | ||||
|         # Destination addresses | ||||
|         self.destination_variable = config.get('ec2', 'destination_variable') | ||||
|         self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') | ||||
| 
 | ||||
|         # Route53 | ||||
|         self.route53_enabled = config.getboolean('ec2', 'route53') | ||||
|         self.route53_excluded_zones = [] | ||||
|         if config.has_option('ec2', 'route53_excluded_zones'): | ||||
|             self.route53_excluded_zones.extend( | ||||
|                 config.get('ec2', 'route53_excluded_zones', '').split(',')) | ||||
| 
 | ||||
|         # Include RDS instances? | ||||
|         self.rds_enabled = True | ||||
|         if config.has_option('ec2', 'rds'): | ||||
|             self.rds_enabled = config.getboolean('ec2', 'rds') | ||||
| 
 | ||||
|         # Return all EC2 and RDS instances (if RDS is enabled) | ||||
|         if config.has_option('ec2', 'all_instances'): | ||||
|             self.all_instances = config.getboolean('ec2', 'all_instances') | ||||
|         else: | ||||
|             self.all_instances = False | ||||
|         if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: | ||||
|             self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') | ||||
|         else: | ||||
|             self.all_rds_instances = False | ||||
| 
 | ||||
|         # Cache related | ||||
|         cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) | ||||
|         if not os.path.exists(cache_dir): | ||||
|             os.makedirs(cache_dir) | ||||
| 
 | ||||
|         self.cache_path_cache = cache_dir + "/ansible-ec2.cache" | ||||
|         self.cache_path_index = cache_dir + "/ansible-ec2.index" | ||||
|         self.cache_max_age = config.getint('ec2', 'cache_max_age') | ||||
| 
 | ||||
|         # Configure nested groups instead of flat namespace. | ||||
|         if config.has_option('ec2', 'nested_groups'): | ||||
|             self.nested_groups = config.getboolean('ec2', 'nested_groups') | ||||
|         else: | ||||
|             self.nested_groups = False | ||||
| 
 | ||||
|         # Do we need to just include hosts that match a pattern? | ||||
|         try: | ||||
|             pattern_include = config.get('ec2', 'pattern_include') | ||||
|             if pattern_include and len(pattern_include) > 0: | ||||
|                 self.pattern_include = re.compile(pattern_include) | ||||
|             else: | ||||
|                 self.pattern_include = None | ||||
|         except ConfigParser.NoOptionError, e: | ||||
|             self.pattern_include = None | ||||
| 
 | ||||
|         # Do we need to exclude hosts that match a pattern? | ||||
|         try: | ||||
|             pattern_exclude = config.get('ec2', 'pattern_exclude'); | ||||
|             if pattern_exclude and len(pattern_exclude) > 0: | ||||
|                 self.pattern_exclude = re.compile(pattern_exclude) | ||||
|             else: | ||||
|                 self.pattern_exclude = None | ||||
|         except ConfigParser.NoOptionError, e: | ||||
|             self.pattern_exclude = None | ||||
| 
 | ||||
|         # Instance filters (see boto and EC2 API docs) | ||||
|         self.ec2_instance_filters = defaultdict(list) | ||||
|         if config.has_option('ec2', 'instance_filters'): | ||||
|             for x in config.get('ec2', 'instance_filters', '').split(','): | ||||
|                 filter_key, filter_value = x.split('=') | ||||
|                 self.ec2_instance_filters[filter_key].append(filter_value) | ||||
| 
 | ||||
|     def parse_cli_args(self): | ||||
|         ''' Command line argument processing ''' | ||||
| 
 | ||||
|         parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') | ||||
|         parser.add_argument('--list', action='store_true', default=True, | ||||
|                            help='List instances (default: True)') | ||||
|         parser.add_argument('--host', action='store', | ||||
|                            help='Get all the variables about a specific instance') | ||||
|         parser.add_argument('--refresh-cache', action='store_true', default=False, | ||||
|                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') | ||||
|         self.args = parser.parse_args() | ||||
| 
 | ||||
| 
 | ||||
|     def do_api_calls_update_cache(self): | ||||
|         ''' Do API calls to each region, and save data in cache files ''' | ||||
| 
 | ||||
|         if self.route53_enabled: | ||||
|             self.get_route53_records() | ||||
| 
 | ||||
|         for region in self.regions: | ||||
|             self.get_instances_by_region(region) | ||||
|             if self.rds_enabled: | ||||
|                 self.get_rds_instances_by_region(region) | ||||
| 
 | ||||
|         self.write_to_cache(self.inventory, self.cache_path_cache) | ||||
|         self.write_to_cache(self.index, self.cache_path_index) | ||||
| 
 | ||||
| 
 | ||||
|     def get_instances_by_region(self, region): | ||||
|         ''' Makes an AWS EC2 API call to the list of instances in a particular | ||||
|         region ''' | ||||
| 
 | ||||
|         try: | ||||
|             if self.eucalyptus: | ||||
|                 conn = boto.connect_euca(host=self.eucalyptus_host) | ||||
|                 conn.APIVersion = '2010-08-31' | ||||
|             else: | ||||
|                 conn = ec2.connect_to_region(region) | ||||
| 
 | ||||
|             # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported | ||||
|             if conn is None: | ||||
|                 print("region name: %s likely not supported, or AWS is down.  connection to region failed." % region) | ||||
|                 sys.exit(1) | ||||
| 
 | ||||
|             reservations = [] | ||||
|             if self.ec2_instance_filters: | ||||
|                 for filter_key, filter_values in self.ec2_instance_filters.iteritems(): | ||||
|                     reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) | ||||
|             else: | ||||
|                 reservations = conn.get_all_instances() | ||||
| 
 | ||||
|             for reservation in reservations: | ||||
|                 for instance in reservation.instances: | ||||
|                     self.add_instance(instance, region) | ||||
| 
 | ||||
|         except boto.exception.BotoServerError, e: | ||||
|             if  not self.eucalyptus: | ||||
|                 print "Looks like AWS is down again:" | ||||
|             print e | ||||
|             sys.exit(1) | ||||
| 
 | ||||
|     def get_rds_instances_by_region(self, region): | ||||
|         ''' Makes an AWS API call to the list of RDS instances in a particular | ||||
|         region ''' | ||||
| 
 | ||||
|         try: | ||||
|             conn = rds.connect_to_region(region) | ||||
|             if conn: | ||||
|                 instances = conn.get_all_dbinstances() | ||||
|                 for instance in instances: | ||||
|                     self.add_rds_instance(instance, region) | ||||
|         except boto.exception.BotoServerError, e: | ||||
|             if not e.reason == "Forbidden": | ||||
|                 print "Looks like AWS RDS is down: " | ||||
|                 print e | ||||
|                 sys.exit(1) | ||||
| 
 | ||||
|     def get_instance(self, region, instance_id): | ||||
|         ''' Gets details about a specific instance ''' | ||||
|         if self.eucalyptus: | ||||
|             conn = boto.connect_euca(self.eucalyptus_host) | ||||
|             conn.APIVersion = '2010-08-31' | ||||
|         else: | ||||
|             conn = ec2.connect_to_region(region) | ||||
| 
 | ||||
|         # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported | ||||
|         if conn is None: | ||||
|             print("region name: %s likely not supported, or AWS is down.  connection to region failed." % region) | ||||
|             sys.exit(1) | ||||
| 
 | ||||
|         reservations = conn.get_all_instances([instance_id]) | ||||
|         for reservation in reservations: | ||||
|             for instance in reservation.instances: | ||||
|                 return instance | ||||
| 
 | ||||
|     def add_instance(self, instance, region): | ||||
|         ''' Adds an instance to the inventory and index, as long as it is | ||||
|         addressable ''' | ||||
| 
 | ||||
|         # Only want running instances unless all_instances is True | ||||
|         if not self.all_instances and instance.state != 'running': | ||||
|             return | ||||
| 
 | ||||
|         # Select the best destination address | ||||
|         if instance.subnet_id: | ||||
|             dest = getattr(instance, self.vpc_destination_variable) | ||||
|         else: | ||||
|             dest =  getattr(instance, self.destination_variable) | ||||
| 
 | ||||
|         if not dest: | ||||
|             # Skip instances we cannot address (e.g. private VPC subnet) | ||||
|             return | ||||
| 
 | ||||
|         # if we only want to include hosts that match a pattern, skip those that don't | ||||
|         if self.pattern_include and not self.pattern_include.match(dest): | ||||
|             return | ||||
| 
 | ||||
|         # if we need to exclude hosts that match a pattern, skip those | ||||
|         if self.pattern_exclude and self.pattern_exclude.match(dest): | ||||
|             return | ||||
| 
 | ||||
|         # Add to index | ||||
|         self.index[dest] = [region, instance.id] | ||||
| 
 | ||||
|         # Inventory: Group by instance ID (always a group of 1) | ||||
|         self.inventory[instance.id] = [dest] | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'instances', instance.id) | ||||
| 
 | ||||
|         # Inventory: Group by region | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'regions', region) | ||||
|         else: | ||||
|             self.push(self.inventory, region, dest) | ||||
| 
 | ||||
|         # Inventory: Group by availability zone | ||||
|         self.push(self.inventory, instance.placement, dest) | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, region, instance.placement) | ||||
| 
 | ||||
|         # Inventory: Group by instance type | ||||
|         type_name = self.to_safe('type_' + instance.instance_type) | ||||
|         self.push(self.inventory, type_name, dest) | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'types', type_name) | ||||
| 
 | ||||
|         # Inventory: Group by key pair | ||||
|         if instance.key_name: | ||||
|             key_name = self.to_safe('key_' + instance.key_name) | ||||
|             self.push(self.inventory, key_name, dest) | ||||
|             if self.nested_groups: | ||||
|                 self.push_group(self.inventory, 'keys', key_name) | ||||
| 
 | ||||
|         # Inventory: Group by VPC | ||||
|         if instance.vpc_id: | ||||
|             self.push(self.inventory, self.to_safe('vpc_id_' + instance.vpc_id), dest) | ||||
| 
 | ||||
|         # Inventory: Group by security group | ||||
|         try: | ||||
|             for group in instance.groups: | ||||
|                 key = self.to_safe("security_group_" + group.name) | ||||
|                 self.push(self.inventory, key, dest) | ||||
|                 if self.nested_groups: | ||||
|                     self.push_group(self.inventory, 'security_groups', key) | ||||
|         except AttributeError: | ||||
|             print 'Package boto seems a bit older.' | ||||
|             print 'Please upgrade boto >= 2.3.0.' | ||||
|             sys.exit(1) | ||||
| 
 | ||||
|         # Inventory: Group by tag keys | ||||
|         for k, v in instance.tags.iteritems(): | ||||
|             key = self.to_safe("tag_" + k + "=" + v) | ||||
|             self.push(self.inventory, key, dest) | ||||
|             if self.nested_groups: | ||||
|                 self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) | ||||
|                 self.push_group(self.inventory, self.to_safe("tag_" + k), key) | ||||
| 
 | ||||
|         # Inventory: Group by Route53 domain names if enabled | ||||
|         if self.route53_enabled: | ||||
|             route53_names = self.get_instance_route53_names(instance) | ||||
|             for name in route53_names: | ||||
|                 self.push(self.inventory, name, dest) | ||||
|                 if self.nested_groups: | ||||
|                     self.push_group(self.inventory, 'route53', name) | ||||
| 
 | ||||
|         # Global Tag: instances without tags | ||||
|         if len(instance.tags) == 0: | ||||
|             self.push(self.inventory, 'tag_none', dest) | ||||
|              | ||||
|         # Global Tag: tag all EC2 instances | ||||
|         self.push(self.inventory, 'ec2', dest) | ||||
| 
 | ||||
|         self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) | ||||
| 
 | ||||
| 
 | ||||
|     def add_rds_instance(self, instance, region): | ||||
|         ''' Adds an RDS instance to the inventory and index, as long as it is | ||||
|         addressable ''' | ||||
| 
 | ||||
|         # Only want available instances unless all_rds_instances is True | ||||
|         if not self.all_rds_instances and instance.status != 'available': | ||||
|             return | ||||
| 
 | ||||
|         # Select the best destination address | ||||
|         #if instance.subnet_id: | ||||
|             #dest = getattr(instance, self.vpc_destination_variable) | ||||
|         #else: | ||||
|             #dest =  getattr(instance, self.destination_variable) | ||||
|         dest = instance.endpoint[0] | ||||
| 
 | ||||
|         if not dest: | ||||
|             # Skip instances we cannot address (e.g. private VPC subnet) | ||||
|             return | ||||
| 
 | ||||
|         # Add to index | ||||
|         self.index[dest] = [region, instance.id] | ||||
| 
 | ||||
|         # Inventory: Group by instance ID (always a group of 1) | ||||
|         self.inventory[instance.id] = [dest] | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'instances', instance.id) | ||||
| 
 | ||||
|         # Inventory: Group by region | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'regions', region) | ||||
|         else: | ||||
|             self.push(self.inventory, region, dest) | ||||
| 
 | ||||
|         # Inventory: Group by availability zone | ||||
|         self.push(self.inventory, instance.availability_zone, dest) | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, region, instance.availability_zone) | ||||
| 
 | ||||
|         # Inventory: Group by instance type | ||||
|         type_name = self.to_safe('type_' + instance.instance_class) | ||||
|         self.push(self.inventory, type_name, dest) | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'types', type_name) | ||||
| 
 | ||||
|         # Inventory: Group by security group | ||||
|         try: | ||||
|             if instance.security_group: | ||||
|                 key = self.to_safe("security_group_" + instance.security_group.name) | ||||
|                 self.push(self.inventory, key, dest) | ||||
|                 if self.nested_groups: | ||||
|                     self.push_group(self.inventory, 'security_groups', key) | ||||
| 
 | ||||
|         except AttributeError: | ||||
|             print 'Package boto seems a bit older.' | ||||
|             print 'Please upgrade boto >= 2.3.0.' | ||||
|             sys.exit(1) | ||||
| 
 | ||||
|         # Inventory: Group by engine | ||||
|         self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) | ||||
| 
 | ||||
|         # Inventory: Group by parameter group | ||||
|         self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) | ||||
|         if self.nested_groups: | ||||
|             self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) | ||||
| 
 | ||||
|         # Global Tag: all RDS instances | ||||
|         self.push(self.inventory, 'rds', dest) | ||||
| 
 | ||||
|         self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) | ||||
| 
 | ||||
| 
 | ||||
|     def get_route53_records(self): | ||||
|         ''' Get and store the map of resource records to domain names that | ||||
|         point to them. ''' | ||||
| 
 | ||||
|         r53_conn = route53.Route53Connection() | ||||
|         all_zones = r53_conn.get_zones() | ||||
| 
 | ||||
|         route53_zones = [ zone for zone in all_zones if zone.name[:-1] | ||||
|                           not in self.route53_excluded_zones ] | ||||
| 
 | ||||
|         self.route53_records = {} | ||||
| 
 | ||||
|         for zone in route53_zones: | ||||
|             rrsets = r53_conn.get_all_rrsets(zone.id) | ||||
| 
 | ||||
|             for record_set in rrsets: | ||||
|                 record_name = record_set.name | ||||
| 
 | ||||
|                 if record_name.endswith('.'): | ||||
|                     record_name = record_name[:-1] | ||||
| 
 | ||||
|                 for resource in record_set.resource_records: | ||||
|                     self.route53_records.setdefault(resource, set()) | ||||
|                     self.route53_records[resource].add(record_name) | ||||
| 
 | ||||
| 
 | ||||
|     def get_instance_route53_names(self, instance): | ||||
|         ''' Check if an instance is referenced in the records we have from | ||||
|         Route53. If it is, return the list of domain names pointing to said | ||||
|         instance. If nothing points to it, return an empty list. ''' | ||||
| 
 | ||||
|         instance_attributes = [ 'public_dns_name', 'private_dns_name', | ||||
|                                 'ip_address', 'private_ip_address' ] | ||||
| 
 | ||||
|         name_list = set() | ||||
| 
 | ||||
|         for attrib in instance_attributes: | ||||
|             try: | ||||
|                 value = getattr(instance, attrib) | ||||
|             except AttributeError: | ||||
|                 continue | ||||
| 
 | ||||
|             if value in self.route53_records: | ||||
|                 name_list.update(self.route53_records[value]) | ||||
| 
 | ||||
|         return list(name_list) | ||||
| 
 | ||||
| 
 | ||||
|     def get_host_info_dict_from_instance(self, instance): | ||||
|         instance_vars = {} | ||||
|         for key in vars(instance): | ||||
|             value = getattr(instance, key) | ||||
|             key = self.to_safe('ec2_' + key) | ||||
| 
 | ||||
|             # Handle complex types | ||||
|             # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 | ||||
|             if key == 'ec2__state': | ||||
|                 instance_vars['ec2_state'] = instance.state or '' | ||||
|                 instance_vars['ec2_state_code'] = instance.state_code | ||||
|             elif key == 'ec2__previous_state': | ||||
|                 instance_vars['ec2_previous_state'] = instance.previous_state or '' | ||||
|                 instance_vars['ec2_previous_state_code'] = instance.previous_state_code | ||||
|             elif type(value) in [int, bool]: | ||||
|                 instance_vars[key] = value | ||||
|             elif type(value) in [str, unicode]: | ||||
|                 instance_vars[key] = value.strip() | ||||
|             elif type(value) == type(None): | ||||
|                 instance_vars[key] = '' | ||||
|             elif key == 'ec2_region': | ||||
|                 instance_vars[key] = value.name | ||||
|             elif key == 'ec2__placement': | ||||
|                 instance_vars['ec2_placement'] = value.zone | ||||
|             elif key == 'ec2_tags': | ||||
|                 for k, v in value.iteritems(): | ||||
|                     key = self.to_safe('ec2_tag_' + k) | ||||
|                     instance_vars[key] = v | ||||
|             elif key == 'ec2_groups': | ||||
|                 group_ids = [] | ||||
|                 group_names = [] | ||||
|                 for group in value: | ||||
|                     group_ids.append(group.id) | ||||
|                     group_names.append(group.name) | ||||
|                 instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) | ||||
|                 instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) | ||||
|             else: | ||||
|                 pass | ||||
|                 # TODO Product codes if someone finds them useful | ||||
|                 #print key | ||||
|                 #print type(value) | ||||
|                 #print value | ||||
| 
 | ||||
|         return instance_vars | ||||
| 
 | ||||
|     def get_host_info(self): | ||||
|         ''' Get variables about a specific host ''' | ||||
| 
 | ||||
|         if len(self.index) == 0: | ||||
|             # Need to load index from cache | ||||
|             self.load_index_from_cache() | ||||
| 
 | ||||
|         if not self.args.host in self.index: | ||||
|             # try updating the cache | ||||
|             self.do_api_calls_update_cache() | ||||
|             if not self.args.host in self.index: | ||||
|                 # host might not exist anymore | ||||
|                 return self.json_format_dict({}, True) | ||||
| 
 | ||||
|         (region, instance_id) = self.index[self.args.host] | ||||
| 
 | ||||
|         instance = self.get_instance(region, instance_id) | ||||
|         return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) | ||||
| 
 | ||||
|     def push(self, my_dict, key, element): | ||||
|         ''' Push an element onto an array that may not have been defined in | ||||
|         the dict ''' | ||||
|         group_info = my_dict.setdefault(key, []) | ||||
|         if isinstance(group_info, dict): | ||||
|             host_list = group_info.setdefault('hosts', []) | ||||
|             host_list.append(element) | ||||
|         else: | ||||
|             group_info.append(element) | ||||
| 
 | ||||
|     def push_group(self, my_dict, key, element): | ||||
|         ''' Push a group as a child of another group. ''' | ||||
|         parent_group = my_dict.setdefault(key, {}) | ||||
|         if not isinstance(parent_group, dict): | ||||
|             parent_group = my_dict[key] = {'hosts': parent_group} | ||||
|         child_groups = parent_group.setdefault('children', []) | ||||
|         if element not in child_groups: | ||||
|             child_groups.append(element) | ||||
| 
 | ||||
|     def get_inventory_from_cache(self): | ||||
|         ''' Reads the inventory from the cache file and returns it as a JSON | ||||
|         object ''' | ||||
| 
 | ||||
|         cache = open(self.cache_path_cache, 'r') | ||||
|         json_inventory = cache.read() | ||||
|         return json_inventory | ||||
| 
 | ||||
| 
 | ||||
|     def load_index_from_cache(self): | ||||
|         ''' Reads the index from the cache file sets self.index ''' | ||||
| 
 | ||||
|         cache = open(self.cache_path_index, 'r') | ||||
|         json_index = cache.read() | ||||
|         self.index = json.loads(json_index) | ||||
| 
 | ||||
| 
 | ||||
|     def write_to_cache(self, data, filename): | ||||
|         ''' Writes data in JSON format to a file ''' | ||||
| 
 | ||||
|         json_data = self.json_format_dict(data, True) | ||||
|         cache = open(filename, 'w') | ||||
|         cache.write(json_data) | ||||
|         cache.close() | ||||
| 
 | ||||
| 
 | ||||
|     def to_safe(self, word): | ||||
|         ''' Converts 'bad' characters in a string to underscores so they can be | ||||
|         used as Ansible groups ''' | ||||
| 
 | ||||
|         return re.sub("[^A-Za-z0-9\-]", "_", word) | ||||
| 
 | ||||
| 
 | ||||
|     def json_format_dict(self, data, pretty=False): | ||||
|         ''' Converts a dict to a JSON object and dumps it as a formatted | ||||
|         string ''' | ||||
| 
 | ||||
|         if pretty: | ||||
|             return json.dumps(data, sort_keys=True, indent=2) | ||||
|         else: | ||||
|             return json.dumps(data) | ||||
| 
 | ||||
| 
 | ||||
| # Run the script | ||||
| Ec2Inventory() | ||||
| 
 | ||||
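
The script implements the usual dynamic-inventory contract through argparse; it can be exercised by hand once the AWS credentials from the docstring are exported:

    ./ec2.py --list                  # grouped inventory as JSON (served from cache while fresh)
    ./ec2.py --refresh-cache         # force new API calls and rewrite the cache files
    ./ec2.py --host some.host.name   # ec2_* variables for a single instance (hostname is a placeholder)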
| @ -1,8 +1,10 @@ | ||||
| --- | ||||
| - name: Provision the operation system for tests | ||||
|   # testing | ||||
|   hosts: all | ||||
|   # hosts: all | ||||
|   # live | ||||
|   # hosts: TBD  | ||||
|   hosts: tag_Name_test_runner | ||||
|   remote_user: ubuntu | ||||
|   roles: | ||||
|       - common | ||||
|     - docker | ||||
|     - common | ||||
|  | ||||
							
								
								
									
4  ansible/roles/common/handlers/main.yml  Normal file
							| @ -0,0 +1,4 @@ | ||||
| --- | ||||
| - name: restart sshd | ||||
|   sudo: true  | ||||
|   service: name=ssh state=restarted | ||||
| @ -1,28 +1,13 @@ | ||||
| --- | ||||
| - name: install docker | ||||
|   sudo: true | ||||
|   # install script from https://docs.docker.com/installation/ubuntulinux/ | ||||
|   shell: curl -sSL https://get.docker.com/ubuntu/ | sudo sh   | ||||
| 
 | ||||
| - name: install package dependencies | ||||
|   sudo: true | ||||
|   apt: name={{ item }} | ||||
|   with_items: | ||||
|       - python-pip | ||||
|       - htop | ||||
| 
 | ||||
| - name: install python dependencies | ||||
|   sudo: true | ||||
|   pip: name=docker-py | ||||
| 
 | ||||
| 
 | ||||
| - name: enable docker for standard user | ||||
|   sudo: true | ||||
|   # todo: how to logout after this command, otherwise won't be effective in this play | ||||
|   user: name=vagrant groups=docker append=yes | ||||
|     - parallel | ||||
|     - htop | ||||
| 
 | ||||
| - name: checkout test repo | ||||
|   git: | ||||
|       repo: https://github.com/sveneh/tests.git  | ||||
|       version: develop | ||||
|       dest: git | ||||
|     repo: https://github.com/ethereum/tests.git  | ||||
|     version: develop | ||||
|     dest: git | ||||
|  | ||||
							
								
								
									
4  ansible/roles/docker/handlers/main.yml  Normal file
							| @ -0,0 +1,4 @@ | ||||
| --- | ||||
| - name: restart sshd | ||||
|   sudo: true  | ||||
|   service: name=ssh state=restarted | ||||
							
								
								
									
40  ansible/roles/docker/tasks/main.yml  Normal file
							| @ -0,0 +1,40 @@ | ||||
| --- | ||||
| - name: update package list | ||||
|   sudo: true | ||||
|   apt: update_cache=true | ||||
| 
 | ||||
| - name: install docker dependencies | ||||
|   sudo: true | ||||
|   apt: name={{ item }} install_recommends=false | ||||
|   with_items: | ||||
|     # Docker has serious problems on EC2: http://www.danstutzman.com/2014/07/speed-up-docker-on-ec2 | ||||
|     # and https://github.com/docker/docker/issues/4036 | ||||
|     - linux-generic | ||||
|     - python-pip | ||||
| 
 | ||||
| - name: Kernel update needs a restart  | ||||
|   sudo: true | ||||
|   command: shutdown -r now  | ||||
|   async: 0 | ||||
|   poll: 0 | ||||
|   ignore_errors: true | ||||
| 
 | ||||
| - name: waiting for server to come back | ||||
|   local_action: wait_for host={{ inventory_hostname }} port=22 | ||||
|                 state=started | ||||
|   sudo: false | ||||
| 
 | ||||
| - name: install docker | ||||
|   sudo: true | ||||
|   # install script from https://docs.docker.com/installation/ubuntulinux/ | ||||
|   # TODO this is not idempotent | ||||
|   shell: curl -sSL https://get.docker.com/ubuntu/ | sudo sh   | ||||
| 
 | ||||
| - name: install docker python API | ||||
|   sudo: true | ||||
|   pip: name=docker-py | ||||
| 
 | ||||
| - name: enable docker for standard user | ||||
|   sudo: true | ||||
|   user: name={{ ansible_ssh_user }} groups=docker append=yes | ||||
|   notify: restart sshd | ||||
							
								
								
									
33  ansible/roles/ec2/tasks/setup.yml  Normal file
							| @ -0,0 +1,33 @@ | ||||
| --- | ||||
| - name: create default security group  | ||||
|   ec2_group: | ||||
|     name: "{{ security_group }}" | ||||
|     region: "{{ region }}" | ||||
|     description: "{{ project_description }}" | ||||
|     rules: | ||||
|         # ssh | ||||
|       - proto: tcp | ||||
|         from_port: 22 | ||||
|         to_port: 22 | ||||
|         cidr_ip: "{{ ip_access_range }}" | ||||
|     rules_egress: | ||||
|       - proto: all | ||||
|         cidr_ip: "{{ ip_access_range }}" | ||||
| 
 | ||||
| 
 | ||||
| - name: start ec2 instances | ||||
|   ec2:   | ||||
|     group: "{{ security_group }}" | ||||
|     instance_type: "{{ instance_type }}"  | ||||
|     image: "{{ image }}"  | ||||
|     wait: true  | ||||
|     region: "{{ region }}" | ||||
|     key_name: "{{ keypair }}" | ||||
|     instance_tags: | ||||
|       Name: test_runner | ||||
|     count_tag:  | ||||
|       Name: test_runner | ||||
|     exact_count: "{{ total_no_instances }}" | ||||
| #    volumes: | ||||
| #      - device_name: /dev/xvda | ||||
| #        volume_size: "{{ volume_size_gb }}" | ||||
							
								
								
									
8  ansible/roles/ec2/tasks/terminate.yml  Normal file
							| @ -0,0 +1,8 @@ | ||||
| --- | ||||
| - name: Terminate ec2 instances  | ||||
|   local_action: ec2 | ||||
|         state=absent | ||||
|         instance_ids={{ ec2_id }} | ||||
|         region={{ region }} | ||||
|         wait=true | ||||
| 
 | ||||
							
								
								
									
21  ansible/roles/ec2/vars/main.yml  Normal file
							| @ -0,0 +1,21 @@ | ||||
| --- | ||||
| # default config for ec2 instances | ||||
| 
 | ||||
| instance_type:      c4.xlarge   | ||||
| security_group:     client-tests | ||||
| 
 | ||||
| # image:              ami-d6e7c084 | ||||
| image:              ami-9eaa1cf6 | ||||
| # region:             ap-southeast-1 | ||||
| region:             us-east-1 | ||||
| keypair:            christoph | ||||
| # keypair:            client-tests | ||||
| volume_size_gb:     50 | ||||
| 
 | ||||
| # limit access to AWS to these clients in CIDR notation | ||||
| ip_access_range:    0.0.0.0/0 | ||||
| 
 | ||||
| 
 | ||||
| project_description: https://github.com/ethereum/tests | ||||
| 
 | ||||
| total_no_instances: 1 | ||||
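
These role vars configure the provisioning play; extra vars still override them, so one-off changes can be passed on the command line. A hedged example with placeholder values:

    ansible-playbook -i inventory.local ansible/ec2-setup.yml \
        -e total_no_instances=4 -e keypair=my-keypair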
| @ -1,18 +1,35 @@ | ||||
| --- | ||||
| - name: update C++ client | ||||
|   sudo: true   | ||||
|   docker_image: | ||||
|       path: git/ansible/test-files/docker-cpp | ||||
|       name: cpp | ||||
|       path: /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-cppjit | ||||
|       name: cppjit | ||||
|       state: build | ||||
| #  command: docker build -t cpp /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-cpp | ||||
|   async: 3600 | ||||
|   poll: 5 | ||||
| 
 | ||||
| - name: update Go client | ||||
|   sudo: true  | ||||
|   docker_image: | ||||
|       path: git/ansible/test-files/docker-go | ||||
|       path: /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-go | ||||
|       name: go | ||||
|       state: build | ||||
| #  command: docker build -t go /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-go  | ||||
|   async: 3600 | ||||
|   poll: 5 | ||||
| 
 | ||||
| - name: Run infinite tests (press ^C to stop) | ||||
|   sudo: true | ||||
|   shell: git/ansible/test-files/testrunner.sh | ||||
| - name: update Python client | ||||
|   docker_image: | ||||
|       path: /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-python | ||||
|       name: python | ||||
|       state: build | ||||
|   async: 3600 | ||||
|   poll: 5 | ||||
| 
 | ||||
| - name: Run infinite tests  | ||||
|   shell: seq {{ ansible_processor_vcpus }} | parallel --max-args=0 /home/{{ ansible_ssh_user }}/git/ansible/test-files/testrunner.sh | ||||
|   async: "{{ 3600 * 2 }}" | ||||
|   poll: 0 | ||||
|   register: log_runner | ||||
| 
 | ||||
| - name: verify previous task | ||||
|   async_status: jid={{ log_runner.ansible_job_id }} | ||||
|  | ||||
							
								
								
									
44  ansible/test-files/docker-cppjit/Dockerfile  Normal file
							| @ -0,0 +1,44 @@ | ||||
| # adjusted from https://github.com/ethereum/cpp-ethereum/blob/develop/docker/Dockerfile | ||||
| FROM ubuntu:14.04 | ||||
| 
 | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
| RUN apt-get update | ||||
| RUN apt-get upgrade -y | ||||
| 
 | ||||
| # Ethereum dependencies | ||||
| RUN apt-get install -qy build-essential g++-4.8 git cmake libboost-all-dev libcurl4-openssl-dev wget | ||||
| RUN apt-get install -qy automake unzip libgmp-dev libtool libleveldb-dev yasm libminiupnpc-dev libreadline-dev scons | ||||
| RUN apt-get install -qy libjsoncpp-dev libargtable2-dev | ||||
| 
 | ||||
| # NCurses based GUI (not optional though for a successful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 ) | ||||
| RUN apt-get install -qy libncurses5-dev | ||||
| 
 | ||||
| # Qt-based GUI | ||||
| # RUN apt-get install -qy qtbase5-dev qt5-default qtdeclarative5-dev libqt5webkit5-dev | ||||
| 
 | ||||
| RUN sudo apt-get -y install software-properties-common | ||||
| 
 | ||||
| # LLVM-3.5 | ||||
| RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key|sudo apt-key add - | ||||
| RUN echo "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.5 main\ndeb-src http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.5 main" > /etc/apt/sources.list.d/llvm-trusty.list | ||||
| RUN apt-get update | ||||
| RUN apt-get install -qy llvm-3.5 libedit-dev | ||||
| 
 | ||||
| # Fix llvm-3.5 cmake paths | ||||
| RUN mkdir -p /usr/lib/llvm-3.5/share/llvm && ln -s /usr/share/llvm-3.5/cmake /usr/lib/llvm-3.5/share/llvm/cmake | ||||
| 
 | ||||
| 
 | ||||
| # Ethereum PPA | ||||
| RUN apt-get install -qy software-properties-common | ||||
| RUN add-apt-repository ppa:ethereum/ethereum | ||||
| RUN apt-get update | ||||
| RUN apt-get install -qy libcryptopp-dev libjson-rpc-cpp-dev | ||||
| 
 | ||||
| # Build Ethereum (HEADLESS) | ||||
| RUN git clone --depth=1 --branch develop https://github.com/ethereum/cpp-ethereum | ||||
| RUN mkdir -p cpp-ethereum/build | ||||
| RUN cd cpp-ethereum/build && cmake .. -DCMAKE_BUILD_TYPE=Release -DHEADLESS=1 -DEVMJIT=1 && make -j $(cat /proc/cpuinfo | grep processor | wc -l) && make install | ||||
| RUN ldconfig | ||||
| 
 | ||||
| ENTRYPOINT ["/cpp-ethereum/build/test/checkRandomTest"] | ||||
| 
 | ||||
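
The image's default entrypoint checks a test, while the test runner overrides the entrypoint to generate one first. Building and exercising it by hand could look like this (the cppjit image name matches what the testrunner role builds):

    docker build -t cppjit ansible/test-files/docker-cppjit
    TEST="$(docker run --rm --entrypoint=/cpp-ethereum/build/test/createRandomTest cppjit)"
    docker run --rm cppjit "$TEST"   # exit status signals pass/fail, as used in testrunner.sh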
							
								
								
									
14  ansible/test-files/docker-python/Dockerfile  Normal file
							| @ -0,0 +1,14 @@ | ||||
| FROM sveneh/pyethereum-base | ||||
| 
 | ||||
| RUN git clone --branch master https://github.com/ethereum/pyethereum.git | ||||
| 
 | ||||
| RUN cd pyethereum && curl https://bootstrap.pypa.io/bootstrap-buildout.py | python | ||||
| 
 | ||||
| RUN cd pyethereum && bin/buildout | ||||
| 
 | ||||
| #default port for incoming requests | ||||
| EXPOSE 30303 | ||||
| 
 | ||||
| WORKDIR /pyethereum/bin  | ||||
| 
 | ||||
| ENTRYPOINT ["./python", "../tests/test_vm.py"] | ||||
| @ -4,53 +4,53 @@ | ||||
| #cd ~/software/Ethereum/pyethereum (python has local dependencies so only works from within the directory) | ||||
| while [ 1 ] | ||||
| do	 | ||||
| 	TEST="$(docker run --rm cpp)" | ||||
| 	TEST="$(docker run --rm --entrypoint="/cpp-ethereum/build/test/createRandomTest" cppjit)" | ||||
| 	# echo "$TEST" | ||||
| 
 | ||||
| 	# test pyethereum | ||||
| 	 | ||||
| 	 #OUTPUT_PYTHON="$(python ./tests/test_vm.py "$TEST")" | ||||
| 	 #RESULT_PYTHON=$? | ||||
| 	# test pyethereum | ||||
| 	#OUTPUT_PYTHON="$(python ./tests/test_vm.py "$TEST")" | ||||
| 	#RESULT_PYTHON=$? | ||||
| 
 | ||||
| 	# test go | ||||
| 	 OUTPUT_GO="$(docker run --rm go "$TEST")" | ||||
| 	 RESULT_GO=$? | ||||
| 
 | ||||
| 	OUTPUT_GO="$(docker run --rm go "$TEST")" | ||||
| 	RESULT_GO=$? | ||||
| 	 | ||||
| 	# test cpp-jit | ||||
| 	#OUTPUT_CPPJIT="$(~/software/Ethereum/cpp-ethereum/build/test/checkRandomTest "$TEST")" | ||||
| 	#RESULT_CPPJIT=$? | ||||
| 	OUTPUT_CPPJIT="$(docker run --rm cppjit "$TEST")" | ||||
| 	RESULT_CPPJIT=$? | ||||
| 
 | ||||
| # go fails | ||||
| if [ "$RESULT_GO" -ne 0 ]; then | ||||
| 	echo Failed: | ||||
| 	echo Output_GO: | ||||
| 	echo $OUTPUT_GO | ||||
| 	echo Test: | ||||
| 	echo "$TEST" | ||||
| 	echo "$TEST" > FailedTest.json | ||||
| 	mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")GO.json # replace with scp to central server | ||||
| fi | ||||
| 	# go fails | ||||
| 	if [ "$RESULT_GO" -ne 0 ]; then | ||||
| 		echo Failed: | ||||
| 		echo Output_GO: | ||||
| 		echo $OUTPUT_GO | ||||
| 		echo Test: | ||||
| 		echo "$TEST" | ||||
| 		echo "$TEST" > FailedTest.json | ||||
| 		mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")GO.json # replace with scp to central server | ||||
| 	fi | ||||
| 
 | ||||
| # python fails | ||||
| #if [ "$RESULT_PYTHON" -ne 0 ]; then | ||||
| #	echo Failed: | ||||
| #	echo Output_PYTHON: | ||||
| #	echo $OUTPUT_PYTHON | ||||
| #	echo Test: | ||||
| #	echo "$TEST" | ||||
| #	echo "$TEST" > FailedTest.json | ||||
| #	mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")PYTHON.json | ||||
| #fi | ||||
| 	# python fails | ||||
| 	#if [ "$RESULT_PYTHON" -ne 0 ]; then | ||||
| 	#	echo Failed: | ||||
| 	#	echo Output_PYTHON: | ||||
| 	#	echo $OUTPUT_PYTHON | ||||
| 	#	echo Test: | ||||
| 	#	echo "$TEST" | ||||
| 	#	echo "$TEST" > FailedTest.json | ||||
| 	#	mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")PYTHON.json | ||||
| 	#fi | ||||
| 
 | ||||
| # cppjit fails | ||||
| #if [ "$RESULT_CPPJIT" -ne 0 ]; then | ||||
| #	echo Failed: | ||||
| #	echo Output_CPPJIT: | ||||
| #	echo $OUTPUT_CPPJIT | ||||
| #	echo Test: | ||||
| #	echo "$TEST" | ||||
| #	echo "$TEST" > FailedTest.json | ||||
| #	mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")CPPJIT.json | ||||
| #fi | ||||
| 	# cppjit fails | ||||
| 	if [ "$RESULT_CPPJIT" -ne 0 ]; then | ||||
| 		echo Failed: | ||||
| 		echo Output_CPPJIT: | ||||
| 		echo $OUTPUT_CPPJIT | ||||
| 		echo Test: | ||||
| 		echo "$TEST" | ||||
| 		echo "$TEST" > FailedTest.json | ||||
| 		mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")CPPJIT.json | ||||
| 	fi | ||||
| 	exit | ||||
| done | ||||
| 
 | ||||
|  | ||||
| @ -1,12 +1,12 @@ | ||||
| --- | ||||
| - name: preparing and running tests | ||||
|   # testing | ||||
|   hosts: all | ||||
|   # hosts: all | ||||
|   # live | ||||
|   # hosts: TBD | ||||
|   hosts: tag_Name_test_runner | ||||
|    | ||||
|   # TODO use the right user for configuring, until credentials set, stay with default vagrant user  | ||||
|   # remote_user: ubuntu | ||||
|   remote_user: ubuntu | ||||
|    | ||||
|   roles: | ||||
|       - testrunner | ||||
|  | ||||