cloudstore-developers
Changes
.gitignore 5(+4 -1)
bin/config.ini.example 81(+23 -58)
bin/run.py 2(+1 -1)
cloudscale/deployment_scripts/config.py 40(+37 -3)
cloudscale/deployment_scripts/frontend.py 59(+25 -34)
setup.py 1(+1 -0)
Details
.gitignore 5(+4 -1)
diff --git a/.gitignore b/.gitignore
index eccb9f1..7d3fbd9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,7 @@ bin/rds-tpcw-dump-latest.sql
bin/deployment_scripts.log
bin/paramiko.log
cloudscale_deployment_scripts.egg-info
-dist/cloudscale-deployment-scripts-0.1.0
\ No newline at end of file
+dist/cloudscale-deployment-scripts-0.1.0
+build
+dist
+deployment_scripts.log
bin/config.ini.example 81(+23 -58)
diff --git a/bin/config.ini.example b/bin/config.ini.example
index b55eda0..e4f61c3 100644
--- a/bin/config.ini.example
+++ b/bin/config.ini.example
@@ -1,69 +1,34 @@
-[COMMON]
-provider = aws
-db_provider = mysql
-num_instances = 2
+[DATABASE]
+name = tpcw
+user = root
+password = password
+dump_url = http://cloudscale.xlab.si/github/rds-tpcw-dump-latest.sql
+
+[APPLICATION]
+distribution_url = http://cloudscale.xlab.si/showcase/distribution/builds/showcaseV3-sql.war
+connection_pool_size = 150
+
+[AUTO_SCALABILITY]
+enabled = no
+cooldown = 300
+
+[AWS]
+aws_access_key_id = <your aws access key id>
+aws_secret_access_key = <your aws secret access key>
+region = eu-west-1
+availability_zone = eu-west-1a
[RDS]
-generate_dump_path = /path/to/rds-tpcw-dump-latest.sql
instance_type = db.m3.medium
num_replicas = 0
-generate_type = dump
-region = eu-west-1
master_identifier = cloudscale-master
replica_identifier = cloudscale-replica
-database_name = tpcw
-database_user = root
-database_pass = password
-driver = com.mysql.jdbc.ReplicationDriver
-connection_pool_size = 150
[EC2]
-aws_access_key_id =
-aws_secret_access_key =
instance_type = t2.small
-key_name = cloudscale
-key_pair =
-region = eu-west-1
-availability_zones = eu-west-1a
ami_id = ami-4d5bd93a
-is_autoscalable = no
-cooldown = 300
+key_name = cloudscale
+key_pair = <auto-generated>
remote_user = ubuntu
-instances_identifier = cloudscale
-
-[OPENSTACK]
-username =
-password =
-tenant_name = cloudscale
-auth_url = http://172.16.93.211:5000/v2.0
-image_name = Ubuntu 14.04
-instance_type = 2GB-2CPU-10GB
-key_name = key-pair
-key_pair = key_pair
-image_username = xlab
-database_type = mysql
-own_infrastructure = no
-
-[MYSQL]
-generate_type = dump
-generate_dump_path = /path/to/rds-tpcw-dump-15.1.2014.sql
-instance_type = flavor 2
-database_name = tpcw
-database_user = root
-database_pass = password
-num_replicas = 5
-driver = com.mysql.jdbc.ReplicationDriver
-image_name = mysql-master-slave
-image_username = xlab
-showcase_war_url = http://cloudscale.xlab.si/showcase/builds/showcaseV3-sql.war
-
-[MONGODB]
-generate_type = dump
-generate_dump_path = /path/to/mongo-tpcw-dump-latest.tar.gz
-instance_type = flavor 2
-database_name = tpcw
-database_user = root
-database_pass = password
-num_replicas = 5
-showcase_war_url = http://cloudscale.xlab.si/showcase/builds/showcaseV3-nosql.war
-
+instance_identifier = cloudscale
+num_instances = 2
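Note: for reference, a minimal sketch of reading the reorganized config.ini with the standard library ConfigParser (section and option names taken from the example above; the file path, and using ConfigParser directly instead of the project's read_config() helper, are assumptions for illustration only):

import ConfigParser  # Python 2 stdlib, matching the project's Python 2 code

cfg = ConfigParser.ConfigParser()
cfg.read('bin/config.ini')  # assumed path to a copy of config.ini.example

database_name = cfg.get('DATABASE', 'name')
num_replicas = int(cfg.get('RDS', 'num_replicas'))
is_autoscalable = cfg.get('AUTO_SCALABILITY', 'enabled') == 'yes'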
bin/run.py 2(+1 -1)
diff --git a/bin/run.py b/bin/run.py
index c4f633c..201e158 100644
--- a/bin/run.py
+++ b/bin/run.py
@@ -9,6 +9,6 @@ if __name__ == "__main__":
config_path = sys.argv[2]
infrastructure = sys.argv[1]
logger = Logger()
- deploy(config_path, os.path.abspath(os.path.dirname(__file__)), logger)
+ deploy(infrastructure, config_path, os.path.abspath(os.path.dirname(__file__)), logger)
else:
print """Usage: python run.py <aws|openstack> <path_to_config>"""
\ No newline at end of file
diff --git a/cloudscale/deployment_scripts/__init__.py b/cloudscale/deployment_scripts/__init__.py
index 041edd7..21c645b 100644
--- a/cloudscale/deployment_scripts/__init__.py
+++ b/cloudscale/deployment_scripts/__init__.py
@@ -6,8 +6,8 @@ from cloudscale.deployment_scripts.frontend import Frontend
from cloudscale.deployment_scripts.scripts.infrastructure.openstack import openstack_remove_all
-def deploy(config_path, results_dir, logger):
- config = Config(results_dir, config_path)
+def deploy(infrastructure, config_path, results_dir, logger):
+ config = Config(infrastructure, results_dir, config_path)
_setup_backend(config, logger)
_setup_frontend(config, logger)
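Note: a hedged sketch of calling the new deploy() signature directly, mirroring what bin/run.py now does (the Logger import path is an assumption; it is not shown in this diff):

import os
from cloudscale.deployment_scripts import deploy
from cloudscale.deployment_scripts.logger import Logger  # assumed location of the Logger used in bin/run.py

deploy('aws', 'bin/config.ini', os.path.abspath('bin'), Logger())  # infrastructure, config path, results dir, logger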
cloudscale/deployment_scripts/config.py 40(+37 -3)
diff --git a/cloudscale/deployment_scripts/config.py b/cloudscale/deployment_scripts/config.py
index ea83afd..5007668 100644
--- a/cloudscale/deployment_scripts/config.py
+++ b/cloudscale/deployment_scripts/config.py
@@ -1,14 +1,48 @@
from cloudscale.deployment_scripts.scripts import read_config, create_user_path
+class Setup:
+
+ def __init__(self, config, logger):
+ self.logger = logger
+ self.config = config
+ self.cfg = config.cfg
+
+ self.read_config()
+
+ def read_config(self):
+ self.showcase_location = self.cfg.get('APPLICATION', 'distribution_url')
+ self.connection_pool_size = self.cfg.get('APPLICATION', 'connection_pool_size')
+ self.access_key = self.cfg.get('AWS', 'aws_access_key_id')
+ self.secret_key = self.cfg.get('AWS', 'aws_secret_access_key')
+ self.region = self.cfg.get('AWS', 'region')
+ self.availability_zone = self.cfg.get('AWS', 'availability_zone')
+ self.instance_type = self.cfg.get('EC2', 'instance_type')
+ self.ami_id = self.cfg.get('EC2', 'ami_id')
+ self.key_name = self.cfg.get('EC2', 'key_name')
+ self.key_pair = self.cfg.get('EC2', 'key_pair')
+ self.remote_user = self.cfg.get('EC2', 'remote_user')
+ self.instance_identifier = self.cfg.get('EC2', 'instance_identifier')
+        self.num_instances = int(self.cfg.get('EC2', 'num_instances'))
+ self.cooldown = int(self.cfg.get('AUTO_SCALABILITY', 'cooldown'))
+ self.is_autoscalable = self.cfg.get('AUTO_SCALABILITY', 'enabled')
+ self.is_autoscalable = self.is_autoscalable == 'yes'
+ self.database_name = self.cfg.get('DATABASE', 'name')
+ self.database_user = self.cfg.get('DATABASE', 'user')
+ self.database_password = self.cfg.get('DATABASE', 'password')
+ self.database_dump_url = self.cfg.get('DATABASE', 'dump_url')
+ self.rds_instance_type = self.cfg.get('RDS', 'instance_type')
+ self.rds_num_replicas = int(self.cfg.get('RDS', 'num_replicas'))
+ self.rds_master_identifier = self.cfg.get('RDS', 'master_identifier')
+ self.rds_replica_identifier = self.cfg.get('RDS', 'replica_identifier')
+
class Config:
- def __init__(self, output_directory, config_path):
+ def __init__(self, infrastructure, output_directory, config_path):
+ self.provider = infrastructure
self.config_path = config_path
self.user_path = create_user_path(output_directory)
self.cfg = read_config(self.config_path)
- self.provider = self.cfg.get('COMMON', 'provider')
- self.db_provider = self.cfg.get('COMMON', 'db_provider')
def save(self, section, variable, value):
self.cfg.save_option(self.config_path, section, variable, str(value))
cloudscale/deployment_scripts/frontend.py 59(+25 -34)
diff --git a/cloudscale/deployment_scripts/frontend.py b/cloudscale/deployment_scripts/frontend.py
index e27451d..0a93b2d 100644
--- a/cloudscale/deployment_scripts/frontend.py
+++ b/cloudscale/deployment_scripts/frontend.py
@@ -1,5 +1,6 @@
import os
import time
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts.infrastructure.aws import aws_create_keypair
from cloudscale.deployment_scripts.scripts.infrastructure.aws import aws_create_instance
from cloudscale.deployment_scripts.scripts.infrastructure.aws import aws_create_loadbalancer
@@ -11,78 +12,68 @@ from cloudscale.deployment_scripts.scripts.infrastructure.openstack import opens
-class Frontend:
+class Frontend(Setup):
def __init__(self, config, logger):
- self.config = config
- self.logger=logger
+ Setup.__init__(self, config, logger)
self.instance_ids = []
+ self.ip_addresses = []
- def setup_aws_frontend(self):
- self.logger=self.logger
- self.cfg = self.config.cfg
- self.config = self.config
self.file_path = "/".join(os.path.abspath(__file__).split('/')[:-1])
- self.showcase_location = self.cfg.get('MYSQL', 'showcase_war_url')
- self.config.save('infrastructure', 'remote_user', self.cfg.get('EC2', 'remote_user'))
- self.remote_user = self.cfg.get('infrastructure', 'remote_user')
+
self.remote_deploy_path = self.cfg.get('software', 'remote_deploy_path')
- self.db_num_instances = int(self.cfg.get('RDS', 'num_replicas')) + 1
- self.database_name = self.cfg.get('RDS', 'database_name')
- self.database_user = self.cfg.get('RDS', 'database_user')
- self.database_pass = self.cfg.get('RDS', 'database_pass')
self.deploy_name = "showcase-1-a"
- self.connection_pool_size = self.cfg.get('RDS', 'connection_pool_size')
+
+ def setup_aws_frontend(self):
+ self.cfg = self.config.cfg
+ self.config = self.config
i = aws_create_keypair.CreateKeyPair(
- cfg=self.cfg,
- user_path=self.config.user_path
+ config=self.config,
+ user_path=self.config.user_path,
+ logger=self.logger
)
i.create()
self.config.save('EC2', 'key_pair', "%s/%s.pem" % (self.config.user_path, self.config.cfg.get('EC2', 'key_name')))
self.key_pair = self.cfg.get('EC2', 'key_pair')
+
showcase_url = None
- if self.cfg.get('EC2', 'is_autoscalable') == 'no':
- instances = []
- i = aws_create_instance.CreateEC2Instance(cfg=self.config.cfg, logger=self.logger)
- ip_addresses = []
- num_instances = int(self.cfg.get('COMMON', 'num_instances'))
- instances = i.create_all(num_instances)
+ if not self.is_autoscalable:
+ i = aws_create_instance.CreateEC2Instance(config=self.config, logger=self.logger)
+
+ instances = i.create_all(self.num_instances)
+
+
for instance in instances:
- ip_addresses.append(instance.ip_address)
+ self.ip_addresses.append(instance.ip_address)
- self.config.save('infrastructure', 'ip_address', ','.join(ip_addresses))
- self.ip_addresses = self.cfg.get('infrastructure', 'ip_address').split(",")
loadbalancer = None
if len(instances) > 1:
i = aws_create_loadbalancer.CreateLoadbalancer(
- instances=instances,
config=self.config,
logger=self.logger
)
- loadbalancer = i.create()
+ loadbalancer = i.create(instances)
deploy_showcase.DeploySoftware(self)
showcase_url = loadbalancer.dns_name if loadbalancer else instances[0].ip_address
- self.logger.log("Instance ids: %s" % ",".join([instance.id for instance in instances]))
- elif self.cfg.get('EC2', 'is_autoscalable') == 'yes':
- i = aws_create_instance.CreateEC2Instance(cfg=self.config.cfg, logger=self.logger)
+ else:
+ i = aws_create_instance.CreateEC2Instance(config=self.config, logger=self.logger)
instance = i.create()
self.config.save('infrastructure', 'ip_address', instance.ip_address)
- self.config.save('infrastructure', 'remote_user', 'ubuntu')
- self.ip_addresses = self.cfg.get('infrastructure', 'ip_address').split(",")
+ self.ip_addresses.append(instance.ip_address)
deploy_showcase.DeploySoftware(self)
aws_create_ami.EC2CreateAMI(config=self.config, logger=self.logger)
autoscalability = aws_create_autoscalability.Autoscalability(
- cfg=self.cfg,
+ config=self.config,
logger=self.logger
)
showcase_url = autoscalability.create()
diff --git a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_ami.py b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_ami.py
index 79927d8..3c7828d 100644
--- a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_ami.py
+++ b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_ami.py
@@ -2,20 +2,17 @@
import boto.ec2
import time
import sys
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-
-class EC2CreateAMI:
+class EC2CreateAMI(Setup):
def __init__(self, config, logger):
- self.logger = logger
- self.cfg = config.cfg
- self.config = config
- self.key_name = self.cfg.get('EC2', 'key_name')
- self.key_pair = self.cfg.get('EC2', 'key_pair')
- self.conn = boto.ec2.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ Setup.__init__(self, config, logger)
+
+ self.conn = boto.ec2.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key)
ami_id = self.create_ami(self.cfg.get('infrastructure', 'ip_address'))
self.config.save('infrastructure', 'ami_id', ami_id)
diff --git a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_autoscalability.py b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_autoscalability.py
index caa6a78..7cf91f3 100644
--- a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_autoscalability.py
+++ b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_autoscalability.py
@@ -7,20 +7,23 @@ import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm
import sys
from boto.ec2.autoscale.tag import Tag
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-class Autoscalability:
- def __init__(self, cfg, logger):
- self.cfg = cfg
- self.logger = logger
- self.key_pair=self.cfg.get('EC2', 'key_pair')
- self.key_name=self.cfg.get('EC2', 'key_name')
+class Autoscalability(Setup):
+
+ def __init__(self, config, logger):
+ Setup.__init__(self, config, logger)
+ self.as_ami_id = self.cfg.get('infrastructure', 'ami_id')
def create(self):
- self.conn = boto.ec2.autoscale.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ self.conn = boto.ec2.autoscale.connect_to_region(
+ self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
+
lb = self.create_load_balancer()
self.create_security_group('http', 'Security group for HTTP', '0.0.0.0/0', '80')
self.create_security_group('ssh', 'Security group for SSH', '0.0.0.0/0', '22')
@@ -34,9 +37,11 @@ class Autoscalability:
def create_cloudwatch_alarms(self, scale_up_policy_arn, scale_down_policy_arn):
self.logger.log("Creating CloudWatch alarms ...")
- conn = boto.ec2.cloudwatch.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ conn = boto.ec2.cloudwatch.connect_to_region(
+ self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
alarm_dimensions = {'AutoScalingGroupName' : 'cloudscale-as'}
scale_up_alarm = MetricAlarm(
@@ -64,13 +69,14 @@ class Autoscalability:
adjustment_type='ChangeInCapacity',
as_name='cloudscale-as',
scaling_adjustment=1,
- cooldown=int(self.cfg.get('EC2', 'cooldown'))
- )
+ cooldown=self.cooldown
+ )
+
scale_down_policy = ScalingPolicy(name='scale_down',
adjustment_type='ChangeInCapacity',
as_name='cloudscale-as',
scaling_adjustment=-1,
- cooldown=int(self.cfg.get('EC2', 'cooldown'))
+ cooldown=self.cooldown
)
self.conn.create_scaling_policy(scale_up_policy)
self.conn.create_scaling_policy(scale_down_policy)
@@ -87,11 +93,11 @@ class Autoscalability:
try:
lc = LaunchConfiguration(self.conn,
"cloudscale-lc",
- self.cfg.get('infrastructure', 'ami_id'),
+ self.as_ami_id,
self.key_name,
['http'],
None,
- self.cfg.get('EC2', 'instance_type'),
+ self.instance_type,
instance_monitoring=True
)
@@ -109,13 +115,13 @@ class Autoscalability:
try:
tag = Tag(
key='Name',
- value = self.cfg.get('EC2', 'instances_identifier'),
+ value = self.instance_identifier,
propagate_at_launch=True,
resource_id='cloudscale-as'
)
ag = AutoScalingGroup(group_name='cloudscale-as',
load_balancers=[lb_name],
- availability_zones=self.cfg.get('EC2', 'availability_zones').split(","),
+ availability_zones=[self.availability_zone],
launch_config=lc,
min_size=1,
max_size=10,
@@ -129,9 +135,10 @@ class Autoscalability:
def create_security_group(self, name, description, cidr, port):
try:
- conn = boto.ec2.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ conn = boto.ec2.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
conn.create_security_group(name, description)
conn.authorize_security_group(group_name=name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip=cidr)
@@ -143,14 +150,15 @@ class Autoscalability:
def create_load_balancer(self):
self.logger.log("Creating load balancer ...")
- conn = boto.ec2.elb.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ conn = boto.ec2.elb.connect_to_region(
+ self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
- zones = self.cfg.get('EC2', 'availability_zones').split(",")
ports = [(80, 80, 'http')]
- lb = conn.create_load_balancer('cloudscale-lb', zones, ports)
+ lb = conn.create_load_balancer('cloudscale-lb', self.availability_zone, ports)
return lb
diff --git a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_instance.py b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_instance.py
index 43ab768..d69a5cc 100644
--- a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_instance.py
+++ b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_instance.py
@@ -5,39 +5,37 @@ import time
import paramiko
import sys, os
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-class CreateEC2Instance:
+class CreateEC2Instance(Setup):
+
+ def __init__(self, config, logger):
+ Setup.__init__(self, config, logger)
- def __init__(self, cfg, logger):
- self.key_pair = cfg.get('EC2', 'key_pair')
- self.key_name = cfg.get('EC2', 'key_name')
- self.cfg = cfg
self.conn = boto.ec2.connect_to_region(
- self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key')
+ self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
)
- self.logger = logger
def create(self):
self.create_security_groups()
instance = self.create_instance()
- #self.write_config(instance)
return instance
def create_all(self, num_instances):
res = self.conn.run_instances(
- self.cfg.get('EC2', 'ami_id'),
+ self.ami_id,
max_count=num_instances,
key_name=self.key_name,
- instance_type=self.cfg.get('EC2','instance_type'),
+ instance_type=self.instance_type,
security_groups=['http', 'ssh'],
monitoring_enabled=True,
- placement=self.cfg.get('EC2', 'availability_zones').split(',')[0]
+ placement=self.availability_zone
)
instance_ids = []
@@ -46,7 +44,7 @@ class CreateEC2Instance:
instance_ids.append(instance.id)
instances = self.conn.get_all_instances(instance_ids)[0].instances
- self.conn.create_tags(instance_ids, {'Name': 'cloudscale'})
+ self.conn.create_tags(instance_ids, {'Name': self.instance_identifier})
return instances
@@ -66,17 +64,17 @@ class CreateEC2Instance:
def create_instance(self):
self.logger.log("Creating EC2 instance ...")
res = self.conn.run_instances(
- self.cfg.get('EC2', 'ami_id'),
+ self.ami_id,
key_name=self.key_name,
- instance_type=self.cfg.get('EC2','instance_type'),
+ instance_type=self.instance_type,
security_groups=['http', 'ssh'],
monitoring_enabled=True,
- placement=self.cfg.get('EC2', 'availability_zones').split(',')[0]
+ placement=self.availability_zone
)
self.wait_available(res.instances[0])
instance = self.conn.get_all_instances([res.instances[0].id])[0].instances[0]
- self.conn.create_tags([instance.id], {'Name': 'cloudscale'})
+ self.conn.create_tags([instance.id], {'Name': self.instance_identifier})
self.conn.monitor_instances([instance.id])
return instance
@@ -94,16 +92,6 @@ class CreateEC2Instance:
self.logger.log("Instance is running!")
-
- def write_config(self, instance):
- self.cfg.save_user_option('infrastructure', 'remote_user', 'ubuntu')
- self.cfg.save_user_option('infrastructure', 'ip_address', instance.ip_address)
- # f = open(os.path.abspath('../infrastructure.ini'), 'w')
- # f.write('[EC2]\n')
- # f.write('remote_user=ubuntu\n')
- # f.write('ip_address=' + instance.ip_address + '\n')
- # f.close()
-
if __name__ == '__main__':
check_args(2, "<config_path>")
user_path, cfg, logger = get_cfg_logger(sys.argv[1], sys.argv[2])
diff --git a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_keypair.py b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_keypair.py
index 84347f5..a296b64 100644
--- a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_keypair.py
+++ b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_keypair.py
@@ -2,28 +2,29 @@ from boto import ec2
import os
from boto.exception import EC2ResponseError
import sys
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-class CreateKeyPair:
+class CreateKeyPair(Setup):
- def __init__(self, user_path, cfg):
- self.cfg = cfg
+ def __init__(self, user_path, config, logger):
+ Setup.__init__(self, config, logger)
self.user_path = user_path
def create(self):
conn = ec2.connect_to_region(
- self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key')
+ self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
)
try:
- keypair = conn.create_key_pair(self.cfg.get('EC2', 'key_name'))
+ keypair = conn.create_key_pair(self.key_name)
except EC2ResponseError as e:
if e.error_code == 'InvalidKeyPair.Duplicate':
- conn.delete_key_pair(key_name=self.cfg.get('EC2', 'key_name') )
- keypair = conn.create_key_pair(self.cfg.get('EC2', 'key_name'))
+ conn.delete_key_pair(key_name=self.key_name)
+ keypair = conn.create_key_pair(self.key_name)
else:
raise e
@@ -31,5 +32,5 @@ class CreateKeyPair:
if __name__ == '__main__':
check_args(2, "<config_path>")
- user_path, cfg, logger = get_cfg_logger(sys.argv[1], sys.argv[2])
- CreateKeyPair(user_path, cfg)
\ No newline at end of file
+ user_path, config, logger = get_cfg_logger(sys.argv[1], sys.argv[2])
+ CreateKeyPair(user_path, config, logger)
\ No newline at end of file
diff --git a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_loadbalancer.py b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_loadbalancer.py
index bc780f0..f144fc7 100644
--- a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_loadbalancer.py
+++ b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_loadbalancer.py
@@ -1,29 +1,26 @@
from boto import ec2
from boto.exception import BotoServerError
import sys
+import time
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-class CreateLoadbalancer:
-
- def __init__(self, instances, config, logger):
- self.instances = instances
- self.cfg = config.cfg
- self.config = config
- self.logger = logger
+class CreateLoadbalancer(Setup):
+ def __init__(self, config, logger):
+ Setup.__init__(self, config, logger)
- def create(self):
+ def create(self, instances):
+ self.instances = instances
self.logger.log("Creating load balancer ...")
- conn = ec2.elb.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ conn = ec2.elb.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key)
- zones = self.cfg.get('EC2', 'availability_zones').split(",")
+ zones = [self.availability_zone]
ports = [(80, 80, 'http')]
-
-
lb_name = 'cloudscale-lb'
try:
lb = conn.get_all_load_balancers(load_balancer_names=[lb_name])
@@ -39,6 +36,7 @@ class CreateLoadbalancer:
lb = []
i+=1
+ time.sleep(10)
lb = conn.create_load_balancer(lb_name, zones, ports)
self.attach_instances(lb)
diff --git a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_remove_all.py b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_remove_all.py
index 55a0bf8..efc5d5c 100644
--- a/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_remove_all.py
+++ b/cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_remove_all.py
@@ -9,21 +9,38 @@ from boto.exception import BotoServerError
from boto.ec2.cloudwatch import MetricAlarm
import time
import sys
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-class RemoveAll:
+class RemoveAll(Setup):
def __init__(self, cfg, logger):
- self.cfg = cfg
- self.logger = logger
- self.key_name = self.cfg.get('EC2', 'key_name')
- self.key_pair = self.cfg.get('EC2', 'key_pair')
- self.conn_ec2 = boto.ec2.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
- self.conn_as = boto.ec2.autoscale.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ Setup.__init__(self, cfg, logger)
+
+ self.conn_ec2 = boto.ec2.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key)
+
+ self.conn_as = boto.ec2.autoscale.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
+
+ self.conn_cloudwatch = boto.ec2.cloudwatch.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
+
+ self.conn_rds = boto.rds.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
+
+        self.conn_elb = boto.ec2.elb.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key)
+
+
self.remove_cloudwatch_alarms()
self.remove_load_balancer()
@@ -38,12 +55,8 @@ class RemoveAll:
def remove_rds_instances(self):
self.logger.log("Removing RDS instances ..")
- conn = boto.rds.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
-
try:
- conn.delete_dbinstance(id='cloudscale-master', skip_final_snapshot=True)
+ self.conn_rds.delete_dbinstance(id='cloudscale-master', skip_final_snapshot=True)
except BotoServerError as e:
import traceback
self.logger.log(traceback.format_exc())
@@ -52,7 +65,7 @@ class RemoveAll:
num = self.cfg.get('RDS', 'num_replicas')
for i in xrange(int(num)):
try:
- conn.delete_dbinstance(id='cloudscale-replica%s' % str(i+1), skip_final_snapshot=True)
+ self.conn_rds.delete_dbinstance(id='cloudscale-replica%s' % str(i+1), skip_final_snapshot=True)
except BotoServerError as e:
import traceback
self.logger.log(traceback.format_exc())
@@ -106,10 +119,7 @@ class RemoveAll:
def remove_cloudwatch_alarms(self):
self.logger.log("Removing cloudwatch alarms ...")
- conn_cw = boto.ec2.cloudwatch.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
- conn_cw.delete_alarms(['scale_up_on_cpu', 'scale_down_on_cpu'])
+ self.conn_cloudwatch.delete_alarms(['scale_up_on_cpu', 'scale_down_on_cpu'])
def remove_security_groups(self):
self.logger.log("Removing security groups ...")
@@ -118,10 +128,7 @@ class RemoveAll:
def remove_load_balancer(self):
self.logger.log("Removing load balancer ...")
- conn_elb = boto.ec2.elb.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
- conn_elb.delete_load_balancer('cloudscale-lb')
+ self.conn_elb.delete_load_balancer('cloudscale-lb')
def remove_ami(self):
self.logger.log("Removing ami ...")
@@ -130,117 +137,6 @@ class RemoveAll:
except:
pass
- def create_cloudwatch_alarms(self, scale_up_policy_arn, scale_down_policy_arn):
- self.logger.log("Creating CloudWatch alarms")
-
- conn = boto.ec2.cloudwatch.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
- alarm_dimensions = {'AutoScalingGroupName' : 'cloudscale-as'}
-
- scale_up_alarm = MetricAlarm(
- name='scale_up_on_cpu', namespace='AWS/EC2',
- metric='CPUUtilization', statistic='Average',
- comparison='>', threshold='70',
- period='60', evaluation_periods=2,
- alarm_actions=[scale_up_policy_arn],
- dimensions=alarm_dimensions)
-
- scale_down_alarm = MetricAlarm(
- name='scale_down_on_cpu', namespace='AWS/EC2',
- metric='CPUUtilization', statistic='Average',
- comparison='<', threshold='40',
- period='60', evaluation_periods=2,
- alarm_actions=[scale_down_policy_arn],
- dimensions=alarm_dimensions)
-
- conn.create_alarm(scale_up_alarm)
- conn.create_alarm(scale_down_alarm)
-
- def create_scaling_policy(self):
- self.logger.log("Creating scaling policy ...")
- scale_up_policy = ScalingPolicy(name='scale_up',
- adjustment_type='ChangeInCapacity',
- as_name='cloudscale-as',
- scaling_adjustment=2,
- cooldown=180
- )
- scale_down_policy = ScalingPolicy(name='scale_down',
- adjustment_type='ChangeInCapacity',
- as_name='cloudscale-as',
- scaling_adjustment=-2,
- cooldown=180)
- self.conn.create_scaling_policy(scale_up_policy)
- self.conn.create_scaling_policy(scale_down_policy)
- scale_up_policy = self.conn.get_all_policies(
- as_group='cloudscale-as', policy_names=['scale_up'])[0]
- scale_down_policy = self.conn.get_all_policies(
- as_group='cloudscale-as', policy_names=['scale_down'])[0]
-
- return scale_up_policy.policy_arn, scale_down_policy.policy_arn
-
- def create_launch_configuration(self):
- self.logger.log("Creating launch configuration ...")
-
- try:
- lc = LaunchConfiguration(self.conn,
- "cloudscale-lc",
- self.cfg.get('infrastructure', 'ami_id'),
- self.key_name,
- ['http'],
- None,
- self.cfg.get('EC2', 'instance_type'))
-
- self.conn.create_launch_configuration(lc)
- return lc
- except boto.exception.BotoServerError as e:
- if e.error_code == 'AlreadyExists':
- return self.conn.get_all_launch_configurations(names=['cloudscale-lc'])
- else:
- raise
-
- def create_autoscalability_group(self, lb_name, lc):
- self.logger.log("Creating autoscalability group ...")
-
- try:
- ag = AutoScalingGroup(group_name='cloudscale-as',
- load_balancers=[lb_name],
- availability_zones=self.cfg.get('EC2', 'availability_zones').split(","),
- launch_config=lc, min_size=2, max_size=8, connection=self.conn)
- self.conn.create_auto_scaling_group(ag)
- except boto.exception.BotoServerError as e:
- if e.error_code != 'AlreadyExists':
- raise # self.conn.get_all_groups(names=['cloudscale-as'])[0]
-
-
- def create_security_group(self, name, description, cidr, port):
- try:
- conn = boto.ec2.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
- conn.create_security_group(name, description)
- conn.authorize_security_group(group_name=name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip=cidr)
-
- conn.create_dbsecurity_group(name, description)
- conn.authorize_dbsecurity_group(name, cidr, name)
- except boto.exception.EC2ResponseError as e:
- if str(e.error_code) != 'InvalidGroup.Duplicate':
- raise
-
- def create_load_balancer(self):
- self.logger.log("Creating load balancer ...")
- conn = boto.ec2.elb.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
-
- zones = self.cfg.get('EC2', 'availability_zones').split(",")
- ports = [(80, 80, 'http')]
-
- lb = conn.create_load_balancer('cloudscale-lb', zones, ports)
-
- return lb.name
-
-
if __name__ == "__main__":
check_args(2, "<output_dir> <config_path>")
path, cfg, logger = get_cfg_logger(sys.argv[1], sys.argv[2])
diff --git a/cloudscale/deployment_scripts/scripts/platform/aws/configure_rds.py b/cloudscale/deployment_scripts/scripts/platform/aws/configure_rds.py
index 72dd94a..2354323 100644
--- a/cloudscale/deployment_scripts/scripts/platform/aws/configure_rds.py
+++ b/cloudscale/deployment_scripts/scripts/platform/aws/configure_rds.py
@@ -1,3 +1,4 @@
+import urllib
import boto.exception
import boto, boto.ec2, boto.rds
import boto.manage.cmdshell
@@ -5,56 +6,55 @@ import time
import subprocess
import os
import sys
+from cloudscale.deployment_scripts.config import Setup
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
-class ConfigureRDS:
+class ConfigureRDS(Setup):
def __init__(self, config, logger):
- self.cfg = config.cfg
- self.config = config
- self.logger=logger
- self.db_password= self.cfg.get('RDS', 'database_pass')
-
- self.conn = boto.rds.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ Setup.__init__(self, config, logger)
+
+ self.conn = boto.rds.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key
+ )
+
sg_id = self.create_security_group('mysql', 'Security group for MYSQL protocol', '3306', '0.0.0.0/0')
instance = self.create_master(sg_id)
self.import_data(instance)
replicas_urls = []
- if int(self.cfg.get('RDS', 'num_replicas')) > 0:
+ if self.rds_num_replicas > 0:
replicas_urls = self.create_read_replicas()
self.write_config(instance.endpoint[0], replicas_urls)
def create_read_replicas(self):
- num = int(self.cfg.get('RDS', 'num_replicas'))
urls = []
instance_ids = []
- for i in xrange(int(num)):
+ for i in xrange(self.rds_num_replicas):
self.logger.log("Creating read replica " + str(i+1))
try:
instance = self.conn.create_dbinstance_read_replica(
- self.cfg.get('RDS', 'replica_identifier') + str(i+1),
- self.cfg.get('RDS', 'master_identifier'),
- self.cfg.get('RDS', 'instance_type'),
+ self.rds_replica_identifier + str(i+1),
+ self.rds_master_identifier,
+ self.rds_instance_type,
availability_zone="eu-west-1a"
)
except boto.exception.BotoServerError as e:
if not e.error_code == 'DBInstanceAlreadyExists':
raise
else:
- id = self.cfg.get('RDS', 'replica_identifier') + str(i+1)
+ id = self.rds_replica_identifier + str(i+1)
self.logger.log("Modifying RDS %s" % id)
- self.conn.modify_dbinstance(id=id, instance_class=self.cfg.get('RDS', 'instance_type'), apply_immediately=True)
+ self.conn.modify_dbinstance(id=id, instance_class=self.rds_instance_type, apply_immediately=True)
time.sleep(60)
- for i in xrange(int(num)):
- instance = self.conn.get_all_dbinstances(instance_id=self.cfg.get('RDS', 'replica_identifier') + str(i+1))[0]
+ for i in xrange(self.rds_num_replicas):
+ instance = self.conn.get_all_dbinstances(instance_id=self.rds_replica_identifier + str(i+1))[0]
self.wait_available(instance)
- instance = self.conn.get_all_dbinstances(instance_id=self.cfg.get('RDS', 'replica_identifier')+ str(i+1))[0]
+ instance = self.conn.get_all_dbinstances(instance_id=self.rds_replica_identifier + str(i+1))[0]
urls.append(instance.endpoint[0])
return urls
@@ -62,22 +62,26 @@ class ConfigureRDS:
def import_data(self, instance):
self.logger.log("Importing data. This may take a while, please wait ...")
- generate_type = self.cfg.get('RDS', 'generate_type')
+ generate_type = "dump"
if generate_type == "script":
config_path = self.write_showcase_database_config(instance)
self.generate(config_path)
elif generate_type == "dump":
self.dump(instance)
+ else:
+            msg = "Invalid generate type for data import!"
+            self.logger.log(msg)
+ raise Exception(msg)
self.logger.log("Successfully imported data")
def dump(self, instance):
- dump_file = self.cfg.get('RDS', 'generate_dump_path')
- db = self.cfg.get('RDS', 'database_name')
- user = self.cfg.get('RDS', 'database_user')
- passwd = self.cfg.get('RDS', 'database_pass')
- cmd = [os.path.dirname(__file__) + "/dump.sh", str(instance.endpoint[0]), user, passwd, db, dump_file]
+ dump_file = "/tmp/cloudscale-dump.sql"
+ if not os.path.isfile(dump_file):
+ urllib.urlretrieve(self.database_dump_url, dump_file)
+
+ cmd = [os.path.dirname(__file__) + "/dump.sh", str(instance.endpoint[0]), self.database_user, self.database_password, self.database_name, dump_file]
subprocess.check_output(cmd)
def write_showcase_database_config(self, instance):
@@ -103,9 +107,9 @@ class ConfigureRDS:
f.write('')
def create_security_group(self, name, description, port, cidr):
- ec2_conn = boto.ec2.connect_to_region(self.cfg.get('EC2', 'region'),
- aws_access_key_id=self.cfg.get('EC2', 'aws_access_key_id'),
- aws_secret_access_key=self.cfg.get('EC2', 'aws_secret_access_key'))
+ ec2_conn = boto.ec2.connect_to_region(self.region,
+ aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key)
try:
ec2_conn.create_security_group(name, description)
ec2_conn.authorize_security_group(group_name=name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip=cidr)
@@ -124,12 +128,12 @@ class ConfigureRDS:
try:
instance = self.conn.create_dbinstance(
- self.cfg.get('RDS', 'master_identifier'),
+ self.rds_master_identifier,
5,
- self.cfg.get('RDS', 'instance_type'),
- self.cfg.get('RDS', 'database_user'),
- self.cfg.get('RDS', 'database_pass'),
- db_name=self.cfg.get('RDS', 'database_name'),
+ self.rds_instance_type,
+ self.database_user,
+ self.database_password,
+ db_name=self.database_name,
vpc_security_groups=[sg_id],
availability_zone='eu-west-1a',
backup_retention_period=1
@@ -140,14 +144,14 @@ class ConfigureRDS:
else:
id = self.cfg.get('RDS', 'master_identifier')
self.logger.log("Modifying RDS %s ..." % id)
- self.conn.modify_dbinstance(id=id, instance_class=self.cfg.get('RDS', 'instance_type'), apply_immediately=True)
+ self.conn.modify_dbinstance(id=id, instance_class=self.rds_instance_type, apply_immediately=True)
#time.sleep(60)
finally:
- instance = self.conn.get_all_dbinstances(instance_id=self.cfg.get('RDS', 'master_identifier'))[0]
+ instance = self.conn.get_all_dbinstances(instance_id=self.rds_master_identifier)[0]
self.wait_available(instance)
- instance = self.conn.get_all_dbinstances(instance_id=self.cfg.get('RDS', 'master_identifier'))[0]
+ instance = self.conn.get_all_dbinstances(instance_id=self.rds_master_identifier)[0]
return instance
diff --git a/cloudscale/deployment_scripts/scripts/software/check-running-showcase_instances.py b/cloudscale/deployment_scripts/scripts/software/check-running-showcase_instances.py
index 231d6c0..d5fbc73 100644
--- a/cloudscale/deployment_scripts/scripts/software/check-running-showcase_instances.py
+++ b/cloudscale/deployment_scripts/scripts/software/check-running-showcase_instances.py
@@ -1,5 +1,5 @@
import subprocess
-from novaclient.v1_1 import client as novaclient
+from novaclient.v2 import client as novaclient
import time
import boto
diff --git a/cloudscale/deployment_scripts/scripts/software/deploy_showcase.py b/cloudscale/deployment_scripts/scripts/software/deploy_showcase.py
index f17a4e4..5b09ad7 100644
--- a/cloudscale/deployment_scripts/scripts/software/deploy_showcase.py
+++ b/cloudscale/deployment_scripts/scripts/software/deploy_showcase.py
@@ -1,9 +1,7 @@
import logging
import time
import paramiko
-import ConfigParser
-import sys, os
-import subprocess
+import sys
import requests
import select
from cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger
@@ -21,7 +19,7 @@ class DeploySoftware:
def write_db_config(self, ssh, path):
cfg = "jdbc.dbtype=mysql\n"
- if int(self.props.db_num_instances) > 1:
+        if int(self.props.rds_num_replicas) > 0:
cfg += 'jdbc.driverClassName=com.mysql.jdbc.ReplicationDriver\n'
cfg += 'jdbc.url=jdbc:mysql:replication://%s/%s\n' % (self.props.cfg.get('platform', 'urls'), self.props.database_name )
else:
@@ -29,7 +27,7 @@ class DeploySoftware:
cfg += 'jdbc.url=jdbc:mysql://%s/%s\n' % (self.props.cfg.get('platform', 'urls'), self.props.database_name)
cfg += 'jdbc.username=%s\n' % self.props.database_user
- cfg += 'jdbc.password=%s\n' % self.props.database_pass
+ cfg += 'jdbc.password=%s\n' % self.props.database_password
cfg += 'jdbc.hibernate.dialect=org.hibernate.dialect.MySQLDialect\n'
_, stdout, _ = ssh.exec_command('echo "%s" | sudo tee %s' % (cfg, path))
@@ -63,7 +61,6 @@ class DeploySoftware:
self.props.logger.log("Deploying showcase on " + ip_address)
self.props.logger.log("This may take a while. Please wait ...")
time.sleep(60)
- paramiko.util.log_to_file('paramiko.log')
ssh = self.ssh_to_instance(ip_address)
@@ -110,20 +107,6 @@ class DeploySoftware:
# Print data from stdout
self.props.logger.log(msg=stdout.channel.recv(1024), level=logging.DEBUG)
-
- def check(self, ip_address):
- r = requests.get('http://%s/showcase-1-a' % ip_address)
- return r.status_code == 200
-
- def parse_config_file(self, config_file):
- f = open(config_file, 'r')
- ip_addresses = []
- for line in f.readlines():
- ip_addresses.append(line)
- return ip_addresses
-
-
-
if __name__ == '__main__':
check_args(2, "<output_dir> <config_path>")
path, cfg, logger = get_cfg_logger(sys.argv[1], sys.argv[2])
diff --git a/cloudscale/deployment_scripts/scripts/software/install-tomcat-apache.py b/cloudscale/deployment_scripts/scripts/software/install-tomcat-apache.py
index a57b807..d559dc4 100644
--- a/cloudscale/deployment_scripts/scripts/software/install-tomcat-apache.py
+++ b/cloudscale/deployment_scripts/scripts/software/install-tomcat-apache.py
@@ -3,7 +3,6 @@ import time
import paramiko
import ConfigParser
import sys, os
-from common.Cloudscale import *
class InstallTomcatApache:
setup.py 1(+1 -0)
diff --git a/setup.py b/setup.py
index febe1c9..34d8d13 100644
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,7 @@ setup(
author_email='simon.ivansek@xlab.si',
packages=find_packages(),
package_data={'' : ['*.cfg', '*.sh', '*.conf']},
+ url='http://www.cloudscale-project.eu',
license='LICENSE.txt',
description='Deployment scripts for CloudScale project',
long_description=open('README.txt').read(),