"""
EC2/S3 Utility Classes
"""

import os
import sys
import time
import string
import platform
from pprint import pprint

import boto
import boto.ec2
import boto.s3
from starcluster import static
from starcluster import utils
from starcluster import exception
from starcluster.logger import log
from starcluster.utils import print_timing
from starcluster.hacks import register_image as _register_image
class EasyAWS(object):
    def __init__(self, aws_access_key_id, aws_secret_access_key,
                 connection_authenticator, **kwargs):
        """
        Create an EasyAWS object.

        Requires AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY from an Amazon Web
        Services (AWS) account and a CONNECTION_AUTHENTICATOR function that
        returns an authenticated AWS connection object.

        Providing only the keys will default to using Amazon EC2.

        kwargs are passed to the connection_authenticator constructor.
        """
        self.aws_access_key = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.connection_authenticator = connection_authenticator
        self._conn = None
        self._kwargs = kwargs

    @property
    def conn(self):
        if self._conn is None:
            log.debug('creating self._conn w/ connection_authenticator '
                      'kwargs = %s' % self._kwargs)
            self._conn = self.connection_authenticator(
                self.aws_access_key, self.aws_secret_access_key,
                **self._kwargs)
        return self._conn
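    # Example (illustrative): EasyAWS defers authentication until the conn
    # property is first accessed:
    #
    #   aws = EasyAWS('<access_key_id>', '<secret_key>', boto.connect_ec2)
    #   aws.conn.get_all_instances()  # connection created here, then reused
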
class EasyEC2(EasyAWS):
    def __init__(self, aws_access_key_id, aws_secret_access_key,
                 aws_ec2_path='/', aws_s3_path='/', aws_port=None,
                 aws_region_name=None, aws_is_secure=True,
                 aws_region_host=None, cache=False, **kwargs):
        aws_region = None
        if aws_region_name and aws_region_host:
            aws_region = boto.ec2.regioninfo.RegionInfo(
                name=aws_region_name, endpoint=aws_region_host)
        kwargs = dict(is_secure=aws_is_secure, region=aws_region,
                      port=aws_port, path=aws_ec2_path)
        super(EasyEC2, self).__init__(aws_access_key_id,
                                      aws_secret_access_key,
                                      boto.connect_ec2, **kwargs)

        kwargs = dict(aws_s3_path=aws_s3_path, aws_port=aws_port,
                      aws_is_secure=aws_is_secure, cache=cache)
        if aws_region_host:
            kwargs.update(dict(aws_region_host=aws_region_host))
        self.s3 = EasyS3(aws_access_key_id, aws_secret_access_key, **kwargs)
        self.cache = cache
        self._instance_response = None
        self._keypair_response = None
        self._images = None
        self._executable_images = None
        self._security_group_response = None

    def __check_for_auth_failure(self, e):
        if e.error_code == "AuthFailure":
            raise e
    @property
    def registered_images(self):
        if not self.cache or self._images is None:
            self._images = self.conn.get_all_images(owners=["self"])
        return self._images

    @property
    def executable_images(self):
        if not self.cache or self._executable_images is None:
            self._executable_images = self.conn.get_all_images(
                executable_by=["self"])
        return self._executable_images
    def create_group(self, name, description, auth_ssh=True,
                     auth_group_traffic=False):
        if not name:
            return None
        log.info("Creating security group %s..." % name)
        sg = self.conn.create_security_group(name, description)
        if auth_ssh:
            sg.authorize('tcp', 22, 22, '0.0.0.0/0')
        if auth_group_traffic:
            sg.authorize(src_group=self.get_group_or_none(name))
        return sg
    def get_group_or_none(self, name):
        try:
            sg = self.conn.get_all_security_groups(groupnames=[name])[0]
            return sg
        except boto.exception.EC2ResponseError, e:
            self.__check_for_auth_failure(e)
        except IndexError, e:
            pass
    def get_or_create_group(self, name, description, auth_ssh=True,
                            auth_group_traffic=False):
        """
        Try to return a security group by name. If the group is not found,
        attempt to create it. The description is only used when the group is
        created.

        If auth_group_traffic is True, all traffic is authorized between
        members of the group.
        """
        sg = self.get_group_or_none(name)
        if not sg:
            sg = self.create_group(name, description, auth_ssh,
                                   auth_group_traffic)
        return sg
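    # Example (illustrative; the group name below is hypothetical):
    #
    #   sg = ec2.get_or_create_group('@sc-mycluster', 'StarCluster group',
    #                                auth_ssh=True, auth_group_traffic=True)
    #
    # The call is idempotent: an existing group is returned as-is; otherwise
    # the group is created with SSH (tcp/22) open and intra-group traffic
    # authorized.
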
    def request_spot_instances(self, price, image_id, instance_type='m1.small',
                               count=1, launch_group=None, key_name=None,
                               availability_zone_group=None,
                               security_groups=None, placement=None):
        # thin wrapper around boto's EC2Connection.request_spot_instances
        return self.conn.request_spot_instances(
            price, image_id, instance_type=instance_type, count=count,
            launch_group=launch_group, key_name=key_name,
            availability_zone_group=availability_zone_group,
            security_groups=security_groups, placement=placement)
    def run_instances(self, image_id, instance_type='m1.small', min_count=1,
                      max_count=1, key_name=None, security_groups=None,
                      placement=None):
        # thin wrapper around boto's EC2Connection.run_instances
        return self.conn.run_instances(
            image_id, instance_type=instance_type, min_count=min_count,
            max_count=max_count, key_name=key_name,
            security_groups=security_groups, placement=placement)
    def register_image(self, name, description=None, image_location=None,
                       architecture=None, kernel_id=None, ramdisk_id=None,
                       root_device_name=None, block_device_map=None):
        return _register_image(self.conn, name, description, image_location,
                               architecture, kernel_id, ramdisk_id,
                               root_device_name, block_device_map)
    def get_keypair_or_none(self, keypair):
        try:
            return self.get_keypair(keypair)
        except:
            pass
    def __print_header(self, msg):
        print msg
        print "-" * len(msg)

    def get_image_name(self, img):
        return img.location.split('/')[1].split('.manifest.xml')[0]
    def list_all_spot_instances(self, show_closed=False):
        spots = self.conn.get_all_spot_instance_requests()
        for spot in spots:
            state = spot.state or 'N/A'
            if not show_closed and state == 'closed':
                continue
            spot_id = spot.id or 'N/A'
            type = spot.type
            instance_id = getattr(spot, 'instanceId', 'N/A')
            create_time = spot.create_time or 'N/A'
            launch_group = spot.launch_group or 'N/A'
            zone_group = spot.availability_zone_group or 'N/A'
            price = spot.price or 'N/A'
            lspec = spot.launch_specification
            instance_type = lspec.instance_type
            groups = ', '.join([g.id for g in lspec.groups])
            print "id: %s" % spot_id
            print "price: $%0.2f" % price
            print "spot_request_type: %s" % type
            print "state: %s" % state
            print "instance_id: %s" % instance_id
            print "instance_type: %s" % instance_type
            print "create_time: %s" % create_time
            print "launch_group: %s" % launch_group
            print "zone_group: %s" % zone_group
            print "security_groups: %s" % groups
            print
    def list_all_instances(self, show_terminated=False):
        reservations = self.conn.get_all_instances()
        if not reservations:
            log.info("No instances found")
        for res in reservations:
            groups = ', '.join([g.id for g in res.groups]) or 'N/A'
            for instance in res.instances:
                if instance.state == 'terminated' and not show_terminated:
                    continue
                id = instance.id or 'N/A'
                dns_name = instance.dns_name or 'N/A'
                private_dns_name = instance.private_dns_name or 'N/A'
                state = instance.state or 'N/A'
                private_ip = instance.private_ip_address or 'N/A'
                public_ip = instance.ip_address or 'N/A'
                zone = instance.placement or 'N/A'
                ami = instance.image_id or 'N/A'
                keypair = instance.key_name or 'N/A'
                print "id: %s" % id
                print "dns_name: %s" % dns_name
                print "private_dns_name: %s" % private_dns_name
                print "state: %s" % state
                print "public_ip: %s" % public_ip
                print "private_ip: %s" % private_ip
                print "zone: %s" % zone
                print "ami: %s" % ami
                print "groups: %s" % groups
                print "keypair: %s" % keypair
                print
    def list_images(self, images):
        def get_key(obj):
            return str(obj.region) + ' ' + str(obj.location)
        imgs_i386 = [img for img in images if img.architecture == "i386"]
        imgs_i386.sort(key=get_key)
        imgs_x86_64 = [img for img in images if img.architecture == "x86_64"]
        imgs_x86_64.sort(key=get_key)
        print
        self.__list_images("32bit Images:", imgs_i386)
        self.__list_images("\n64bit Images:", imgs_x86_64)
        print "\ntotal images: %d" % len(images)
        print
    @print_timing
    def delete_volume(self, volume_id):
        vol = self.get_volume(volume_id)
        vol.delete()
    def list_zones(self):
        for zone in self.conn.get_all_zones():
            print 'name: ', zone.name
            print 'region: ', zone.region.name
            print 'status: ', zone.state
            print
    def get_zone_or_none(self, zone):
        try:
            return self.get_zone(zone)
        except:
            pass
    def get_image_files(self, image_id):
        image = self.get_image(image_id)
        bucketname = image.location.split('/')[0]
        bucket = self.s3.get_bucket(bucketname)
        prefix = os.path.basename(image.location).split('.manifest.xml')[0]
        files = bucket.list(prefix=prefix)
        # keep only entries that expose delete() (i.e. actual key objects)
        files = [file for file in files if hasattr(file, 'delete')]
        return files
    @property
    def instance_response(self):
        if not self.cache or self._instance_response is None:
            log.debug('instance_response = %s, cache = %s' %
                      (self._instance_response, self.cache))
            self._instance_response = self.conn.get_all_instances()
        return self._instance_response

    @property
    def keypair_response(self):
        if not self.cache or self._keypair_response is None:
            log.debug('keypair_response = %s, cache = %s' %
                      (self._keypair_response, self.cache))
            self._keypair_response = self.conn.get_all_key_pairs()
        return self._keypair_response
    def get_volume_or_none(self, volume_id):
        try:
            return self.get_volume(volume_id)
        except:
            pass
    def list_volumes(self):
        vols = self.get_volumes()
        if vols:
            for vol in vols:
                print "volume_id: %s" % vol.id
                print "size: %sGB" % vol.size
                print "status: %s" % vol.status
                print "availability_zone: %s" % vol.zone
                if vol.snapshot_id:
                    print "snapshot_id: %s" % vol.snapshot_id
                snapshots = vol.snapshots()
                if snapshots:
                    print 'snapshots: %s' % ' '.join([snap.id
                                                      for snap in snapshots])
                print
    def get_security_groups(self):
        return self.conn.get_all_security_groups()
    def get_spot_history(self, instance_type, start=None, end=None,
                         plot=False):
        if not utils.is_iso_time(start):
            raise exception.InvalidIsoDate(start)
        if not utils.is_iso_time(end):
            raise exception.InvalidIsoDate(end)
        hist = self.conn.get_spot_price_history(
            start_time=start, end_time=end, instance_type=instance_type,
            product_description="Linux/UNIX")
        if not hist:
            raise exception.SpotHistoryError(start, end)
        dates = [utils.iso_to_datetime_tuple(i.timestamp) for i in hist]
        prices = [i.price for i in hist]
        maximum = max(prices)
        avg = sum(prices) / len(prices)
        log.info("Current price: $%.2f" % hist[-1].price)
        log.info("Max price: $%.2f" % maximum)
        log.info("Average price: $%.2f" % avg)
        if plot:
            try:
                import pylab
                pylab.plot_date(pylab.date2num(dates), prices, linestyle='-')
                pylab.xlabel('date')
                pylab.ylabel('price (US dollars)')
                pylab.title('%s Price vs Date (%s - %s)' %
                            (instance_type, start, end))
                pylab.grid(True)
                pylab.show()
            except ImportError, e:
                log.error("Error importing pylab:")
                log.error(str(e))
                log.error("please check that matplotlib is installed and "
                          "that:")
                log.error("   $ python -c 'import pylab'")
                log.error("completes without error")
        return zip(dates, prices)
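    # Example (illustrative; assumes start/end are ISO-8601 timestamps in a
    # form accepted by utils.is_iso_time):
    #
    #   history = ec2.get_spot_history('m1.small',
    #                                  start='2010-01-01T00:00:00',
    #                                  end='2010-01-07T00:00:00')
    #
    # Returns a list of (date, price) pairs and logs the current, maximum,
    # and average spot price over the requested period.
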
    def show_console_output(self, instance_id):
        instance = self.get_instance(instance_id)
        # strip any non-printable characters from the raw console output
        print ''.join([c for c in instance.get_console_output().output
                       if c in string.printable])
class EasyS3(EasyAWS):
    DefaultHost = 's3.amazonaws.com'
    _calling_format = boto.s3.connection.OrdinaryCallingFormat()

    def __init__(self, aws_access_key_id, aws_secret_access_key,
                 aws_s3_path='/', aws_port=None, aws_is_secure=True,
                 aws_region_host=DefaultHost, cache=False, **kwargs):
        kwargs = dict(is_secure=aws_is_secure, host=aws_region_host,
                      calling_format=self._calling_format, port=aws_port,
                      path=aws_s3_path)
        super(EasyS3, self).__init__(aws_access_key_id, aws_secret_access_key,
                                     boto.connect_s3, **kwargs)
        self.cache = cache
    def __check_for_auth_failure(self, e):
        if e.error_code == "InvalidAccessKeyId":
            raise e
    def get_buckets(self):
        try:
            buckets = self.conn.get_all_buckets()
        except TypeError, e:
            # boto raises a TypeError here when the credentials are malformed
            raise exception.AWSError("AWS credentials are not valid")
        return buckets
    def get_bucket_files(self, bucketname):
        files = []
        try:
            bucket = self.get_bucket(bucketname)
            files = [file for file in bucket.list()]
        except:
            pass
        return files
if __name__ == "__main__":
    from starcluster.config import get_easy_ec2
    ec2 = get_easy_ec2()
    ec2.list_all_instances()
    ec2.list_registered_images()