1 """
2 EC2/S3 Utility Classes
3 """
4
5 import os
6 import re
7 import time
8 import base64
9 import string
10 import tempfile
11
12 import boto
13 import boto.ec2
14 import boto.s3.connection
15
16 from starcluster import image
17 from starcluster import utils
18 from starcluster import static
19 from starcluster import webtools
20 from starcluster import exception
21 from starcluster import progressbar
22 from starcluster.utils import print_timing
23 from starcluster.logger import log
27 - def __init__(self, aws_access_key_id, aws_secret_access_key,
28 connection_authenticator, **kwargs):
29 """
30 Create an EasyAWS object.
31
32 Requires aws_access_key_id/aws_secret_access_key from an Amazon Web
33 Services (AWS) account and a connection_authenticator function that
34 returns an authenticated AWS connection object
35
36 Providing only the keys will default to using Amazon EC2
37
38 kwargs are passed to the connection_authenticator's constructor
39 """
40 self.aws_access_key_id = aws_access_key_id
41 self.aws_secret_access_key = aws_secret_access_key
42 self.connection_authenticator = connection_authenticator
43 self._conn = None
44 self._kwargs = kwargs
45
47 self._conn = None
48 return self.conn
49
50 @property
52 if self._conn is None:
53 log.debug('creating self._conn w/ connection_authenticator ' +
54 'kwargs = %s' % self._kwargs)
55 self._conn = self.connection_authenticator(
56 self.aws_access_key_id, self.aws_secret_access_key,
57 **self._kwargs)
58 return self._conn
59
62 - def __init__(self, aws_access_key_id, aws_secret_access_key,
63 aws_ec2_path='/', aws_s3_host=None, aws_s3_path='/',
64 aws_port=None, aws_region_name=None, aws_is_secure=True,
65 aws_region_host=None, aws_proxy=None, aws_proxy_port=None,
66 aws_proxy_user=None, aws_proxy_pass=None, **kwargs):
67 aws_region = None
68 if aws_region_name and aws_region_host:
69 aws_region = boto.ec2.regioninfo.RegionInfo(
70 name=aws_region_name, endpoint=aws_region_host)
71 kwargs = dict(is_secure=aws_is_secure, region=aws_region,
72 port=aws_port, path=aws_ec2_path, proxy=aws_proxy,
73 proxy_port=aws_proxy_port, proxy_user=aws_proxy_user,
74 proxy_pass=aws_proxy_pass)
75 super(EasyEC2, self).__init__(aws_access_key_id, aws_secret_access_key,
76 boto.connect_ec2, **kwargs)
77 kwargs = dict(aws_s3_host=aws_s3_host, aws_s3_path=aws_s3_path,
78 aws_port=aws_port, aws_is_secure=aws_is_secure,
79 aws_proxy=aws_proxy, aws_proxy_port=aws_proxy_port,
80 aws_proxy_user=aws_proxy_user,
81 aws_proxy_pass=aws_proxy_pass)
82 self.s3 = EasyS3(aws_access_key_id, aws_secret_access_key, **kwargs)
83 self._regions = None
84
86 return '<EasyEC2: %s (%s)>' % (self.region.name, self.region.endpoint)
87
89 """
90 Connects to a given region if it exists, raises RegionDoesNotExist
91 otherwise. Once connected, this object will return only data from the
92 given region.
93 """
94 region = self.get_region(region_name)
95 self._kwargs['region'] = region
96 self.reload()
97 return self
98
99 @property
101 """
102 Returns the current EC2 region used by this EasyEC2 object
103 """
104 return self.conn.region
105
106 @property
108 """
109 This property returns all AWS Regions, caching the results the first
110 time a request is made to Amazon
111 """
112 if not self._regions:
113 self._regions = {}
114 regions = self.conn.get_all_regions()
115 for region in regions:
116 self._regions[region.name] = region
117 return self._regions
118
120 """
121 Returns boto Region object if it exists, raises RegionDoesNotExist
122 otherwise.
123 """
124 if not region_name in self.regions:
125 raise exception.RegionDoesNotExist(region_name)
126 return self.regions.get(region_name)
127
129 """
130 Print name/endpoint for all AWS regions
131 """
132 regions = self.regions.items()
133 regions.sort(reverse=True)
134 for region in regions:
135 name, endpoint = region
136 print 'name: ', name
137 print 'endpoint: ', endpoint
138 print
139
140 @property
142 return self.conn.get_all_images(owners=["self"])
143
144 @property
146 return self.conn.get_all_images(executable_by=["self"])
147
154
155 - def create_group(self, name, description, auth_ssh=False,
156 auth_group_traffic=False):
157 """
158 Create security group with name/description. auth_ssh=True
159 will open port 22 to world (0.0.0.0/0). auth_group_traffic
160 will allow all traffic between instances in the same security
161 group
162 """
163 if not name:
164 return None
165 log.info("Creating security group %s..." % name)
166 sg = self.conn.create_security_group(name, description)
167 if auth_ssh:
168 ssh_port = static.DEFAULT_SSH_PORT
169 sg.authorize('tcp', ssh_port, ssh_port, static.WORLD_CIDRIP)
170 if auth_group_traffic:
171 sg.authorize('icmp', -1, -1,
172 src_group=self.get_group_or_none(name))
173 sg.authorize('tcp', 1, 65535,
174 src_group=self.get_group_or_none(name))
175 sg.authorize('udp', 1, 65535,
176 src_group=self.get_group_or_none(name))
177 return sg
178
180 """
181 Returns all security groups
182
183 groupnames - optional list of group names to retrieve
184 """
185 filters = {}
186 if groupnames:
187 filters = {'group-name': groupnames}
188 return self.get_security_groups(filters=filters)
189
198
199 - def get_or_create_group(self, name, description, auth_ssh=True,
200 auth_group_traffic=False):
201 """
202 Try to return a security group by name. If the group is not found,
203 attempt to create it. Description only applies to creation.
204
205 auth_ssh - authorize ssh traffic from world
206 auth_group_traffic - authorizes all traffic between members of the
207 group
208 """
209 sg = self.get_group_or_none(name)
210 if not sg:
211 sg = self.create_group(name, description, auth_ssh,
212 auth_group_traffic)
213 return sg
214
225
231
234 """
235 Returns the rule with the specified port range permission (ip_protocol,
236 from_port, to_port, cidr_ip) defined or None if no such rule exists
237 """
238 for rule in group.rules:
239 if rule.ip_protocol != ip_protocol:
240 continue
241 if int(rule.from_port) != from_port:
242 continue
243 if int(rule.to_port) != to_port:
244 continue
245 if cidr_ip:
246 cidr_grants = [g for g in rule.grants if g.cidr_ip == cidr_ip]
247 if not cidr_grants:
248 continue
249 return rule
250
251 - def has_permission(self, group, ip_protocol, from_port, to_port, cidr_ip):
252 """
253 Checks whether group has the specified port range permission
254 (ip_protocol, from_port, to_port, cidr_ip) defined
255 """
256 for rule in group.rules:
257 if rule.ip_protocol != ip_protocol:
258 continue
259 if int(rule.from_port) != from_port:
260 continue
261 if int(rule.to_port) != to_port:
262 continue
263 cidr_grants = [g for g in rule.grants if g.cidr_ip == cidr_ip]
264 if not cidr_grants:
265 continue
266 return True
267 return False
268
270 """
271 Create a new placement group for your account.
272 This will create the placement group within the region you
273 are currently connected to.
274 """
275 log.info("Creating placement group %s..." % name)
276 success = self.conn.create_placement_group(name)
277 if not success:
278 log.debug(
279 "failed to create placement group '%s' (error = %s)" %
280 (name, success))
281 raise exception.AWSError(
282 "failed to create placement group '%s'" % name)
283 return self.get_placement_group(name)
284
286 return self.conn.get_all_placement_groups(filters=filters)
287
298
313
324
325 - def request_instances(self, image_id, price=None, instance_type='m1.small',
326 min_count=1, max_count=1, count=1, key_name=None,
327 security_groups=None, launch_group=None,
328 availability_zone_group=None, placement=None,
329 user_data=None, placement_group=None):
330 """
331 Convenience method for running spot or flat-rate instances
332 """
333 if price:
334 return self.request_spot_instances(
335 price, image_id, instance_type=instance_type,
336 count=count, launch_group=launch_group, key_name=key_name,
337 security_groups=security_groups,
338 availability_zone_group=availability_zone_group,
339 placement=placement, user_data=user_data)
340 else:
341 return self.run_instances(
342 image_id, instance_type=instance_type,
343 min_count=min_count, max_count=max_count,
344 key_name=key_name, security_groups=security_groups,
345 placement=placement, user_data=user_data,
346 placement_group=placement_group)
347
348 - def request_spot_instances(self, price, image_id, instance_type='m1.small',
349 count=1, launch_group=None, key_name=None,
350 availability_zone_group=None,
351 security_groups=None,
352 placement=None, user_data=None):
359
360 - def run_instances(self, image_id, instance_type='m1.small', min_count=1,
361 max_count=1, key_name=None, security_groups=None,
362 placement=None, user_data=None, placement_group=None):
371
372 - def create_image(self, instance_id, name, description=None,
373 no_reboot=False):
374 return self.conn.create_image(instance_id, name,
375 description=description,
376 no_reboot=no_reboot)
377
378 - def register_image(self, name, description=None, image_location=None,
379 architecture=None, kernel_id=None, ramdisk_id=None,
380 root_device_name=None, block_device_map=None):
381 return self.conn.register_image(name=name, description=description,
382 image_location=image_location,
383 architecture=architecture,
384 kernel_id=kernel_id,
385 ramdisk_id=ramdisk_id,
386 root_device_name=root_device_name,
387 block_device_map=block_device_map)
388
390 return self.conn.delete_key_pair(name)
391
393 """
394 Create a new EC2 keypair and optionally save to output_file
395
396 Returns boto.ec2.keypair.KeyPair
397 """
398 if output_file:
399 output_dir = os.path.dirname(output_file)
400 if output_dir and not os.path.exists(output_dir):
401 raise exception.BaseException(
402 "output directory does not exist")
403 if os.path.exists(output_file):
404 raise exception.BaseException(
405 "cannot save keypair %s: file already exists" %
406 output_file)
407 kp = self.conn.create_key_pair(name)
408 if output_file:
409 try:
410 kfile = open(output_file, 'wb')
411 kfile.write(kp.material)
412 kfile.close()
413 os.chmod(output_file, 0400)
414 except IOError, e:
415 raise exception.BaseException(str(e))
416 return kp
417
419 return self.conn.get_all_key_pairs(filters=filters)
420
430
436
438 print msg
439 print "-" * len(msg)
440
442 image_name = re.sub('\.manifest\.xml$', '',
443 img.location.split('/')[-1])
444 return image_name
445
447 try:
448 attrs = self.conn.get_instance_attribute(instance_id, 'userData')
449 user_data = attrs.get('userData', '')
450 return base64.b64decode(user_data)
451 except boto.exception.EC2ResponseError, e:
452 if e.error_code == "InvalidInstanceID.NotFound":
453 raise exception.InstanceDoesNotExist(instance_id)
454 raise e
455
467
478
480 try:
481 self.get_all_instances()
482 return True
483 except boto.exception.EC2ResponseError, e:
484 cred_errs = ['AuthFailure', 'SignatureDoesNotMatch']
485 if e.error_code in cred_errs:
486 return False
487 raise
488
490 spots = self.conn.get_all_spot_instance_requests(spot_ids,
491 filters=filters)
492 return spots
493
495 s = self.conn.get_all_spot_instance_requests()
496 if not s:
497 log.info("No spot instance requests found...")
498 return
499 spots = []
500 for spot in s:
501 if spot.state in ['closed', 'cancelled'] and not show_closed:
502 continue
503 state = spot.state or 'N/A'
504 spot_id = spot.id or 'N/A'
505 spots.append(spot_id)
506 type = spot.type
507 instance_id = spot.instance_id or 'N/A'
508 create_time = spot.create_time or 'N/A'
509 launch_group = spot.launch_group or 'N/A'
510 zone_group = spot.availability_zone_group or 'N/A'
511 price = spot.price or 'N/A'
512 lspec = spot.launch_specification
513 instance_type = lspec.instance_type
514 image_id = lspec.image_id
515 zone = lspec.placement
516 groups = ', '.join([g.id for g in lspec.groups])
517 print "id: %s" % spot_id
518 print "price: $%0.2f" % price
519 print "spot_request_type: %s" % type
520 print "state: %s" % state
521 print "instance_id: %s" % instance_id
522 print "instance_type: %s" % instance_type
523 print "image_id: %s" % image_id
524 print "zone: %s" % zone
525 print "create_time: %s" % create_time
526 print "launch_group: %s" % launch_group
527 print "zone_group: %s" % zone_group
528 print "security_groups: %s" % groups
529 print
530 if not spots:
531 log.info("No spot instance requests found...")
532
562
564 insts = self.get_all_instances()
565 if not insts:
566 log.info("No instances found")
567 return
568 tstates = ['shutting-down', 'terminated']
569 for instance in insts:
570 if not instance.state in tstates or show_terminated:
571 self.show_instance(instance)
572
    def list_images(self, images, sort_key=None, reverse=False):
        """
        Pretty-print a list of images, split into 32bit (i386) and 64bit
        (x86_64) sections.

        images - list of boto image objects to display
        sort_key - optional key function used to sort each section
                   (defaults to sorting by "<region name> <location>")
        reverse - sort each section in descending order
        """
        # default sort key: region name followed by the image location
        def get_key(obj):
            return ' '.join([obj.region.name, obj.location])
        if not sort_key:
            sort_key = get_key
        # split by architecture; images with any other architecture are not
        # listed in either section but still count toward the total below
        imgs_i386 = [img for img in images if img.architecture == "i386"]
        imgs_i386.sort(key=sort_key, reverse=reverse)
        imgs_x86_64 = [img for img in images if img.architecture == "x86_64"]
        imgs_x86_64.sort(key=sort_key, reverse=reverse)
        print
        self.__list_images("32bit Images:", imgs_i386)
        self.__list_images("\n64bit Images:", imgs_x86_64)
        print "\ntotal images: %d" % len(images)
        print
587
592
597
599 counter = 0
600 self.__print_header(msg)
601 for img in imgs:
602 name = self.get_image_name(img)
603 template = "[%d] %s %s %s"
604 if img.virtualization_type == 'hvm':
605 template += ' (HVM-EBS)'
606 elif img.root_device_type == 'ebs':
607 template += ' (EBS)'
608 print template % (counter, img.id, img.region.name, name)
609 counter += 1
610
612 if pretend:
613 log.info("Pretending to remove image files...")
614 else:
615 log.info('Removing image files...')
616 files = self.get_image_files(image_name)
617 for f in files:
618 if pretend:
619 log.info("Would remove file: %s" % f.name)
620 else:
621 log.info('Removing file %s' % f.name)
622 f.delete()
623 if not pretend:
624 files = self.get_image_files(image_name)
625 if len(files) != 0:
626 log.warn('Not all files deleted, recursing...')
627 self.remove_image_files(image_name, pretend)
628
629 @print_timing("Removing image")
630 - def remove_image(self, image_name, pretend=True, keep_image_data=True):
631 img = self.get_image(image_name)
632 if pretend:
633 log.info('Pretending to deregister AMI: %s' % img.id)
634 else:
635 log.info('Deregistering AMI: %s' % img.id)
636 img.deregister()
637 if img.root_device_type == "instance-store" and not keep_image_data:
638 self.remove_image_files(img, pretend=pretend)
639 elif img.root_device_type == "ebs" and not keep_image_data:
640 rootdevtype = img.block_device_mapping.get('/dev/sda1', None)
641 if rootdevtype:
642 snapid = rootdevtype.snapshot_id
643 if snapid:
644 snap = self.get_snapshot(snapid)
645 if pretend:
646 log.info("Would remove snapshot: %s" % snapid)
647 else:
648 log.info("Removing snapshot: %s" % snapid)
649 snap.delete()
650
652 images = self.conn.get_all_images(owners=[static.STARCLUSTER_OWNER_ID])
653 log.info("Listing all public StarCluster images...")
654 imgs = [img for img in images if img.is_public]
655
656 def sc_public_sort(obj):
657 split = obj.name.split('-')
658 osname, osversion, arch = split[2:5]
659 osversion = float(osversion)
660 rc = 0
661 if split[-1].startswith('rc'):
662 rc = int(split[-1].replace('rc', ''))
663 return (osversion, rc)
664 self.list_images(imgs, sort_key=sc_public_sort, reverse=True)
665
668
670 vol = self.get_volume(volume_id)
671 vol.delete()
672
674 keypairs = self.keypairs
675 if not keypairs:
676 log.info("No keypairs found...")
677 return
678 max_length = max([len(key.name) for key in keypairs])
679 templ = "%" + str(max_length) + "s %s"
680 for key in self.keypairs:
681 print templ % (key.name, key.fingerprint)
682
704
706 return self.conn.get_all_zones(filters=filters)
707
720
722 """
723 Return zone object representing an EC2 availability zone
724 Returns None if unsuccessful
725 """
726 try:
727 return self.get_zone(zone)
728 except exception.ZoneDoesNotExist:
729 pass
730
731 - def create_s3_image(self, instance_id, key_location, aws_user_id,
732 ec2_cert, ec2_private_key, bucket, image_name="image",
733 description=None, kernel_id=None, ramdisk_id=None,
734 remove_image_files=False, **kwargs):
735 """
736 Create instance-store (S3) image from running instance
737 """
738 icreator = image.S3ImageCreator(self, instance_id, key_location,
739 aws_user_id, ec2_cert,
740 ec2_private_key, bucket,
741 image_name=image_name,
742 description=description,
743 kernel_id=kernel_id,
744 ramdisk_id=ramdisk_id,
745 remove_image_files=remove_image_files)
746 return icreator.create_image()
747
748 - def create_ebs_image(self, instance_id, key_location, name,
749 description=None, snapshot_description=None,
750 kernel_id=None, ramdisk_id=None, root_vol_size=15,
751 **kwargs):
752 """
753 Create EBS-backed image from running instance
754 """
755 sdescription = snapshot_description
756 icreator = image.EBSImageCreator(self, instance_id, key_location,
757 name, description=description,
758 snapshot_description=sdescription,
759 kernel_id=kernel_id,
760 ramdisk_id=ramdisk_id,
761 **kwargs)
762 return icreator.create_image(size=root_vol_size)
763
765 return self.conn.get_all_images(filters=filters)
766
780
790
792 """
793 Returns a list of files on S3 for an EC2 instance-store (S3-backed)
794 image. This includes the image's manifest and part files.
795 """
796 if not hasattr(image, 'id'):
797 image = self.get_image(image)
798 if image.root_device_type == 'ebs':
799 raise exception.AWSError(
800 "Image %s is an EBS image. No image files on S3." % image.id)
801 bucket = self.get_image_bucket(image)
802 bname = re.escape(bucket.name)
803 prefix = re.sub('^%s\/' % bname, '', image.location)
804 prefix = re.sub('\.manifest\.xml$', '', prefix)
805 files = bucket.list(prefix=prefix)
806 manifest_regex = re.compile(r'%s\.manifest\.xml' % prefix)
807 part_regex = re.compile(r'%s\.part\.(\d*)' % prefix)
808
809
810 files = [f for f in files if hasattr(f, 'delete') and
811 part_regex.match(f.name) or manifest_regex.match(f.name)]
812 return files
813
815 bucket_name = image.location.split('/')[0]
816 return self.s3.get_bucket(bucket_name)
817
819 return image.location.split('/')[-1]
820
    @print_timing("Migrating image")
    def migrate_image(self, image_id, destbucket, migrate_manifest=False,
                      kernel_id=None, ramdisk_id=None, region=None, cert=None,
                      private_key=None):
        """
        Migrate image_id files to destbucket

        Copies every S3 part/manifest file of an instance-store image to
        destbucket. When migrate_manifest is True the manifest is also
        rewritten via the external `ec2-migrate-manifest` tool, which
        requires cert, private_key, kernel_id and ramdisk_id.

        Raises exception.AWSError for EBS-backed images (no S3 files) and
        exception.BaseException for missing tool arguments or tool failure.
        """
        if migrate_manifest:
            # fail fast: the external tool and all of its inputs must be
            # available before any files are copied
            utils.check_required(['ec2-migrate-manifest'])
            if not cert:
                raise exception.BaseException("no cert specified")
            if not private_key:
                raise exception.BaseException("no private_key specified")
            if not kernel_id:
                raise exception.BaseException("no kernel_id specified")
            if not ramdisk_id:
                raise exception.BaseException("no ramdisk_id specified")
        image = self.get_image(image_id)
        if image.root_device_type == "ebs":
            raise exception.AWSError(
                "The image you wish to migrate is EBS-based. " +
                "This method only works for instance-store images")
        files = self.get_image_files(image)
        if not files:
            log.info("No files found for image: %s" % image_id)
            return
        log.info("Migrating image: %s" % image_id)
        widgets = [files[0].name, progressbar.Percentage(), ' ',
                   progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
                   progressbar.ETA(), ' ', ' ']
        counter = 0
        num_files = len(files)
        pbar = progressbar.ProgressBar(widgets=widgets,
                                       maxval=num_files).start()
        for f in files:
            # widget slot 0 doubles as the "current file (i/n)" label
            widgets[0] = "%s: (%s/%s)" % (f.name, counter + 1, num_files)
            # server-side S3 copy: file contents never pass through this host
            f.copy(destbucket, f.name)
            pbar.update(counter)
            counter += 1
        pbar.finish()
        if migrate_manifest:
            dbucket = self.s3.get_bucket(destbucket)
            manifest_key = dbucket.get_key(self.get_image_manifest(image))
            # download the copied manifest to a temp file for the tool to edit
            f = tempfile.NamedTemporaryFile()
            manifest_key.get_contents_to_file(f.file)
            f.file.close()
            cmd = ('ec2-migrate-manifest -c %s -k %s -m %s --kernel %s ' +
                   '--ramdisk %s --no-mapping ') % (cert, private_key,
                                                    f.name, kernel_id,
                                                    ramdisk_id)
            register_cmd = "ec2-register %s/%s" % (destbucket,
                                                   manifest_key.name)
            if region:
                cmd += '--region %s' % region
                register_cmd += " --region %s" % region
            log.info("Migrating manifest file...")
            retval = os.system(cmd)
            if retval != 0:
                raise exception.BaseException(
                    "ec2-migrate-manifest failed with status %s" % retval)
            # reopen: the external tool rewrote the temp file in place
            f.file = open(f.name, 'r')
            manifest_key.set_contents_from_file(f.file)
            # NOTE(review): grants Amazon's za-team read access on the
            # manifest — presumably needed for registration; confirm
            manifest_key.add_email_grant('READ', 'za-team@amazon.com')
            f.close()
            # the tool leaves a .bak copy of the original manifest behind
            os.unlink(f.name + '.bak')
            log.info("Manifest migrated successfully. You can now run:\n" +
                     register_cmd + "\nto register your migrated image.")
890
891 - def create_root_block_device_map(self, snapshot_id,
892 root_device_name='/dev/sda1',
893 add_ephemeral_drives=False,
894 ephemeral_drive_0='/dev/sdb1',
895 ephemeral_drive_1='/dev/sdc1',
896 ephemeral_drive_2='/dev/sdd1',
897 ephemeral_drive_3='/dev/sde1'):
898 """
899 Utility method for building a new block_device_map for a given snapshot
900 id. This is useful when creating a new image from a volume snapshot.
901 The returned block device map can be used with self.register_image
902 """
903 bmap = boto.ec2.blockdevicemapping.BlockDeviceMapping()
904 sda1 = boto.ec2.blockdevicemapping.BlockDeviceType()
905 sda1.snapshot_id = snapshot_id
906 sda1.delete_on_termination = True
907 bmap[root_device_name] = sda1
908 if add_ephemeral_drives:
909 sdb1 = boto.ec2.blockdevicemapping.BlockDeviceType()
910 sdb1.ephemeral_name = 'ephemeral0'
911 bmap[ephemeral_drive_0] = sdb1
912 sdc1 = boto.ec2.blockdevicemapping.BlockDeviceType()
913 sdc1.ephemeral_name = 'ephemeral1'
914 bmap[ephemeral_drive_1] = sdc1
915 sdd1 = boto.ec2.blockdevicemapping.BlockDeviceType()
916 sdd1.ephemeral_name = 'ephemeral2'
917 bmap[ephemeral_drive_2] = sdd1
918 sde1 = boto.ec2.blockdevicemapping.BlockDeviceType()
919 sde1.ephemeral_name = 'ephemeral3'
920 bmap[ephemeral_drive_3] = sde1
921 return bmap
922
923 @print_timing("Downloading image")
938 log.info("Downloading image: %s" % image_id)
939 for file in files:
940 widgets[0] = "%s:" % file.name
941 pbar = progressbar.ProgressBar(widgets=widgets,
942 maxval=file.size).start()
943 file.get_contents_to_filename(os.path.join(destdir, file.name),
944 cb=_dl_progress_cb)
945 pbar.finish()
946
948 """
949 Print a list of files for image_id to the screen
950 """
951 files = self.get_image_files(image_id)
952 for file in files:
953 print file.name
954
955 @property
958
959 @property
962
966
968 """
969 Returns a list of all EBS volumes
970 """
971 return self.conn.get_all_volumes(filters=filters)
972
974 """
975 Returns EBS volume object representing volume_id.
976 Raises exception.VolumeDoesNotExist if unsuccessful
977 """
978 try:
979 return self.get_volumes(filters={'volume-id': volume_id})[0]
980 except boto.exception.EC2ResponseError, e:
981 if e.error_code == "InvalidVolume.NotFound":
982 raise exception.VolumeDoesNotExist(volume_id)
983 raise
984 except IndexError:
985 raise exception.VolumeDoesNotExist(volume_id)
986
988 """
989 Returns EBS volume object representing volume_id.
990 Returns None if unsuccessful
991 """
992 try:
993 return self.get_volume(volume_id)
994 except exception.VolumeDoesNotExist:
995 pass
996
1013
1014 - def create_snapshot(self, vol, description=None, wait_for_snapshot=False,
1015 refresh_interval=30):
1021
1022 - def get_snapshots(self, volume_ids=[], filters=None, owner='self'):
1023 """
1024 Returns a list of all EBS volume snapshots
1025 """
1026 filters = filters or {}
1027 if volume_ids:
1028 filters['volume-id'] = volume_ids
1029 return self.conn.get_all_snapshots(owner=owner, filters=filters)
1030
1032 """
1033 Returns EBS snapshot object for snapshot_id.
1034
1035 Raises exception.SnapshotDoesNotExist if unsuccessful
1036 """
1037 try:
1038 return self.get_snapshots(filters={'snapshot-id': snapshot_id},
1039 owner=owner)[0]
1040 except boto.exception.EC2ResponseError, e:
1041 if e.error_code == "InvalidSnapshot.NotFound":
1042 raise exception.SnapshotDoesNotExist(snapshot_id)
1043 raise
1044 except IndexError:
1045 raise exception.SnapshotDoesNotExist(snapshot_id)
1046
    def list_volumes(self, volume_id=None, status=None, attach_status=None,
                     size=None, zone=None, snapshot_id=None,
                     show_deleted=False, tags=None, name=None):
        """
        Print a list of volumes to the screen

        All arguments are optional server-side filters; a volume must match
        every filter given. tags maps a tag name either to a value (tag
        must equal that value) or to a falsy value (tag key must merely
        exist). show_deleted additionally includes deleting/deleted
        volumes. name filters on the 'Name' tag.
        """
        filters = {}
        if status:
            filters['status'] = status
        else:
            # default: every non-deleted state
            filters['status'] = ['creating', 'available', 'in-use', 'error']
        if show_deleted:
            filters['status'] += ['deleting', 'deleted']
        if attach_status:
            filters['attachment.status'] = attach_status
        if volume_id:
            filters['volume-id'] = volume_id
        if size:
            filters['size'] = size
        if zone:
            filters['availability-zone'] = zone
        if snapshot_id:
            filters['snapshot-id'] = snapshot_id
        if tags:
            tagkeys = []
            for tag in tags:
                val = tags.get(tag)
                if val:
                    # tag must exist with this exact value
                    filters["tag:%s" % tag] = val
                elif tag:
                    # falsy value: only require the tag key to exist
                    tagkeys.append(tag)
            if tagkeys:
                filters['tag-key'] = tagkeys
        if name:
            filters['tag:Name'] = name
        vols = self.get_volumes(filters=filters)
        # oldest volumes first
        vols.sort(key=lambda x: x.create_time)
        if vols:
            for vol in vols:
                print "volume_id: %s" % vol.id
                print "size: %sGB" % vol.size
                print "status: %s" % vol.status
                if vol.attachment_state():
                    print "attachment_status: %s" % vol.attachment_state()
                print "availability_zone: %s" % vol.zone
                if vol.snapshot_id:
                    print "snapshot_id: %s" % vol.snapshot_id
                snapshots = self.get_snapshots(volume_ids=[vol.id])
                if snapshots:
                    snap_list = ' '.join([snap.id for snap in snapshots])
                    print 'snapshots: %s' % snap_list
                if vol.create_time:
                    # convert the ISO8601 timestamp to local time for display
                    lt = utils.iso_to_localtime_tuple(vol.create_time)
                    print "create_time: %s" % lt
                tags = []
                for tag in vol.tags:
                    val = vol.tags.get(tag)
                    if val:
                        tags.append("%s=%s" % (tag, val))
                    else:
                        tags.append(tag)
                if tags:
                    print "tags: %s" % ', '.join(tags)
                print
        print 'Total: %s' % len(vols)
1112
    def get_spot_history(self, instance_type, start=None, end=None, plot=False,
                         plot_server_interface="localhost",
                         plot_launch_browser=True, plot_web_browser=None,
                         plot_shutdown_server=True):
        """
        Fetch and summarize the Linux/UNIX spot price history for
        instance_type between start and end (ISO8601 strings, both
        optional). Logs the current, max and average price; when plot=True
        also serves an interactive plot from a local webserver (blocking
        until the server shuts down).

        Returns the history as a list of [timestamp, price] pairs.

        Raises exception.InvalidIsoDate for malformed dates and
        exception.SpotHistoryError when no history is available.
        """
        if start and not utils.is_iso_time(start):
            raise exception.InvalidIsoDate(start)
        if end and not utils.is_iso_time(end):
            raise exception.InvalidIsoDate(end)
        pdesc = "Linux/UNIX"
        hist = self.conn.get_spot_price_history(start_time=start, end_time=end,
                                                instance_type=instance_type,
                                                product_description=pdesc)
        if not hist:
            raise exception.SpotHistoryError(start, end)
        dates = []
        prices = []
        data = []
        for item in hist:
            # millisecond (javascript) timestamps are what the plot expects
            timestamp = utils.iso_to_javascript_timestamp(item.timestamp)
            price = item.price
            dates.append(timestamp)
            prices.append(price)
            data.append([timestamp, price])
        maximum = max(prices)
        avg = sum(prices) / float(len(prices))
        log.info("Current price: $%.2f" % prices[-1])
        log.info("Max price: $%.2f" % maximum)
        log.info("Average price: $%.2f" % avg)
        if plot:
            # pan range pads half a window on either side of the data;
            # zoom range runs from near-zero to the full padded extent
            xaxisrange = dates[-1] - dates[0]
            xpanrange = [dates[0] - xaxisrange / 2.,
                         dates[-1] + xaxisrange / 2.]
            xzoomrange = [0.1, xpanrange[-1] - xpanrange[0]]
            minimum = min(prices)
            yaxisrange = maximum - minimum
            ypanrange = [minimum - yaxisrange / 2., maximum + yaxisrange / 2.]
            yzoomrange = [0.1, ypanrange[-1] - ypanrange[0]]
            # strip Python 2 long-integer 'L' suffixes from the repr so the
            # embedded series parses as javascript
            context = dict(instance_type=instance_type,
                           start=start, end=end,
                           time_series_data=str(data).replace('L', ''),
                           shutdown=plot_shutdown_server,
                           xpanrange=xpanrange, ypanrange=ypanrange,
                           xzoomrange=xzoomrange, yzoomrange=yzoomrange)
            log.info("", extra=dict(__raw__=True))
            log.info("Starting StarCluster Webserver...")
            s = webtools.get_template_server('web', context=context,
                                             interface=plot_server_interface)
            base_url = "http://%s:%s" % s.server_address
            shutdown_url = '/'.join([base_url, 'shutdown'])
            spot_url = "http://%s:%s/spothistory.html" % s.server_address
            log.info("Server address is %s" % base_url)
            log.info("(use CTRL-C or navigate to %s to shutdown server)" %
                     shutdown_url)
            if plot_launch_browser:
                webtools.open_browser(spot_url, plot_web_browser)
            else:
                log.info("Browse to %s to view the spot history plot" %
                         spot_url)
            # blocks here until the server is shut down
            s.serve_forever()
        return data
1173
1175 instance = self.get_instance(instance_id)
1176 console_output = instance.get_console_output().output
1177 print ''.join([c for c in console_output if c in string.printable])
1178
1181 DefaultHost = 's3.amazonaws.com'
1182 _calling_format = boto.s3.connection.OrdinaryCallingFormat()
1183
1184 - def __init__(self, aws_access_key_id, aws_secret_access_key,
1185 aws_s3_path='/', aws_port=None, aws_is_secure=True,
1186 aws_s3_host=DefaultHost, aws_proxy=None, aws_proxy_port=None,
1187 aws_proxy_user=None, aws_proxy_pass=None, **kwargs):
1188 kwargs = dict(is_secure=aws_is_secure, host=aws_s3_host or
1189 self.DefaultHost, port=aws_port, path=aws_s3_path,
1190 proxy=aws_proxy, proxy_port=aws_proxy_port,
1191 proxy_user=aws_proxy_user, proxy_pass=aws_proxy_pass)
1192 if aws_s3_host:
1193 kwargs.update(dict(calling_format=self._calling_format))
1194 super(EasyS3, self).__init__(aws_access_key_id, aws_secret_access_key,
1195 boto.connect_s3, **kwargs)
1196
1198 return '<EasyS3: %s>' % self.conn.server_name()
1199
1201 """
1202 Create a new bucket on S3. bucket_name must be unique, the bucket
1203 namespace is shared by all AWS users
1204 """
1205 bucket_name = bucket_name.split('/')[0]
1206 try:
1207 return self.conn.create_bucket(bucket_name)
1208 except boto.exception.S3CreateError, e:
1209 if e.error_code == "BucketAlreadyExists":
1210 raise exception.BucketAlreadyExists(bucket_name)
1211 raise
1212
1221
1228
1230 """
1231 Returns bucket object representing S3 bucket
1232 Returns None if unsuccessful
1233 """
1234 try:
1235 return self.get_bucket(bucket_name)
1236 except exception.BucketDoesNotExist:
1237 pass
1238
1240 """
1241 Returns bucket object representing S3 bucket
1242 """
1243 try:
1244 return self.conn.get_bucket(bucketname)
1245 except boto.exception.S3ResponseError, e:
1246 if e.error_code == "NoSuchBucket":
1247 raise exception.BucketDoesNotExist(bucketname)
1248 raise
1249
1251 bucket = self.get_bucket(bucketname)
1252 for file in bucket.list():
1253 if file.name:
1254 print file.name
1255
1257 try:
1258 buckets = self.conn.get_all_buckets()
1259 except TypeError:
1260
1261 raise exception.AWSError("AWS credentials are not valid")
1262 return buckets
1263
1267
1272
if __name__ == "__main__":
    # Ad-hoc smoke test: requires a StarCluster config with valid AWS
    # credentials. Lists this account's instances and registered images.
    from starcluster.config import get_easy_ec2
    ec2 = get_easy_ec2()
    ec2.list_all_instances()
    ec2.list_registered_images()
1278