1
2 """
3 EC2/S3 Utility Classes
4 """
5
6 import os
7 import re
8 import time
9 import base64
10 import string
11 import tempfile
12
13 import boto
14 import boto.ec2
15 import boto.s3.connection
16
17 from starcluster import image
18 from starcluster import utils
19 from starcluster import static
20 from starcluster import webtools
21 from starcluster import exception
22 from starcluster import progressbar
23 from starcluster.utils import print_timing
24 from starcluster.logger import log
28 - def __init__(self, aws_access_key_id, aws_secret_access_key,
29 connection_authenticator, **kwargs):
30 """
31 Create an EasyAWS object.
32
33 Requires aws_access_key_id/aws_secret_access_key from an Amazon Web
34 Services (AWS) account and a connection_authenticator function that
35 returns an authenticated AWS connection object
36
37 Providing only the keys will default to using Amazon EC2
38
39 kwargs are passed to the connection_authenticator's constructor
40 """
41 self.aws_access_key_id = aws_access_key_id
42 self.aws_secret_access_key = aws_secret_access_key
43 self.connection_authenticator = connection_authenticator
44 self._conn = None
45 self._kwargs = kwargs
46
48 self._conn = None
49 return self.conn
50
51 @property
53 if self._conn is None:
54 log.debug('creating self._conn w/ connection_authenticator ' +
55 'kwargs = %s' % self._kwargs)
56 self._conn = self.connection_authenticator(
57 self.aws_access_key_id, self.aws_secret_access_key,
58 **self._kwargs)
59 return self._conn
60
63 - def __init__(self, aws_access_key_id, aws_secret_access_key,
64 aws_ec2_path='/', aws_s3_host=None, aws_s3_path='/',
65 aws_port=None, aws_region_name=None, aws_is_secure=True,
66 aws_region_host=None, **kwargs):
67 aws_region = None
68 if aws_region_name and aws_region_host:
69 aws_region = boto.ec2.regioninfo.RegionInfo(
70 name=aws_region_name, endpoint=aws_region_host)
71 kwargs = dict(is_secure=aws_is_secure, region=aws_region,
72 port=aws_port, path=aws_ec2_path)
73 super(EasyEC2, self).__init__(aws_access_key_id, aws_secret_access_key,
74 boto.connect_ec2, **kwargs)
75 kwargs = dict(aws_s3_host=aws_s3_host,
76 aws_s3_path=aws_s3_path,
77 aws_port=aws_port,
78 aws_is_secure=aws_is_secure)
79 self.s3 = EasyS3(aws_access_key_id, aws_secret_access_key, **kwargs)
80 self._regions = None
81
83 return '<EasyEC2: %s (%s)>' % (self.region.name, self.region.endpoint)
84
86 """
87 Connects to a given region if it exists, raises RegionDoesNotExist
88 otherwise. Once connected, this object will return only data from the
89 given region.
90 """
91 region = self.get_region(region_name)
92 self._kwargs['region'] = region
93 self.reload()
94 return self
95
96 @property
98 """
99 Returns the current EC2 region used by this EasyEC2 object
100 """
101 return self.conn.region
102
103 @property
105 """
106 This property returns all AWS Regions, caching the results the first
107 time a request is made to Amazon
108 """
109 if not self._regions:
110 self._regions = {}
111 regions = self.conn.get_all_regions()
112 for region in regions:
113 self._regions[region.name] = region
114 return self._regions
115
117 """
118 Returns boto Region object if it exists, raises RegionDoesNotExist
119 otherwise.
120 """
121 if not region_name in self.regions:
122 raise exception.RegionDoesNotExist(region_name)
123 return self.regions.get(region_name)
124
126 """
127 Print name/endpoint for all AWS regions
128 """
129 regions = self.regions.items()
130 regions.sort(reverse=True)
131 for region in regions:
132 name, endpoint = region
133 print 'name: ', name
134 print 'endpoint: ', endpoint
135 print
136
137 @property
139 return self.conn.get_all_images(owners=["self"])
140
141 @property
143 return self.conn.get_all_images(executable_by=["self"])
144
151
152 - def create_group(self, name, description, auth_ssh=False,
153 auth_group_traffic=False):
154 """
155 Create security group with name/description. auth_ssh=True
156 will open port 22 to world (0.0.0.0/0). auth_group_traffic
157 will allow all traffic between instances in the same security
158 group
159 """
160 if not name:
161 return None
162 log.info("Creating security group %s..." % name)
163 sg = self.conn.create_security_group(name, description)
164 if auth_ssh:
165 ssh_port = static.DEFAULT_SSH_PORT
166 sg.authorize('tcp', ssh_port, ssh_port, static.WORLD_CIDRIP)
167 if auth_group_traffic:
168 sg.authorize('icmp', -1, -1,
169 src_group=self.get_group_or_none(name))
170 sg.authorize('tcp', 1, 65535,
171 src_group=self.get_group_or_none(name))
172 sg.authorize('udp', 1, 65535,
173 src_group=self.get_group_or_none(name))
174 return sg
175
177 """
178 Returns all security groups
179
180 groupnames - optional list of group names to retrieve
181 """
182 filters = {}
183 if groupnames:
184 filters = {'group-name': groupnames}
185 return self.get_security_groups(filters=filters)
186
195
196 - def get_or_create_group(self, name, description, auth_ssh=True,
197 auth_group_traffic=False):
198 """
199 Try to return a security group by name. If the group is not found,
200 attempt to create it. Description only applies to creation.
201
202 auth_ssh - authorize ssh traffic from world
203 auth_group_traffic - authorizes all traffic between members of the
204 group
205 """
206 sg = self.get_group_or_none(name)
207 if not sg:
208 sg = self.create_group(name, description, auth_ssh,
209 auth_group_traffic)
210 return sg
211
222
228
231 """
232 Returns the rule with the specified port range permission (ip_protocol,
233 from_port, to_port, cidr_ip) defined or None if no such rule exists
234 """
235 for rule in group.rules:
236 if rule.ip_protocol != ip_protocol:
237 continue
238 if int(rule.from_port) != from_port:
239 continue
240 if int(rule.to_port) != to_port:
241 continue
242 if cidr_ip:
243 cidr_grants = [g for g in rule.grants if g.cidr_ip == cidr_ip]
244 if not cidr_grants:
245 continue
246 return rule
247
248 - def has_permission(self, group, ip_protocol, from_port, to_port, cidr_ip):
249 """
250 Checks whether group has the specified port range permission
251 (ip_protocol, from_port, to_port, cidr_ip) defined
252 """
253 for rule in group.rules:
254 if rule.ip_protocol != ip_protocol:
255 continue
256 if int(rule.from_port) != from_port:
257 continue
258 if int(rule.to_port) != to_port:
259 continue
260 cidr_grants = [g for g in rule.grants if g.cidr_ip == cidr_ip]
261 if not cidr_grants:
262 continue
263 return True
264 return False
265
267 """
268 Create a new placement group for your account.
269 This will create the placement group within the region you
270 are currently connected to.
271 """
272 log.info("Creating placement group %s..." % name)
273 success = self.conn.create_placement_group(name)
274 if success:
275 return self.get_placement_group_or_none(name)
276
278 return self.conn.get_all_placement_groups(filters=filters)
279
290
305
316
317 - def request_instances(self, image_id, price=None, instance_type='m1.small',
318 min_count=1, max_count=1, count=1, key_name=None,
319 security_groups=None, launch_group=None,
320 availability_zone_group=None, placement=None,
321 user_data=None, placement_group=None):
322 """
323 Convenience method for running spot or flat-rate instances
324 """
325 if price:
326 return self.request_spot_instances(
327 price, image_id, instance_type=instance_type,
328 count=count, launch_group=launch_group, key_name=key_name,
329 security_groups=security_groups,
330 availability_zone_group=availability_zone_group,
331 placement=placement, user_data=user_data)
332 else:
333 return self.run_instances(
334 image_id, instance_type=instance_type,
335 min_count=min_count, max_count=max_count,
336 key_name=key_name, security_groups=security_groups,
337 placement=placement, user_data=user_data,
338 placement_group=placement_group)
339
340 - def request_spot_instances(self, price, image_id, instance_type='m1.small',
341 count=1, launch_group=None, key_name=None,
342 availability_zone_group=None,
343 security_groups=None,
344 placement=None, user_data=None):
351
352 - def run_instances(self, image_id, instance_type='m1.small', min_count=1,
353 max_count=1, key_name=None, security_groups=None,
354 placement=None, user_data=None, placement_group=None):
363
364 - def create_image(self, instance_id, name, description=None,
365 no_reboot=False):
366 return self.conn.create_image(instance_id, name,
367 description=description,
368 no_reboot=no_reboot)
369
370 - def register_image(self, name, description=None, image_location=None,
371 architecture=None, kernel_id=None, ramdisk_id=None,
372 root_device_name=None, block_device_map=None):
373 return self.conn.register_image(name=name, description=description,
374 image_location=image_location,
375 architecture=architecture,
376 kernel_id=kernel_id,
377 ramdisk_id=ramdisk_id,
378 root_device_name=root_device_name,
379 block_device_map=block_device_map)
380
382 return self.conn.delete_key_pair(name)
383
385 """
386 Create a new EC2 keypair and optionally save to output_file
387
388 Returns boto.ec2.keypair.KeyPair
389 """
390 if output_file:
391 output_dir = os.path.dirname(output_file)
392 if output_dir and not os.path.exists(output_dir):
393 raise exception.BaseException(
394 "output directory does not exist")
395 if os.path.exists(output_file):
396 raise exception.BaseException(
397 "cannot save keypair %s: file already exists" % \
398 output_file)
399 kp = self.conn.create_key_pair(name)
400 if output_file:
401 try:
402 kfile = open(output_file, 'wb')
403 kfile.write(kp.material)
404 kfile.close()
405 os.chmod(output_file, 0400)
406 except IOError, e:
407 raise exception.BaseException(str(e))
408 return kp
409
411 return self.conn.get_all_key_pairs(filters=filters)
412
422
428
430 print msg
431 print "-" * len(msg)
432
434 image_name = re.sub('\.manifest\.xml$', '',
435 img.location.split('/')[-1])
436 return image_name
437
439 try:
440 attrs = self.conn.get_instance_attribute(instance_id, 'userData')
441 user_data = attrs.get('userData', '')
442 return base64.b64decode(user_data)
443 except boto.exception.EC2ResponseError, e:
444 if e.error_code == "InvalidInstanceID.NotFound":
445 raise exception.InstanceDoesNotExist(instance_id)
446 raise e
447
459
470
472 try:
473 self.get_all_instances()
474 return True
475 except boto.exception.EC2ResponseError, e:
476 cred_errs = ['AuthFailure', 'SignatureDoesNotMatch']
477 if e.error_code in cred_errs:
478 return False
479 raise
480
482 spots = self.conn.get_all_spot_instance_requests(spot_ids,
483 filters=filters)
484 return spots
485
487 s = self.conn.get_all_spot_instance_requests()
488 if not s:
489 log.info("No spot instance requests found...")
490 return
491 spots = []
492 for spot in s:
493 if spot.state in ['closed', 'cancelled'] and not show_closed:
494 continue
495 state = spot.state or 'N/A'
496 spot_id = spot.id or 'N/A'
497 spots.append(spot_id)
498 type = spot.type
499 instance_id = spot.instance_id or 'N/A'
500 create_time = spot.create_time or 'N/A'
501 launch_group = spot.launch_group or 'N/A'
502 zone_group = spot.availability_zone_group or 'N/A'
503 price = spot.price or 'N/A'
504 lspec = spot.launch_specification
505 instance_type = lspec.instance_type
506 image_id = lspec.image_id
507 zone = lspec.placement
508 groups = ', '.join([g.id for g in lspec.groups])
509 print "id: %s" % spot_id
510 print "price: $%0.2f" % price
511 print "spot_request_type: %s" % type
512 print "state: %s" % state
513 print "instance_id: %s" % instance_id
514 print "instance_type: %s" % instance_type
515 print "image_id: %s" % image_id
516 print "zone: %s" % zone
517 print "create_time: %s" % create_time
518 print "launch_group: %s" % launch_group
519 print "zone_group: %s" % zone_group
520 print "security_groups: %s" % groups
521 print
522 if not spots:
523 log.info("No spot instance requests found...")
524
554
556 insts = self.get_all_instances()
557 if not insts:
558 log.info("No instances found")
559 return
560 tstates = ['shutting-down', 'terminated']
561 for instance in insts:
562 if not instance.state in tstates or show_terminated:
563 self.show_instance(instance)
564
565 - def list_images(self, images, sort_key=None, reverse=False):
566 def get_key(obj):
567 return ' '.join([obj.region.name, obj.location])
568 if not sort_key:
569 sort_key = get_key
570 imgs_i386 = [img for img in images if img.architecture == "i386"]
571 imgs_i386.sort(key=sort_key, reverse=reverse)
572 imgs_x86_64 = [img for img in images if img.architecture == "x86_64"]
573 imgs_x86_64.sort(key=sort_key, reverse=reverse)
574 print
575 self.__list_images("32bit Images:", imgs_i386)
576 self.__list_images("\n64bit Images:", imgs_x86_64)
577 print "\ntotal images: %d" % len(images)
578 print
579
584
589
591 counter = 0
592 self.__print_header(msg)
593 for img in imgs:
594 name = self.get_image_name(img)
595 template = "[%d] %s %s %s"
596 if img.virtualization_type == 'hvm':
597 template += ' (HVM-EBS)'
598 elif img.root_device_type == 'ebs':
599 template += ' (EBS)'
600 print template % (counter, img.id, img.region.name, name)
601 counter += 1
602
604 if pretend:
605 log.info("Pretending to remove image files...")
606 else:
607 log.info('Removing image files...')
608 files = self.get_image_files(image_name)
609 for f in files:
610 if pretend:
611 log.info("Would remove file: %s" % f.name)
612 else:
613 log.info('Removing file %s' % f.name)
614 f.delete()
615 if not pretend:
616 files = self.get_image_files(image_name)
617 if len(files) != 0:
618 log.warn('Not all files deleted, recursing...')
619 self.remove_image_files(image_name, pretend)
620
    @print_timing("Removing image")
    def remove_image(self, image_name, pretend=True, keep_image_data=True):
        """
        Deregister an AMI and, optionally, delete its backing data.

        image_name - image name/id accepted by self.get_image
        pretend - when True (the default) only log what would be done;
        nothing is deregistered or deleted
        keep_image_data - when True (the default) leave the image's S3
        files (instance-store AMIs) or root-volume snapshot (EBS AMIs)
        untouched after deregistering
        """
        img = self.get_image(image_name)
        if pretend:
            log.info('Pretending to deregister AMI: %s' % img.id)
        else:
            log.info('Deregistering AMI: %s' % img.id)
            img.deregister()
        if img.root_device_type == "instance-store" and not keep_image_data:
            # instance-store AMIs keep manifest/part files on S3
            self.remove_image_files(img, pretend=pretend)
        elif img.root_device_type == "ebs" and not keep_image_data:
            # EBS AMIs are backed by a snapshot of the root device
            rootdevtype = img.block_device_mapping.get('/dev/sda1', None)
            if rootdevtype:
                snapid = rootdevtype.snapshot_id
                if snapid:
                    snap = self.get_snapshot(snapid)
                    if pretend:
                        log.info("Would remove snapshot: %s" % snapid)
                    else:
                        log.info("Removing snapshot: %s" % snapid)
                        snap.delete()
642
644 images = self.conn.get_all_images(owners=[static.STARCLUSTER_OWNER_ID])
645 log.info("Listing all public StarCluster images...")
646 imgs = [img for img in images if img.is_public]
647
648 def sc_public_sort(obj):
649 split = obj.name.split('-')
650 osname, osversion, arch = split[2:5]
651 osversion = float(osversion)
652 rc = 0
653 if split[-1].startswith('rc'):
654 rc = int(split[-1].replace('rc', ''))
655 return (osversion, rc)
656 self.list_images(imgs, sort_key=sc_public_sort, reverse=True)
657
660
662 vol = self.get_volume(volume_id)
663 vol.delete()
664
666 keypairs = self.keypairs
667 if not keypairs:
668 log.info("No keypairs found...")
669 return
670 max_length = max([len(key.name) for key in keypairs])
671 templ = "%" + str(max_length) + "s %s"
672 for key in self.keypairs:
673 print templ % (key.name, key.fingerprint)
674
696
698 return self.conn.get_all_zones(filters=filters)
699
712
714 """
715 Return zone object respresenting an EC2 availability zone
716 Returns None if unsuccessful
717 """
718 try:
719 return self.get_zone(zone)
720 except exception.ZoneDoesNotExist:
721 pass
722
723 - def create_s3_image(self, instance_id, key_location, aws_user_id,
724 ec2_cert, ec2_private_key, bucket, image_name="image",
725 description=None, kernel_id=None, ramdisk_id=None,
726 remove_image_files=False, **kwargs):
727 """
728 Create instance-store (S3) image from running instance
729 """
730 icreator = image.S3ImageCreator(self, instance_id, key_location,
731 aws_user_id, ec2_cert,
732 ec2_private_key, bucket,
733 image_name=image_name,
734 description=description,
735 kernel_id=kernel_id,
736 ramdisk_id=ramdisk_id,
737 remove_image_files=remove_image_files)
738 return icreator.create_image()
739
740 - def create_ebs_image(self, instance_id, key_location, name,
741 description=None, snapshot_description=None,
742 kernel_id=None, ramdisk_id=None, root_vol_size=15,
743 **kwargs):
744 """
745 Create EBS-backed image from running instance
746 """
747 sdescription = snapshot_description
748 icreator = image.EBSImageCreator(self, instance_id, key_location,
749 name, description=description,
750 snapshot_description=sdescription,
751 kernel_id=kernel_id,
752 ramdisk_id=ramdisk_id,
753 **kwargs)
754 return icreator.create_image(size=root_vol_size)
755
757 return self.conn.get_all_images(filters=filters)
758
772
782
784 """
785 Returns a list of files on S3 for an EC2 instance-store (S3-backed)
786 image. This includes the image's manifest and part files.
787 """
788 if not hasattr(image, 'id'):
789 image = self.get_image(image)
790 if image.root_device_type == 'ebs':
791 raise exception.AWSError(
792 "Image %s is an EBS image. No image files on S3." % image.id)
793 bucket = self.get_image_bucket(image)
794 bname = re.escape(bucket.name)
795 prefix = re.sub('^%s\/' % bname, '', image.location)
796 prefix = re.sub('\.manifest\.xml$', '', prefix)
797 files = bucket.list(prefix=prefix)
798 manifest_regex = re.compile(r'%s\.manifest\.xml' % prefix)
799 part_regex = re.compile(r'%s\.part\.(\d*)' % prefix)
800
801
802 files = [f for f in files if hasattr(f, 'delete') and
803 part_regex.match(f.name) or manifest_regex.match(f.name)]
804 return files
805
807 bucket_name = image.location.split('/')[0]
808 return self.s3.get_bucket(bucket_name)
809
811 return image.location.split('/')[-1]
812
    @print_timing("Migrating image")
    def migrate_image(self, image_id, destbucket, migrate_manifest=False,
                      kernel_id=None, ramdisk_id=None, region=None, cert=None,
                      private_key=None):
        """
        Copy the S3 files of instance-store AMI `image_id` to
        `destbucket`.

        migrate_manifest - also rewrite the image manifest for the new
        bucket using the external `ec2-migrate-manifest` tool; requires
        cert, private_key, kernel_id and ramdisk_id to be given
        region - when given, appended as --region to both the manifest
        migration and the suggested ec2-register command

        Raises exception.AWSError for EBS-backed images (they have no S3
        files to migrate) and exception.BaseException when required
        manifest-migration arguments are missing or the external tool
        fails.
        """
        if migrate_manifest:
            # fail fast if the external tool or its inputs are missing
            utils.check_required(['ec2-migrate-manifest'])
            if not cert:
                raise exception.BaseException("no cert specified")
            if not private_key:
                raise exception.BaseException("no private_key specified")
            if not kernel_id:
                raise exception.BaseException("no kernel_id specified")
            if not ramdisk_id:
                raise exception.BaseException("no ramdisk_id specified")
        image = self.get_image(image_id)
        if image.root_device_type == "ebs":
            raise exception.AWSError(
                "The image you wish to migrate is EBS-based. " +
                "This method only works for instance-store images")
        files = self.get_image_files(image)
        if not files:
            log.info("No files found for image: %s" % image_id)
            return
        log.info("Migrating image: %s" % image_id)
        widgets = [files[0].name, progressbar.Percentage(), ' ',
                   progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
                   progressbar.ETA(), ' ', ' ']
        counter = 0
        num_files = len(files)
        pbar = progressbar.ProgressBar(widgets=widgets,
                                       maxval=num_files).start()
        for f in files:
            # widgets[0] is the progressbar label: current file + count
            widgets[0] = "%s: (%s/%s)" % (f.name, counter + 1, num_files)
            # server-side S3 copy into the destination bucket
            f.copy(destbucket, f.name)
            pbar.update(counter)
            counter += 1
        pbar.finish()
        if migrate_manifest:
            dbucket = self.s3.get_bucket(destbucket)
            manifest_key = dbucket.get_key(self.get_image_manifest(image))
            # download the copied manifest to a temp file, rewrite it with
            # ec2-migrate-manifest, then upload it back in place
            f = tempfile.NamedTemporaryFile()
            manifest_key.get_contents_to_file(f.file)
            f.file.close()
            cmd = ('ec2-migrate-manifest -c %s -k %s -m %s --kernel %s ' +
                   '--ramdisk %s --no-mapping ') % (cert, private_key,
                                                    f.name, kernel_id,
                                                    ramdisk_id)
            register_cmd = "ec2-register %s/%s" % (destbucket,
                                                   manifest_key.name)
            if region:
                cmd += '--region %s' % region
                register_cmd += " --region %s" % region
            log.info("Migrating manifest file...")
            retval = os.system(cmd)
            if retval != 0:
                raise exception.BaseException(
                    "ec2-migrate-manifest failed with status %s" % retval)
            # upload the rewritten manifest over the copied one
            f.file = open(f.name, 'r')
            manifest_key.set_contents_from_file(f.file)
            # grant read access on the manifest to the EC2 team account
            manifest_key.add_email_grant('READ', 'za-team@amazon.com')
            f.close()
            # ec2-migrate-manifest leaves a .bak copy behind; remove it
            os.unlink(f.name + '.bak')
            log.info("Manifest migrated successfully. You can now run:\n" +
                     register_cmd + "\nto register your migrated image.")
882
883 - def create_root_block_device_map(self, snapshot_id,
884 root_device_name='/dev/sda1',
885 add_ephemeral_drives=False,
886 ephemeral_drive_0='/dev/sdb1',
887 ephemeral_drive_1='/dev/sdc1',
888 ephemeral_drive_2='/dev/sdd1',
889 ephemeral_drive_3='/dev/sde1'):
890 """
891 Utility method for building a new block_device_map for a given snapshot
892 id. This is useful when creating a new image from a volume snapshot.
893 The returned block device map can be used with self.register_image
894 """
895 bmap = boto.ec2.blockdevicemapping.BlockDeviceMapping()
896 sda1 = boto.ec2.blockdevicemapping.BlockDeviceType()
897 sda1.snapshot_id = snapshot_id
898 sda1.delete_on_termination = True
899 bmap[root_device_name] = sda1
900 if add_ephemeral_drives:
901 sdb1 = boto.ec2.blockdevicemapping.BlockDeviceType()
902 sdb1.ephemeral_name = 'ephemeral0'
903 bmap[ephemeral_drive_0] = sdb1
904 sdc1 = boto.ec2.blockdevicemapping.BlockDeviceType()
905 sdc1.ephemeral_name = 'ephemeral1'
906 bmap[ephemeral_drive_1] = sdc1
907 sdd1 = boto.ec2.blockdevicemapping.BlockDeviceType()
908 sdd1.ephemeral_name = 'ephemeral2'
909 bmap[ephemeral_drive_2] = sdd1
910 sde1 = boto.ec2.blockdevicemapping.BlockDeviceType()
911 sde1.ephemeral_name = 'ephemeral3'
912 bmap[ephemeral_drive_3] = sde1
913 return bmap
914
915 @print_timing("Downloading image")
930 log.info("Downloading image: %s" % image_id)
931 for file in files:
932 widgets[0] = "%s:" % file.name
933 pbar = progressbar.ProgressBar(widgets=widgets,
934 maxval=file.size).start()
935 file.get_contents_to_filename(os.path.join(destdir, file.name),
936 cb=_dl_progress_cb)
937 pbar.finish()
938
940 """
941 Print a list of files for image_id to the screen
942 """
943 files = self.get_image_files(image_id)
944 for file in files:
945 print file.name
946
947 @property
950
951 @property
954
958
960 """
961 Returns a list of all EBS volumes
962 """
963 return self.conn.get_all_volumes(filters=filters)
964
966 """
967 Returns EBS volume object representing volume_id.
968 Raises exception.VolumeDoesNotExist if unsuccessful
969 """
970 try:
971 return self.get_volumes(filters={'volume-id': volume_id})[0]
972 except boto.exception.EC2ResponseError, e:
973 if e.error_code == "InvalidVolume.NotFound":
974 raise exception.VolumeDoesNotExist(volume_id)
975 raise
976 except IndexError:
977 raise exception.VolumeDoesNotExist(volume_id)
978
980 """
981 Returns EBS volume object representing volume_id.
982 Returns None if unsuccessful
983 """
984 try:
985 return self.get_volume(volume_id)
986 except exception.VolumeDoesNotExist:
987 pass
988
1005
1006 - def create_snapshot(self, vol, description=None, wait_for_snapshot=False,
1007 refresh_interval=30):
1013
1014 - def get_snapshots(self, volume_ids=[], filters=None, owner='self'):
1015 """
1016 Returns a list of all EBS volume snapshots
1017 """
1018 filters = filters or {}
1019 if volume_ids:
1020 filters['volume-id'] = volume_ids
1021 return self.conn.get_all_snapshots(owner=owner, filters=filters)
1022
1024 """
1025 Returns EBS snapshot object for snapshot_id.
1026
1027 Raises exception.SnapshotDoesNotExist if unsuccessful
1028 """
1029 try:
1030 return self.get_snapshots(filters={'snapshot-id': snapshot_id},
1031 owner=owner)[0]
1032 except boto.exception.EC2ResponseError, e:
1033 if e.error_code == "InvalidSnapshot.NotFound":
1034 raise exception.SnapshotDoesNotExist(snapshot_id)
1035 raise
1036 except IndexError:
1037 raise exception.SnapshotDoesNotExist(snapshot_id)
1038
    def list_volumes(self, volume_id=None, status=None, attach_status=None,
                     size=None, zone=None, snapshot_id=None,
                     show_deleted=False, tags=None, name=None):
        """
        Print a list of EBS volumes to the screen.

        Each keyword narrows the listing via an EC2 API filter:
        volume_id, size, zone, snapshot_id and attach_status match the
        corresponding volume attributes; status overrides the default
        non-deleted status filter; show_deleted additionally includes
        deleting/deleted volumes (only when status is not given); tags is
        a dict of tag filters (a falsy value matches any value for that
        key); name filters on the 'Name' tag.
        """
        filters = {}
        if status:
            filters['status'] = status
        else:
            # default: every state except deleting/deleted...
            filters['status'] = ['creating', 'available', 'in-use', 'error']
            if show_deleted:
                # ...unless the caller explicitly wants those too
                filters['status'] += ['deleting', 'deleted']
        if attach_status:
            filters['attachment.status'] = attach_status
        if volume_id:
            filters['volume-id'] = volume_id
        if size:
            filters['size'] = size
        if zone:
            filters['availability-zone'] = zone
        if snapshot_id:
            filters['snapshot-id'] = snapshot_id
        if tags:
            tagkeys = []
            for tag in tags:
                val = tags.get(tag)
                if val:
                    # key=value tag filter
                    filters["tag:%s" % tag] = val
                elif tag:
                    # bare key: match any volume carrying this tag key
                    tagkeys.append(tag)
            if tagkeys:
                filters['tag-key'] = tagkeys
        if name:
            filters['tag:Name'] = name
        vols = self.get_volumes(filters=filters)
        # display oldest volumes first
        vols.sort(key=lambda x: x.create_time)
        if vols:
            for vol in vols:
                print "volume_id: %s" % vol.id
                print "size: %sGB" % vol.size
                print "status: %s" % vol.status
                if vol.attachment_state():
                    print "attachment_status: %s" % vol.attachment_state()
                print "availability_zone: %s" % vol.zone
                if vol.snapshot_id:
                    print "snapshot_id: %s" % vol.snapshot_id
                snapshots = self.get_snapshots(volume_ids=[vol.id])
                if snapshots:
                    snap_list = ' '.join([snap.id for snap in snapshots])
                    print 'snapshots: %s' % snap_list
                if vol.create_time:
                    # convert the AWS ISO timestamp to local time for display
                    lt = utils.iso_to_localtime_tuple(vol.create_time)
                    print "create_time: %s" % lt
                tags = []
                for tag in vol.tags:
                    val = vol.tags.get(tag)
                    if val:
                        tags.append("%s=%s" % (tag, val))
                    else:
                        tags.append(tag)
                if tags:
                    print "tags: %s" % ', '.join(tags)
                print
        print 'Total: %s' % len(vols)
1104
    def get_spot_history(self, instance_type, start=None, end=None, plot=False,
                         plot_server_interface="localhost",
                         plot_launch_browser=True, plot_web_browser=None,
                         plot_shutdown_server=True):
        """
        Fetch the Linux/UNIX spot price history for `instance_type` and
        log the current, maximum and average prices.

        start, end - optional ISO timestamps bounding the history;
        exception.InvalidIsoDate is raised for malformed values
        plot - when True, serve an interactive price plot over HTTP using
        the bundled webtools template server (blocks in serve_forever
        until the server is shut down)

        Returns the history as a list of [timestamp, price] pairs, with
        timestamps converted via utils.iso_to_javascript_timestamp.

        Raises exception.SpotHistoryError when no history is returned.
        """
        if start and not utils.is_iso_time(start):
            raise exception.InvalidIsoDate(start)
        if end and not utils.is_iso_time(end):
            raise exception.InvalidIsoDate(end)
        pdesc = "Linux/UNIX"
        hist = self.conn.get_spot_price_history(start_time=start, end_time=end,
                                                instance_type=instance_type,
                                                product_description=pdesc)
        if not hist:
            raise exception.SpotHistoryError(start, end)
        dates = []
        prices = []
        data = []
        for item in hist:
            # javascript-friendly timestamps feed the browser-side plot
            timestamp = utils.iso_to_javascript_timestamp(item.timestamp)
            price = item.price
            dates.append(timestamp)
            prices.append(price)
            data.append([timestamp, price])
        maximum = max(prices)
        avg = sum(prices) / float(len(prices))
        log.info("Current price: $%.2f" % prices[-1])
        log.info("Max price: $%.2f" % maximum)
        log.info("Average price: $%.2f" % avg)
        if plot:
            # pan range: half a data-window of padding either side;
            # zoom range: from a small minimum up to the full pan span
            xaxisrange = dates[-1] - dates[0]
            xpanrange = [dates[0] - xaxisrange / 2.,
                         dates[-1] + xaxisrange / 2.]
            xzoomrange = [0.1, xpanrange[-1] - xpanrange[0]]
            minimum = min(prices)
            yaxisrange = maximum - minimum
            ypanrange = [minimum - yaxisrange / 2., maximum + yaxisrange / 2.]
            yzoomrange = [0.1, ypanrange[-1] - ypanrange[0]]
            context = dict(instance_type=instance_type,
                           start=start, end=end,
                           time_series_data=str(data),
                           shutdown=plot_shutdown_server,
                           xpanrange=xpanrange, ypanrange=ypanrange,
                           xzoomrange=xzoomrange, yzoomrange=yzoomrange)
            log.info("", extra=dict(__raw__=True))
            log.info("Starting StarCluster Webserver...")
            s = webtools.get_template_server('web', context=context,
                                             interface=plot_server_interface)
            base_url = "http://%s:%s" % s.server_address
            shutdown_url = '/'.join([base_url, 'shutdown'])
            spot_url = "http://%s:%s/spothistory.html" % s.server_address
            log.info("Server address is %s" % base_url)
            log.info("(use CTRL-C or navigate to %s to shutdown server)" %
                     shutdown_url)
            if plot_launch_browser:
                webtools.open_browser(spot_url, plot_web_browser)
            else:
                log.info("Browse to %s to view the spot history plot" %
                         spot_url)
            # blocks here until the server is stopped
            s.serve_forever()
        return data
1165
1167 instance = self.get_instance(instance_id)
1168 console_output = instance.get_console_output().output
1169 print ''.join([c for c in console_output if c in string.printable])
1170
1173 DefaultHost = 's3.amazonaws.com'
1174 _calling_format = boto.s3.connection.OrdinaryCallingFormat()
1175
1176 - def __init__(self, aws_access_key_id, aws_secret_access_key,
1177 aws_s3_path='/', aws_port=None, aws_is_secure=True,
1178 aws_s3_host=DefaultHost, **kwargs):
1179 kwargs = dict(is_secure=aws_is_secure,
1180 host=aws_s3_host or self.DefaultHost,
1181 port=aws_port,
1182 path=aws_s3_path)
1183 if aws_s3_host:
1184 kwargs.update(dict(calling_format=self._calling_format))
1185 super(EasyS3, self).__init__(aws_access_key_id, aws_secret_access_key,
1186 boto.connect_s3, **kwargs)
1187
1189 return '<EasyS3: %s>' % self.conn.server_name()
1190
1192 """
1193 Create a new bucket on S3. bucket_name must be unique, the bucket
1194 namespace is shared by all AWS users
1195 """
1196 bucket_name = bucket_name.split('/')[0]
1197 try:
1198 return self.conn.create_bucket(bucket_name)
1199 except boto.exception.S3CreateError, e:
1200 if e.error_code == "BucketAlreadyExists":
1201 raise exception.BucketAlreadyExists(bucket_name)
1202 raise
1203
1212
1214 """
1215 Returns bucket object representing S3 bucket
1216 Returns None if unsuccessful
1217 """
1218 try:
1219 return self.get_bucket(bucket_name)
1220 except exception.BucketDoesNotExist:
1221 pass
1222
1224 """
1225 Returns bucket object representing S3 bucket
1226 """
1227 try:
1228 return self.conn.get_bucket(bucketname)
1229 except boto.exception.S3ResponseError, e:
1230 if e.error_code == "NoSuchBucket":
1231 raise exception.BucketDoesNotExist(bucketname)
1232 raise
1233
1235 bucket = self.get_bucket(bucketname)
1236 for file in bucket.list():
1237 if file.name:
1238 print file.name
1239
1241 try:
1242 buckets = self.conn.get_all_buckets()
1243 except TypeError:
1244
1245 raise exception.AWSError("AWS credentials are not valid")
1246 return buckets
1247
1251
1256
if __name__ == "__main__":
    # Ad-hoc smoke test: build an EasyEC2 from the StarCluster config and
    # dump all instances and registered images to the console.
    from starcluster.config import get_easy_ec2
    ec2 = get_easy_ec2()
    ec2.list_all_instances()
    ec2.list_registered_images()
1262