1
2 """
3 EC2/S3 Utility Classes
4 """
5
6 import os
7 import re
8 import time
9 import base64
10 import string
11 import tempfile
12
13 import boto
14 import boto.ec2
15 import boto.s3.connection
16
17 from starcluster import image
18 from starcluster import utils
19 from starcluster import static
20 from starcluster import webtools
21 from starcluster import exception
22 from starcluster import progressbar
23 from starcluster.utils import print_timing
24 from starcluster.logger import log
    def __init__(self, aws_access_key_id, aws_secret_access_key,
                 connection_authenticator, **kwargs):
        """
        Create an EasyAWS object.

        Requires aws_access_key_id/aws_secret_access_key from an Amazon Web
        Services (AWS) account and a connection_authenticator function that
        returns an authenticated AWS connection object

        Providing only the keys will default to using Amazon EC2

        kwargs are passed to the connection_authenticator's constructor
        """
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        # factory callable (e.g. boto.connect_ec2 / boto.connect_s3) invoked
        # lazily the first time a connection is requested
        self.connection_authenticator = connection_authenticator
        # cached connection object; populated on first use
        self._conn = None
        # extra keyword args forwarded to connection_authenticator
        self._kwargs = kwargs
46
48 self._conn = None
49 return self.conn
50
51 @property
53 if self._conn is None:
54 log.debug('creating self._conn w/ connection_authenticator ' +
55 'kwargs = %s' % self._kwargs)
56 self._conn = self.connection_authenticator(
57 self.aws_access_key_id, self.aws_secret_access_key,
58 **self._kwargs)
59 return self._conn
60
63 - def __init__(self, aws_access_key_id, aws_secret_access_key,
64 aws_ec2_path='/', aws_s3_host=None, aws_s3_path='/',
65 aws_port=None, aws_region_name=None, aws_is_secure=True,
66 aws_region_host=None, **kwargs):
67 aws_region = None
68 if aws_region_name and aws_region_host:
69 aws_region = boto.ec2.regioninfo.RegionInfo(
70 name=aws_region_name, endpoint=aws_region_host)
71 kwargs = dict(is_secure=aws_is_secure, region=aws_region,
72 port=aws_port, path=aws_ec2_path)
73 super(EasyEC2, self).__init__(aws_access_key_id, aws_secret_access_key,
74 boto.connect_ec2, **kwargs)
75 kwargs = dict(aws_s3_host=aws_s3_host,
76 aws_s3_path=aws_s3_path,
77 aws_port=aws_port,
78 aws_is_secure=aws_is_secure)
79 self.s3 = EasyS3(aws_access_key_id, aws_secret_access_key, **kwargs)
80 self._regions = None
81
83 return '<EasyEC2: %s (%s)>' % (self.region.name, self.region.endpoint)
84
86 """
87 Connects to a given region if it exists, raises RegionDoesNotExist
88 otherwise. Once connected, this object will return only data from the
89 given region.
90 """
91 region = self.get_region(region_name)
92 self._kwargs['region'] = region
93 self.reload()
94 return self
95
96 @property
98 """
99 Returns the current EC2 region used by this EasyEC2 object
100 """
101 return self.conn.region
102
103 @property
105 """
106 This property returns all AWS Regions, caching the results the first
107 time a request is made to Amazon
108 """
109 if not self._regions:
110 self._regions = {}
111 regions = self.conn.get_all_regions()
112 for region in regions:
113 self._regions[region.name] = region
114 return self._regions
115
117 """
118 Returns boto Region object if it exists, raises RegionDoesNotExist
119 otherwise.
120 """
121 if not region_name in self.regions:
122 raise exception.RegionDoesNotExist(region_name)
123 return self.regions.get(region_name)
124
126 """
127 Print name/endpoint for all AWS regions
128 """
129 for r in self.regions:
130 region = self.regions.get(r)
131 print 'name: ', region.name
132 print 'endpoint: ', region.endpoint
133 print
134
135 @property
137 return self.conn.get_all_images(owners=["self"])
138
139 @property
141 return self.conn.get_all_images(executable_by=["self"])
142
149
150 - def create_group(self, name, description, auth_ssh=False,
151 auth_group_traffic=False):
152 """
153 Create security group with name/description. auth_ssh=True
154 will open port 22 to world (0.0.0.0/0). auth_group_traffic
155 will allow all traffic between instances in the same security
156 group
157 """
158 if not name:
159 return None
160 log.info("Creating security group %s..." % name)
161 sg = self.conn.create_security_group(name, description)
162 if auth_ssh:
163 ssh_port = static.DEFAULT_SSH_PORT
164 sg.authorize('tcp', ssh_port, ssh_port, static.WORLD_CIDRIP)
165 if auth_group_traffic:
166 sg.authorize('icmp', -1, -1,
167 src_group=self.get_group_or_none(name))
168 sg.authorize('tcp', 1, 65535,
169 src_group=self.get_group_or_none(name))
170 sg.authorize('udp', 1, 65535,
171 src_group=self.get_group_or_none(name))
172 return sg
173
175 """
176 Returns all security groups
177
178 groupnames - optional list of group names to retrieve
179 """
180 filters = {}
181 if groupnames:
182 filters = {'group-name': groupnames}
183 return self.get_security_groups(filters=filters)
184
193
194 - def get_or_create_group(self, name, description, auth_ssh=True,
195 auth_group_traffic=False):
196 """
197 Try to return a security group by name. If the group is not found,
198 attempt to create it. Description only applies to creation.
199
200 auth_ssh - authorize ssh traffic from world
201 auth_group_traffic - authorizes all traffic between members of the
202 group
203 """
204 sg = self.get_group_or_none(name)
205 if not sg:
206 sg = self.create_group(name, description, auth_ssh,
207 auth_group_traffic)
208 return sg
209
220
226
229 """
230 Returns the rule with the specified port range permission (ip_protocol,
231 from_port, to_port, cidr_ip) defined or None if no such rule exists
232 """
233 for rule in group.rules:
234 if rule.ip_protocol != ip_protocol:
235 continue
236 if int(rule.from_port) != from_port:
237 continue
238 if int(rule.to_port) != to_port:
239 continue
240 if cidr_ip:
241 cidr_grants = [g for g in rule.grants if g.cidr_ip == cidr_ip]
242 if not cidr_grants:
243 continue
244 return rule
245
246 - def has_permission(self, group, ip_protocol, from_port, to_port, cidr_ip):
247 """
248 Checks whether group has the specified port range permission
249 (ip_protocol, from_port, to_port, cidr_ip) defined
250 """
251 for rule in group.rules:
252 if rule.ip_protocol != ip_protocol:
253 continue
254 if int(rule.from_port) != from_port:
255 continue
256 if int(rule.to_port) != to_port:
257 continue
258 cidr_grants = [g for g in rule.grants if g.cidr_ip == cidr_ip]
259 if not cidr_grants:
260 continue
261 return True
262 return False
263
265 """
266 Create a new placement group for your account.
267 This will create the placement group within the region you
268 are currently connected to.
269 """
270 log.info("Creating placement group %s..." % name)
271 success = self.conn.create_placement_group(name)
272 if success:
273 return self.get_placement_group_or_none(name)
274
276 return self.conn.get_all_placement_groups(filters=filters)
277
288
297
308
309 - def request_instances(self, image_id, price=None, instance_type='m1.small',
310 min_count=1, max_count=1, count=1, key_name=None,
311 security_groups=None, launch_group=None,
312 availability_zone_group=None, placement=None,
313 user_data=None, placement_group=None):
314 """
315 Convenience method for running spot or flat-rate instances
316 """
317 if price:
318 return self.request_spot_instances(
319 price, image_id, instance_type=instance_type,
320 count=count, launch_group=launch_group, key_name=key_name,
321 security_groups=security_groups,
322 availability_zone_group=availability_zone_group,
323 placement=placement, user_data=user_data)
324 else:
325 return self.run_instances(
326 image_id, instance_type=instance_type,
327 min_count=min_count, max_count=max_count,
328 key_name=key_name, security_groups=security_groups,
329 placement=placement, user_data=user_data,
330 placement_group=placement_group)
331
332 - def request_spot_instances(self, price, image_id, instance_type='m1.small',
333 count=1, launch_group=None, key_name=None,
334 availability_zone_group=None,
335 security_groups=None,
336 placement=None, user_data=None):
343
344 - def run_instances(self, image_id, instance_type='m1.small', min_count=1,
345 max_count=1, key_name=None, security_groups=None,
346 placement=None, user_data=None, placement_group=None):
355
356 - def create_image(self, instance_id, name, description=None,
357 no_reboot=False):
358 return self.conn.create_image(instance_id, name,
359 description=description,
360 no_reboot=no_reboot)
361
362 - def register_image(self, name, description=None, image_location=None,
363 architecture=None, kernel_id=None, ramdisk_id=None,
364 root_device_name=None, block_device_map=None):
365 return self.conn.register_image(name=name, description=description,
366 image_location=image_location,
367 architecture=architecture,
368 kernel_id=kernel_id,
369 ramdisk_id=ramdisk_id,
370 root_device_name=root_device_name,
371 block_device_map=block_device_map)
372
374 return self.conn.delete_key_pair(name)
375
377 """
378 Create a new EC2 keypair and optionally save to output_file
379
380 Returns boto.ec2.keypair.KeyPair
381 """
382 if output_file:
383 output_dir = os.path.dirname(output_file)
384 if output_dir and not os.path.exists(output_dir):
385 raise exception.BaseException(
386 "output directory does not exist")
387 if os.path.exists(output_file):
388 raise exception.BaseException(
389 "cannot save keypair %s: file already exists" % \
390 output_file)
391 kp = self.conn.create_key_pair(name)
392 if output_file:
393 try:
394 kfile = open(output_file, 'wb')
395 kfile.write(kp.material)
396 kfile.close()
397 os.chmod(output_file, 0400)
398 except IOError, e:
399 raise exception.BaseException(str(e))
400 return kp
401
403 return self.conn.get_all_key_pairs(filters=filters)
404
414
420
422 print msg
423 print "-" * len(msg)
424
426 image_name = re.sub('\.manifest\.xml$', '',
427 img.location.split('/')[-1])
428 return image_name
429
431 try:
432 attrs = self.conn.get_instance_attribute(instance_id, 'userData')
433 user_data = attrs.get('userData', '')
434 return base64.b64decode(user_data)
435 except boto.exception.EC2ResponseError, e:
436 if e.error_code == "InvalidInstanceID.NotFound":
437 raise exception.InstanceDoesNotExist(instance_id)
438 raise e
439
451
462
464 try:
465 self.get_all_instances()
466 return True
467 except boto.exception.EC2ResponseError, e:
468 cred_errs = ['AuthFailure', 'SignatureDoesNotMatch']
469 if e.error_code in cred_errs:
470 return False
471 raise
472
474 spots = self.conn.get_all_spot_instance_requests(spot_ids,
475 filters=filters)
476 return spots
477
479 s = self.conn.get_all_spot_instance_requests()
480 if not s:
481 log.info("No spot instance requests found...")
482 return
483 spots = []
484 for spot in s:
485 if spot.state in ['closed', 'cancelled'] and not show_closed:
486 continue
487 state = spot.state or 'N/A'
488 spot_id = spot.id or 'N/A'
489 spots.append(spot_id)
490 type = spot.type
491 instance_id = spot.instance_id or 'N/A'
492 create_time = spot.create_time or 'N/A'
493 launch_group = spot.launch_group or 'N/A'
494 zone_group = spot.availability_zone_group or 'N/A'
495 price = spot.price or 'N/A'
496 lspec = spot.launch_specification
497 instance_type = lspec.instance_type
498 image_id = lspec.image_id
499 zone = lspec.placement
500 groups = ', '.join([g.id for g in lspec.groups])
501 print "id: %s" % spot_id
502 print "price: $%0.2f" % price
503 print "spot_request_type: %s" % type
504 print "state: %s" % state
505 print "instance_id: %s" % instance_id
506 print "instance_type: %s" % instance_type
507 print "image_id: %s" % image_id
508 print "zone: %s" % zone
509 print "create_time: %s" % create_time
510 print "launch_group: %s" % launch_group
511 print "zone_group: %s" % zone_group
512 print "security_groups: %s" % groups
513 print
514 if not spots:
515 log.info("No spot instance requests found...")
516
556
557 - def list_images(self, images, sort_key=None, reverse=False):
558 def get_key(obj):
559 return ' '.join([obj.region.name, obj.location])
560 if not sort_key:
561 sort_key = get_key
562 imgs_i386 = [img for img in images if img.architecture == "i386"]
563 imgs_i386.sort(key=sort_key, reverse=reverse)
564 imgs_x86_64 = [img for img in images if img.architecture == "x86_64"]
565 imgs_x86_64.sort(key=sort_key, reverse=reverse)
566 print
567 self.__list_images("32bit Images:", imgs_i386)
568 self.__list_images("\n64bit Images:", imgs_x86_64)
569 print "\ntotal images: %d" % len(images)
570 print
571
576
581
583 counter = 0
584 self.__print_header(msg)
585 for img in imgs:
586 name = self.get_image_name(img)
587 template = "[%d] %s %s %s"
588 if img.virtualization_type == 'hvm':
589 template += ' (HVM-EBS)'
590 elif img.root_device_type == 'ebs':
591 template += ' (EBS)'
592 print template % (counter, img.id, img.region.name, name)
593 counter += 1
594
596 if pretend:
597 log.info("Pretending to remove image files...")
598 else:
599 log.info('Removing image files...')
600 files = self.get_image_files(image_name)
601 for f in files:
602 if pretend:
603 log.info("Would remove file: %s" % f.name)
604 else:
605 log.info('Removing file %s' % f.name)
606 f.delete()
607 if not pretend:
608 files = self.get_image_files(image_name)
609 if len(files) != 0:
610 log.warn('Not all files deleted, recursing...')
611 self.remove_image_files(image_name, pretend)
612
613 @print_timing("Removing image")
614 - def remove_image(self, image_name, pretend=True, keep_image_data=True):
615 img = self.get_image(image_name)
616 if pretend:
617 log.info('Pretending to deregister AMI: %s' % img.id)
618 else:
619 log.info('Deregistering AMI: %s' % img.id)
620 img.deregister()
621 if img.root_device_type == "instance-store" and not keep_image_data:
622 self.remove_image_files(img, pretend=pretend)
623 elif img.root_device_type == "ebs" and not keep_image_data:
624 rootdevtype = img.block_device_mapping.get('/dev/sda1', None)
625 if rootdevtype:
626 snapid = rootdevtype.snapshot_id
627 if snapid:
628 snap = self.get_snapshot(snapid)
629 if pretend:
630 log.info("Would remove snapshot: %s" % snapid)
631 else:
632 log.info("Removing snapshot: %s" % snapid)
633 snap.delete()
634
636 images = self.conn.get_all_images(owners=[static.STARCLUSTER_OWNER_ID])
637 log.info("Listing all public StarCluster images...")
638 imgs = [img for img in images if img.is_public]
639
640 def sc_public_sort(obj):
641 split = obj.name.split('-')
642 osname, osversion, arch = split[2:5]
643 osversion = float(osversion)
644 rc = 0
645 if split[-1].startswith('rc'):
646 rc = int(split[-1].replace('rc', ''))
647 return (osversion, rc)
648 self.list_images(imgs, sort_key=sc_public_sort, reverse=True)
649
652
654 vol = self.get_volume(volume_id)
655 vol.delete()
656
658 max_length = max([len(key.name) for key in self.keypairs])
659 templ = "%" + str(max_length) + "s %s"
660 for key in self.keypairs:
661 print templ % (key.name, key.fingerprint)
662
684
686 return self.conn.get_all_zones(filters=filters)
687
700
702 """
703 Return zone object respresenting an EC2 availability zone
704 Returns None if unsuccessful
705 """
706 try:
707 return self.get_zone(zone)
708 except exception.ZoneDoesNotExist:
709 pass
710
711 - def create_s3_image(self, instance_id, key_location, aws_user_id,
712 ec2_cert, ec2_private_key, bucket, image_name="image",
713 description=None, kernel_id=None, ramdisk_id=None,
714 remove_image_files=False, **kwargs):
715 """
716 Create instance-store (S3) image from running instance
717 """
718 icreator = image.S3ImageCreator(self, instance_id, key_location,
719 aws_user_id, ec2_cert,
720 ec2_private_key, bucket,
721 image_name=image_name,
722 description=description,
723 kernel_id=kernel_id,
724 ramdisk_id=ramdisk_id,
725 remove_image_files=remove_image_files)
726 return icreator.create_image()
727
728 - def create_ebs_image(self, instance_id, key_location, name,
729 description=None, snapshot_description=None,
730 kernel_id=None, ramdisk_id=None, root_vol_size=15,
731 **kwargs):
732 """
733 Create EBS-backed image from running instance
734 """
735 sdescription = snapshot_description
736 icreator = image.EBSImageCreator(self, instance_id, key_location,
737 name, description=description,
738 snapshot_description=sdescription,
739 kernel_id=kernel_id,
740 ramdisk_id=ramdisk_id,
741 **kwargs)
742 return icreator.create_image(size=root_vol_size)
743
745 return self.conn.get_all_images(filters=filters)
746
760
770
772 """
773 Returns a list of files on S3 for an EC2 instance-store (S3-backed)
774 image. This includes the image's manifest and part files.
775 """
776 if not hasattr(image, 'id'):
777 image = self.get_image(image)
778 if image.root_device_type == 'ebs':
779 raise exception.AWSError(
780 "Image %s is an EBS image. No image files on S3." % image.id)
781 bucket = self.get_image_bucket(image)
782 bname = re.escape(bucket.name)
783 prefix = re.sub('^%s\/' % bname, '', image.location)
784 prefix = re.sub('\.manifest\.xml$', '', prefix)
785 files = bucket.list(prefix=prefix)
786 manifest_regex = re.compile(r'%s\.manifest\.xml' % prefix)
787 part_regex = re.compile(r'%s\.part\.(\d*)' % prefix)
788
789
790 files = [f for f in files if hasattr(f, 'delete') and
791 part_regex.match(f.name) or manifest_regex.match(f.name)]
792 return files
793
795 bucket_name = image.location.split('/')[0]
796 return self.s3.get_bucket(bucket_name)
797
799 return image.location.split('/')[-1]
800
    @print_timing("Migrating image")
    def migrate_image(self, image_id, destbucket, migrate_manifest=False,
                      kernel_id=None, ramdisk_id=None, region=None, cert=None,
                      private_key=None):
        """
        Copy an instance-store image's S3 files (manifest + parts) from
        their current bucket to *destbucket*.

        migrate_manifest - also rewrite the manifest for the given
        kernel/ramdisk (and optionally region) using the external
        'ec2-migrate-manifest' tool; requires cert, private_key,
        kernel_id and ramdisk_id.

        Raises AWSError for EBS-backed images and BaseException when a
        required manifest-migration argument is missing or the external
        tool exits non-zero.
        """
        if migrate_manifest:
            # validate everything ec2-migrate-manifest will need up front
            utils.check_required(['ec2-migrate-manifest'])
            if not cert:
                raise exception.BaseException("no cert specified")
            if not private_key:
                raise exception.BaseException("no private_key specified")
            if not kernel_id:
                raise exception.BaseException("no kernel_id specified")
            if not ramdisk_id:
                raise exception.BaseException("no ramdisk_id specified")
        image = self.get_image(image_id)
        if image.root_device_type == "ebs":
            raise exception.AWSError(
                "The image you wish to migrate is EBS-based. " +
                "This method only works for instance-store images")
        files = self.get_image_files(image)
        if not files:
            log.info("No files found for image: %s" % image_id)
            return
        log.info("Migrating image: %s" % image_id)
        # widgets[0] is the progress bar label, updated per-file below
        widgets = [files[0].name, progressbar.Percentage(), ' ',
                   progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
                   progressbar.ETA(), ' ', ' ']
        counter = 0
        num_files = len(files)
        pbar = progressbar.ProgressBar(widgets=widgets,
                                       maxval=num_files).start()
        for f in files:
            widgets[0] = "%s: (%s/%s)" % (f.name, counter + 1, num_files)
            # server-side S3 copy; nothing is downloaded locally
            f.copy(destbucket, f.name)
            pbar.update(counter)
            counter += 1
        pbar.finish()
        if migrate_manifest:
            dbucket = self.s3.get_bucket(destbucket)
            manifest_key = dbucket.get_key(self.get_image_manifest(image))
            # download the manifest to a temp file so the external tool can
            # rewrite it in place
            f = tempfile.NamedTemporaryFile()
            manifest_key.get_contents_to_file(f.file)
            f.file.close()
            cmd = ('ec2-migrate-manifest -c %s -k %s -m %s --kernel %s ' +
                   '--ramdisk %s --no-mapping ') % (cert, private_key,
                                                    f.name, kernel_id,
                                                    ramdisk_id)
            register_cmd = "ec2-register %s/%s" % (destbucket,
                                                   manifest_key.name)
            if region:
                cmd += '--region %s' % region
                register_cmd += " --region %s" % region
            log.info("Migrating manifest file...")
            retval = os.system(cmd)
            if retval != 0:
                raise exception.BaseException(
                    "ec2-migrate-manifest failed with status %s" % retval)
            # upload the rewritten manifest back over the original key and
            # grant Amazon's za-team read access on it
            f.file = open(f.name, 'r')
            manifest_key.set_contents_from_file(f.file)
            manifest_key.add_email_grant('READ', 'za-team@amazon.com')
            f.close()
            # NOTE(review): assumes ec2-migrate-manifest leaves a '.bak'
            # copy of the original manifest next to the temp file
            os.unlink(f.name + '.bak')
            log.info("Manifest migrated successfully. You can now run:\n" +
                     register_cmd + "\nto register your migrated image.")
870
871 - def create_root_block_device_map(self, snapshot_id,
872 root_device_name='/dev/sda1',
873 add_ephemeral_drives=False,
874 ephemeral_drive_0='/dev/sdb1',
875 ephemeral_drive_1='/dev/sdc1',
876 ephemeral_drive_2='/dev/sdd1',
877 ephemeral_drive_3='/dev/sde1'):
878 """
879 Utility method for building a new block_device_map for a given snapshot
880 id. This is useful when creating a new image from a volume snapshot.
881 The returned block device map can be used with self.register_image
882 """
883 bmap = boto.ec2.blockdevicemapping.BlockDeviceMapping()
884 sda1 = boto.ec2.blockdevicemapping.BlockDeviceType()
885 sda1.snapshot_id = snapshot_id
886 sda1.delete_on_termination = True
887 bmap[root_device_name] = sda1
888 if add_ephemeral_drives:
889 sdb1 = boto.ec2.blockdevicemapping.BlockDeviceType()
890 sdb1.ephemeral_name = 'ephemeral0'
891 bmap[ephemeral_drive_0] = sdb1
892 sdc1 = boto.ec2.blockdevicemapping.BlockDeviceType()
893 sdc1.ephemeral_name = 'ephemeral1'
894 bmap[ephemeral_drive_1] = sdc1
895 sdd1 = boto.ec2.blockdevicemapping.BlockDeviceType()
896 sdd1.ephemeral_name = 'ephemeral2'
897 bmap[ephemeral_drive_2] = sdd1
898 sde1 = boto.ec2.blockdevicemapping.BlockDeviceType()
899 sde1.ephemeral_name = 'ephemeral3'
900 bmap[ephemeral_drive_3] = sde1
901 return bmap
902
903 @print_timing("Downloading image")
918 log.info("Downloading image: %s" % image_id)
919 for file in files:
920 widgets[0] = "%s:" % file.name
921 pbar = progressbar.ProgressBar(widgets=widgets,
922 maxval=file.size).start()
923 file.get_contents_to_filename(os.path.join(destdir, file.name),
924 cb=_dl_progress_cb)
925 pbar.finish()
926
928 """
929 Print a list of files for image_id to the screen
930 """
931 files = self.get_image_files(image_id)
932 for file in files:
933 print file.name
934
935 @property
938
939 @property
942
946
948 """
949 Returns a list of all EBS volumes
950 """
951 return self.conn.get_all_volumes(filters=filters)
952
954 """
955 Returns EBS volume object representing volume_id.
956 Raises exception.VolumeDoesNotExist if unsuccessful
957 """
958 try:
959 return self.get_volumes(filters={'volume-id': volume_id})[0]
960 except boto.exception.EC2ResponseError, e:
961 if e.error_code == "InvalidVolume.NotFound":
962 raise exception.VolumeDoesNotExist(volume_id)
963 raise
964 except IndexError:
965 raise exception.VolumeDoesNotExist(volume_id)
966
968 """
969 Returns EBS volume object representing volume_id.
970 Returns None if unsuccessful
971 """
972 try:
973 return self.get_volume(volume_id)
974 except exception.VolumeDoesNotExist:
975 pass
976
993
994 - def create_snapshot(self, vol, description=None, wait_for_snapshot=False,
995 refresh_interval=30):
1001
1003 """
1004 Returns a list of all EBS volume snapshots for this account
1005 """
1006 filters = filters or {}
1007 if volume_ids:
1008 filters['volume-id'] = volume_ids
1009 return self.conn.get_all_snapshots(owner='self', filters=filters)
1010
1012 """
1013 Returns EBS snapshot object representing snapshot_id.
1014 Raises exception.SnapshotDoesNotExist if unsuccessful
1015 """
1016 try:
1017 return self.get_snapshots(filters={'snapshot-id': snapshot_id})[0]
1018 except boto.exception.EC2ResponseError, e:
1019 if e.error_code == "InvalidSnapshot.NotFound":
1020 raise exception.SnapshotDoesNotExist(snapshot_id)
1021 raise
1022 except IndexError:
1023 raise exception.SnapshotDoesNotExist(snapshot_id)
1024
1025 - def list_volumes(self, volume_id=None, status=None,
1026 attach_status=None, size=None, zone=None,
1027 snapshot_id=None, show_deleted=False):
1028 """
1029 Print a list of volumes to the screen
1030 """
1031 filters = {}
1032 if status:
1033 filters['status'] = status
1034 else:
1035 filters['status'] = ['creating', 'available', 'in-use', 'error']
1036 if show_deleted:
1037 filters['status'] += ['deleting', 'deleted']
1038 if attach_status:
1039 filters['attachment.status'] = attach_status
1040 if volume_id:
1041 filters['volume-id'] = volume_id
1042 if size:
1043 filters['size'] = size
1044 if zone:
1045 filters['availability-zone'] = zone
1046 if snapshot_id:
1047 filters['snapshot-id'] = snapshot_id
1048 vols = self.get_volumes(filters=filters)
1049 vols.sort(key=lambda x: x.create_time)
1050 if vols:
1051 for vol in vols:
1052 print "volume_id: %s" % vol.id
1053 print "size: %sGB" % vol.size
1054 print "status: %s" % vol.status
1055 if vol.attachment_state():
1056 print "attachment_status: %s" % vol.attachment_state()
1057 print "availability_zone: %s" % vol.zone
1058 if vol.snapshot_id:
1059 print "snapshot_id: %s" % vol.snapshot_id
1060 snapshots = self.get_snapshots(volume_ids=[vol.id])
1061 if snapshots:
1062 snap_list = ' '.join([snap.id for snap in snapshots])
1063 print 'snapshots: %s' % snap_list
1064 if vol.create_time:
1065 lt = utils.iso_to_localtime_tuple(vol.create_time)
1066 print "create_time: %s" % lt
1067 print
1068 print 'Total: %s' % len(vols)
1069
    def get_spot_history(self, instance_type, start=None, end=None, plot=False,
                         plot_server_interface="localhost",
                         plot_launch_browser=True, plot_web_browser=None,
                         plot_shutdown_server=True):
        """
        Fetch the Linux/UNIX spot price history for *instance_type*.

        start/end - optional ISO-8601 timestamps bounding the query;
        InvalidIsoDate is raised when either is malformed.
        plot - when True, serve an interactive plot of the history from a
        local webserver (optionally launching a browser at it); this call
        then blocks until the server shuts down.

        Logs the current/max/average price and returns the history as a
        list of [javascript_timestamp, price] pairs. Raises
        SpotHistoryError when EC2 returns no history for the window.
        """
        if start and not utils.is_iso_time(start):
            raise exception.InvalidIsoDate(start)
        if end and not utils.is_iso_time(end):
            raise exception.InvalidIsoDate(end)
        pdesc = "Linux/UNIX"
        hist = self.conn.get_spot_price_history(start_time=start, end_time=end,
                                                instance_type=instance_type,
                                                product_description=pdesc)
        if not hist:
            raise exception.SpotHistoryError(start, end)
        dates = []
        prices = []
        data = []
        for item in hist:
            # convert EC2's ISO timestamps to javascript (millisecond)
            # timestamps for the plotting frontend
            timestamp = utils.iso_to_javascript_timestamp(item.timestamp)
            price = item.price
            dates.append(timestamp)
            prices.append(price)
            data.append([timestamp, price])
        maximum = max(prices)
        avg = sum(prices) / float(len(prices))
        log.info("Current price: $%.2f" % prices[-1])
        log.info("Max price: $%.2f" % maximum)
        log.info("Average price: $%.2f" % avg)
        if plot:
            # pan range: half a data-window of padding on either side;
            # zoom range: from a small slice up to the full pan range
            xaxisrange = dates[-1] - dates[0]
            xpanrange = [dates[0] - xaxisrange / 2.,
                         dates[-1] + xaxisrange / 2.]
            xzoomrange = [0.1, xpanrange[-1] - xpanrange[0]]
            minimum = min(prices)
            yaxisrange = maximum - minimum
            ypanrange = [minimum - yaxisrange / 2., maximum + yaxisrange / 2.]
            yzoomrange = [0.1, ypanrange[-1] - ypanrange[0]]
            # template context consumed by the 'web' template server
            context = dict(instance_type=instance_type,
                           start=start, end=end,
                           time_series_data=str(data),
                           shutdown=plot_shutdown_server,
                           xpanrange=xpanrange, ypanrange=ypanrange,
                           xzoomrange=xzoomrange, yzoomrange=yzoomrange)
            log.info("", extra=dict(__raw__=True))
            log.info("Starting StarCluster Webserver...")
            s = webtools.get_template_server('web', context=context,
                                             interface=plot_server_interface)
            base_url = "http://%s:%s" % s.server_address
            shutdown_url = '/'.join([base_url, 'shutdown'])
            spot_url = "http://%s:%s/spothistory.html" % s.server_address
            log.info("Server address is %s" % base_url)
            log.info("(use CTRL-C or navigate to %s to shutdown server)" %
                     shutdown_url)
            if plot_launch_browser:
                webtools.open_browser(spot_url, plot_web_browser)
            else:
                log.info("Browse to %s to view the spot history plot" %
                         spot_url)
            # blocks here until the webserver is shut down
            s.serve_forever()
        return data
1130
1132 instance = self.get_instance(instance_id)
1133 console_output = instance.get_console_output().output
1134 print ''.join([c for c in console_output if c in string.printable])
1135
    DefaultHost = 's3.amazonaws.com'  # default S3 API endpoint
    # calling format applied when a custom aws_s3_host is supplied
    _calling_format = boto.s3.connection.OrdinaryCallingFormat()
1140
1141 - def __init__(self, aws_access_key_id, aws_secret_access_key,
1142 aws_s3_path='/', aws_port=None, aws_is_secure=True,
1143 aws_s3_host=DefaultHost, **kwargs):
1144 kwargs = dict(is_secure=aws_is_secure,
1145 host=aws_s3_host or self.DefaultHost,
1146 port=aws_port,
1147 path=aws_s3_path)
1148 if aws_s3_host:
1149 kwargs.update(dict(calling_format=self._calling_format))
1150 super(EasyS3, self).__init__(aws_access_key_id, aws_secret_access_key,
1151 boto.connect_s3, **kwargs)
1152
1154 return '<EasyS3: %s>' % self.conn.server_name()
1155
1157 """
1158 Create a new bucket on S3. bucket_name must be unique, the bucket
1159 namespace is shared by all AWS users
1160 """
1161 bucket_name = bucket_name.split('/')[0]
1162 try:
1163 return self.conn.create_bucket(bucket_name)
1164 except boto.exception.S3CreateError, e:
1165 if e.error_code == "BucketAlreadyExists":
1166 raise exception.BucketAlreadyExists(bucket_name)
1167 raise
1168
1177
1179 """
1180 Returns bucket object representing S3 bucket
1181 Returns None if unsuccessful
1182 """
1183 try:
1184 return self.get_bucket(bucket_name)
1185 except exception.BucketDoesNotExist:
1186 pass
1187
1189 """
1190 Returns bucket object representing S3 bucket
1191 """
1192 try:
1193 return self.conn.get_bucket(bucketname)
1194 except boto.exception.S3ResponseError, e:
1195 if e.error_code == "NoSuchBucket":
1196 raise exception.BucketDoesNotExist(bucketname)
1197 raise
1198
1200 bucket = self.get_bucket(bucketname)
1201 for file in bucket.list():
1202 if file.name:
1203 print file.name
1204
1206 try:
1207 buckets = self.conn.get_all_buckets()
1208 except TypeError:
1209
1210 raise exception.AWSError("AWS credentials are not valid")
1211 return buckets
1212
1216
1221
if __name__ == "__main__":
    # ad-hoc smoke test: build an EasyEC2 from the StarCluster config and
    # dump instances/registered images (requires valid AWS credentials)
    from starcluster.config import get_easy_ec2
    ec2 = get_easy_ec2()
    ec2.list_all_instances()
    ec2.list_registered_images()
1227