1
2 """
3 EC2/S3 Utility Classes
4 """
5
6 import os
7 import re
8 import time
9 import base64
10 import string
11 import tempfile
12
13 import boto
14 import boto.ec2
15 import boto.s3.connection
16
17 from starcluster import image
18 from starcluster import utils
19 from starcluster import static
20 from starcluster import exception
21 from starcluster import progressbar
22 from starcluster.utils import print_timing
23 from starcluster.logger import log
def __init__(self, aws_access_key_id, aws_secret_access_key,
             connection_authenticator, **kwargs):
    """
    Create an EasyAWS object.

    Requires aws_access_key_id/aws_secret_access_key from an Amazon Web
    Services (AWS) account and a connection_authenticator function that
    returns an authenticated AWS connection object

    Providing only the keys will default to using Amazon EC2

    kwargs are passed to the connection_authenticator's constructor
    """
    self.aws_access_key_id = aws_access_key_id
    self.aws_secret_access_key = aws_secret_access_key
    # Callable (e.g. boto.connect_ec2) invoked lazily by the `conn`
    # property; not called here, so constructing this object never
    # touches the network.
    self.connection_authenticator = connection_authenticator
    self._conn = None  # cached connection object, created on first use
    self._kwargs = kwargs  # forwarded verbatim to the authenticator
45
47 self._conn = None
48 return self.conn
49
50 @property
52 if self._conn is None:
53 log.debug('creating self._conn w/ connection_authenticator ' +
54 'kwargs = %s' % self._kwargs)
55 self._conn = self.connection_authenticator(
56 self.aws_access_key_id, self.aws_secret_access_key,
57 **self._kwargs)
58 return self._conn
59
def __init__(self, aws_access_key_id, aws_secret_access_key,
             aws_ec2_path='/', aws_s3_host=None, aws_s3_path='/',
             aws_port=None, aws_region_name=None, aws_is_secure=True,
             aws_region_host=None, cache=False, **kwargs):
    """
    EC2 connection wrapper. Also builds a companion EasyS3 connection
    (self.s3) sharing the same credentials.

    NOTE(review): the incoming **kwargs are rebound below and never
    reach boto -- confirm discarding them is intentional.
    """
    # Build an explicit RegionInfo only when both a region name and an
    # endpoint host are supplied; otherwise boto's default region is used.
    aws_region = None
    if aws_region_name and aws_region_host:
        aws_region = boto.ec2.regioninfo.RegionInfo(
            name=aws_region_name, endpoint=aws_region_host)
    kwargs = dict(is_secure=aws_is_secure, region=aws_region,
                  port=aws_port, path=aws_ec2_path)
    super(EasyEC2, self).__init__(aws_access_key_id, aws_secret_access_key,
                                  boto.connect_ec2, **kwargs)
    kwargs = dict(aws_s3_host=aws_s3_host,
                  aws_s3_path=aws_s3_path,
                  aws_port=aws_port,
                  aws_is_secure=aws_is_secure,
                  cache=cache)
    self.s3 = EasyS3(aws_access_key_id, aws_secret_access_key, **kwargs)
    self.cache = cache
    # Per-resource response caches, honored only when cache=True.
    self._instance_response = None
    self._keypair_response = None
    self._images = None
    self._executable_images = None
    self._security_group_response = None
    self._regions = None
87
89 return '<EasyEC2: %s (%s)>' % (self.region.name, self.region.endpoint)
90
92 if e.error_code in ["AuthFailure", "SignatureDoesNotMatch"]:
93 raise e
94
96 """
97 Connects to a given region if it exists, raises RegionDoesNotExist
98 otherwise. Once connected, this object will return only data from the
99 given region.
100 """
101 region = self.get_region(region_name)
102 self._kwargs['region'] = region
103 self.reload()
104 return self
105
106 @property
108 """
109 Returns the current EC2 region used by this EasyEC2 object
110 """
111 return self.conn.region
112
113 @property
115 """
116 This property returns all AWS Regions, caching the results the first
117 time a request is made to Amazon
118 """
119 if not self._regions:
120 self._regions = {}
121 regions = self.conn.get_all_regions()
122 for region in regions:
123 self._regions[region.name] = region
124 return self._regions
125
127 """
128 Returns boto Region object if it exists, raises RegionDoesNotExist
129 otherwise.
130 """
131 if not region_name in self.regions:
132 raise exception.RegionDoesNotExist(region_name)
133 return self.regions.get(region_name)
134
136 """
137 Print name/endpoint for all AWS regions
138 """
139 for r in self.regions:
140 region = self.regions.get(r)
141 print 'name: ', region.name
142 print 'endpoint: ', region.endpoint
143 print
144
145 @property
147 if not self.cache or self._images is None:
148 self._images = self.conn.get_all_images(owners=["self"])
149 return self._images
150
151 @property
153 if not self.cache or self._images is None:
154 self._executable_images = self.conn.get_all_images(
155 executable_by=["self"])
156 return self._executable_images
157
164
def create_group(self, name, description, auth_ssh=False,
                 auth_group_traffic=False):
    """
    Create security group with name/description. auth_ssh=True
    will open port 22 to world (0.0.0.0/0). auth_group_traffic
    will allow all traffic between instances in the same security
    group

    Returns the new boto SecurityGroup, or None if name is empty.
    """
    if not name:
        return None
    log.info("Creating security group %s..." % name)
    sg = self.conn.create_security_group(name, description)
    if auth_ssh:
        sg.authorize('tcp', 22, 22, '0.0.0.0/0')
    if auth_group_traffic:
        # Look the group up once instead of once per rule: the original
        # issued three identical get_group_or_none API calls here.
        src_group = self.get_group_or_none(name)
        sg.authorize('icmp', -1, -1, src_group=src_group)
        sg.authorize('tcp', 1, 65535, src_group=src_group)
        sg.authorize('udp', 1, 65535, src_group=src_group)
    return sg
187
198
200 """
201 Returns group with name if it exists otherwise returns None
202 """
203 sgs = self.get_all_security_groups(groupnames=[name])
204 if sgs:
205 return sgs[0]
206
def get_or_create_group(self, name, description, auth_ssh=True,
                        auth_group_traffic=False):
    """
    Look up security group `name`, creating it when missing.
    `description` is only used on creation. When creating with
    auth_group_traffic=True, all traffic between group members is
    authorized.
    """
    existing = self.get_group_or_none(name)
    if existing:
        return existing
    return self.create_group(name, description, auth_ssh,
                             auth_group_traffic)
221
def has_permission(self, group, ip_protocol, from_port, to_port, cidr_ip):
    """
    Return True if `group` already contains a rule matching exactly
    (ip_protocol, from_port, to_port) with a grant for cidr_ip.
    """
    for rule in group.rules:
        # protocol is checked first so ports are only parsed for
        # rules of the requested protocol
        if rule.ip_protocol != ip_protocol:
            continue
        if int(rule.from_port) != from_port or int(rule.to_port) != to_port:
            continue
        if any(grant.cidr_ip == cidr_ip for grant in rule.grants):
            return True
    return False
239
241 """
242 Returns placement group with name if it exists otherwise returns None
243 """
244 try:
245 pg = self.conn.get_all_placement_groups(groupnames=[name])[0]
246 return pg
247 except boto.exception.EC2ResponseError, e:
248 self.__check_for_auth_failure(e)
249 except IndexError:
250 pass
251
253 """
254 Create a new placement group for your account.
255 This will create the placement group within the region you
256 are currently connected to.
257 """
258 if not name:
259 return
260 log.info("Creating placement group %s..." % name)
261 success = self.conn.create_placement_group(name)
262 if success:
263 return self.get_placement_group_or_none(name)
264
274
def request_instances(self, image_id, price=None, instance_type='m1.small',
                      min_count=1, max_count=1, count=1, key_name=None,
                      security_groups=None, launch_group=None,
                      availability_zone_group=None, placement=None,
                      user_data=None, placement_group=None):
    """
    Convenience dispatcher: a truthy `price` requests spot instances at
    that bid, otherwise flat-rate (on-demand) instances are launched.
    min_count/max_count/placement_group apply only to on-demand;
    count/launch_group/availability_zone_group only to spot.
    """
    if price:
        return self.request_spot_instances(
            price, image_id, instance_type=instance_type,
            count=count, launch_group=launch_group, key_name=key_name,
            security_groups=security_groups,
            availability_zone_group=availability_zone_group,
            placement=placement, user_data=user_data)
    return self.run_instances(
        image_id, instance_type=instance_type,
        min_count=min_count, max_count=max_count,
        key_name=key_name, security_groups=security_groups,
        placement=placement, user_data=user_data,
        placement_group=placement_group)
297
298 - def request_spot_instances(self, price, image_id, instance_type='m1.small',
299 count=1, launch_group=None, key_name=None,
300 availability_zone_group=None,
301 security_groups=None,
302 placement=None, user_data=None):
309
310 - def run_instances(self, image_id, instance_type='m1.small', min_count=1,
311 max_count=1, key_name=None, security_groups=None,
312 placement=None, user_data=None, placement_group=None):
321
def create_image(self, instance_id, name, description=None,
                 no_reboot=False):
    """
    Create an AMI called `name` from a running instance via the EC2
    API; returns whatever the underlying connection call returns.
    """
    opts = dict(description=description, no_reboot=no_reboot)
    return self.conn.create_image(instance_id, name, **opts)
327
def register_image(self, name, description=None, image_location=None,
                   architecture=None, kernel_id=None, ramdisk_id=None,
                   root_device_name=None, block_device_map=None):
    """
    Register a new AMI with EC2, forwarding every option to the
    underlying connection; returns the result of the API call.
    """
    opts = dict(name=name, description=description,
                image_location=image_location,
                architecture=architecture,
                kernel_id=kernel_id, ramdisk_id=ramdisk_id,
                root_device_name=root_device_name,
                block_device_map=block_device_map)
    return self.conn.register_image(**opts)
338
340 return self.conn.delete_key_pair(name)
341
343 """
344 Create a new EC2 keypair and optionally save to output_file
345
346 Returns boto.ec2.keypair.KeyPair
347 """
348 if output_file:
349 output_dir = os.path.dirname(output_file)
350 if output_dir and not os.path.exists(output_dir):
351 raise exception.BaseException(
352 "output directory does not exist")
353 if os.path.exists(output_file):
354 raise exception.BaseException(
355 "cannot save keypair %s: file already exists" % \
356 output_file)
357 kp = self.conn.create_key_pair(name)
358 if output_file:
359 try:
360 kfile = open(output_file, 'wb')
361 kfile.write(kp.material)
362 kfile.close()
363 os.chmod(output_file, 0400)
364 except IOError, e:
365 raise exception.BaseException(str(e))
366 return kp
367
376
378 try:
379 return self.get_keypair(keypair)
380 except:
381 pass
382
384 print msg
385 print "-" * len(msg)
386
388 image_name = re.sub('\.manifest\.xml$', '',
389 img.location.split('/')[-1])
390 return image_name
391
393 i = self.get_instance(instance_id)
394 attributes = self.conn.get_instance_attribute(i.id, 'userData')
395 user_data = attributes['userData'] or ''
396 return base64.b64decode(user_data)
397
412
414 try:
415 self.get_all_instances()
416 return True
417 except boto.exception.EC2ResponseError, e:
418 if e.status in [401, 403]:
419 return False
420 raise
421
423 spots = self.conn.get_all_spot_instance_requests(spot_ids,
424 filters=filters)
425 return spots
426
438
440 s = self.conn.get_all_spot_instance_requests()
441 if not s:
442 log.info("No spot instance requests found...")
443 return
444 spots = []
445 for spot in s:
446 if spot.state in ['closed', 'cancelled'] and not show_closed:
447 continue
448 state = spot.state or 'N/A'
449 spot_id = spot.id or 'N/A'
450 spots.append(spot_id)
451 type = spot.type
452 instance_id = spot.instance_id or 'N/A'
453 create_time = spot.create_time or 'N/A'
454 launch_group = spot.launch_group or 'N/A'
455 zone_group = spot.availability_zone_group or 'N/A'
456 price = spot.price or 'N/A'
457 lspec = spot.launch_specification
458 instance_type = lspec.instance_type
459 image_id = lspec.image_id
460 zone = lspec.placement
461 groups = ', '.join([g.id for g in lspec.groups])
462 print "id: %s" % spot_id
463 print "price: $%0.2f" % price
464 print "spot_request_type: %s" % type
465 print "state: %s" % state
466 print "instance_id: %s" % instance_id
467 print "instance_type: %s" % instance_type
468 print "image_id: %s" % image_id
469 print "zone: %s" % zone
470 print "create_time: %s" % create_time
471 print "launch_group: %s" % launch_group
472 print "zone_group: %s" % zone_group
473 print "security_groups: %s" % groups
474 print
475 if not spots:
476 log.info("No spot instance requests found...")
477
517
518 - def list_images(self, images, sort_key=None, reverse=False):
519 def get_key(obj):
520 return ' '.join([obj.region.name, obj.location])
521 if not sort_key:
522 sort_key = get_key
523 imgs_i386 = [img for img in images if img.architecture == "i386"]
524 imgs_i386.sort(key=sort_key, reverse=reverse)
525 imgs_x86_64 = [img for img in images if img.architecture == "x86_64"]
526 imgs_x86_64.sort(key=sort_key, reverse=reverse)
527 print
528 self.__list_images("32bit Images:", imgs_i386)
529 self.__list_images("\n64bit Images:", imgs_x86_64)
530 print "\ntotal images: %d" % len(images)
531 print
532
537
542
544 counter = 0
545 self.__print_header(msg)
546 for img in imgs:
547 name = self.get_image_name(img)
548 template = "[%d] %s %s %s"
549 if img.virtualization_type == 'hvm':
550 template += ' (HVM-EBS)'
551 elif img.root_device_type == 'ebs':
552 template += ' (EBS)'
553 print template % (counter, img.id, img.region.name, name)
554 counter += 1
555
557 files = self.get_image_files(image_name)
558 for file in files:
559 if pretend:
560 print file
561 else:
562 print 'removing file %s' % file
563 file.delete()
564
565
566 files = self.get_image_files(image_name)
567 if len(files) != 0:
568 if pretend:
569 log.info('Not all files deleted, would recurse...exiting')
570 return
571 else:
572 log.info('Not all files deleted, recursing...')
573 self.remove_image_files(image_name, pretend)
574
575 @print_timing("Removing image")
577 img = self.get_image(image_name)
578 if pretend:
579 log.info("Pretending to remove AMI: %s" % image_name)
580 else:
581 log.info("Removing AMI: %s" % image_name)
582
583
584 log.info('Removing image files...')
585 self.remove_image_files(image_name, pretend=pretend)
586
587
588 if pretend:
589 log.info('Would run deregister_image for ami: %s)' % img.id)
590 else:
591 log.info('Deregistering ami: %s' % img.id)
592 img.deregister()
593
595 images = self.conn.get_all_images(owners=[static.STARCLUSTER_OWNER_ID])
596 log.info("Listing all public StarCluster images...")
597 imgs = [img for img in images if img.is_public]
598
599 def sc_public_sort(obj):
600 split = obj.name.split('-')
601 osname, osversion, arch = split[2:5]
602 osversion = float(osversion)
603 rc = 0
604 if split[-1].startswith('rc'):
605 rc = int(split[-1].replace('rc', ''))
606 return (osversion, rc)
607 self.list_images(imgs, sort_key=sc_public_sort, reverse=True)
608
611
613 vol = self.get_volume(volume_id)
614 vol.delete()
615
617 max_length = max([len(key.name) for key in self.keypairs])
618 templ = "%" + str(max_length) + "s %s"
619 for key in self.keypairs:
620 print templ % (key.name, key.fingerprint)
621
643
656
658 """
659 Return zone object respresenting an EC2 availability zone
660 Returns None if unsuccessful
661 """
662 try:
663 return self.get_zone(zone)
664 except exception.ZoneDoesNotExist:
665 pass
666
def create_s3_image(self, instance_id, key_location, aws_user_id,
                    ec2_cert, ec2_private_key, bucket, image_name="image",
                    description=None, kernel_id=None, ramdisk_id=None,
                    remove_image_files=False, **kwargs):
    """
    Build an instance-store (S3-backed) AMI from a running instance.
    All the real work is delegated to image.S3ImageCreator; returns
    its create_image() result.
    """
    opts = dict(image_name=image_name,
                description=description,
                kernel_id=kernel_id,
                ramdisk_id=ramdisk_id,
                remove_image_files=remove_image_files)
    creator = image.S3ImageCreator(self, instance_id, key_location,
                                   aws_user_id, ec2_cert,
                                   ec2_private_key, bucket, **opts)
    return creator.create_image()
683
def create_ebs_image(self, instance_id, key_location, name,
                     description=None, snapshot_description=None,
                     kernel_id=None, ramdisk_id=None, root_vol_size=15,
                     **kwargs):
    """
    Build an EBS-backed AMI from a running instance with a root volume
    of root_vol_size GB. Extra kwargs are forwarded to
    image.EBSImageCreator; returns its create_image() result.
    """
    opts = dict(kwargs)
    opts.update(description=description,
                snapshot_description=snapshot_description,
                kernel_id=kernel_id,
                ramdisk_id=ramdisk_id)
    creator = image.EBSImageCreator(self, instance_id, key_location,
                                    name, **opts)
    return creator.create_image(size=root_vol_size)
699
712
714 """
715 Return image object representing an AMI.
716 Returns None if unsuccessful
717 """
718 try:
719 return self.get_image(image_id)
720 except:
721 pass
722
724 """
725 """
726 bname = re.escape(bucket.name)
727 prefix = re.sub('^%s\/' % bname, '', image.location)
728 prefix = re.sub('\.manifest\.xml$', '', prefix)
729 files = bucket.list(prefix=prefix)
730 manifest_regex = re.compile(r'%s\.manifest\.xml' % prefix)
731 part_regex = re.compile(r'%s\.part\.(\d*)' % prefix)
732
733
734 files = [f for f in files if hasattr(f, 'delete') and
735 part_regex.match(f.name) or manifest_regex.match(f.name)]
736 return files
737
749
751 bucket_name = image.location.split('/')[0]
752 return self.s3.get_bucket(bucket_name)
753
755 return image.location.split('/')[-1]
756
@print_timing("Migrating image")
def migrate_image(self, image_id, destbucket, migrate_manifest=False,
                  kernel_id=None, ramdisk_id=None, region=None, cert=None,
                  private_key=None):
    """
    Migrate image_id files to destbucket

    Copies every S3 part/manifest file of an instance-store AMI into
    destbucket. With migrate_manifest=True the manifest is additionally
    rewritten via the external `ec2-migrate-manifest` tool, which then
    requires cert, private_key, kernel_id and ramdisk_id.

    Raises AWSError for EBS-backed images (those have no S3 files).
    """
    if migrate_manifest:
        # fail fast if the external tool isn't on PATH
        utils.check_required(['ec2-migrate-manifest'])
        if not cert:
            raise exception.BaseException("no cert specified")
        if not private_key:
            raise exception.BaseException("no private_key specified")
        if not kernel_id:
            raise exception.BaseException("no kernel_id specified")
        if not ramdisk_id:
            raise exception.BaseException("no ramdisk_id specified")
    image = self.get_image(image_id)
    if image.root_device_type == "ebs":
        raise exception.AWSError(
            "The image you wish to migrate is EBS-based. " +
            "This method only works for instance-store images")
    ibucket = self.get_image_bucket(image)
    files = self._get_image_files(image, ibucket)
    if not files:
        log.info("No files found for image: %s" % image_id)
        return
    log.info("Migrating image: %s" % image_id)
    # widgets[0] is mutated per file to show the current file name
    widgets = [files[0].name, progressbar.Percentage(), ' ',
               progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
               progressbar.ETA(), ' ', ' ']
    counter = 0
    num_files = len(files)
    pbar = progressbar.ProgressBar(widgets=widgets,
                                   maxval=num_files).start()
    for f in files:
        widgets[0] = "%s: (%s/%s)" % (f.name, counter + 1, num_files)
        # server-side S3 copy into the destination bucket, same key name
        f.copy(destbucket, f.name)
        pbar.update(counter)
        counter += 1
    pbar.finish()
    if migrate_manifest:
        dbucket = self.s3.get_bucket(destbucket)
        manifest_key = dbucket.get_key(self.get_image_manifest(image))
        # download the copied manifest to a named temp file so the
        # external tool can rewrite it in place
        f = tempfile.NamedTemporaryFile()
        manifest_key.get_contents_to_file(f.file)
        f.file.close()
        cmd = ('ec2-migrate-manifest -c %s -k %s -m %s --kernel %s ' +
               '--ramdisk %s --no-mapping ') % (cert, private_key,
                                                f.name, kernel_id,
                                                ramdisk_id)
        register_cmd = "ec2-register %s/%s" % (destbucket,
                                               manifest_key.name)
        if region:
            cmd += '--region %s' % region
            register_cmd += " --region %s" % region
        log.info("Migrating manifest file...")
        retval = os.system(cmd)
        if retval != 0:
            raise exception.BaseException(
                "ec2-migrate-manifest failed with status %s" % retval)
        # re-open the rewritten manifest and upload it over the copy
        f.file = open(f.name, 'r')
        manifest_key.set_contents_from_file(f.file)
        # grant Amazon's za-team read access (required for registration
        # of shared instance-store images)
        manifest_key.add_email_grant('READ', 'za-team@amazon.com')
        f.close()
        # presumably ec2-migrate-manifest leaves a .bak backup of the
        # original manifest -- remove it (TODO confirm tool behavior)
        os.unlink(f.name + '.bak')
        log.info("Manifest migrated successfully. You can now run:\n" +
                 register_cmd + "\nto register your migrated image.")
827
def create_root_block_device_map(self, snapshot_id,
                                 root_device_name='/dev/sda1',
                                 add_ephemeral_drives=False,
                                 ephemeral_drive_0='/dev/sdb1',
                                 ephemeral_drive_1='/dev/sdc1',
                                 ephemeral_drive_2='/dev/sdd1',
                                 ephemeral_drive_3='/dev/sde1'):
    """
    Utility method for building a new block_device_map for a given
    snapshot id. This is useful when creating a new image from a volume
    snapshot. The returned block device map can be used with
    self.register_image
    """
    bdmap = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    root = boto.ec2.blockdevicemapping.BlockDeviceType()
    root.snapshot_id = snapshot_id
    # root volume goes away when the instance terminates
    root.delete_on_termination = True
    bdmap[root_device_name] = root
    if add_ephemeral_drives:
        drives = (ephemeral_drive_0, ephemeral_drive_1,
                  ephemeral_drive_2, ephemeral_drive_3)
        for index, device in enumerate(drives):
            eph = boto.ec2.blockdevicemapping.BlockDeviceType()
            eph.ephemeral_name = 'ephemeral%d' % index
            bdmap[device] = eph
    return bdmap
859
860 @print_timing("Downloading image")
875 log.info("Downloading image: %s" % image_id)
876 for file in files:
877 widgets[0] = "%s:" % file.name
878 pbar = progressbar.ProgressBar(widgets=widgets,
879 maxval=file.size).start()
880 file.get_contents_to_filename(os.path.join(destdir, file.name),
881 cb=_dl_progress_cb)
882 pbar.finish()
883
885 """
886 Print a list of files for image_id to the screen
887 """
888 files = self.get_image_files(image_id)
889 for file in files:
890 print file.name
891
892 @property
894 if not self.cache or self._instance_response is None:
895 log.debug('instance_response = %s, cache = %s' %
896 (self._instance_response, self.cache))
897 self._instance_response = self.conn.get_all_instances()
898 return self._instance_response
899
900 @property
902 if not self.cache or self._keypair_response is None:
903 log.debug('keypair_response = %s, cache = %s' %
904 (self._keypair_response, self.cache))
905 self._keypair_response = self.conn.get_all_key_pairs()
906 return self._keypair_response
907
911
913 """
914 Returns a list of all EBS volumes
915 """
916 try:
917 return self.conn.get_all_volumes(filters=filters)
918 except boto.exception.EC2ResponseError, e:
919 self.__check_for_auth_failure(e)
920
934
951
952 - def create_snapshot(self, vol, description=None, wait_for_snapshot=False,
953 refresh_interval=30):
959
961 """
962 Returns a list of all EBS volume snapshots for this account
963 """
964 filters = {}
965 if volume_ids:
966 filters['volume-id'] = volume_ids
967 try:
968 return self.conn.get_all_snapshots(owner='self', filters=filters)
969 except boto.exception.EC2ResponseError, e:
970 self.__check_for_auth_failure(e)
971
984
986 """
987 Returns EBS volume object representing volume_id.
988 Returns none if unsuccessful
989 """
990 try:
991 return self.get_volume(volume_id)
992 except:
993 pass
994
995 - def list_volumes(self, volume_id=None, status=None,
996 attach_status=None, size=None, zone=None,
997 snapshot_id=None, show_deleted=False):
998 """
999 Print a list of volumes to the screen
1000 """
1001 filters = {}
1002 if status:
1003 filters['status'] = status
1004 else:
1005 filters['status'] = ['creating', 'available', 'in-use', 'error']
1006 if show_deleted:
1007 filters['status'] += ['deleting', 'deleted']
1008 if attach_status:
1009 filters['attachment.status'] = attach_status
1010 if volume_id:
1011 filters['volume-id'] = volume_id
1012 if size:
1013 filters['size'] = size
1014 if zone:
1015 filters['availability-zone'] = zone
1016 if snapshot_id:
1017 filters['snapshot-id'] = snapshot_id
1018 vols = self.get_volumes(filters=filters)
1019 vols.sort(key=lambda x: x.create_time)
1020 if vols:
1021 for vol in vols:
1022 print "volume_id: %s" % vol.id
1023 print "size: %sGB" % vol.size
1024 print "status: %s" % vol.status
1025 if vol.attachment_state():
1026 print "attachment_status: %s" % vol.attachment_state()
1027 print "availability_zone: %s" % vol.zone
1028 if vol.snapshot_id:
1029 print "snapshot_id: %s" % vol.snapshot_id
1030 snapshots = self.get_snapshots(volume_ids=[vol.id])
1031 if snapshots:
1032 snap_list = ' '.join([snap.id for snap in snapshots])
1033 print 'snapshots: %s' % snap_list
1034 if vol.create_time:
1035 lt = utils.iso_to_localtime_tuple(vol.create_time)
1036 print "create_time: %s" % lt
1037 print
1038 print 'Total: %s' % len(vols)
1039
1049
1052
1053 - def get_spot_history(self, instance_type,
1054 start=None, end=None, plot=False):
1055 if not utils.is_iso_time(start):
1056 raise exception.InvalidIsoDate(start)
1057 if not utils.is_iso_time(end):
1058 raise exception.InvalidIsoDate(end)
1059 hist = self.conn.get_spot_price_history(start_time=start,
1060 end_time=end,
1061 instance_type=instance_type,
1062 product_description="Linux/UNIX")
1063 if not hist:
1064 raise exception.SpotHistoryError(start, end)
1065 dates = [utils.iso_to_datetime_tuple(i.timestamp) for i in hist]
1066 prices = [i.price for i in hist]
1067 maximum = max(prices)
1068 avg = sum(prices) / len(prices)
1069 log.info("Current price: $%.2f" % hist[-1].price)
1070 log.info("Max price: $%.2f" % maximum)
1071 log.info("Average price: $%.2f" % avg)
1072 if plot:
1073 try:
1074 import pylab
1075 pylab.plot_date(pylab.date2num(dates), prices, linestyle='-')
1076 pylab.xlabel('Date')
1077 pylab.ylabel('Price (US Dollars)')
1078 pylab.title('%s Price vs Date (%s - %s)' % (instance_type,
1079 start, end))
1080 xmin, xmax = pylab.xlim()
1081 ymin, ymax = pylab.ylim()
1082 pylab.xlim([xmin - 1, xmax + 1])
1083 pylab.ylim([0, ymax * (1.02)])
1084 pylab.grid(True)
1085 pylab.show()
1086 except ImportError, e:
1087 log.error("Error importing pylab:")
1088 log.error(str(e))
1089 log.error("please ensure matplotlib is installed and that:")
1090 log.error(" $ python -c 'import pylab'")
1091 log.error("completes without error")
1092 return zip(dates, prices)
1093
1095 instance = self.get_instance(instance_id)
1096 console_output = instance.get_console_output().output
1097 print ''.join([c for c in console_output if c in string.printable])
1098
1101 DefaultHost = 's3.amazonaws.com'
1102 _calling_format = boto.s3.connection.OrdinaryCallingFormat()
1103
def __init__(self, aws_access_key_id, aws_secret_access_key,
             aws_s3_path='/', aws_port=None, aws_is_secure=True,
             aws_s3_host=DefaultHost, cache=False, **kwargs):
    """
    S3 connection wrapper built on boto.connect_s3.

    NOTE(review): incoming **kwargs are rebound below and never reach
    boto -- confirm discarding them is intentional.
    """
    # Callers (e.g. EasyEC2) may pass aws_s3_host=None explicitly; the
    # `or` falls back to DefaultHost in that case.
    kwargs = dict(is_secure=aws_is_secure,
                  host=aws_s3_host or self.DefaultHost,
                  port=aws_port,
                  path=aws_s3_path)
    # Only a truthy aws_s3_host switches to OrdinaryCallingFormat; when
    # None was passed, boto's default calling format is kept.
    if aws_s3_host:
        kwargs.update(dict(calling_format=self._calling_format))
    super(EasyS3, self).__init__(aws_access_key_id, aws_secret_access_key,
                                 boto.connect_s3, **kwargs)
    self.cache = cache  # enables response caching where supported
1116
1118 return '<EasyS3: %s>' % self.conn.server_name()
1119
1121 if e.error_code == "InvalidAccessKeyId":
1122 raise e
1123
1125 """
1126 Create a new bucket on S3. bucket_name must be unique, the bucket
1127 namespace is shared by all AWS users
1128 """
1129 bucket_name = bucket_name.split('/')[0]
1130 try:
1131 return self.conn.create_bucket(bucket_name)
1132 except boto.exception.S3CreateError, e:
1133 self.__check_for_auth_failure(e)
1134 if e.error_code == "BucketAlreadyExists":
1135 raise exception.BucketAlreadyExists(bucket_name)
1136 raise
1137
1148
1150 """
1151 Returns bucket object representing S3 bucket
1152 Returns None if unsuccessful
1153 """
1154 try:
1155 return self.get_bucket(bucket_name)
1156 except exception.BucketDoesNotExist:
1157 pass
1158
1168
1170 bucket = self.get_bucket(bucketname)
1171 for file in bucket.list():
1172 if file.name:
1173 print file.name
1174
1176 try:
1177 buckets = self.conn.get_all_buckets()
1178 except TypeError:
1179
1180 raise exception.AWSError("AWS credentials are not valid")
1181 return buckets
1182
1186
1188 files = []
1189 try:
1190 bucket = self.get_bucket(bucketname)
1191 files = [file for file in bucket.list()]
1192 except:
1193 pass
1194 return files
1195
if __name__ == "__main__":
    # Ad-hoc smoke test: needs a valid StarCluster config with AWS
    # credentials; lists this account's instances and registered images.
    from starcluster.config import get_easy_ec2
    ec2 = get_easy_ec2()
    ec2.list_all_instances()
    ec2.list_registered_images()
1201