import time
import string

from starcluster import static
from starcluster import utils
from starcluster import exception
from starcluster import cluster
from starcluster.utils import print_timing
from starcluster.logger import log


class VolumeCreator(cluster.Cluster):
    """
    Handles creating, partitioning, and formatting a new EBS volume.
    By default this class will format the entire drive (without partitioning)
    using the ext3 filesystem.

    host_instance - EC2 instance to use when formatting the volume. Must exist
    in the same zone as the new volume. If not specified, this class looks for
    host instances in the @sc-volumecreator security group. If it can't find
    an instance in the @sc-volumecreator group that matches the zone of the
    new volume, a new instance is launched.

    shutdown_instance - if True, shut down the host instance after volume
    creation
    """
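    # A minimal usage sketch (illustrative values only; the keypair name and
    # key path are placeholders, not part of this module):
    #
    #     vc = VolumeCreator(ec2_conn, keypair='mykey',
    #                        key_location='/path/to/mykey.rsa')
    #     vc.create(volume_size=10, volume_zone='us-east-1a', name='myvol')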
    def __init__(self, ec2_conn, spot_bid=None, keypair=None,
                 key_location=None, host_instance=None, device='/dev/sdz',
                 image_id=static.BASE_AMI_32, instance_type="m1.small",
                 shutdown_instance=False, detach_vol=False,
                 mkfs_cmd='mkfs.ext3', resizefs_cmd='resize2fs', **kwargs):
        self._host_instance = host_instance
        self._instance = None
        self._volume = None
        self._device = device or '/dev/sdz'
        self._image_id = image_id or static.BASE_AMI_32
        self._instance_type = instance_type or 'm1.small'
        self._shutdown = shutdown_instance
        self._detach_vol = detach_vol
        self._mkfs_cmd = mkfs_cmd
        self._resizefs_cmd = resizefs_cmd
        self._alias_tmpl = "volhost-%s"
        super(VolumeCreator, self).__init__(
            ec2_conn=ec2_conn, spot_bid=spot_bid, keyname=keypair,
            key_location=key_location, cluster_tag=static.VOLUME_GROUP_NAME,
            cluster_size=1, cluster_user="sgeadmin", cluster_shell="bash",
            node_image_id=self._image_id,
            node_instance_type=self._instance_type)

    def __repr__(self):
        return "<VolumeCreator: %s>" % self._mkfs_cmd

    def _get_existing_instance(self, zone):
        """
        Returns any existing instance in the @sc-volumecreator group that's
        located in zone.
        """
        active_states = ['pending', 'running']
        i = self._host_instance
        if i and self._validate_host_instance(i, zone):
            log.info("Using specified host instance %s" % i.id)
            return i
        for node in self.nodes:
            if node.state in active_states and node.placement == zone:
                log.info("Using existing instance %s in group %s" %
                         (node.id, self.cluster_group.name))
                return node

    def _create_volume(self, size, zone, snapshot_id=None):
        msg = "Creating %sGB volume in zone %s" % (size, zone)
        if snapshot_id:
            msg += " from snapshot %s" % snapshot_id
        log.info(msg)
        vol = self.ec2.create_volume(size, zone, snapshot_id)
        log.info("New volume id: %s" % vol.id)
        s = self.get_spinner("Waiting for new volume to become 'available'...")
        while vol.status != 'available':
            time.sleep(5)
            vol.update()
        s.stop()
        self._volume = vol
        return self._volume

    def _determine_device(self):
        # pick the first unused /dev/sd* device letter, scanning backwards
        # from 'z', so the new volume can be attached without a conflict
        block_dev_map = self._instance.block_device_mapping
        for char in string.lowercase[::-1]:
            dev = '/dev/sd%s' % char
            if not block_dev_map.get(dev):
                self._device = dev
                return self._device

    def _attach_volume(self, vol, instance_id, device):
        s = self.get_spinner("Attaching volume %s to instance %s..." %
                             (vol.id, instance_id))
        vol.attach(instance_id, device)
        while True:
            vol.update()
            if vol.attachment_state() == 'attached':
                break
            time.sleep(5)
        s.stop()
        return self._volume

    def _validate_zone(self, zone):
        z = self.ec2.get_zone(zone)
        if z.state != 'available':
            log.warn('zone %s is not available at this time' % zone)
        return True

    def _validate_required_progs(self, progs):
        log.info("Checking for required remote commands...")
        self._instance.ssh.check_required(progs)

    def validate(self, size, zone, device):
        # delegate to the individual validators for each argument
        self._validate_size(size)
        self._validate_zone(zone)
        self._validate_device(device)

    def is_valid(self, size, zone, device):
        try:
            self.validate(size, zone, device)
            return True
        except exception.ValidationError as e:
            log.error(e)
            return False

    def _partition_volume(self):
        # create a single Linux partition spanning the entire device
        self._instance.ssh.execute('echo ",,L" | sfdisk %s' % self._device,
                                   silent=False)

    def _warn_about_volume_hosts(self):
        sg = self.ec2.get_group_or_none(static.VOLUME_GROUP)
        if not sg:
            return
        vol_hosts = filter(lambda x: x.state in ['running', 'pending'],
                           sg.instances())
        vol_hosts = map(lambda x: x.id, vol_hosts)
        if vol_hosts:
            log.warn("There are still volume hosts running: %s" %
                     ', '.join(vol_hosts))
            log.warn("Run 'starcluster terminate %s' to terminate *all* "
                     "volume host instances once they're no longer needed" %
                     static.VOLUME_GROUP_NAME)
        else:
            log.info("No active volume hosts found. Run 'starcluster "
                     "terminate %(g)s' to remove the '%(g)s' group" %
                     {'g': static.VOLUME_GROUP_NAME})

    def shutdown(self):
        vol = self._volume
        host = self._instance
        if self._detach_vol:
            log.info("Detaching volume %s from instance %s" %
                     (vol.id, host.id))
            vol.detach()
        else:
            log.info("Leaving volume %s attached to instance %s" %
                     (vol.id, host.id))
        if self._shutdown:
            log.info("Terminating host instance %s" % host.id)
            host.terminate()
        else:
            log.info("Not terminating host instance %s" % host.id)

    @print_timing("Creating volume")
    def create(self, volume_size, volume_zone, name=None, tags=None):
        """
        Create and format a new volume_size GB EBS volume in volume_zone,
        optionally tagging it with name and tags.
        """

    def _validate_resize(self, vol, size):
        self._validate_size(size)
        if vol.size > size:
            log.warn("You are attempting to shrink an EBS volume. "
                     "Data loss may occur")

    def resize(self, vol, size, dest_zone=None):
        """
        Resize an EBS volume.

        vol - boto volume object
        size - new volume size
        dest_zone - zone to create the new resized volume in. This must be
        within the original volume's region; otherwise a manual copy (rsync)
        is required, which is currently not implemented.
        """
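        # A minimal usage sketch (illustrative; assumes `vc` is a configured
        # VolumeCreator and `vol` is a boto volume object for the volume
        # being resized):
        #     new_vol_id = vc.resize(vol, size=20)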
        try:
            self._validate_device(self._device)
            self._validate_resize(vol, size)
            zone = vol.zone
            if dest_zone:
                self._validate_zone(dest_zone)
                zone = dest_zone
            host = self._request_instance(zone)
            self._validate_required_progs([self._resizefs_cmd.split()[0]])
            self._determine_device()
            # snapshot the original volume and restore the snapshot onto a
            # new volume of the requested size
            snap = self.ec2.create_snapshot(vol, wait_for_snapshot=True)
            new_vol = self._create_volume(size, zone, snap.id)
            self._attach_volume(new_vol, host.id, self._device)
            # decide whether to resize the raw device or its single partition
            devs = filter(lambda x: x.startswith(self._device),
                          host.ssh.ls('/dev'))
            device = self._device
            if len(devs) == 1:
                log.info("No partitions found, resizing entire device")
            elif len(devs) == 2:
                log.info("One partition found, resizing partition...")
                self._partition_volume()
                device += '1'
            else:
                raise exception.InvalidOperation(
                    "EBS volume %s has more than 1 partition. "
                    "You must resize this volume manually" % vol.id)
            # grow the filesystem to fill the new, larger volume
            host.ssh.execute(' '.join([self._resizefs_cmd, device]))
            log.info("Removing generated snapshot %s" % snap.id)
            snap.delete()
            self.shutdown()
            self._warn_about_volume_hosts()
            return new_vol.id
        except Exception:
            self._warn_about_volume_hosts()
            raise