import time
import string

from starcluster import static
from starcluster import utils
from starcluster import exception
from starcluster import cluster
from starcluster.utils import print_timing
from starcluster.logger import log
14 """
15 Handles creating, partitioning, and formatting a new EBS volume.
16 By default this class will format the entire drive (without partitioning)
17 using the ext3 filesystem.
18
19 host_instance - EC2 instance to use when formatting volume. must exist in
20 the same zone as the new volume. if not specified this class will look for
21 host instances in the @sc-volumecreator security group. If it can't find
22 an instance in the @sc-volumecreator group that matches the zone of the
23 new volume, a new instance is launched.
24
25 shutdown_instance - True will shutdown the host instance after volume
26 creation
27 """
    def __init__(self, ec2_conn, spot_bid=None, keypair=None,
                 key_location=None, host_instance=None, device='/dev/sdz',
                 image_id=static.BASE_AMI_32, instance_type="m1.small",
                 shutdown_instance=False, detach_vol=False,
                 mkfs_cmd='mkfs.ext3', resizefs_cmd='resize2fs', **kwargs):
        self._host_instance = host_instance
        self._instance = None
        self._volume = None
        self._device = device or '/dev/sdz'
        self._image_id = image_id or static.BASE_AMI_32
        self._instance_type = instance_type or 'm1.small'
        self._shutdown = shutdown_instance
        self._detach_vol = detach_vol
        self._mkfs_cmd = mkfs_cmd
        self._resizefs_cmd = resizefs_cmd
        self._alias_tmpl = "volhost-%s"
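        # VolumeCreator reuses the Cluster machinery: the super() call below
        # configures a single-node "cluster" tagged with the volume-creator
        # group name, and that lone node acts as the host instance the new
        # volume is attached to and formatted on.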
        super(VolumeCreator, self).__init__(
            ec2_conn=ec2_conn, spot_bid=spot_bid, keyname=keypair,
            key_location=key_location, cluster_tag=static.VOLUME_GROUP_NAME,
            cluster_size=1, cluster_user="sgeadmin", cluster_shell="bash",
            node_image_id=self._image_id,
            node_instance_type=self._instance_type)

    def __repr__(self):
        return "<VolumeCreator: %s>" % self._mkfs_cmd

55 """
56 Returns any existing instance in the @sc-volumecreator group that's
57 located in zone.
58 """
59 active_states = ['pending', 'running']
60 i = self._host_instance
61 if i and self._validate_host_instance(i, zone):
62 log.info("Using specified host instance %s" % i.id)
63 return i
64 for node in self.nodes:
65 if node.state in active_states and node.placement == zone:
66 log.info("Using existing instance %s in group %s" % \
67 (node.id, self.cluster_group.name))
68 return node
69
    def _create_volume(self, size, zone, snapshot_id=None):
        msg = "Creating %sGB volume in zone %s" % (size, zone)
        if snapshot_id:
            msg += " from snapshot %s" % snapshot_id
        log.info(msg)
        vol = self.ec2.create_volume(size, zone, snapshot_id)
        log.info("New volume id: %s" % vol.id)
        s = self.get_spinner("Waiting for new volume to become 'available'...")
        # poll every 5 seconds until EC2 reports the volume as 'available'
        while vol.status != 'available':
            time.sleep(5)
            vol.update()
        s.stop()
        self._volume = vol
        return self._volume

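    # The helper below walks device names from /dev/sdz back toward /dev/sda
    # and picks the first one not already present in the host instance's
    # block device mapping, so the new volume won't collide with an existing
    # attachment.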
    def _determine_device(self):
        block_dev_map = self._instance.block_device_mapping
        for char in string.lowercase[::-1]:
            dev = '/dev/sd%s' % char
            if not block_dev_map.get(dev):
                self._device = dev
                return self._device

    def _attach_volume(self, vol, instance_id, device):
        s = self.get_spinner("Attaching volume %s to instance %s..." %
                             (vol.id, instance_id))
        vol.attach(instance_id, device)
        # poll every 5 seconds until the attachment is reported as 'attached'
        while True:
            vol.update()
            if vol.attachment_state() == 'attached':
                break
            time.sleep(5)
        s.stop()
        return self._volume

    def _validate_zone(self, zone):
        z = self.ec2.get_zone(zone)
        if z.state != 'available':
            log.warn('zone %s is not available at this time' % zone)
        return True

    def _validate_required_progs(self, progs):
        log.info("Checking for required remote commands...")
        self._instance.ssh.check_required(progs)

    def validate(self, size, zone, device):

    def is_valid(self, size, zone, device):

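    # "echo ',,L' | sfdisk <device>" writes a partition table containing a
    # single Linux ('L') partition that starts at the default offset and
    # spans the rest of the device.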
    def _partition_volume(self):
        self._instance.ssh.execute('echo ",,L" | sfdisk %s' % self._device,
                                   silent=False)

    def _warn_about_volume_hosts(self):
        sg = self.ec2.get_group_or_none(static.VOLUME_GROUP)
        if not sg:
            return
        vol_hosts = filter(lambda x: x.state in ['running', 'pending'],
                           sg.instances())
        vol_hosts = map(lambda x: x.id, vol_hosts)
        if vol_hosts:
            log.warn("There are still volume hosts running: %s" %
                     ', '.join(vol_hosts))
            log.warn("Run 'starcluster terminate %s' to terminate *all* "
                     "volume host instances once they're no longer needed" %
                     static.VOLUME_GROUP_NAME)
        else:
            log.info("No active volume hosts found. Run 'starcluster "
                     "terminate %(g)s' to remove the '%(g)s' group" %
                     {'g': static.VOLUME_GROUP_NAME})

    def shutdown(self):
        vol = self._volume
        host = self._instance
        if self._detach_vol:
            log.info("Detaching volume %s from instance %s" %
                     (vol.id, host.id))
            vol.detach()
        else:
            log.info("Leaving volume %s attached to instance %s" %
                     (vol.id, host.id))
        if self._shutdown:
            log.info("Terminating host instance %s" % host.id)
            host.terminate()
        else:
            log.info("Not terminating host instance %s" % host.id)

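    # print_timing (imported from starcluster.utils) wraps the decorated call
    # so the time spent creating the volume is logged once create() returns.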
    @print_timing("Creating volume")
    def create(self, volume_size, volume_zone, name=None, tags=None):

    def _validate_resize(self, vol, size):
        self._validate_size(size)
        if vol.size > size:
            log.warn("You are attempting to shrink an EBS volume. "
                     "Data loss may occur")

    def resize(self, vol, size, dest_zone=None):
        """
        Resize an EBS volume.

        vol - boto volume object
        size - new volume size
        dest_zone - zone to create the new resized volume in. This must be
        within the original volume's region; otherwise a manual copy (rsync)
        is required, which is currently not implemented.
        """
        try:
            self._validate_device(self._device)
            self._validate_resize(vol, size)
            zone = vol.zone
            if dest_zone:
                self._validate_zone(dest_zone)
                zone = dest_zone
            host = self._request_instance(zone)
            self._validate_required_progs([self._resizefs_cmd.split()[0]])
            self._determine_device()
            snap = self.ec2.create_snapshot(vol, wait_for_snapshot=True)
            new_vol = self._create_volume(size, zone, snap.id)
            self._attach_volume(new_vol, host.id, self._device)
            # the /dev listing contains the base device plus one entry per
            # partition (e.g. /dev/sdz and /dev/sdz1)
            devs = filter(lambda x: x.startswith(self._device),
                          host.ssh.ls('/dev'))
            device = self._device
            if len(devs) == 1:
                log.info("No partitions found, resizing entire device")
            elif len(devs) == 2:
                log.info("One partition found, resizing partition...")
                self._partition_volume()
                device += '1'
            else:
                raise exception.InvalidOperation(
                    "EBS volume %s has more than 1 partition. "
                    "You must resize this volume manually" % vol.id)
            host.ssh.execute(' '.join([self._resizefs_cmd, device]))
            log.info("Removing generated snapshot %s" % snap.id)
            snap.delete()
            self.shutdown()
            self._warn_about_volume_hosts()
            return new_vol.id
        except Exception:
            self._warn_about_volume_hosts()
            raise