1# Copyright The Cloud Custodian Authors.
2# SPDX-License-Identifier: Apache-2.0
3from collections import Counter
4import logging
5import itertools
6import json
7import time
8
9from botocore.exceptions import ClientError
10from concurrent.futures import as_completed
11from dateutil.parser import parse as parse_date
12
13from c7n.actions import BaseAction
14from c7n.exceptions import PolicyValidationError
15from c7n.filters import (
16 CrossAccountAccessFilter, Filter, AgeFilter, ValueFilter,
17 ANNOTATION_KEY, ListItemFilter)
18from c7n.filters.health import HealthEventFilter
19from c7n.filters.related import RelatedResourceFilter
20
21from c7n.manager import resources
22from c7n.resources.kms import ResourceKmsKeyAlias
23from c7n.resources.securityhub import PostFinding
24from c7n.query import QueryResourceManager, TypeInfo
25from c7n.tags import Tag, coalesce_copy_user_tags
26from c7n.utils import (
27 camelResource,
28 chunks,
29 get_retry,
30 local_session,
31 select_keys,
32 set_annotation,
33 type_schema,
34 QueryParser,
35 get_support_region,
36 group_by
37)
38from c7n.resources.ami import AMI
39
40log = logging.getLogger('custodian.ebs')
41
42
@resources.register('ebs-snapshot')
class Snapshot(QueryResourceManager):
    """Resource manager for EBS snapshots (aws.ebs-snapshot)."""

    class resource_type(TypeInfo):
        service = 'ec2'
        arn_type = 'snapshot'
        enum_spec = (
            'describe_snapshots', 'Snapshots', None)
        id = 'SnapshotId'
        id_prefix = 'snap-'
        filter_name = 'SnapshotIds'
        filter_type = 'list'
        name = 'SnapshotId'
        date = 'StartTime'

        default_report_fields = (
            'SnapshotId',
            'VolumeId',
            'tag:InstanceId',
            'VolumeSize',
            'StartTime',
            'State',
        )

    def resources(self, query=None):
        # Fold any policy-level `query` entries into API filters, and
        # default to snapshots owned by this account with a large page size.
        parsed = SnapshotQueryParser.parse(self.data.get('query', []))
        if not query:
            query = {}
        if parsed:
            query['Filters'] = parsed
        if query.get('OwnerIds') is None:
            query['OwnerIds'] = ['self']
        query.setdefault('MaxResults', 1000)
        return super().resources(query=query)

    def get_resources(self, ids, cache=True, augment=True):
        # Serve from cache when allowed; otherwise describe the ids,
        # dropping any id the API rejects and retrying with the remainder.
        if cache:
            cached = self._get_cached_resources(ids)
            if cached is not None:
                return cached
        while ids:
            try:
                return self.source.get_resources(ids)
            except ClientError as e:
                invalid = ErrorHandler.extract_bad_snapshot(e)
                if not invalid:
                    raise
                ids.remove(invalid)
        return []
93
94
class ErrorHandler:
    """Helpers for recovering from client-side EC2 describe errors.

    A describe call fails wholesale when any requested id is unknown or
    malformed; these helpers pull the offending id out of the error so
    callers can drop it and retry with the remaining ids.
    """

    @staticmethod
    def remove_snapshot(rid, resource_set):
        """Remove the snapshot with id `rid` from `resource_set` in place."""
        found = next(
            (r for r in resource_set if r['SnapshotId'] == rid), None)
        if found is not None:
            resource_set.remove(found)

    @staticmethod
    def _extract_bad_id(e, not_found_code, malformed_code, noun):
        """Extract the resource id embedded in a NotFound/Malformed error.

        NotFound messages quote the id in single quotes, Malformed messages
        in double quotes. Returns None for any other error code. `noun`
        ("Snapshot"/"Volume") is only used for the warning message.
        """
        msg = e.response['Error']['Message']
        error = e.response['Error']['Code']
        rid = None
        if error == not_found_code:
            rid = msg[msg.find("'") + 1:msg.rfind("'")]
            log.warning("%s not found %s" % (noun, rid))
        elif error == malformed_code:
            rid = msg[msg.find('"') + 1:msg.rfind('"')]
            log.warning("%s id malformed %s" % (noun, rid))
        return rid

    @staticmethod
    def extract_bad_snapshot(e):
        """Handle various client side errors when describing snapshots"""
        return ErrorHandler._extract_bad_id(
            e, 'InvalidSnapshot.NotFound', 'InvalidSnapshotID.Malformed',
            'Snapshot')

    @staticmethod
    def extract_bad_volume(e):
        """Handle various client side errors when describing volumes"""
        return ErrorHandler._extract_bad_id(
            e, 'InvalidVolume.NotFound', 'InvalidVolumeID.Malformed',
            'Volume')
134
135
class SnapshotQueryParser(QueryParser):
    """Validate policy-level `query` entries for ebs-snapshot resources.

    Maps each allowed EC2 DescribeSnapshots filter name to the value type
    (or the enumerated set of values) it accepts.
    """

    QuerySchema = {
        'description': str,
        'owner-alias': ('amazon', 'amazon-marketplace', 'microsoft'),
        'owner-id': str,
        'progress': str,
        'snapshot-id': str,
        'start-time': str,
        'status': ('pending', 'completed', 'error'),
        'tag': str,
        'tag-key': str,
        'volume-id': str,
        'volume-size': str,
    }

    # Used by QueryParser in validation error messages.
    type_name = 'EBS'
153
154
@Snapshot.action_registry.register('tag')
class SnapshotTag(Tag):
    """Tag snapshots, pruning any ids the API reports as invalid."""

    permissions = ('ec2:CreateTags',)

    def process_resource_set(self, client, resource_set, tags):
        # Retry after dropping each snapshot the service rejects; the loop
        # ends (returning None) only if every snapshot proved invalid.
        while resource_set:
            try:
                return super().process_resource_set(
                    client, resource_set, tags)
            except ClientError as e:
                invalid = ErrorHandler.extract_bad_snapshot(e)
                if not invalid:
                    raise
                ErrorHandler.remove_snapshot(invalid, resource_set)
171
172
@Snapshot.filter_registry.register('age')
class SnapshotAge(AgeFilter):
    """EBS Snapshot Age Filter

    Filters an EBS snapshot based on the age of the snapshot (in days)

    :example:

    .. code-block:: yaml

            policies:
              - name: ebs-snapshots-week-old
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 7
                    op: ge
    """

    schema = type_schema(
        'age',
        days={'type': 'number'},
        op={'$ref': '#/definitions/filters_common/comparison_operators'})
    # Age is measured from the snapshot's creation timestamp.
    date_attribute = 'StartTime'
197
198
199def _filter_ami_snapshots(self, snapshots):
200 if not self.data.get('value', True):
201 return snapshots
202 # try using cache first to get a listing of all AMI snapshots and compares resources to the list
203 # This will populate the cache.
204 amis = self.manager.get_resource_manager('ami').resources()
205 ami_snaps = []
206 for i in amis:
207 for dev in i.get('BlockDeviceMappings'):
208 if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
209 ami_snaps.append(dev['Ebs']['SnapshotId'])
210 matches = []
211 for snap in snapshots:
212 if snap['SnapshotId'] not in ami_snaps:
213 matches.append(snap)
214 return matches
215
216
@Snapshot.filter_registry.register('cross-account')
class SnapshotCrossAccountAccess(CrossAccountAccessFilter):
    """Find snapshots whose createVolumePermission grants outside accounts."""

    permissions = ('ec2:DescribeSnapshotAttribute',)

    def process(self, resources, event=None):
        self.accounts = self.get_accounts()
        client = local_session(self.manager.session_factory).client('ec2')
        matched = []
        with self.executor_factory(max_workers=3) as w:
            futures = [
                w.submit(self.process_resource_set, client, rset)
                for rset in chunks(resources, 50)]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception checking cross account access \n %s" % (
                            f.exception()))
                    continue
                matched.extend(f.result())
        return matched

    def process_resource_set(self, client, resource_set):
        # Annotate and collect each snapshot shared beyond the allowed set.
        everyone_only = self.data.get('everyone_only', False)
        matched = []
        for r in resource_set:
            perms = self.manager.retry(
                client.describe_snapshot_attribute,
                SnapshotId=r['SnapshotId'],
                Attribute='createVolumePermission')['CreateVolumePermissions']
            if everyone_only:
                # Only the public 'all' group counts as shared.
                shared = {
                    g['Group'] for g in perms if g.get('Group') == 'all'}
            else:
                shared = {g.get('Group') or g.get('UserId') for g in perms}
            outside = shared.difference(self.accounts)
            if outside:
                r['c7n:CrossAccountViolations'] = list(outside)
                matched.append(r)
        return matched
261
262
@Snapshot.filter_registry.register('unused')
class SnapshotUnusedFilter(Filter):
    """Filters snapshots based on usage

    true: snapshot is not used by launch-template, launch-config, or ami.

    false: snapshot is being used by launch-template, launch-config, or ami.

    :example:

    .. code-block:: yaml

            policies:
              - name: snapshot-unused
                resource: ebs-snapshot
                filters:
                  - type: unused
                    value: true
    """

    schema = type_schema('unused', value={'type': 'boolean'})

    def get_permissions(self):
        perms = []
        for rtype in ('asg', 'launch-config', 'ami'):
            perms.extend(
                self.manager.get_resource_manager(rtype).get_permissions())
        return perms

    def _pull_asg_snapshots(self):
        """Snapshot ids referenced via asg launch configs and templates."""
        snap_ids = set()
        asgs = self.manager.get_resource_manager('asg').resources()

        # Launch configurations referenced by any asg.
        launch_configs = {
            a['LaunchConfigurationName'] for a in asgs
            if 'LaunchConfigurationName' in a}
        if launch_configs:
            lcfg_mgr = self.manager.get_resource_manager('launch-config')
            for cfg in lcfg_mgr.resources():
                for mapping in cfg.get('BlockDeviceMappings'):
                    if 'Ebs' in mapping and 'SnapshotId' in mapping['Ebs']:
                        snap_ids.add(mapping['Ebs']['SnapshotId'])

        # Launch template versions referenced by any asg.
        tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
        template_ids = list(tmpl_mgr.get_asg_templates(asgs).keys())
        for version in tmpl_mgr.get_resources(template_ids):
            data = version['LaunchTemplateData']
            for mapping in data.get('BlockDeviceMappings', ()):
                if 'Ebs' in mapping and 'SnapshotId' in mapping['Ebs']:
                    snap_ids.add(mapping['Ebs']['SnapshotId'])
        return snap_ids

    def _pull_ami_snapshots(self):
        """Snapshot ids referenced by any account ami."""
        snap_ids = set()
        for image in self.manager.get_resource_manager('ami').resources():
            for mapping in image.get('BlockDeviceMappings'):
                if 'Ebs' in mapping and 'SnapshotId' in mapping['Ebs']:
                    snap_ids.add(mapping['Ebs']['SnapshotId'])
        return snap_ids

    def process(self, resources, event=None):
        used = self._pull_asg_snapshots() | self._pull_ami_snapshots()
        if self.data.get('value', True):
            return [r for r in resources if r['SnapshotId'] not in used]
        return [r for r in resources if r['SnapshotId'] in used]
324
325
@Snapshot.filter_registry.register('skip-ami-snapshots')
class SnapshotSkipAmiSnapshots(Filter):
    """
    Filter to remove snapshots of AMIs from results

    This filter is 'true' by default.

    :example:

    implicit with no parameters, 'true' by default

    .. code-block:: yaml

            policies:
              - name: delete-ebs-stale-snapshots
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                  - skip-ami-snapshots

    :example:

    explicit with parameter

    .. code-block:: yaml

            policies:
              - name: delete-snapshots
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                  - type: skip-ami-snapshots
                    value: false

    """

    schema = type_schema('skip-ami-snapshots', value={'type': 'boolean'})

    def get_permissions(self):
        # Needs the ami manager's permissions to enumerate image snapshots.
        return AMI(self.manager.ctx, {}).get_permissions()

    def process(self, snapshots, event=None):
        # Delegates to the module helper also used by the delete action.
        return _filter_ami_snapshots(self, snapshots)
374
375
@Snapshot.filter_registry.register('volume')
class SnapshotVolumeFilter(RelatedResourceFilter):
    """
    Filter EBS snapshots by their volume attributes.

    .. code-block:: yaml

            policies:
              - name: snapshot-with-no-volume
                description: Find any snapshots that do not have a corresponding volume.
                resource: aws.ebs-snapshot
                filters:
                  - type: volume
                    key: VolumeId
                    value: absent
              - name: find-snapshots-from-volume
                resource: aws.ebs-snapshot
                filters:
                  - type: volume
                    key: VolumeId
                    value: vol-foobarbaz
    """

    # Dotted path to the related manager, resolved lazily by the
    # related-resource framework (the EBS class is defined below).
    RelatedResource = 'c7n.resources.ebs.EBS'
    # Snapshot attribute holding the related volume's id.
    RelatedIdsExpression = 'VolumeId'
    AnnotationKey = 'Volume'

    schema = type_schema(
        'volume', rinherit=ValueFilter.schema)
405
406
@Snapshot.action_registry.register('delete')
class SnapshotDelete(BaseAction):
    """Deletes EBS snapshots

    :example:

    .. code-block:: yaml

            policies:
              - name: delete-stale-snapshots
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                actions:
                  - delete
    """

    schema = type_schema(
        'delete', **{'skip-ami-snapshots': {'type': 'boolean'}})
    permissions = ('ec2:DeleteSnapshot',)

    def process(self, snapshots):
        # NOTE(review): image_snapshots is never populated within this
        # action, so the membership check in process_snapshot_set is
        # currently a no-op — presumably a legacy hook; confirm before
        # relying on it.
        self.image_snapshots = set()
        # Be careful re image snapshots, we do this by default
        # to keep things safe by default, albeit we'd get an error
        # if we did try to delete something associated to an image.
        pre = len(snapshots)
        snapshots = list(filter(None, _filter_ami_snapshots(self, snapshots)))
        post = len(snapshots)
        log.info("Deleting %d snapshots, auto-filtered %d ami-snapshots",
                 post, pre - post)

        client = local_session(self.manager.session_factory).client('ec2')
        # Shared across worker threads; list.append is atomic under the GIL.
        deleted_snapshots = []
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for snapshot_set in chunks(reversed(snapshots), size=50):
                futures.append(
                    w.submit(self.process_snapshot_set, client, snapshot_set, deleted_snapshots))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting snapshot set \n %s" % (
                            f.exception()))
        return deleted_snapshots

    def process_snapshot_set(self, client, snapshots_set, deleted_snapshots):
        # Retry only on api rate-limit errors; other errors propagate.
        retry = get_retry((
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'))

        for s in snapshots_set:
            if s['SnapshotId'] in self.image_snapshots:
                continue
            try:
                retry(client.delete_snapshot,
                      SnapshotId=s['SnapshotId'],
                      DryRun=self.manager.config.dryrun)
                deleted_snapshots.append(s)
            except ClientError as e:
                # Already gone — treat as success for idempotency.
                if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
                    continue
                raise
471
472
@Snapshot.action_registry.register('copy')
class CopySnapshot(BaseAction):
    """Copy a snapshot across regions

    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html

    :example:

    .. code-block:: yaml

            policies:
              - name: copy-snapshot-east-west
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 7
                    op: le
                actions:
                  - type: copy
                    target_region: us-west-2
                    target_key: target_kms_key
                    encrypted: true
    """

    schema = type_schema(
        'copy',
        target_region={'type': 'string'},
        target_key={'type': 'string'},
        encrypted={'type': 'boolean'},
    )
    permissions = (
        'ec2:CreateTags', 'ec2:CopySnapshot', 'ec2:DescribeSnapshots')

    def validate(self):
        # Copies are encrypted by default; an encrypted copy requires an
        # explicit kms key for the target region.
        if self.data.get('encrypted', True):
            key = self.data.get('target_key')
            if not key:
                raise PolicyValidationError(
                    "Encrypted snapshot copy requires kms key on %s" % (
                        self.manager.data,))
        return self

    def process(self, resources):
        with self.executor_factory(max_workers=2) as w:
            list(w.map(self.process_resource_set, chunks(resources, 20)))

    def process_resource_set(self, resource_set):
        # The client is bound to the *target* region; CopySnapshot is
        # invoked on the destination side of the copy.
        client = self.manager.session_factory(
            region=self.data['target_region']).client('ec2')

        cross_region = self.data['target_region'] != self.manager.config.region

        params = {}
        params['Encrypted'] = self.data.get('encrypted', True)
        if params['Encrypted']:
            params['KmsKeyId'] = self.data['target_key']

        # Work in sub-batches of 5 — presumably sized to the per-region
        # concurrent snapshot copy limit; TODO confirm.
        for snapshot_set in chunks(resource_set, 5):
            for r in snapshot_set:
                snapshot_id = client.copy_snapshot(
                    SourceRegion=self.manager.config.region,
                    SourceSnapshotId=r['SnapshotId'],
                    Description=r.get('Description', ''),
                    **params)['SnapshotId']
                if r.get('Tags'):
                    client.create_tags(
                        Resources=[snapshot_id], Tags=r['Tags'])
                r['c7n:CopiedSnapshot'] = snapshot_id

            # NOTE(review): waits only for cross-region copies of *full*
            # batches (len == 5); a trailing partial batch is not awaited.
            # Looks like deliberate throttle pacing — confirm.
            if not cross_region or len(snapshot_set) < 5:
                continue

            copy_ids = [r['c7n:CopiedSnapshot'] for r in snapshot_set]
            self.log.debug(
                "Waiting on cross-region snapshot copy %s", ",".join(copy_ids))
            waiter = client.get_waiter('snapshot_completed')
            # Poll every minute for up to an hour per batch.
            waiter.config.delay = 60
            waiter.config.max_attempts = 60
            waiter.wait(SnapshotIds=copy_ids)
            self.log.debug(
                "Cross region copy complete %s", ",".join(copy_ids))
554
555
@Snapshot.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
    """Action to set permissions for creating volumes from a snapshot

    Use the 'add' and 'remove' parameters to control which accounts to
    add or remove respectively. The default is to remove any create
    volume permissions granted to other AWS accounts.

    Combining this action with the 'cross-account' filter allows you
    greater control over which accounts will be removed, e.g. using a
    whitelist:

    :example:

    .. code-block:: yaml

            policies:
              - name: ebs-dont-share-cross-account
                resource: ebs-snapshot
                filters:
                  - type: cross-account
                    whitelist:
                      - '112233445566'
                actions:
                  - type: set-permissions
                    remove: matched
    """
    schema = type_schema(
        'set-permissions',
        remove={
            'oneOf': [
                {'enum': ['matched']},
                {'type': 'array', 'items': {
                    'type': 'string', 'minLength': 12, 'maxLength': 12}},
            ]},
        add={
            'type': 'array', 'items': {
                'type': 'string', 'minLength': 12, 'maxLength': 12}},
    )

    permissions = ('ec2:ModifySnapshotAttribute',)

    def validate(self):
        # `remove: matched` depends on annotations produced by the
        # cross-account filter, so require its presence in the policy.
        if self.data.get('remove') == 'matched':
            found = False
            for f in self.manager.iter_filters():
                if isinstance(f, SnapshotCrossAccountAccess):
                    found = True
                    break
            if not found:
                raise PolicyValidationError(
                    "policy:%s filter:%s with matched requires cross-account filter" % (
                        self.manager.ctx.policy.name, self.type))

    def process(self, snapshots):
        client = local_session(self.manager.session_factory).client('ec2')
        for i in snapshots:
            self.process_image(client, i)

    def process_image(self, client, snapshot):
        """Apply add/remove createVolumePermission changes to one snapshot.

        With neither `add` nor `remove` configured, all extant grants are
        reset instead.
        """
        add_accounts = self.data.get('add', [])
        remove_accounts = self.data.get('remove', [])
        if not add_accounts and not remove_accounts:
            return client.reset_snapshot_attribute(
                SnapshotId=snapshot['SnapshotId'], Attribute="createVolumePermission")
        if remove_accounts == 'matched':
            remove_accounts = snapshot.get(
                'c7n:' + SnapshotCrossAccountAccess.annotation_key)

        # Build the removal list without mutating remove_accounts: with
        # `remove: matched` it aliases the cross-account filter's
        # annotation on the resource, which the previous in-place
        # remove('all') silently corrupted.
        remove = [{'UserId': a} for a in remove_accounts if a != 'all']
        if 'all' in remove_accounts:
            remove.append({'Group': 'all'})

        add = [{'UserId': a} for a in add_accounts]

        if remove:
            client.modify_snapshot_attribute(
                SnapshotId=snapshot['SnapshotId'],
                CreateVolumePermission={'Remove': remove},
                OperationType='remove')
        if add:
            client.modify_snapshot_attribute(
                SnapshotId=snapshot['SnapshotId'],
                CreateVolumePermission={'Add': add},
                OperationType='add')
643
644
@resources.register('ebs')
class EBS(QueryResourceManager):
    """Resource manager for EBS volumes (aws.ebs)."""

    class resource_type(TypeInfo):
        service = 'ec2'
        arn_type = 'volume'
        enum_spec = ('describe_volumes', 'Volumes', None)
        name = id = 'VolumeId'
        id_prefix = 'vol-'
        filter_name = 'VolumeIds'
        filter_type = 'list'
        date = 'createTime'
        dimension = 'VolumeId'
        metrics_namespace = 'AWS/EBS'
        cfn_type = config_type = "AWS::EC2::Volume"
        default_report_fields = (
            'VolumeId',
            'Attachments[0].InstanceId',
            'Size',
            'VolumeType',
            'KmsKeyId'
        )

    def get_resources(self, ids, cache=True, augment=True):
        # Serve from cache when allowed; otherwise describe the ids,
        # dropping any id the API rejects and retrying with the remainder.
        if cache:
            cached = self._get_cached_resources(ids)
            if cached is not None:
                return cached
        while ids:
            try:
                return self.source.get_resources(ids)
            except ClientError as e:
                invalid = ErrorHandler.extract_bad_volume(e)
                if not invalid:
                    raise
                ids.remove(invalid)
        return []
683
684
@EBS.filter_registry.register('snapshots')
class EBSSnapshotsFilter(ListItemFilter):
    """
    Filter volumes by all their snapshots.

    :example:

    .. code-block:: yaml

            policies:
              - name: ebs-volumes
                resource: aws.ebs
                filters:
                  - not:
                    - type: snapshots
                      attrs:
                        - type: value
                          key: StartTime
                          value_type: age
                          value: 2
                          op: less-than
    """
    schema = type_schema(
        'snapshots',
        attrs={'$ref': '#/definitions/filters_common/list_item_attrs'},
        count={'type': 'number'},
        count_op={'$ref': '#/definitions/filters_common/comparison_operators'}
    )
    permissions = ('ec2:DescribeSnapshots', )
    item_annotation_key = 'c7n:Snapshots'
    annotate_items = True

    def _process_resources_set(self, client, resources):
        # One describe call covers the whole chunk; group the returned
        # snapshots back onto their owning volumes.
        volume_ids = [r['VolumeId'] for r in resources]
        response = client.describe_snapshots(
            Filters=[{'Name': 'volume-id', 'Values': volume_ids}])
        by_volume = group_by(response.get('Snapshots') or [], 'VolumeId')
        for r in resources:
            r[self.item_annotation_key] = by_volume.get(r['VolumeId']) or []

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('ec2')
        with self.manager.executor_factory(max_workers=3) as w:
            # The volume-id filter accepts up to 200 values per call; a
            # smaller chunk keeps individual responses manageable.
            futures = [
                w.submit(self._process_resources_set, client, batch)
                for batch in chunks(resources, 30)]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception getting snapshots by volume ids \n %s" % (
                            f.exception())
                    )
                    continue
        return super().process(resources, event)

    def get_item_values(self, resource):
        # When annotating, leave the snapshot list on the resource;
        # otherwise strip it off as we read it.
        if self.annotate_items:
            return resource[self.item_annotation_key]
        return resource.pop(self.item_annotation_key)
749
750
@EBS.action_registry.register('post-finding')
class EBSPostFinding(PostFinding):
    """Format an ebs volume as a Security Hub AwsEc2Volume finding."""

    resource_type = 'AwsEc2Volume'

    def format_resource(self, r):
        envelope, payload = self.format_envelope(r)
        details = select_keys(
            r, ['KmsKeyId', 'Size', 'SnapshotId', 'Status', 'CreateTime', 'Encrypted'])
        # Security Hub wants timestamps as iso strings, not datetimes.
        details['CreateTime'] = details['CreateTime'].isoformat()
        self.filter_empty(details)
        attachments = [
            self.filter_empty({
                'AttachTime': a['AttachTime'].isoformat(),
                'InstanceId': a.get('InstanceId'),
                'DeleteOnTermination': a['DeleteOnTermination'],
                'Status': a['State']})
            for a in r.get('Attachments', ())]
        if attachments:
            details['Attachments'] = attachments
        payload.update(details)
        return envelope
771
772
@EBS.action_registry.register('detach')
class VolumeDetach(BaseAction):
    """
    Detach an EBS volume from an Instance.

    If 'Force' Param is True, then we'll do a forceful detach
    of the Volume. The default value for 'Force' is False.

    :example:

    .. code-block:: yaml

            policies:
              - name: detach-ebs-volumes
                resource: ebs
                filters:
                  - VolumeId : volumeid
                actions:
                  - detach


    """

    schema = type_schema('detach', force={'type': 'boolean'})
    permissions = ('ec2:DetachVolume',)

    def process(self, volumes, event=None):
        client = local_session(self.manager.session_factory).client('ec2')
        force = self.data.get('force', False)
        # A volume may have multiple attachments; detach each one.
        for volume in volumes:
            for attachment in volume.get('Attachments', []):
                client.detach_volume(
                    InstanceId=attachment['InstanceId'],
                    VolumeId=attachment['VolumeId'],
                    Force=force)
807
808
@EBS.filter_registry.register('instance')
class AttachedInstanceFilter(ValueFilter):
    """Filter volumes based on filtering on their attached instance

    :example:

    .. code-block:: yaml

            policies:
              - name: instance-ebs-volumes
                resource: ebs
                filters:
                  - type: instance
                    key: tag:Name
                    value: OldManBySea
    """

    schema = type_schema('instance', rinherit=ValueFilter.schema)
    schema_alias = False

    def get_permissions(self):
        return self.manager.get_resource_manager('ec2').get_permissions()

    def process(self, resources, event=None):
        # Unattached volumes can never match an instance predicate.
        total = len(resources)
        attached = [r for r in resources if r.get('Attachments')]
        self.log.debug('Filtered from %d volumes to %d attached volumes' % (
            total, len(attached)))
        self.instance_map = self.get_instance_mapping(attached)
        return list(filter(self, attached))

    def __call__(self, r):
        # Match against the first attachment's instance; annotate on match.
        instance = self.instance_map[r['Attachments'][0]['InstanceId']]
        if self.match(instance):
            r['Instance'] = instance
            set_annotation(r, ANNOTATION_KEY, "instance-%s" % self.k)
            return True

    def get_instance_mapping(self, resources):
        """Map instance id -> instance record for all attached volumes."""
        ids = [r['Attachments'][0]['InstanceId'] for r in resources]
        instances = self.manager.get_resource_manager(
            'ec2').get_resources(ids)
        self.log.debug("Queried %d instances for %d volumes" % (
            len(instances), len(resources)))
        return {i['InstanceId']: i for i in instances}
854
855
@EBS.filter_registry.register('kms-alias')
class KmsKeyAlias(ResourceKmsKeyAlias):
    """Filter volumes by the KMS key alias of their encryption key."""

    def process(self, resources, event=None):
        # Alias matching is implemented by the shared kms filter base.
        return self.get_matching_aliases(resources)
861
862
@EBS.filter_registry.register('fault-tolerant')
class FaultTolerantSnapshots(Filter):
    """
    This filter will return any EBS volume that does/does not have a
    snapshot within the last 7 days. 'Fault-Tolerance' in this instance
    means that, in the event of a failure, the volume can be restored
    from a snapshot with (reasonable) data loss

    .. code-block:: yaml

            policies:
              - name: ebs-volume-tolerance
                resource: ebs
                filters:
                  - type: fault-tolerant
                    tolerant: True
    """
    schema = type_schema('fault-tolerant', tolerant={'type': 'boolean'})
    check_id = 'H7IgTzjTYb'
    permissions = ('support:RefreshTrustedAdvisorCheck',
                   'support:DescribeTrustedAdvisorCheckResult')

    def pull_check_results(self):
        """Volume ids flagged by the Trusted Advisor snapshot-age check."""
        support_region = get_support_region(self.manager)
        client = local_session(self.manager.session_factory).client(
            'support', region_name=support_region)
        client.refresh_trusted_advisor_check(checkId=self.check_id)
        check = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']
        # The volume id is the second metadata column of each flagged row.
        return {r['metadata'][1] for r in check['flaggedResources']}

    def process(self, resources, event=None):
        flagged = self.pull_check_results()
        if self.data.get('tolerant', True):
            return [r for r in resources if r['VolumeId'] not in flagged]
        return [r for r in resources if r['VolumeId'] in flagged]
902
903
@EBS.filter_registry.register('health-event')
class HealthFilter(HealthEventFilter):
    """Filter volumes by personal health dashboard events.

    For ``AWS_EBS_VOLUME_LOST`` the affected volume no longer exists, so
    the resource is reconstructed from AWS Config history rather than
    matched against the live resource set.
    """

    schema_alias = False
    schema = type_schema(
        'health-event',
        types={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['AWS_EBS_DEGRADED_EBS_VOLUME_PERFORMANCE',
                     'AWS_EBS_VOLUME_LOST']}},
        statuses={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['open', 'upcoming', 'closed']
        }})

    permissions = HealthEventFilter.permissions + (
        'config:GetResourceConfigHistory',)

    def process(self, resources, event=None):
        # NOTE(review): assumes the policy supplies `types`; a missing key
        # raises KeyError here — confirm base filter defaults guarantee it.
        if 'AWS_EBS_VOLUME_LOST' not in self.data['types']:
            return super(HealthFilter, self).process(resources, event)
        if not resources:
            return resources

        # The health API endpoint lives in us-east-1.
        client = local_session(self.manager.session_factory).client(
            'health', region_name='us-east-1')
        f = self.get_filter_parameters()
        resource_map = {}

        paginator = client.get_paginator('describe_events')
        events = list(itertools.chain(
            *[p['events']for p in paginator.paginate(filter=f)]))
        entities = self.process_event(client, events)

        # Rebuild each affected (likely deleted) volume once, attaching
        # every health event that references it.
        event_map = {e['arn']: e for e in events}
        config = local_session(self.manager.session_factory).client('config')
        for e in entities:
            rid = e['entityValue']
            if not resource_map.get(rid):
                resource_map[rid] = self.load_resource(config, rid)
            resource_map[rid].setdefault(
                'c7n:HealthEvent', []).append(event_map[e['eventArn']])
        return list(resource_map.values())

    def load_resource(self, config, rid):
        """Reconstruct a volume's config from its AWS Config history."""
        resources_histories = config.get_resource_config_history(
            resourceType='AWS::EC2::Volume',
            resourceId=rid,
            limit=2)['configurationItems']
        for r in resources_histories:
            if r['configurationItemStatus'] != u'ResourceDeleted':
                return camelResource(json.loads(r['configuration']))
        # Both recent history entries show deletion; return a minimal stub.
        return {"VolumeId": rid}
957
958
@EBS.action_registry.register('copy-instance-tags')
class CopyInstanceTags(BaseAction):
    """Copy instance tags to its attached volume.

    Useful for cost allocation to ebs volumes and tracking usage
    info for volumes.

    Mostly useful for volumes not set to delete on termination, which
    are otherwise candidates for garbage collection, copying the
    instance tags gives us more semantic information to determine if
    their useful, as well letting us know the last time the volume
    was actually used.

    :example:

    .. code-block:: yaml

            policies:
              - name: ebs-copy-instance-tags
                resource: ebs
                filters:
                  - type: value
                    key: "Attachments[0].Device"
                    value: not-null
                actions:
                  - type: copy-instance-tags
                    tags:
                      - Name
    """

    schema = type_schema(
        'copy-instance-tags',
        tags={'type': 'array', 'items': {'type': 'string'}})

    def get_permissions(self):
        perms = self.manager.get_resource_manager('ec2').get_permissions()
        perms.append('ec2:CreateTags')
        return perms

    def process(self, volumes):
        # Only attached volumes have an instance to copy tags from.
        vol_count = len(volumes)
        volumes = [v for v in volumes if v['Attachments']]
        if len(volumes) != vol_count:
            self.log.warning(
                "ebs copy tags action implicitly filtered from %d to %d",
                vol_count, len(volumes))
        self.initialize(volumes)
        client = local_session(self.manager.session_factory).client('ec2')
        with self.executor_factory(max_workers=10) as w:
            futures = []
            for instance_set in chunks(sorted(
                    self.instance_map.keys(), reverse=True), size=100):
                futures.append(
                    w.submit(self.process_instance_set, client, instance_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception copying instance tags \n %s" % (
                            f.exception()))

    def initialize(self, volumes):
        """Build instance-id -> volumes and instance-id -> instance maps."""
        instance_vol_map = {}
        for v in volumes:
            instance_vol_map.setdefault(
                v['Attachments'][0]['InstanceId'], []).append(v)
        instance_map = {
            i['InstanceId']: i for i in
            self.manager.get_resource_manager('ec2').get_resources(
                list(instance_vol_map.keys()))}
        self.instance_vol_map = instance_vol_map
        self.instance_map = instance_map

    def process_instance_set(self, client, instance_ids):
        # Log-and-continue per instance so one failure doesn't abort the set.
        for i in instance_ids:
            try:
                self.process_instance_volumes(
                    client,
                    self.instance_map[i],
                    self.instance_vol_map[i])
            except Exception as e:
                self.log.exception(
                    "Error copy instance:%s tags to volumes: %s \n %s",
                    i, ",".join([v['VolumeId'] for v in self.instance_vol_map[i]]),
                    e)

    def process_instance_volumes(self, client, instance, volumes):
        """Apply the computed tag delta to each of an instance's volumes."""
        for v in volumes:
            copy_tags = self.get_volume_tags(v, instance, v['Attachments'][0])
            if not copy_tags:
                continue
            # Can't add more tags than the resource supports could try
            # to delete extant ones inline, else trim-tags action.
            if len(copy_tags) > 40:
                log.warning(
                    "action:%s volume:%s instance:%s too many tags to copy" % (
                        self.__class__.__name__.lower(),
                        v['VolumeId'], instance['InstanceId']))
                continue
            try:
                self.manager.retry(
                    client.create_tags,
                    Resources=[v['VolumeId']],
                    Tags=copy_tags,
                    DryRun=self.manager.config.dryrun)
            except ClientError as e:
                # Volume disappeared out from under us; skip it.
                if e.response['Error']['Code'] == "InvalidVolume.NotFound":
                    continue
                raise

    def get_volume_tags(self, volume, instance, attachment):
        """Compute the tags to copy onto `volume` from `instance`.

        Skips tags already present with the same value and reserved
        ``aws:`` tags; appends LastAttachTime/LastAttachInstance markers
        unless the volume is already marked for this instance.
        """
        only_tags = self.data.get('tags', [])  # specify which tags to copy
        copy_tags = []
        extant_tags = dict([
            (t['Key'], t['Value']) for t in volume.get('Tags', [])])

        for t in instance.get('Tags', ()):
            if only_tags and t['Key'] not in only_tags:
                continue
            if t['Key'] in extant_tags and t['Value'] == extant_tags[t['Key']]:
                continue
            if t['Key'].startswith('aws:'):
                continue
            copy_tags.append(t)

        # Don't add attachment tags if we're already current
        if 'LastAttachInstance' in extant_tags \
           and extant_tags['LastAttachInstance'] == attachment['InstanceId']:
            return copy_tags

        copy_tags.append(
            {'Key': 'LastAttachTime',
             'Value': attachment['AttachTime'].isoformat()})
        copy_tags.append(
            {'Key': 'LastAttachInstance', 'Value': attachment['InstanceId']})
        return copy_tags
1094
1095
@EBS.action_registry.register('encrypt-instance-volumes')
class EncryptInstanceVolumes(BaseAction):
    """Encrypt extant volumes attached to an instance

    - Requires instance restart
    - Not suitable for autoscale groups.

    Multistep process:

    - Stop instance (if running)
    - For each volume
       - Create snapshot
       - Wait on snapshot creation
       - Copy Snapshot to create encrypted snapshot
       - Wait on snapshot creation
       - Create encrypted volume from snapshot
       - Wait on volume creation
       - Delete transient snapshots
       - Detach Unencrypted Volume
       - Attach Encrypted Volume
       - Set DeleteOnTermination instance attribute equal to source volume
    - For each volume
       - Delete unencrypted volume
    - Start Instance (if originally running)
    - For each newly encrypted volume
       - Delete transient tags

    :example:

    .. code-block:: yaml

            policies:
              - name: encrypt-unencrypted-ebs
                resource: ebs
                filters:
                  - Encrypted: false
                actions:
                  - type: encrypt-instance-volumes
                    key: alias/encrypted
    """

    schema = type_schema(
        'encrypt-instance-volumes',
        required=['key'],
        key={'type': 'string'},
        delay={'type': 'number'},
        verbose={'type': 'boolean'})

    permissions = (
        'ec2:CopySnapshot',
        'ec2:CreateSnapshot',
        'ec2:CreateVolume',
        'ec2:DescribeInstances',
        'ec2:DescribeSnapshots',
        'ec2:DescribeVolumes',
        'ec2:StopInstances',
        'ec2:StartInstances',
        'ec2:ModifyInstanceAttribute',
        'ec2:DeleteTags')

    def validate(self):
        # Cache the verbose flag; it is consulted throughout the workflow.
        self.verbose = self.data.get('verbose', False)
        return self

    def process(self, volumes):
        """Group candidate volumes by instance and encrypt them per instance."""
        original_count = len(volumes)
        # NOTE(review): this keeps volumes that are unencrypted OR
        # unattached; an encrypted-but-unattached volume passes this
        # filter and the Attachments[0] access below would then raise
        # IndexError.  Presumably policies pair this action with an
        # attachment/Encrypted filter -- confirm before relying on it.
        volumes = [v for v in volumes
                   if not v['Encrypted'] or not v['Attachments']]
        log.debug(
            "EncryptVolumes filtered from %d to %d "
            " unencrypted attached volumes" % (
                original_count, len(volumes)))

        # Group volumes by instance id
        instance_vol_map = {}
        for v in volumes:
            instance_id = v['Attachments'][0]['InstanceId']
            instance_vol_map.setdefault(instance_id, []).append(v)

        # Query instances to find current instance state
        # cache=False: fresh state is needed to decide stop/start below.
        self.instance_map = {
            i['InstanceId']: i for i in
            self.manager.get_resource_manager('ec2').get_resources(
                list(instance_vol_map.keys()), cache=False)}

        client = local_session(self.manager.session_factory).client('ec2')

        # Process instances concurrently; a failure on one instance is
        # logged and does not abort the others.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for instance_id, vol_set in instance_vol_map.items():
                futures[w.submit(
                    self.process_volume, client,
                    instance_id, vol_set)] = instance_id

            for f in as_completed(futures):
                if f.exception():
                    instance_id = futures[f]
                    log.error(
                        "Exception processing instance:%s volset: %s \n %s" % (
                            instance_id, instance_vol_map[instance_id],
                            f.exception()))

    def process_volume(self, client, instance_id, vol_set):
        """Encrypt attached unencrypted ebs volumes

        vol_set corresponds to all the unencrypted volumes on a given instance.
        """
        key_id = self.get_encryption_key()
        if self.verbose:
            self.log.debug("Using encryption key: %s" % key_id)

        # Only stop and start the instance if it was running.
        # None means the instance is going away entirely -- skip it.
        instance_running = self.stop_instance(client, instance_id)
        if instance_running is None:
            return

        # Create all the volumes before patching the instance.
        paired = []
        for v in vol_set:
            vol_id = self.create_encrypted_volume(client, v, key_id, instance_id)
            paired.append((v, vol_id))

        # Next detach and reattach
        for v, vol_id in paired:
            client.detach_volume(
                InstanceId=instance_id, VolumeId=v['VolumeId'])
            # 5/8/2016 The detach isn't immediately consistent
            time.sleep(self.data.get('delay', 15))
            client.attach_volume(
                InstanceId=instance_id, VolumeId=vol_id,
                Device=v['Attachments'][0]['Device'])

            # Set DeleteOnTermination attribute the same as source volume
            if v['Attachments'][0]['DeleteOnTermination']:
                client.modify_instance_attribute(
                    InstanceId=instance_id,
                    BlockDeviceMappings=[
                        {
                            'DeviceName': v['Attachments'][0]['Device'],
                            'Ebs': {
                                'VolumeId': vol_id,
                                'DeleteOnTermination': True
                            }
                        }
                    ]
                )

        if instance_running:
            client.start_instances(InstanceIds=[instance_id])

        if self.verbose:
            self.log.debug(
                "Deleting unencrypted volumes for: %s" % instance_id)

        # The encrypted replacements are attached; the originals go away.
        for v in vol_set:
            client.delete_volume(VolumeId=v['VolumeId'])

        # Clean-up transient tags on newly created encrypted volume.
        for v, vol_id in paired:
            client.delete_tags(
                Resources=[vol_id],
                Tags=[
                    {'Key': 'maid-crypt-remediation'},
                    {'Key': 'maid-origin-volume'},
                    {'Key': 'maid-instance-device'}
                ]
            )

    def stop_instance(self, client, instance_id):
        """Stop the instance if needed.

        Returns None when the instance is going away (caller should
        skip it), True when it was running and has been stopped, and
        False when it was already stopped.
        """
        instance_state = self.instance_map[instance_id]['State']['Name']
        if instance_state in ('shutting-down', 'terminated'):
            self.log.debug('Skipping terminating instance: %s' % instance_id)
            return
        elif instance_state in ('running',):
            client.stop_instances(InstanceIds=[instance_id])
            self.wait_on_resource(client, instance_id=instance_id)
            return True
        return False

    def create_encrypted_volume(self, ec2, v, key_id, instance_id):
        """Produce an encrypted copy of volume *v* via snapshot + copy.

        Returns the new encrypted volume's id; the transient snapshots
        are deleted before returning.
        """
        # Preserve the source volume's tags for the replacement volume.
        unencrypted_volume_tags = v.get('Tags', [])
        # Create a current snapshot
        results = ec2.create_snapshot(
            VolumeId=v['VolumeId'],
            Description="maid transient snapshot for encryption",)
        transient_snapshots = [results['SnapshotId']]
        # NOTE(review): transient snapshots are tagged
        # 'maid-crypto-remediation' while the replacement volume below
        # is tagged 'maid-crypt-remediation' (no 'o').  The cleanup in
        # process_volume only deletes the volume's keys, so the
        # difference is harmless, but confirm it's deliberate.
        ec2.create_tags(
            Resources=[results['SnapshotId']],
            Tags=[
                {'Key': 'maid-crypto-remediation', 'Value': 'true'}])
        self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])

        # Create encrypted snapshot from current
        results = ec2.copy_snapshot(
            SourceSnapshotId=results['SnapshotId'],
            # Region derived by dropping the AZ letter suffix -- assumes
            # a single-character suffix (e.g. us-east-1a -> us-east-1).
            SourceRegion=v['AvailabilityZone'][:-1],
            Description='maid transient snapshot for encryption',
            Encrypted=True,
            KmsKeyId=key_id)
        transient_snapshots.append(results['SnapshotId'])
        ec2.create_tags(
            Resources=[results['SnapshotId']],
            Tags=[
                {'Key': 'maid-crypto-remediation', 'Value': 'true'}
            ])
        self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])

        # Create encrypted volume, also tag so we can recover
        # NOTE(review): the source volume's Iops/Throughput are not
        # passed to create_volume, so a provisioned-iops source comes
        # back with defaults -- confirm that is acceptable.
        results = ec2.create_volume(
            Size=v['Size'],
            VolumeType=v['VolumeType'],
            SnapshotId=results['SnapshotId'],
            AvailabilityZone=v['AvailabilityZone'],
            Encrypted=True)
        ec2.create_tags(
            Resources=[results['VolumeId']],
            Tags=[
                {'Key': 'maid-crypt-remediation', 'Value': instance_id},
                {'Key': 'maid-origin-volume', 'Value': v['VolumeId']},
                {'Key': 'maid-instance-device',
                 'Value': v['Attachments'][0]['Device']}] + unencrypted_volume_tags)

        # Wait on encrypted volume creation
        self.wait_on_resource(ec2, volume_id=results['VolumeId'])

        # Delete transient snapshots
        for sid in transient_snapshots:
            ec2.delete_snapshot(SnapshotId=sid)
        return results['VolumeId']

    def get_encryption_key(self):
        """Resolve the configured key (id, arn, or alias) to a kms key id."""
        kms = local_session(self.manager.session_factory).client('kms')
        key_alias = self.data.get('key')
        result = kms.describe_key(KeyId=key_alias)
        key_id = result['KeyMetadata']['KeyId']
        return key_id

    def wait_on_resource(self, *args, **kw):
        # Sigh this is dirty, but failure in the middle of our workflow
        # due to overly long resource creation is complex to unwind,
        # with multi-volume instances. Wait up to three times (actual
        # wait time is a per resource type configuration.

        # Note we wait for all resource creation before attempting to
        # patch an instance, so even on resource creation failure, the
        # instance is not modified
        try:
            return self._wait_on_resource(*args, **kw)
        except Exception:
            try:
                return self._wait_on_resource(*args, **kw)
            except Exception:
                # Third attempt; let any failure propagate to the caller.
                return self._wait_on_resource(*args, **kw)

    def _wait_on_resource(
            self, client, snapshot_id=None, volume_id=None, instance_id=None):
        # boto client waiters poll every 15 seconds up to a max 600s (10m)
        if snapshot_id:
            if self.verbose:
                self.log.debug(
                    "Waiting on snapshot completion %s" % snapshot_id)
            waiter = client.get_waiter('snapshot_completed')
            waiter.wait(SnapshotIds=[snapshot_id])
            if self.verbose:
                self.log.debug("Snapshot: %s completed" % snapshot_id)
        elif volume_id:
            if self.verbose:
                self.log.debug("Waiting on volume creation %s" % volume_id)
            waiter = client.get_waiter('volume_available')
            waiter.wait(VolumeIds=[volume_id])
            if self.verbose:
                self.log.debug("Volume: %s created" % volume_id)
        elif instance_id:
            if self.verbose:
                self.log.debug("Waiting on instance stop")
            waiter = client.get_waiter('instance_stopped')
            waiter.wait(InstanceIds=[instance_id])
            if self.verbose:
                self.log.debug("Instance: %s stopped" % instance_id)
1375
1376
@EBS.action_registry.register('snapshot')
class CreateSnapshot(BaseAction):
    """Snapshot an EBS volume.

    Tags may be optionally added to the snapshot during creation.

    - `copy-volume-tags` copies all the tags from the specified
      volume to the corresponding snapshot.
    - `copy-tags` copies the listed tags from each volume
      to the snapshot.  This is mutually exclusive with
      `copy-volume-tags`.
    - `tags` allows new tags to be added to each snapshot.  If
      no tags are specified, then the tag `custodian_snapshot`
      is added.

    The default behavior is `copy-volume-tags: true`.

    :example:

    .. code-block:: yaml

            policies:
              - name: snapshot-volumes
                resource: ebs
                filters:
                  - Attachments: []
                  - State: available
                actions:
                  - type: snapshot
                    copy-tags:
                      - Name
                    tags:
                        custodian_snapshot: True
    """
    schema = type_schema(
        'snapshot',
        **{'copy-tags': {'type': 'array', 'items': {'type': 'string'}},
           'copy-volume-tags': {'type': 'boolean'},
           'tags': {'type': 'object'},
           'description': {'type': 'string'}})
    permissions = ('ec2:CreateSnapshot', 'ec2:CreateTags',)

    def validate(self):
        # The two copy directives are mutually exclusive (see class docs).
        if self.data.get('copy-tags') and 'copy-volume-tags' in self.data:
            raise PolicyValidationError(
                "Can specify copy-tags or copy-volume-tags, not both")
        # Return self for consistency with the other action validators
        # in this module (EncryptInstanceVolumes, ModifyVolume).
        return self

    def process(self, volumes):
        """Create a tagged snapshot for each volume, retrying on throttling."""
        client = local_session(self.manager.session_factory).client('ec2')
        retry = get_retry(['Throttled'], max_attempts=5)
        for vol in volumes:
            vol_id = vol['VolumeId']
            # Tags are applied atomically with the snapshot via
            # TagSpecifications rather than a follow-up create_tags call.
            tags = [{
                'ResourceType': 'snapshot',
                'Tags': self.get_snapshot_tags(vol)
            }]
            retry(self.process_volume, client=client, volume=vol_id, tags=tags)

    def process_volume(self, client, volume, tags):
        """Snapshot a single volume id; a concurrently deleted volume is ignored."""
        description = self.data.get('description')
        if not description:
            description = "Automated snapshot by c7n - %s" % (self.manager.ctx.policy.name)

        try:
            client.create_snapshot(
                VolumeId=volume,
                Description=description,
                TagSpecifications=tags
            )
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidVolume.NotFound':
                return
            raise

    def get_snapshot_tags(self, resource):
        """Merge policy-specified tags with tags copied from the volume.

        `copy-tags` (a list of keys) takes precedence over
        `copy-volume-tags` (a boolean, default True); `tags` supplies
        additional user tags, defaulting to a `custodian_snapshot` marker.
        """
        user_tags = self.data.get('tags', {}) or {'custodian_snapshot': ''}
        copy_tags = self.data.get('copy-tags', []) or self.data.get('copy-volume-tags', True)
        return coalesce_copy_user_tags(resource, copy_tags, user_tags)
1455
1456
@EBS.action_registry.register('delete')
class Delete(BaseAction):
    """Delete an ebs volume.

    If the force boolean is true, we will detach an attached volume
    from an instance. Note this cannot be done for running instance
    root volumes.

    :example:

    .. code-block:: yaml

            policies:
              - name: delete-unattached-volumes
                resource: ebs
                filters:
                  - Attachments: []
                  - State: available
                actions:
                  - delete
    """
    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = (
        'ec2:DetachVolume', 'ec2:DeleteVolume', 'ec2:DescribeVolumes')

    def process(self, volumes):
        client = local_session(self.manager.session_factory).client('ec2')
        # Delete concurrently; a failure on one volume is logged and
        # does not abort the rest.
        with self.executor_factory(max_workers=3) as w:
            futures = {
                w.submit(self.process_volume, client, vol): vol
                for vol in volumes}
            for fut in as_completed(futures):
                err = fut.exception()
                if err is None:
                    continue
                self.log.error(
                    "Error processing volume:%s error:%s",
                    futures[fut]['VolumeId'], err)

    def process_volume(self, client, volume):
        """Delete one volume, force-detaching first when configured."""
        vol_id = volume['VolumeId']
        try:
            if self.data.get('force') and len(volume['Attachments']):
                client.detach_volume(VolumeId=vol_id, Force=True)
                waiter = client.get_waiter('volume_available')
                waiter.wait(VolumeIds=[vol_id])
            self.manager.retry(
                client.delete_volume, VolumeId=vol_id)
        except ClientError as e:
            # Already gone -- treat as success.
            if e.response['Error']['Code'] != "InvalidVolume.NotFound":
                raise
1508
1509
@EBS.filter_registry.register('modifyable')
class ModifyableVolume(Filter):
    """Check if an ebs volume is modifyable online.

    Considerations:
     https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/limitations.html

    Consideration Summary
      - only current instance types are supported (one exception m3.medium)
        Current Generation Instances (2017-2)
        https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#current-gen-instances

      - older magnetic volume types are not supported
      - shrinking volumes is not supported
      - must wait at least 6hrs between modifications to the same volume.
      - volumes must have been attached after nov 1st, 2016.

    See :ref:`modify action <aws.ebs.actions.modify>` for examples.
    """

    schema = type_schema('modifyable')

    # Instance families without elastic-volume support; volumes
    # attached to these cannot be modified online.
    older_generation = {
        'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge',
        'c1.medium', 'c1.xlarge', 'cc2.8xlarge',
        'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge',
        'hi1.4xlarge', 'hs1.8xlarge', 'cg1.4xlarge', 't1.micro',
        # two legs good, not all current gen work either.
        'm3.large', 'm3.xlarge', 'm3.2xlarge'
    }

    permissions = ("ec2:DescribeInstances",)

    def process(self, resources, event=None):
        results = []    # volumes considered modifyable
        filtered = []   # volume ids excluded, bucketed by reason in stats
        attached = []   # volumes still needing an instance-type check
        stats = Counter()
        # Volumes must have been attached after this date for online
        # modification support (see class docstring).
        marker_date = parse_date('2016-11-01T00:00:00+00:00')

        # Filter volumes
        for r in resources:
            # unsupported type
            if r['VolumeType'] == 'standard':
                stats['vol-type'] += 1
                filtered.append(r['VolumeId'])
                continue

            # unattached are easy
            if not r.get('Attachments'):
                results.append(r)
                continue

            # check for attachment date older then supported date
            if r['Attachments'][0]['AttachTime'] < marker_date:
                stats['attach-time'] += 1
                filtered.append(r['VolumeId'])
                continue

            attached.append(r)

        # Filter volumes attached to unsupported instance types
        ec2 = self.manager.get_resource_manager('ec2')
        instance_map = {}
        for v in attached:
            instance_map.setdefault(
                v['Attachments'][0]['InstanceId'], []).append(v)

        instances = ec2.get_resources(list(instance_map.keys()))
        for i in instances:
            if i['InstanceType'] in self.older_generation:
                stats['instance-type'] += len(instance_map[i['InstanceId']])
                filtered.extend([v['VolumeId'] for v in instance_map.pop(i['InstanceId'])])
            else:
                results.extend(instance_map.pop(i['InstanceId']))
        # NOTE(review): volumes whose instance was not returned by
        # get_resources (e.g. recently terminated) remain in
        # instance_map at this point -- they are dropped from both
        # results and stats.  Confirm that is the intended treatment.

        # Filter volumes that are currently under modification
        client = local_session(self.manager.session_factory).client('ec2')
        modifying = set()

        # Re 197 - Max number of filters is 200, and we have to use
        # three additional attribute filters.
        for vol_set in chunks(list(results), 197):
            vol_ids = [v['VolumeId'] for v in vol_set]
            mutating = client.describe_volumes_modifications(
                Filters=[
                    {'Name': 'volume-id',
                     'Values': vol_ids},
                    {'Name': 'modification-state',
                     'Values': ['modifying', 'optimizing', 'failed']}])
            for vm in mutating.get('VolumesModifications', ()):
                stats['vol-mutation'] += 1
                filtered.append(vm['VolumeId'])
                modifying.add(vm['VolumeId'])

        self.log.debug(
            "filtered %d of %d volumes due to %s",
            len(filtered), len(resources), sorted(stats.items()))

        # Exclude in-flight modifications from the final result.
        return [r for r in results if r['VolumeId'] not in modifying]
1610
1611
1612@EBS.action_registry.register('modify')
1613class ModifyVolume(BaseAction):
1614 """Modify an ebs volume online.
1615
1616 **Note this action requires use of modifyable filter**
1617
1618 Intro Blog & Use Cases:
1619 https://aws.amazon.com/blogs/aws/amazon-ebs-update-new-elastic-volumes-change-everything/
1620 Docs:
1621 https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modify-volume.html
1622 Considerations:
1623 https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/limitations.html
1624
1625 :example:
1626
1627 Find under utilized provisioned iops volumes older than a week
1628 and change their type.
1629
1630 .. code-block:: yaml
1631
1632 policies:
1633 - name: ebs-remove-piops
1634 resource: ebs
1635 filters:
1636 - type: value
1637 key: CreateTime
1638 value_type: age
1639 value: 7
1640 op: greater-than
1641 - VolumeType: io1
1642 - type: metrics
1643 name: VolumeConsumedReadWriteOps
1644 statistics: Maximum
1645 value: 100
1646 op: less-than
1647 days: 7
1648 - modifyable
1649 actions:
1650 - type: modify
1651 volume-type: gp2
1652
1653 `iops-percent` and `size-percent` can be used to modify
1654 respectively iops on io1/io2 volumes and volume size.
1655
1656 When converting to io1/io2, `iops-percent` is used to set the iops
1657 allocation for the new volume against the extant value for the old
1658 volume.
1659
1660 :example:
1661
1662 Double storage and quadruple iops for all io1 volumes.
1663
1664 .. code-block:: yaml
1665
1666 policies:
1667 - name: ebs-upsize-piops
1668 resource: ebs
1669 filters:
1670 - VolumeType: io1
1671 - modifyable
1672 actions:
1673 - type: modify
1674 size-percent: 200
1675 iops-percent: 400
1676
1677
1678 **Note** resizing down aka shrinking requires OS and FS support
1679 and potentially additional preparation, else data-loss may occur.
1680 To prevent accidents, shrinking must be explicitly enabled by also
1681 setting `shrink: true` on the action.
1682 """
1683
1684 schema = type_schema(
1685 'modify',
1686 **{'volume-type': {'enum': ['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1']},
1687 'shrink': False,
1688 'size-percent': {'type': 'number'},
1689 'iops-percent': {'type': 'number'}})
1690
1691 # assumptions as its the closest i can find.
1692 permissions = ("ec2:ModifyVolumeAttribute",)
1693
1694 def validate(self):
1695 if 'modifyable' not in self.manager.data.get('filters', ()):
1696 raise PolicyValidationError(
1697 "modify action requires modifyable filter in policy")
1698 if self.data.get('size-percent', 100) < 100 and not self.data.get('shrink', False):
1699 raise PolicyValidationError((
1700 "shrinking volumes requires os/fs support "
1701 "or data-loss may ensue, use `shrink: true` to override"))
1702 return self
1703
1704 def process(self, resources):
1705 client = local_session(self.manager.session_factory).client('ec2')
1706 for resource_set in chunks(resources, 50):
1707 self.process_resource_set(client, resource_set)
1708
    def process_resource_set(self, client, resource_set):
        """Apply volume-type/size-percent/iops-percent changes to each volume.

        Percentages are applied against the volume's current values;
        results are floored at service minimums (100 iops, 1 GiB).
        """
        vtype = self.data.get('volume-type')
        psize = self.data.get('size-percent')
        piops = self.data.get('iops-percent')

        for r in resource_set:
            params = {'VolumeId': r['VolumeId']}
            # Iops is only set when the source or target type is
            # provisioned-iops (io1/io2).
            if piops and ('io1' in (vtype, r['VolumeType']) or
                          'io2' in (vtype, r['VolumeType'])):
                # default here if we're changing to io1
                params['Iops'] = max(int(r.get('Iops', 10) * piops / 100.0), 100)
            if psize:
                params['Size'] = max(int(r['Size'] * psize / 100.0), 1)
            if vtype:
                params['VolumeType'] = vtype
            # retry smooths over api throttling across the batch.
            self.manager.retry(client.modify_volume, **params)