# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from collections import Counter
import logging
import itertools
import json
import time

from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from dateutil.parser import parse as parse_date

from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
    CrossAccountAccessFilter, Filter, AgeFilter, ValueFilter,
    ANNOTATION_KEY)
from c7n.filters.health import HealthEventFilter
from c7n.filters.related import RelatedResourceFilter

from c7n.manager import resources
from c7n.resources.kms import ResourceKmsKeyAlias
from c7n.resources.securityhub import PostFinding
from c7n.query import QueryResourceManager, TypeInfo
from c7n.tags import Tag, coalesce_copy_user_tags
from c7n.utils import (
    camelResource,
    chunks,
    get_retry,
    local_session,
    select_keys,
    set_annotation,
    type_schema,
    QueryParser,
    get_support_region
)
from c7n.resources.ami import AMI

log = logging.getLogger('custodian.ebs')


@resources.register('ebs-snapshot')
class Snapshot(QueryResourceManager):

    class resource_type(TypeInfo):
        service = 'ec2'
        arn_type = 'snapshot'
        enum_spec = (
            'describe_snapshots', 'Snapshots', None)
        id = 'SnapshotId'
        id_prefix = 'snap-'
        filter_name = 'SnapshotIds'
        filter_type = 'list'
        name = 'SnapshotId'
        date = 'StartTime'

        default_report_fields = (
            'SnapshotId',
            'VolumeId',
            'tag:InstanceId',
            'VolumeSize',
            'StartTime',
            'State',
        )

    def resources(self, query=None):
        qfilters = SnapshotQueryParser.parse(self.data.get('query', []))
        query = query or {}
        if qfilters:
            query['Filters'] = qfilters
        if query.get('OwnerIds') is None:
            query['OwnerIds'] = ['self']
        if 'MaxResults' not in query:
            query['MaxResults'] = 1000
        return super(Snapshot, self).resources(query=query)

    def get_resources(self, ids, cache=True, augment=True):
        if cache:
            resources = self._get_cached_resources(ids)
            if resources is not None:
                return resources
        while ids:
            try:
                return self.source.get_resources(ids)
            except ClientError as e:
                bad_snap = ErrorHandler.extract_bad_snapshot(e)
                if bad_snap:
                    ids.remove(bad_snap)
                    continue
                raise
        return []


class ErrorHandler:

    @staticmethod
    def remove_snapshot(rid, resource_set):
        found = None
        for r in resource_set:
            if r['SnapshotId'] == rid:
                found = r
                break
        if found:
            resource_set.remove(found)

    @staticmethod
    def extract_bad_snapshot(e):
        """Handle various client side errors when describing snapshots"""
        msg = e.response['Error']['Message']
        error = e.response['Error']['Code']
        e_snap_id = None
        if error == 'InvalidSnapshot.NotFound':
            e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
            log.warning("Snapshot not found %s" % e_snap_id)
        elif error == 'InvalidSnapshotID.Malformed':
            e_snap_id = msg[msg.find('"') + 1:msg.rfind('"')]
            log.warning("Snapshot id malformed %s" % e_snap_id)
        return e_snap_id

    @staticmethod
    def extract_bad_volume(e):
        """Handle various client side errors when describing volumes"""
        msg = e.response['Error']['Message']
        error = e.response['Error']['Code']
        e_vol_id = None
        if error == 'InvalidVolume.NotFound':
            e_vol_id = msg[msg.find("'") + 1:msg.rfind("'")]
            log.warning("Volume not found %s" % e_vol_id)
        elif error == 'InvalidVolumeID.Malformed':
            e_vol_id = msg[msg.find('"') + 1:msg.rfind('"')]
            log.warning("Volume id malformed %s" % e_vol_id)
        return e_vol_id
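

# A minimal usage sketch for the extractors above, assuming the botocore
# error message formats shown (the exact wording comes from the EC2 API and
# may vary; the ids here are hypothetical):
#
#   from botocore.exceptions import ClientError
#   err = ClientError(
#       {'Error': {'Code': 'InvalidSnapshot.NotFound',
#                  'Message': "The snapshot 'snap-0123456789abcdef0' does not exist."}},
#       'DescribeSnapshots')
#   assert ErrorHandler.extract_bad_snapshot(err) == 'snap-0123456789abcdef0'
#
# Callers such as Snapshot.get_resources drop the extracted id and retry the
# describe call, so one bad id does not fail the whole batch.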


class SnapshotQueryParser(QueryParser):

    QuerySchema = {
        'description': str,
        'owner-alias': ('amazon', 'amazon-marketplace', 'microsoft'),
        'owner-id': str,
        'progress': str,
        'snapshot-id': str,
        'start-time': str,
        'status': ('pending', 'completed', 'error'),
        'tag': str,
        'tag-key': str,
        'volume-id': str,
        'volume-size': str,
    }

    type_name = 'EBS'
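

# A minimal sketch of the policy `query` syntax this parser validates and
# that Snapshot.resources translates into server-side describe filters
# (hypothetical policy name; each entry maps a QuerySchema key to a value):
#
#   policies:
#     - name: completed-snapshots
#       resource: ebs-snapshot
#       query:
#         - status: completed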


@Snapshot.action_registry.register('tag')
class SnapshotTag(Tag):

    permissions = ('ec2:CreateTags',)

    def process_resource_set(self, client, resource_set, tags):
        while resource_set:
            try:
                return super(SnapshotTag, self).process_resource_set(
                    client, resource_set, tags)
            except ClientError as e:
                bad_snap = ErrorHandler.extract_bad_snapshot(e)
                if bad_snap:
                    ErrorHandler.remove_snapshot(bad_snap, resource_set)
                    continue
                raise


@Snapshot.filter_registry.register('age')
class SnapshotAge(AgeFilter):
    """EBS Snapshot Age Filter

    Filters an EBS snapshot based on the age of the snapshot (in days)

    :example:

    .. code-block:: yaml

        policies:
          - name: ebs-snapshots-week-old
            resource: ebs-snapshot
            filters:
              - type: age
                days: 7
                op: ge
    """

    schema = type_schema(
        'age',
        days={'type': 'number'},
        op={'$ref': '#/definitions/filters_common/comparison_operators'})
    date_attribute = 'StartTime'


def _filter_ami_snapshots(self, snapshots):
    if not self.data.get('value', True):
        return snapshots
    # Get a listing of all AMI snapshots (trying the cache first) and
    # compare the resources against that list. This also populates the cache.
    amis = self.manager.get_resource_manager('ami').resources()
    ami_snaps = []
    for i in amis:
        for dev in i.get('BlockDeviceMappings'):
            if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
                ami_snaps.append(dev['Ebs']['SnapshotId'])
    matches = []
    for snap in snapshots:
        if snap['SnapshotId'] not in ami_snaps:
            matches.append(snap)
    return matches


@Snapshot.filter_registry.register('cross-account')
class SnapshotCrossAccountAccess(CrossAccountAccessFilter):

    permissions = ('ec2:DescribeSnapshotAttribute',)

    def process(self, resources, event=None):
        self.accounts = self.get_accounts()
        results = []
        client = local_session(self.manager.session_factory).client('ec2')
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for resource_set in chunks(resources, 50):
                futures.append(w.submit(
                    self.process_resource_set, client, resource_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception checking cross account access \n %s" % (
                            f.exception()))
                    continue
                results.extend(f.result())
        return results

    def process_resource_set(self, client, resource_set):
        results = []
        everyone_only = self.data.get('everyone_only', False)
        for r in resource_set:
            attrs = self.manager.retry(
                client.describe_snapshot_attribute,
                SnapshotId=r['SnapshotId'],
                Attribute='createVolumePermission')['CreateVolumePermissions']
            shared_accounts = set()
            if everyone_only:
                for g in attrs:
                    if g.get('Group') == 'all':
                        shared_accounts = {g.get('Group')}
            else:
                shared_accounts = {
                    g.get('Group') or g.get('UserId') for g in attrs}
            delta_accounts = shared_accounts.difference(self.accounts)
            if delta_accounts:
                r['c7n:CrossAccountViolations'] = list(delta_accounts)
                results.append(r)
        return results
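

# Sketch of the attribute payload the cross-account filter inspects, assuming
# a snapshot shared with one hypothetical account and with everyone:
#
#   {'CreateVolumePermissions': [{'UserId': '112233445566'}, {'Group': 'all'}]}
#
# Any UserId/Group not present in the filter's configured account whitelist is
# recorded on the resource under 'c7n:CrossAccountViolations'.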


@Snapshot.filter_registry.register('unused')
class SnapshotUnusedFilter(Filter):
    """Filters snapshots based on usage

    true: snapshot is not used by launch-template, launch-config, or ami.

    false: snapshot is being used by launch-template, launch-config, or ami.

    :example:

    .. code-block:: yaml

        policies:
          - name: snapshot-unused
            resource: ebs-snapshot
            filters:
              - type: unused
                value: true
    """

    schema = type_schema('unused', value={'type': 'boolean'})

    def get_permissions(self):
        return list(itertools.chain(*[
            self.manager.get_resource_manager(m).get_permissions()
            for m in ('asg', 'launch-config', 'ami')]))

    def _pull_asg_snapshots(self):
        asgs = self.manager.get_resource_manager('asg').resources()
        snap_ids = set()
        lcfgs = set(a['LaunchConfigurationName'] for a in asgs if 'LaunchConfigurationName' in a)
        lcfg_mgr = self.manager.get_resource_manager('launch-config')

        if lcfgs:
            for lc in lcfg_mgr.resources():
                for b in lc.get('BlockDeviceMappings'):
                    if 'Ebs' in b and 'SnapshotId' in b['Ebs']:
                        snap_ids.add(b['Ebs']['SnapshotId'])

        tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
        for tversion in tmpl_mgr.get_resources(
                list(tmpl_mgr.get_asg_templates(asgs).keys())):
            for bd in tversion['LaunchTemplateData'].get('BlockDeviceMappings', ()):
                if 'Ebs' in bd and 'SnapshotId' in bd['Ebs']:
                    snap_ids.add(bd['Ebs']['SnapshotId'])
        return snap_ids

    def _pull_ami_snapshots(self):
        amis = self.manager.get_resource_manager('ami').resources()
        ami_snaps = set()
        for i in amis:
            for dev in i.get('BlockDeviceMappings'):
                if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
                    ami_snaps.add(dev['Ebs']['SnapshotId'])
        return ami_snaps

    def process(self, resources, event=None):
        snaps = self._pull_asg_snapshots().union(self._pull_ami_snapshots())
        if self.data.get('value', True):
            return [r for r in resources if r['SnapshotId'] not in snaps]
        return [r for r in resources if r['SnapshotId'] in snaps]


@Snapshot.filter_registry.register('skip-ami-snapshots')
class SnapshotSkipAmiSnapshots(Filter):
    """Filter to remove snapshots of AMIs from results

    This filter is 'true' by default.

    :example:

    implicit with no parameters, 'true' by default

    .. code-block:: yaml

        policies:
          - name: delete-ebs-stale-snapshots
            resource: ebs-snapshot
            filters:
              - type: age
                days: 28
                op: ge
              - skip-ami-snapshots

    :example:

    explicit with parameter

    .. code-block:: yaml

        policies:
          - name: delete-snapshots
            resource: ebs-snapshot
            filters:
              - type: age
                days: 28
                op: ge
              - type: skip-ami-snapshots
                value: false
    """

    schema = type_schema('skip-ami-snapshots', value={'type': 'boolean'})

    def get_permissions(self):
        return AMI(self.manager.ctx, {}).get_permissions()

    def process(self, snapshots, event=None):
        resources = _filter_ami_snapshots(self, snapshots)
        return resources


@Snapshot.filter_registry.register('volume')
class SnapshotVolumeFilter(RelatedResourceFilter):
    """Filter EBS snapshots by their volume attributes.

    .. code-block:: yaml

        policies:
          - name: snapshot-with-no-volume
            description: Find any snapshots that do not have a corresponding volume.
            resource: aws.ebs-snapshot
            filters:
              - type: volume
                key: VolumeId
                value: absent
          - name: find-snapshots-from-volume
            resource: aws.ebs-snapshot
            filters:
              - type: volume
                key: VolumeId
                value: vol-foobarbaz
    """

    RelatedResource = 'c7n.resources.ebs.EBS'
    RelatedIdsExpression = 'VolumeId'
    AnnotationKey = 'Volume'

    schema = type_schema(
        'volume', rinherit=ValueFilter.schema)


@Snapshot.action_registry.register('delete')
class SnapshotDelete(BaseAction):
    """Deletes EBS snapshots

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-stale-snapshots
            resource: ebs-snapshot
            filters:
              - type: age
                days: 28
                op: ge
            actions:
              - delete
    """

    schema = type_schema(
        'delete', **{'skip-ami-snapshots': {'type': 'boolean'}})
    permissions = ('ec2:DeleteSnapshot',)

    def process(self, snapshots):
        self.image_snapshots = set()
        # Be careful with image snapshots: we filter them out by default to
        # keep things safe, albeit we'd get an error anyway if we tried to
        # delete a snapshot associated with an image.
        pre = len(snapshots)
        snapshots = list(filter(None, _filter_ami_snapshots(self, snapshots)))
        post = len(snapshots)
        log.info("Deleting %d snapshots, auto-filtered %d ami-snapshots",
                 post, pre - post)

        client = local_session(self.manager.session_factory).client('ec2')
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for snapshot_set in chunks(reversed(snapshots), size=50):
                futures.append(
                    w.submit(self.process_snapshot_set, client, snapshot_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting snapshot set \n %s" % (
                            f.exception()))
        return snapshots

    def process_snapshot_set(self, client, snapshots_set):
        retry = get_retry((
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'))

        for s in snapshots_set:
            if s['SnapshotId'] in self.image_snapshots:
                continue
            try:
                retry(client.delete_snapshot,
                      SnapshotId=s['SnapshotId'],
                      DryRun=self.manager.config.dryrun)
            except ClientError as e:
                if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
                    continue
                raise


@Snapshot.action_registry.register('copy')
class CopySnapshot(BaseAction):
    """Copy a snapshot across regions

    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html

    :example:

    .. code-block:: yaml

        policies:
          - name: copy-snapshot-east-west
            resource: ebs-snapshot
            filters:
              - type: age
                days: 7
                op: le
            actions:
              - type: copy
                target_region: us-west-2
                target_key: target_kms_key
                encrypted: true
    """

    schema = type_schema(
        'copy',
        target_region={'type': 'string'},
        target_key={'type': 'string'},
        encrypted={'type': 'boolean'},
    )
    permissions = (
        'ec2:CreateTags', 'ec2:CopySnapshot', 'ec2:DescribeSnapshots')

    def validate(self):
        if self.data.get('encrypted', True):
            key = self.data.get('target_key')
            if not key:
                raise PolicyValidationError(
                    "Encrypted snapshot copy requires kms key on %s" % (
                        self.manager.data,))
        return self

    def process(self, resources):
        with self.executor_factory(max_workers=2) as w:
            list(w.map(self.process_resource_set, chunks(resources, 20)))

    def process_resource_set(self, resource_set):
        client = self.manager.session_factory(
            region=self.data['target_region']).client('ec2')

        cross_region = self.data['target_region'] != self.manager.config.region

        params = {}
        params['Encrypted'] = self.data.get('encrypted', True)
        if params['Encrypted']:
            params['KmsKeyId'] = self.data['target_key']

        for snapshot_set in chunks(resource_set, 5):
            for r in snapshot_set:
                snapshot_id = client.copy_snapshot(
                    SourceRegion=self.manager.config.region,
                    SourceSnapshotId=r['SnapshotId'],
                    Description=r.get('Description', ''),
                    **params)['SnapshotId']
                if r.get('Tags'):
                    client.create_tags(
                        Resources=[snapshot_id], Tags=r['Tags'])
                r['c7n:CopiedSnapshot'] = snapshot_id

            if not cross_region or len(snapshot_set) < 5:
                continue

            copy_ids = [r['c7n:CopiedSnapshot'] for r in snapshot_set]
            self.log.debug(
                "Waiting on cross-region snapshot copy %s", ",".join(copy_ids))
            waiter = client.get_waiter('snapshot_completed')
            waiter.config.delay = 60
            waiter.config.max_attempts = 60
            waiter.wait(SnapshotIds=copy_ids)
            self.log.debug(
                "Cross region copy complete %s", ",".join(copy_ids))
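

# Worked timing note for the copy waiter above: with delay=60 and
# max_attempts=60, each cross-region wait is bounded at 60 * 60s = 3600s
# (one hour) per batch of five snapshots.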


@Snapshot.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
    """Action to set permissions for creating volumes from a snapshot

    Use the 'add' and 'remove' parameters to control which accounts to
    add or remove respectively. The default is to remove any create
    volume permissions granted to other AWS accounts.

    Combining this action with the 'cross-account' filter allows you
    greater control over which accounts will be removed, e.g. using a
    whitelist:

    :example:

    .. code-block:: yaml

        policies:
          - name: ebs-dont-share-cross-account
            resource: ebs-snapshot
            filters:
              - type: cross-account
                whitelist:
                  - '112233445566'
            actions:
              - type: set-permissions
                remove: matched
    """
    schema = type_schema(
        'set-permissions',
        remove={
            'oneOf': [
                {'enum': ['matched']},
                {'type': 'array', 'items': {
                    'type': 'string', 'minLength': 12, 'maxLength': 12}},
            ]},
        add={
            'type': 'array', 'items': {
                'type': 'string', 'minLength': 12, 'maxLength': 12}},
    )

    permissions = ('ec2:ModifySnapshotAttribute',)

    def validate(self):
        if self.data.get('remove') == 'matched':
            found = False
            for f in self.manager.iter_filters():
                if isinstance(f, SnapshotCrossAccountAccess):
                    found = True
                    break
            if not found:
                raise PolicyValidationError(
                    "policy:%s filter:%s with matched requires cross-account filter" % (
                        self.manager.ctx.policy.name, self.type))

    def process(self, snapshots):
        client = local_session(self.manager.session_factory).client('ec2')
        for i in snapshots:
            self.process_image(client, i)

    def process_image(self, client, snapshot):
        add_accounts = self.data.get('add', [])
        remove_accounts = self.data.get('remove', [])
        if not add_accounts and not remove_accounts:
            return client.reset_snapshot_attribute(
                SnapshotId=snapshot['SnapshotId'], Attribute="createVolumePermission")
        if remove_accounts == 'matched':
            remove_accounts = snapshot.get(
                'c7n:' + SnapshotCrossAccountAccess.annotation_key)

        remove = []
        remove.extend([{'UserId': a} for a in remove_accounts if a != 'all'])
        if 'all' in remove_accounts:
            remove.append({'Group': 'all'})
            remove_accounts.remove('all')

        add = [{'UserId': a} for a in add_accounts]

        if remove:
            client.modify_snapshot_attribute(
                SnapshotId=snapshot['SnapshotId'],
                CreateVolumePermission={'Remove': remove},
                OperationType='remove')
        if add:
            client.modify_snapshot_attribute(
                SnapshotId=snapshot['SnapshotId'],
                CreateVolumePermission={'Add': add},
                OperationType='add')


@resources.register('ebs')
class EBS(QueryResourceManager):

    class resource_type(TypeInfo):
        service = 'ec2'
        arn_type = 'volume'
        enum_spec = ('describe_volumes', 'Volumes', None)
        name = id = 'VolumeId'
        id_prefix = 'vol-'
        filter_name = 'VolumeIds'
        filter_type = 'list'
        date = 'createTime'
        dimension = 'VolumeId'
        metrics_namespace = 'AWS/EBS'
        cfn_type = config_type = "AWS::EC2::Volume"
        default_report_fields = (
            'VolumeId',
            'Attachments[0].InstanceId',
            'Size',
            'VolumeType',
            'KmsKeyId'
        )

    def get_resources(self, ids, cache=True, augment=True):
        if cache:
            resources = self._get_cached_resources(ids)
            if resources is not None:
                return resources
        while ids:
            try:
                return self.source.get_resources(ids)
            except ClientError as e:
                bad_vol = ErrorHandler.extract_bad_volume(e)
                if bad_vol:
                    ids.remove(bad_vol)
                    continue
                raise
        return []


@EBS.action_registry.register('post-finding')
class EBSPostFinding(PostFinding):

    resource_type = 'AwsEc2Volume'

    def format_resource(self, r):
        envelope, payload = self.format_envelope(r)
        details = select_keys(
            r, ['KmsKeyId', 'Size', 'SnapshotId', 'Status', 'CreateTime', 'Encrypted'])
        details['CreateTime'] = details['CreateTime'].isoformat()
        self.filter_empty(details)
        for attach in r.get('Attachments', ()):
            details.setdefault('Attachments', []).append(
                self.filter_empty({
                    'AttachTime': attach['AttachTime'].isoformat(),
                    'InstanceId': attach.get('InstanceId'),
                    'DeleteOnTermination': attach['DeleteOnTermination'],
                    'Status': attach['State']}))
        payload.update(details)
        return envelope


@EBS.action_registry.register('detach')
class VolumeDetach(BaseAction):
    """Detach an EBS volume from an instance.

    If the `force` parameter is true, the volume is forcefully detached.
    The default value for `force` is false.

    :example:

    .. code-block:: yaml

        policies:
          - name: detach-ebs-volumes
            resource: ebs
            filters:
              - VolumeId: volumeid
            actions:
              - detach
    """

    schema = type_schema('detach', force={'type': 'boolean'})
    permissions = ('ec2:DetachVolume',)

    def process(self, volumes, event=None):
        client = local_session(self.manager.session_factory).client('ec2')

        for vol in volumes:
            for attachment in vol.get('Attachments', []):
                client.detach_volume(InstanceId=attachment['InstanceId'],
                                     VolumeId=attachment['VolumeId'],
                                     Force=self.data.get('force', False))


@EBS.filter_registry.register('instance')
class AttachedInstanceFilter(ValueFilter):
    """Filter volumes based on the attributes of their attached instance

    :example:

    .. code-block:: yaml

        policies:
          - name: instance-ebs-volumes
            resource: ebs
            filters:
              - type: instance
                key: tag:Name
                value: OldManBySea
    """

    schema = type_schema('instance', rinherit=ValueFilter.schema)
    schema_alias = False

    def get_permissions(self):
        return self.manager.get_resource_manager('ec2').get_permissions()

    def process(self, resources, event=None):
        original_count = len(resources)
        resources = [r for r in resources if r.get('Attachments')]
        self.log.debug('Filtered from %d volumes to %d attached volumes' % (
            original_count, len(resources)))
        self.instance_map = self.get_instance_mapping(resources)
        return list(filter(self, resources))

    def __call__(self, r):
        instance = self.instance_map[r['Attachments'][0]['InstanceId']]
        if self.match(instance):
            r['Instance'] = instance
            set_annotation(r, ANNOTATION_KEY, "instance-%s" % self.k)
            return True

    def get_instance_mapping(self, resources):
        instance_ids = [r['Attachments'][0]['InstanceId'] for r in resources]
        instances = self.manager.get_resource_manager(
            'ec2').get_resources(instance_ids)
        self.log.debug("Queried %d instances for %d volumes" % (
            len(instances), len(resources)))
        return {i['InstanceId']: i for i in instances}


@EBS.filter_registry.register('kms-alias')
class KmsKeyAlias(ResourceKmsKeyAlias):

    def process(self, resources, event=None):
        return self.get_matching_aliases(resources)


@EBS.filter_registry.register('fault-tolerant')
class FaultTolerantSnapshots(Filter):
    """Check whether an EBS volume has a recent snapshot.

    This filter returns EBS volumes that do (`tolerant: true`) or do not
    (`tolerant: false`) have a snapshot within the last 7 days.
    'Fault-Tolerance' in this instance means that, in the event of a
    failure, the volume can be restored from a snapshot with (reasonable)
    data loss.

    .. code-block:: yaml

        policies:
          - name: ebs-volume-tolerance
            resource: ebs
            filters:
              - type: fault-tolerant
                tolerant: True
    """
    schema = type_schema('fault-tolerant', tolerant={'type': 'boolean'})
    check_id = 'H7IgTzjTYb'
    permissions = ('support:RefreshTrustedAdvisorCheck',
                   'support:DescribeTrustedAdvisorCheckResult')

    def pull_check_results(self):
        result = set()
        support_region = get_support_region(self.manager)
        client = local_session(self.manager.session_factory).client(
            'support', region_name=support_region)
        client.refresh_trusted_advisor_check(checkId=self.check_id)
        results = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']
        for r in results['flaggedResources']:
            result.update([r['metadata'][1]])
        return result

    def process(self, resources, event=None):
        flagged = self.pull_check_results()
        if self.data.get('tolerant', True):
            return [r for r in resources if r['VolumeId'] not in flagged]
        return [r for r in resources if r['VolumeId'] in flagged]
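

# Sketch of the Trusted Advisor result consumed above, assuming the
# 'H7IgTzjTYb' (EBS snapshot) check format, where metadata[1] carries the
# volume id (the region and volume id shown are hypothetical):
#
#   {'result': {'flaggedResources': [
#       {'metadata': ['us-east-1', 'vol-0123456789abcdef0', ...]}]}}
#
# Volumes flagged by the check lack a recent snapshot, so `tolerant: true`
# keeps only volumes absent from this set.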


@EBS.filter_registry.register('health-event')
class HealthFilter(HealthEventFilter):

    schema_alias = False
    schema = type_schema(
        'health-event',
        types={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['AWS_EBS_DEGRADED_EBS_VOLUME_PERFORMANCE',
                     'AWS_EBS_VOLUME_LOST']}},
        statuses={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['open', 'upcoming', 'closed']
        }})

    permissions = HealthEventFilter.permissions + (
        'config:GetResourceConfigHistory',)

    def process(self, resources, event=None):
        if 'AWS_EBS_VOLUME_LOST' not in self.data['types']:
            return super(HealthFilter, self).process(resources, event)
        if not resources:
            return resources

        client = local_session(self.manager.session_factory).client(
            'health', region_name='us-east-1')
        f = self.get_filter_parameters()
        resource_map = {}

        paginator = client.get_paginator('describe_events')
        events = list(itertools.chain(
            *[p['events'] for p in paginator.paginate(filter=f)]))
        entities = self.process_event(client, events)

        event_map = {e['arn']: e for e in events}
        config = local_session(self.manager.session_factory).client('config')
        for e in entities:
            rid = e['entityValue']
            if not resource_map.get(rid):
                resource_map[rid] = self.load_resource(config, rid)
            resource_map[rid].setdefault(
                'c7n:HealthEvent', []).append(event_map[e['eventArn']])
        return list(resource_map.values())

    def load_resource(self, config, rid):
        resources_histories = config.get_resource_config_history(
            resourceType='AWS::EC2::Volume',
            resourceId=rid,
            limit=2)['configurationItems']
        for r in resources_histories:
            if r['configurationItemStatus'] != u'ResourceDeleted':
                return camelResource(json.loads(r['configuration']))
        return {"VolumeId": rid}


@EBS.action_registry.register('copy-instance-tags')
class CopyInstanceTags(BaseAction):
    """Copy instance tags to its attached volume.

    Useful for cost allocation to ebs volumes and tracking usage
    info for volumes.

    Mostly useful for volumes not set to delete on termination, which
    are otherwise candidates for garbage collection. Copying the
    instance tags gives us more semantic information to determine if
    they're useful, as well as letting us know the last time the volume
    was actually used.

    :example:

    .. code-block:: yaml

        policies:
          - name: ebs-copy-instance-tags
            resource: ebs
            filters:
              - type: value
                key: "Attachments[0].Device"
                value: not-null
            actions:
              - type: copy-instance-tags
                tags:
                  - Name
    """

    schema = type_schema(
        'copy-instance-tags',
        tags={'type': 'array', 'items': {'type': 'string'}})

    def get_permissions(self):
        perms = self.manager.get_resource_manager('ec2').get_permissions()
        perms.append('ec2:CreateTags')
        return perms

    def process(self, volumes):
        vol_count = len(volumes)
        volumes = [v for v in volumes if v['Attachments']]
        if len(volumes) != vol_count:
            self.log.warning(
                "ebs copy tags action implicitly filtered from %d to %d",
                vol_count, len(volumes))
        self.initialize(volumes)
        client = local_session(self.manager.session_factory).client('ec2')
        with self.executor_factory(max_workers=10) as w:
            futures = []
            for instance_set in chunks(sorted(
                    self.instance_map.keys(), reverse=True), size=100):
                futures.append(
                    w.submit(self.process_instance_set, client, instance_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception copying instance tags \n %s" % (
                            f.exception()))

    def initialize(self, volumes):
        instance_vol_map = {}
        for v in volumes:
            instance_vol_map.setdefault(
                v['Attachments'][0]['InstanceId'], []).append(v)
        instance_map = {
            i['InstanceId']: i for i in
            self.manager.get_resource_manager('ec2').get_resources(
                list(instance_vol_map.keys()))}
        self.instance_vol_map = instance_vol_map
        self.instance_map = instance_map

    def process_instance_set(self, client, instance_ids):
        for i in instance_ids:
            try:
                self.process_instance_volumes(
                    client,
                    self.instance_map[i],
                    self.instance_vol_map[i])
            except Exception as e:
                self.log.exception(
                    "Error copying instance:%s tags to volumes: %s \n %s",
                    i, ",".join([v['VolumeId'] for v in self.instance_vol_map[i]]),
                    e)

    def process_instance_volumes(self, client, instance, volumes):
        for v in volumes:
            copy_tags = self.get_volume_tags(v, instance, v['Attachments'][0])
            if not copy_tags:
                continue
            # Can't add more tags than the resource supports; we could try
            # to delete extant ones inline, else use the trim-tags action.
            if len(copy_tags) > 40:
                log.warning(
                    "action:%s volume:%s instance:%s too many tags to copy" % (
                        self.__class__.__name__.lower(),
                        v['VolumeId'], instance['InstanceId']))
                continue
            try:
                self.manager.retry(
                    client.create_tags,
                    Resources=[v['VolumeId']],
                    Tags=copy_tags,
                    DryRun=self.manager.config.dryrun)
            except ClientError as e:
                if e.response['Error']['Code'] == "InvalidVolume.NotFound":
                    continue
                raise

    def get_volume_tags(self, volume, instance, attachment):
        only_tags = self.data.get('tags', [])  # specify which tags to copy
        copy_tags = []
        extant_tags = dict([
            (t['Key'], t['Value']) for t in volume.get('Tags', [])])

        for t in instance.get('Tags', ()):
            if only_tags and t['Key'] not in only_tags:
                continue
            if t['Key'] in extant_tags and t['Value'] == extant_tags[t['Key']]:
                continue
            if t['Key'].startswith('aws:'):
                continue
            copy_tags.append(t)

        # Don't add attachment tags if we're already current
        if 'LastAttachInstance' in extant_tags \
           and extant_tags['LastAttachInstance'] == attachment['InstanceId']:
            return copy_tags

        copy_tags.append(
            {'Key': 'LastAttachTime',
             'Value': attachment['AttachTime'].isoformat()})
        copy_tags.append(
            {'Key': 'LastAttachInstance', 'Value': attachment['InstanceId']})
        return copy_tags
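

# Worked sketch of get_volume_tags above (hypothetical instance and volume):
# for an instance tagged {'Name': 'web-1', 'aws:autoscaling:groupName': 'g'}
# and a volume with no extant tags, with the policy restricted to
# `tags: [Name]`, the copied set is:
#
#   [{'Key': 'Name', 'Value': 'web-1'},
#    {'Key': 'LastAttachTime', 'Value': '<attach-time ISO8601>'},
#    {'Key': 'LastAttachInstance', 'Value': 'i-0123456789abcdef0'}]
#
# aws:* tags are always skipped since they are reserved and cannot be set.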


@EBS.action_registry.register('encrypt-instance-volumes')
class EncryptInstanceVolumes(BaseAction):
    """Encrypt extant volumes attached to an instance

    - Requires instance restart
    - Not suitable for autoscale groups.

    Multistep process:

    - Stop instance (if running)
    - For each volume
       - Create snapshot
       - Wait on snapshot creation
       - Copy Snapshot to create encrypted snapshot
       - Wait on snapshot creation
       - Create encrypted volume from snapshot
       - Wait on volume creation
       - Delete transient snapshots
       - Detach Unencrypted Volume
       - Attach Encrypted Volume
       - Set DeleteOnTermination instance attribute equal to source volume
    - For each volume
       - Delete unencrypted volume
    - Start Instance (if originally running)
    - For each newly encrypted volume
       - Delete transient tags

    :example:

    .. code-block:: yaml

        policies:
          - name: encrypt-unencrypted-ebs
            resource: ebs
            filters:
              - Encrypted: false
            actions:
              - type: encrypt-instance-volumes
                key: alias/encrypted
    """

    schema = type_schema(
        'encrypt-instance-volumes',
        required=['key'],
        key={'type': 'string'},
        delay={'type': 'number'},
        verbose={'type': 'boolean'})

    permissions = (
        'ec2:CopySnapshot',
        'ec2:CreateSnapshot',
        'ec2:CreateVolume',
        'ec2:DescribeInstances',
        'ec2:DescribeSnapshots',
        'ec2:DescribeVolumes',
        'ec2:StopInstances',
        'ec2:StartInstances',
        'ec2:ModifyInstanceAttribute',
        'ec2:DeleteTags')

    def validate(self):
        self.verbose = self.data.get('verbose', False)
        return self

    def process(self, volumes):
        original_count = len(volumes)
        volumes = [v for v in volumes
                   if not v['Encrypted'] or not v['Attachments']]
        log.debug(
            "EncryptVolumes filtered from %d to %d "
            " unencrypted attached volumes" % (
                original_count, len(volumes)))

        # Group volumes by instance id
        instance_vol_map = {}
        for v in volumes:
            instance_id = v['Attachments'][0]['InstanceId']
            instance_vol_map.setdefault(instance_id, []).append(v)

        # Query instances to find current instance state
        self.instance_map = {
            i['InstanceId']: i for i in
            self.manager.get_resource_manager('ec2').get_resources(
                list(instance_vol_map.keys()), cache=False)}

        client = local_session(self.manager.session_factory).client('ec2')

        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for instance_id, vol_set in instance_vol_map.items():
                futures[w.submit(
                    self.process_volume, client,
                    instance_id, vol_set)] = instance_id

            for f in as_completed(futures):
                if f.exception():
                    instance_id = futures[f]
                    log.error(
                        "Exception processing instance:%s volset: %s \n %s" % (
                            instance_id, instance_vol_map[instance_id],
                            f.exception()))

    def process_volume(self, client, instance_id, vol_set):
        """Encrypt attached unencrypted ebs volumes

        vol_set corresponds to all the unencrypted volumes on a given instance.
        """
        key_id = self.get_encryption_key()
        if self.verbose:
            self.log.debug("Using encryption key: %s" % key_id)

        # Only stop and start the instance if it was running.
        instance_running = self.stop_instance(client, instance_id)
        if instance_running is None:
            return

        # Create all the volumes before patching the instance.
        paired = []
        for v in vol_set:
            vol_id = self.create_encrypted_volume(client, v, key_id, instance_id)
            paired.append((v, vol_id))

        # Next detach and reattach
        for v, vol_id in paired:
            client.detach_volume(
                InstanceId=instance_id, VolumeId=v['VolumeId'])
            # 5/8/2016 The detach isn't immediately consistent
            time.sleep(self.data.get('delay', 15))
            client.attach_volume(
                InstanceId=instance_id, VolumeId=vol_id,
                Device=v['Attachments'][0]['Device'])

            # Set DeleteOnTermination attribute the same as source volume
            if v['Attachments'][0]['DeleteOnTermination']:
                client.modify_instance_attribute(
                    InstanceId=instance_id,
                    BlockDeviceMappings=[
                        {
                            'DeviceName': v['Attachments'][0]['Device'],
                            'Ebs': {
                                'VolumeId': vol_id,
                                'DeleteOnTermination': True
                            }
                        }
                    ]
                )

        if instance_running:
            client.start_instances(InstanceIds=[instance_id])

        if self.verbose:
            self.log.debug(
                "Deleting unencrypted volumes for: %s" % instance_id)

        for v in vol_set:
            client.delete_volume(VolumeId=v['VolumeId'])

        # Clean-up transient tags on newly created encrypted volume.
        for v, vol_id in paired:
            client.delete_tags(
                Resources=[vol_id],
                Tags=[
                    {'Key': 'maid-crypt-remediation'},
                    {'Key': 'maid-origin-volume'},
                    {'Key': 'maid-instance-device'}
                ]
            )

    def stop_instance(self, client, instance_id):
        instance_state = self.instance_map[instance_id]['State']['Name']
        if instance_state in ('shutting-down', 'terminated'):
            self.log.debug('Skipping terminating instance: %s' % instance_id)
            return
        elif instance_state in ('running',):
            client.stop_instances(InstanceIds=[instance_id])
            self.wait_on_resource(client, instance_id=instance_id)
            return True
        return False

    def create_encrypted_volume(self, ec2, v, key_id, instance_id):
        unencrypted_volume_tags = v.get('Tags', [])
        # Create a current snapshot
        results = ec2.create_snapshot(
            VolumeId=v['VolumeId'],
            Description="maid transient snapshot for encryption",)
        transient_snapshots = [results['SnapshotId']]
        ec2.create_tags(
            Resources=[results['SnapshotId']],
            Tags=[
                {'Key': 'maid-crypto-remediation', 'Value': 'true'}])
        self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])

        # Create encrypted snapshot from current
        results = ec2.copy_snapshot(
            SourceSnapshotId=results['SnapshotId'],
            SourceRegion=v['AvailabilityZone'][:-1],
            Description='maid transient snapshot for encryption',
            Encrypted=True,
            KmsKeyId=key_id)
        transient_snapshots.append(results['SnapshotId'])
        ec2.create_tags(
            Resources=[results['SnapshotId']],
            Tags=[
                {'Key': 'maid-crypto-remediation', 'Value': 'true'}
            ])
        self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])

        # Create encrypted volume, also tag so we can recover
        results = ec2.create_volume(
            Size=v['Size'],
            VolumeType=v['VolumeType'],
            SnapshotId=results['SnapshotId'],
            AvailabilityZone=v['AvailabilityZone'],
            Encrypted=True)
        ec2.create_tags(
            Resources=[results['VolumeId']],
            Tags=[
                {'Key': 'maid-crypt-remediation', 'Value': instance_id},
                {'Key': 'maid-origin-volume', 'Value': v['VolumeId']},
                {'Key': 'maid-instance-device',
                 'Value': v['Attachments'][0]['Device']}] + unencrypted_volume_tags)

        # Wait on encrypted volume creation
        self.wait_on_resource(ec2, volume_id=results['VolumeId'])

        # Delete transient snapshots
        for sid in transient_snapshots:
            ec2.delete_snapshot(SnapshotId=sid)
        return results['VolumeId']

    def get_encryption_key(self):
        kms = local_session(self.manager.session_factory).client('kms')
        key_alias = self.data.get('key')
        result = kms.describe_key(KeyId=key_alias)
        key_id = result['KeyMetadata']['KeyId']
        return key_id

    def wait_on_resource(self, *args, **kw):
        # Sigh this is dirty, but failure in the middle of our workflow
        # due to overly long resource creation is complex to unwind,
        # with multi-volume instances. Wait up to three times (actual
        # wait time is a per-resource-type configuration).

        # Note we wait for all resource creation before attempting to
        # patch an instance, so even on resource creation failure, the
        # instance is not modified
        try:
            return self._wait_on_resource(*args, **kw)
        except Exception:
            try:
                return self._wait_on_resource(*args, **kw)
            except Exception:
                return self._wait_on_resource(*args, **kw)

    def _wait_on_resource(
            self, client, snapshot_id=None, volume_id=None, instance_id=None):
        # boto client waiters poll every 15 seconds up to a max 600s (10m)
        if snapshot_id:
            if self.verbose:
                self.log.debug(
                    "Waiting on snapshot completion %s" % snapshot_id)
            waiter = client.get_waiter('snapshot_completed')
            waiter.wait(SnapshotIds=[snapshot_id])
            if self.verbose:
                self.log.debug("Snapshot: %s completed" % snapshot_id)
        elif volume_id:
            if self.verbose:
                self.log.debug("Waiting on volume creation %s" % volume_id)
            waiter = client.get_waiter('volume_available')
            waiter.wait(VolumeIds=[volume_id])
            if self.verbose:
                self.log.debug("Volume: %s created" % volume_id)
        elif instance_id:
            if self.verbose:
                self.log.debug("Waiting on instance stop")
            waiter = client.get_waiter('instance_stopped')
            waiter.wait(InstanceIds=[instance_id])
            if self.verbose:
                self.log.debug("Instance: %s stopped" % instance_id)


@EBS.action_registry.register('snapshot')
class CreateSnapshot(BaseAction):
    """Snapshot an EBS volume.

    Tags may be optionally added to the snapshot during creation.

    - `copy-volume-tags` copies all the tags from the specified
      volume to the corresponding snapshot.
    - `copy-tags` copies the listed tags from each volume
      to the snapshot. This is mutually exclusive with
      `copy-volume-tags`.
    - `tags` allows new tags to be added to each snapshot. If
      no tags are specified, then the tag `custodian_snapshot`
      is added.

    The default behavior is `copy-volume-tags: true`.

    :example:

    .. code-block:: yaml

        policies:
          - name: snapshot-volumes
            resource: ebs
            filters:
              - Attachments: []
              - State: available
            actions:
              - type: snapshot
                copy-tags:
                  - Name
                tags:
                  custodian_snapshot: True
    """
    schema = type_schema(
        'snapshot',
        **{'copy-tags': {'type': 'array', 'items': {'type': 'string'}},
           'copy-volume-tags': {'type': 'boolean'},
           'tags': {'type': 'object'},
           'description': {'type': 'string'}})
    permissions = ('ec2:CreateSnapshot', 'ec2:CreateTags',)

    def validate(self):
        if self.data.get('copy-tags') and 'copy-volume-tags' in self.data:
            raise PolicyValidationError(
                "Can specify copy-tags or copy-volume-tags, not both")

    def process(self, volumes):
        client = local_session(self.manager.session_factory).client('ec2')
        retry = get_retry(['Throttled'], max_attempts=5)
        for vol in volumes:
            vol_id = vol['VolumeId']
            tags = [{
                'ResourceType': 'snapshot',
                'Tags': self.get_snapshot_tags(vol)
            }]
            retry(self.process_volume, client=client, volume=vol_id, tags=tags)

    def process_volume(self, client, volume, tags):
        description = self.data.get('description')
        if not description:
            description = "Automated snapshot by c7n - %s" % (self.manager.ctx.policy.name)

        try:
            client.create_snapshot(
                VolumeId=volume,
                Description=description,
                TagSpecifications=tags
            )
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidVolume.NotFound':
                return
            raise

    def get_snapshot_tags(self, resource):
        user_tags = self.data.get('tags', {}) or {'custodian_snapshot': ''}
        copy_tags = self.data.get('copy-tags', []) or self.data.get('copy-volume-tags', True)
        return coalesce_copy_user_tags(resource, copy_tags, user_tags)
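

# Worked sketch of the tag coalescing above (hypothetical volume): with
# `copy-tags: [Name]` and `tags: {custodian_snapshot: 'True'}`, a volume
# tagged Name=web-data yields a TagSpecifications entry roughly like:
#
#   [{'ResourceType': 'snapshot',
#     'Tags': [{'Key': 'Name', 'Value': 'web-data'},
#              {'Key': 'custodian_snapshot', 'Value': 'True'}]}]
#
# (exact ordering depends on coalesce_copy_user_tags).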


@EBS.action_registry.register('delete')
class Delete(BaseAction):
    """Delete an ebs volume.

    If the force boolean is true, we will detach an attached volume
    from an instance. Note this cannot be done for running instance
    root volumes.

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-unattached-volumes
            resource: ebs
            filters:
              - Attachments: []
              - State: available
            actions:
              - delete
    """
    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = (
        'ec2:DetachVolume', 'ec2:DeleteVolume', 'ec2:DescribeVolumes')

    def process(self, volumes):
        client = local_session(self.manager.session_factory).client('ec2')
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for v in volumes:
                futures[
                    w.submit(self.process_volume, client, v)] = v
            for f in as_completed(futures):
                v = futures[f]
                if f.exception():
                    self.log.error(
                        "Error processing volume:%s error:%s",
                        v['VolumeId'], f.exception())

    def process_volume(self, client, volume):
        try:
            if self.data.get('force') and len(volume['Attachments']):
                client.detach_volume(VolumeId=volume['VolumeId'], Force=True)
                waiter = client.get_waiter('volume_available')
                waiter.wait(VolumeIds=[volume['VolumeId']])
            self.manager.retry(
                client.delete_volume, VolumeId=volume['VolumeId'])
        except ClientError as e:
            if e.response['Error']['Code'] == "InvalidVolume.NotFound":
                return
            raise


@EBS.filter_registry.register('modifyable')
class ModifyableVolume(Filter):
    """Check if an ebs volume is modifyable online.

    Considerations:
    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/limitations.html

    Consideration Summary

    - only current instance types are supported (one exception m3.medium)
      Current Generation Instances (2017-2)
      https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#current-gen-instances
    - older magnetic volume types are not supported
    - shrinking volumes is not supported
    - must wait at least 6hrs between modifications to the same volume.
    - volumes must have been attached after Nov 1st, 2016.

    See :ref:`modify action <aws.ebs.actions.modify>` for examples.
    """

    schema = type_schema('modifyable')

    older_generation = {
        'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge',
        'c1.medium', 'c1.xlarge', 'cc2.8xlarge',
        'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge',
        'hi1.4xlarge', 'hs1.8xlarge', 'cg1.4xlarge', 't1.micro',
        # two legs good, not all current gen work either.
        'm3.large', 'm3.xlarge', 'm3.2xlarge'
    }

    permissions = ("ec2:DescribeInstances",)

    def process(self, resources, event=None):
        results = []
        filtered = []
        attached = []
        stats = Counter()
        marker_date = parse_date('2016-11-01T00:00:00+00:00')

        # Filter volumes
        for r in resources:
            # unsupported type
            if r['VolumeType'] == 'standard':
                stats['vol-type'] += 1
                filtered.append(r['VolumeId'])
                continue

            # unattached are easy
            if not r.get('Attachments'):
                results.append(r)
                continue

            # check for attachment date older than the supported date
            if r['Attachments'][0]['AttachTime'] < marker_date:
                stats['attach-time'] += 1
                filtered.append(r['VolumeId'])
                continue

            attached.append(r)

        # Filter volumes attached to unsupported instance types
        ec2 = self.manager.get_resource_manager('ec2')
        instance_map = {}
        for v in attached:
            instance_map.setdefault(
                v['Attachments'][0]['InstanceId'], []).append(v)

        instances = ec2.get_resources(list(instance_map.keys()))
        for i in instances:
            if i['InstanceType'] in self.older_generation:
                stats['instance-type'] += len(instance_map[i['InstanceId']])
                filtered.extend([v['VolumeId'] for v in instance_map.pop(i['InstanceId'])])
            else:
                results.extend(instance_map.pop(i['InstanceId']))

        # Filter volumes that are currently under modification
        client = local_session(self.manager.session_factory).client('ec2')
        modifying = set()

        # Re the 197 chunk size - max number of filters is 200, and we have
        # to use three additional attribute filters.
        for vol_set in chunks(list(results), 197):
            vol_ids = [v['VolumeId'] for v in vol_set]
            mutating = client.describe_volumes_modifications(
                Filters=[
                    {'Name': 'volume-id',
                     'Values': vol_ids},
                    {'Name': 'modification-state',
                     'Values': ['modifying', 'optimizing', 'failed']}])
            for vm in mutating.get('VolumesModifications', ()):
                stats['vol-mutation'] += 1
                filtered.append(vm['VolumeId'])
                modifying.add(vm['VolumeId'])

        self.log.debug(
            "filtered %d of %d volumes due to %s",
            len(filtered), len(resources), sorted(stats.items()))

        return [r for r in results if r['VolumeId'] not in modifying]
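

# Sketch of the describe_volumes_modifications page consumed above, assuming
# one volume mid-modification (the id is hypothetical):
#
#   {'VolumesModifications': [
#       {'VolumeId': 'vol-0123456789abcdef0',
#        'ModificationState': 'optimizing',
#        'TargetSize': 200, 'OriginalSize': 100}]}
#
# Any volume appearing here is excluded from the filter's results, since EBS
# allows only one modification at a time per volume.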


@EBS.action_registry.register('modify')
class ModifyVolume(BaseAction):
    """Modify an ebs volume online.

    **Note this action requires use of modifyable filter**

    Intro Blog & Use Cases:
    https://aws.amazon.com/blogs/aws/amazon-ebs-update-new-elastic-volumes-change-everything/

    Docs:
    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modify-volume.html

    Considerations:
    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/limitations.html

    :example:

    Find under-utilized provisioned iops volumes older than a week
    and change their type.

    .. code-block:: yaml

        policies:
          - name: ebs-remove-piops
            resource: ebs
            filters:
              - type: value
                key: CreateTime
                value_type: age
                value: 7
                op: greater-than
              - VolumeType: io1
              - type: metrics
                name: VolumeConsumedReadWriteOps
                statistics: Maximum
                value: 100
                op: less-than
                days: 7
              - modifyable
            actions:
              - type: modify
                volume-type: gp2

    `iops-percent` and `size-percent` can be used to modify the iops of
    io1/io2 volumes and the volume size, respectively.

    When converting to io1/io2, `iops-percent` is used to set the iops
    allocation for the new volume against the extant value for the old
    volume.

    :example:

    Double storage and quadruple iops for all io1 volumes.

    .. code-block:: yaml

        policies:
          - name: ebs-upsize-piops
            resource: ebs
            filters:
              - VolumeType: io1
              - modifyable
            actions:
              - type: modify
                size-percent: 200
                iops-percent: 400

    **Note** resizing down aka shrinking requires OS and FS support
    and potentially additional preparation, else data-loss may occur.
    To prevent accidents, shrinking must be explicitly enabled by also
    setting `shrink: true` on the action.
    """

    schema = type_schema(
        'modify',
        **{'volume-type': {'enum': ['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1']},
           'shrink': False,
           'size-percent': {'type': 'number'},
           'iops-percent': {'type': 'number'}})

    # assumption, as it's the closest permission available.
    permissions = ("ec2:ModifyVolumeAttribute",)

    def validate(self):
        if 'modifyable' not in self.manager.data.get('filters', ()):
            raise PolicyValidationError(
                "modify action requires modifyable filter in policy")
        if self.data.get('size-percent', 100) < 100 and not self.data.get('shrink', False):
            raise PolicyValidationError((
                "shrinking volumes requires os/fs support "
                "or data-loss may ensue, use `shrink: true` to override"))
        return self

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('ec2')
        for resource_set in chunks(resources, 50):
            self.process_resource_set(client, resource_set)

    def process_resource_set(self, client, resource_set):
        vtype = self.data.get('volume-type')
        psize = self.data.get('size-percent')
        piops = self.data.get('iops-percent')

        for r in resource_set:
            params = {'VolumeId': r['VolumeId']}
            if piops and ('io1' in (vtype, r['VolumeType']) or
                          'io2' in (vtype, r['VolumeType'])):
                # default here if we're changing to io1
                params['Iops'] = max(int(r.get('Iops', 10) * piops / 100.0), 100)
            if psize:
                params['Size'] = max(int(r['Size'] * psize / 100.0), 1)
            if vtype:
                params['VolumeType'] = vtype
            self.manager.retry(client.modify_volume, **params)
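

# Worked example of the percent math above (hypothetical io1 volume): with
# size-percent: 200 and iops-percent: 400, a volume with Size=100 GiB and
# Iops=1000 yields Size=max(int(100 * 200 / 100.0), 1) = 200 and
# Iops=max(int(1000 * 400 / 100.0), 100) = 4000 in the modify_volume call.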