# Coverage for curator/actions.py: 99% of 968 statements (coverage.py v7.3.0, created 2023-08-16 15:36 -0600)

"""Curator Actions"""
import logging
import re
import time
from copy import deepcopy
from datetime import datetime
from elasticsearch7.exceptions import ConflictError, RequestError
from curator import exceptions, utils

class Alias(object):
    """Alias Action Class"""
    def __init__(self, name=None, extra_settings={}, **kwargs):
        """
        Define the Alias object.

        :arg name: The alias name
        :arg extra_settings: Extra settings, including filters and routing. For
            more information see
            https://www.elastic.co/guide/en/elasticsearch/reference/6.8/indices-aliases.html
        :type extra_settings: dict, representing the settings.
        """
        if not name:
            raise exceptions.MissingArgument('No value for "name" provided.')
        #: Instance variable.
        #: The strftime parsed version of `name`.
        self.name = utils.parse_date_pattern(name)
        #: Instance variable.
        #: The list of actions to perform. Populated by
        #: :py:meth:`curator.actions.Alias.add` and
        #: :py:meth:`curator.actions.Alias.remove`
        self.actions = []
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = None
        #: Instance variable.
        #: Any extra things to add to the alias, like filters, or routing.
        self.extra_settings = extra_settings
        self.loggit = logging.getLogger('curator.actions.alias')
        #: Instance variable.
        #: Preset default value of `False`.
        self.warn_if_no_indices = False

    def add(self, ilo, warn_if_no_indices=False):
        """
        Create `add` statements for each index in `ilo` for `alias`, then
        append them to `actions`. Add any `extras` that may be there.

        :arg ilo: A :class:`curator.indexlist.IndexList` object
        """
        utils.verify_index_list(ilo)
        if not self.client:
            self.client = ilo.client
        self.name = utils.parse_datemath(self.client, self.name)
        try:
            ilo.empty_list_check()
        except exceptions.NoIndices:
            # If so configured, only log a warning when there are no indices
            if warn_if_no_indices:
                self.warn_if_no_indices = True
                self.loggit.warning(
                    'No indices found after processing filters. '
                    'Nothing to add to {0}'.format(self.name)
                )
                return
            else:
                # Re-raise the exceptions.NoIndices so it will behave as before
                raise exceptions.NoIndices('No indices to add to alias')
        for index in ilo.working_list():
            self.loggit.debug(
                'Adding index {0} to alias {1} with extra settings '
                '{2}'.format(index, self.name, self.extra_settings)
            )
            add_dict = {'add' : {'index' : index, 'alias': self.name}}
            add_dict['add'].update(self.extra_settings)
            self.actions.append(add_dict)

    def remove(self, ilo, warn_if_no_indices=False):
        """
        Create `remove` statements for each index in `ilo` for `alias`,
        then append them to `actions`.

        :arg ilo: A :class:`curator.indexlist.IndexList` object
        """
        utils.verify_index_list(ilo)
        if not self.client:
            self.client = ilo.client
        self.name = utils.parse_datemath(self.client, self.name)
        try:
            ilo.empty_list_check()
        except exceptions.NoIndices:
            # If so configured, only log a warning when there are no indices
            if warn_if_no_indices:
                self.warn_if_no_indices = True
                self.loggit.warning(
                    'No indices found after processing filters. '
                    'Nothing to remove from {0}'.format(self.name)
                )
                return
            else:
                # Re-raise the exceptions.NoIndices so it will behave as before
                raise exceptions.NoIndices('No indices to remove from alias')
        aliases = self.client.indices.get_alias()
        for index in ilo.working_list():
            if index in aliases:
                self.loggit.debug('Index {0} in get_aliases output'.format(index))
                # Only remove if the index is associated with the alias
                if self.name in aliases[index]['aliases']:
                    self.loggit.debug(
                        'Removing index {0} from alias '
                        '{1}'.format(index, self.name)
                    )
                    self.actions.append(
                        {'remove' : {'index' : index, 'alias': self.name}})
                else:
                    self.loggit.debug(
                        'Cannot remove: Index {0} is not associated with alias'
                        ' {1}'.format(index, self.name)
                    )

    def body(self):
        """
        Return a `body` dictionary suitable for use with the `update_aliases`
        API call.
        """
        if not self.actions:
            if not self.warn_if_no_indices:
                raise exceptions.ActionError('No "add" or "remove" operations')
            else:
                raise exceptions.NoIndices('No "adds" or "removes" found. Taking no action')
        self.loggit.debug('Alias actions: {0}'.format(self.actions))

        return {'actions' : self.actions}

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        self.loggit.info('DRY-RUN MODE. No changes will be made.')
        for item in self.body()['actions']:
            job = list(item.keys())[0]
            index = item[job]['index']
            alias = item[job]['alias']
            # We want our log to look clever, so if job is "remove", strip the
            # 'e' so "remove" can become "removing". "adding" works already.
            self.loggit.info(
                'DRY-RUN: alias: {0}ing index "{1}" {2} alias '
                '"{3}"'.format(
                    job.rstrip('e'),
                    index,
                    'to' if job == 'add' else 'from',
                    alias
                )
            )

    def do_action(self):
        """
        Run the API call `update_aliases` with the results of `body()`
        """
        self.loggit.info('Updating aliases...')
        self.loggit.info('Alias actions: {0}'.format(self.body()))
        try:
            self.client.indices.update_aliases(body=self.body())
        except Exception as err:
            utils.report_failure(err)
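
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): a minimal example
# of driving the Alias action directly. It assumes a connected client (e.g.,
# from utils.get_client()) and a hypothetical 'logstash-' index pattern and
# 'logs-current' alias name; the filter dict follows the same
# {'filters': [...]} shape this module passes to iterate_filters() elsewhere.
def _example_alias_usage(client):
    """Sketch: point a rolling alias at every logstash- prefixed index."""
    from curator.indexlist import IndexList
    ilo = IndexList(client)
    ilo.iterate_filters(
        {'filters': [{'filtertype': 'pattern', 'kind': 'prefix', 'value': 'logstash-'}]})
    alias_action = Alias(name='logs-current')
    alias_action.add(ilo, warn_if_no_indices=True)  # warn instead of raising on empty list
    alias_action.do_action()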

class Allocation(object):
    """Allocation Action Class"""
    def __init__(
        self, ilo, key=None, value=None, allocation_type='require', wait_for_completion=False,
        wait_interval=3, max_wait=-1
    ):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg key: An arbitrary metadata attribute key. Must match the key
            assigned to at least some of your nodes to have any effect.
        :arg value: An arbitrary metadata attribute value. Must correspond to
            values associated with `key` assigned to at least some of your nodes
            to have any effect. If a `None` value is provided, it will remove
            any setting associated with that `key`.
        :arg allocation_type: Type of allocation to apply. Default is `require`
        :arg wait_for_completion: Wait (or not) for the operation
            to complete before returning. (default: `False`)
        :type wait_for_completion: bool
        :arg wait_interval: How long in seconds to wait between checks for
            completion.
        :arg max_wait: Maximum number of seconds to `wait_for_completion`

        .. note::
            See:
            https://www.elastic.co/guide/en/elasticsearch/reference/6.8/shard-allocation-filtering.html
        """
        utils.verify_index_list(ilo)
        if not key:
            raise exceptions.MissingArgument('No value for "key" provided')
        if allocation_type not in ['require', 'include', 'exclude']:
            raise ValueError(
                '{0} is an invalid allocation_type. Must be one of "require", '
                '"include", "exclude".'.format(allocation_type)
            )
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        self.loggit = logging.getLogger('curator.actions.allocation')
        #: Instance variable.
        #: Populated at instance creation time. Value is
        #: ``index.routing.allocation.`` `allocation_type` ``.`` `key` ``.`` `value`
        bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key)
        self.body = {bkey : value}
        #: Instance variable.
        #: Internal reference to `wait_for_completion`
        self.wfc = wait_for_completion
        #: Instance variable
        #: How many seconds to wait between checks for completion.
        self.wait_interval = wait_interval
        #: Instance variable.
        #: How long in seconds to `wait_for_completion` before returning with an
        #: exception. A value of -1 means wait forever.
        self.max_wait = max_wait

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(self.index_list, 'allocation', body=self.body)

    def do_action(self):
        """
        Change allocation settings for indices in `index_list.indices` with the
        settings in `body`.
        """
        self.loggit.debug(
            'Cannot change the shard routing allocation of closed indices. '
            'Omitting any closed indices.'
        )
        self.index_list.filter_closed()
        self.index_list.empty_list_check()
        self.loggit.info(
            'Updating {0} selected indices: {1}'.format(
                len(self.index_list.indices), self.index_list.indices
            )
        )
        self.loggit.info('Updating index setting {0}'.format(self.body))
        try:
            index_lists = utils.chunk_index_list(self.index_list.indices)
            for lst in index_lists:
                self.client.indices.put_settings(
                    index=utils.to_csv(lst), body=self.body
                )
                if self.wfc:
                    self.loggit.debug(
                        'Waiting for shards to complete relocation for indices:'
                        ' {0}'.format(utils.to_csv(lst))
                    )
                    utils.wait_for_it(
                        self.client, 'allocation',
                        wait_interval=self.wait_interval, max_wait=self.max_wait
                    )
        except Exception as err:
            utils.report_failure(err)
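
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): tag-based shard
# routing, assuming nodes carry a hypothetical "box_type" attribute and `ilo`
# is an already-filtered IndexList. Arguments mirror the __init__ above.
def _example_allocation_usage(ilo):
    """Sketch: pin the selected indices to nodes tagged box_type=warm."""
    action = Allocation(
        ilo, key='box_type', value='warm', allocation_type='require',
        wait_for_completion=True, wait_interval=3, max_wait=300
    )
    action.do_action()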

class Close(object):
    """Close Action Class"""
    def __init__(self, ilo, delete_aliases=False, skip_flush=False, ignore_sync_failures=False):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg delete_aliases: If `True`, will delete any associated aliases
            before closing indices.
        :type delete_aliases: bool
        :arg skip_flush: If `True`, will not flush indices before closing.
        :type skip_flush: bool
        :arg ignore_sync_failures: If `True`, will not fail if there are failures while attempting
            a synced flush.
        :type ignore_sync_failures: bool
        """
        utils.verify_index_list(ilo)
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: Internal reference to `delete_aliases`
        self.delete_aliases = delete_aliases
        #: Instance variable.
        #: Internal reference to `skip_flush`
        self.skip_flush = skip_flush
        #: Instance variable.
        #: Internal reference to `ignore_sync_failures`
        self.ignore_sync_failures = ignore_sync_failures
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        self.loggit = logging.getLogger('curator.actions.close')

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(
            self.index_list, 'close', **{'delete_aliases':self.delete_aliases})

    def do_action(self):
        """
        Close open indices in `index_list.indices`
        """
        self.index_list.filter_closed()
        self.index_list.empty_list_check()
        self.loggit.info(
            'Closing {0} selected indices: {1}'.format(
                len(self.index_list.indices), self.index_list.indices
            )
        )
        try:
            index_lists = utils.chunk_index_list(self.index_list.indices)
            for lst in index_lists:
                lst_as_csv = utils.to_csv(lst)
                self.loggit.debug('CSV list of indices to close: {0}'.format(lst_as_csv))
                if self.delete_aliases:
                    self.loggit.info('Deleting aliases from indices before closing.')
                    self.loggit.debug('Deleting aliases from: {0}'.format(lst))
                    try:
                        self.client.indices.delete_alias(index=lst_as_csv, name='_all')
                        self.loggit.debug('Deleted aliases from: {0}'.format(lst))
                    except Exception as err:
                        self.loggit.warning(
                            'Some indices may not have had aliases. Exception:'
                            ' {0}'.format(err)
                        )
                if not self.skip_flush:
                    try:
                        self.client.indices.flush_synced(index=lst_as_csv, ignore_unavailable=True)
                    except ConflictError as err:
                        if not self.ignore_sync_failures:
                            raise ConflictError(err.status_code, err.error, err.info)
                        else:
                            self.loggit.warning(
                                'Ignoring synced flush failures: '
                                '{0} {1}'.format(err.error, err.info)
                            )
                self.client.indices.close(index=lst_as_csv, ignore_unavailable=True)
        except Exception as err:
            utils.report_failure(err)
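
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): closing indices
# while tolerating synced-flush conflicts; `ilo` is assumed to be a populated
# IndexList. The dry-run preview first is a suggested pattern, not required.
def _example_close_usage(ilo):
    """Sketch: strip aliases, flush, then close, ignoring sync conflicts."""
    action = Close(ilo, delete_aliases=True, ignore_sync_failures=True)
    action.do_dry_run()  # preview what would be closed
    action.do_action()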

class ClusterRouting(object):
    """ClusterRouting Action Class"""
    def __init__(
        self, client, routing_type=None, setting=None, value=None, wait_for_completion=False,
        wait_interval=9, max_wait=-1
    ):
        """
        For now, the cluster routing settings are hardcoded to be ``transient``

        :arg client: An :class:`elasticsearch7.Elasticsearch` client object
        :arg routing_type: Type of routing to apply. Either `allocation` or
            `rebalance`
        :arg setting: Currently, the only acceptable value for `setting` is
            ``enable``. This is here in case that changes.
        :arg value: Used only if `setting` is `enable`. Semi-dependent on
            `routing_type`. Acceptable values for `allocation` and `rebalance`
            are ``all``, ``primaries``, and ``none`` (string, not `NoneType`).
            If `routing_type` is `allocation`, this can also be
            ``new_primaries``, and if `rebalance`, it can be ``replicas``.
        :arg wait_for_completion: Wait (or not) for the operation
            to complete before returning. (default: `False`)
        :type wait_for_completion: bool
        :arg wait_interval: How long in seconds to wait between checks for
            completion.
        :arg max_wait: Maximum number of seconds to `wait_for_completion`
        """
        utils.verify_client_object(client)
        #: Instance variable.
        #: An :class:`elasticsearch7.Elasticsearch` client object
        self.client = client
        self.loggit = logging.getLogger('curator.actions.cluster_routing')
        #: Instance variable.
        #: Internal reference to `wait_for_completion`
        self.wfc = wait_for_completion
        #: Instance variable
        #: How many seconds to wait between checks for completion.
        self.wait_interval = wait_interval
        #: Instance variable.
        #: How long in seconds to `wait_for_completion` before returning with an
        #: exception. A value of -1 means wait forever.
        self.max_wait = max_wait

        if setting != 'enable':
            raise ValueError(
                'Invalid value for "setting": {0}.'.format(setting)
            )
        if routing_type == 'allocation':
            if value not in ['all', 'primaries', 'new_primaries', 'none']:
                raise ValueError(
                    'Invalid "value": {0} with "routing_type":'
                    '{1}.'.format(value, routing_type)
                )
        elif routing_type == 'rebalance':
            if value not in ['all', 'primaries', 'replicas', 'none']:
                raise ValueError(
                    'Invalid "value": {0} with "routing_type":'
                    '{1}.'.format(value, routing_type)
                )
        else:
            raise ValueError(
                'Invalid value for "routing_type": {0}.'.format(routing_type)
            )
        bkey = 'cluster.routing.{0}.{1}'.format(routing_type, setting)
        self.body = {'transient' : {bkey : value}}

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        self.loggit.info('DRY-RUN MODE. No changes will be made.')
        self.loggit.info(
            'DRY-RUN: Update cluster routing settings with arguments: '
            '{0}'.format(self.body)
        )

    def do_action(self):
        """
        Change cluster routing settings with the settings in `body`.
        """
        self.loggit.info('Updating cluster settings: {0}'.format(self.body))
        try:
            self.client.cluster.put_settings(body=self.body)
            if self.wfc:
                self.loggit.debug(
                    'Waiting for shards to complete routing and/or rebalancing'
                )
                utils.wait_for_it(
                    self.client, 'cluster_routing',
                    wait_interval=self.wait_interval, max_wait=self.max_wait
                )
        except Exception as err:
            utils.report_failure(err)
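
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): a common use of
# this action is to restrict shard rebalancing around maintenance windows.
# The client is assumed to come from utils.get_client(); the value choices
# follow the validation lists in __init__ above.
def _example_cluster_routing_usage(client):
    """Sketch: transiently restrict rebalancing to primaries only."""
    action = ClusterRouting(
        client, routing_type='rebalance', setting='enable', value='primaries',
        wait_for_completion=True, wait_interval=9, max_wait=600
    )
    action.do_action()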

class CreateIndex(object):
    """Create Index Action Class"""
    def __init__(self, client, name, extra_settings={}, ignore_existing=False):
        """
        :arg client: An :class:`elasticsearch7.Elasticsearch` client object
        :arg name: A name, which can contain :py:func:`time.strftime`
            strings
        :arg extra_settings: The `settings` and `mappings` for the index. For
            more information see
            https://www.elastic.co/guide/en/elasticsearch/reference/6.8/indices-create-index.html
        :type extra_settings: dict, representing the settings and mappings.
        :arg ignore_existing: If an index already exists, and this setting is ``True``,
            ignore the 400 error that results in a `resource_already_exists_exception` and
            return that it was successful.
        """
        if not name:
            raise exceptions.ConfigurationError('Value for "name" not provided.')
        #: Instance variable.
        #: The parsed version of `name`
        self.name = utils.parse_date_pattern(name)
        #: Instance variable.
        #: Extracted from the action yaml, it should be a dictionary of
        #: mappings and settings suitable for index creation.
        self.body = extra_settings
        #: Instance variable.
        #: Extracted from the action yaml, it should be a boolean informing
        #: whether to ignore the error if the index already exists.
        self.ignore_existing = ignore_existing
        #: Instance variable.
        #: An :class:`elasticsearch7.Elasticsearch` client object
        self.client = client
        self.loggit = logging.getLogger('curator.actions.create_index')

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        self.loggit.info('DRY-RUN MODE. No changes will be made.')
        self.loggit.info(
            'DRY-RUN: create_index "%s" with arguments: '
            '%s' % (self.name, self.body)
        )

    def do_action(self):
        """
        Create index identified by `name` with settings in `body`
        """
        self.loggit.info(
            'Creating index "{0}" with settings: '
            '{1}'.format(self.name, self.body)
        )
        try:
            self.client.indices.create(index=self.name, body=self.body)
        # Most likely error is a 400, `resource_already_exists_exception`
        except RequestError as err:
            match_list = ["index_already_exists_exception", "resource_already_exists_exception"]
            if err.error in match_list and self.ignore_existing:
                self.loggit.warning('Index %s already exists.' % self.name)
            else:
                raise exceptions.FailedExecution('Index %s already exists.' % self.name)
        except Exception as err:
            utils.report_failure(err)
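
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): date-patterned
# index creation. The index name and settings here are hypothetical; the
# strftime pattern is expanded by utils.parse_date_pattern() in __init__.
def _example_create_index_usage(client):
    """Sketch: idempotently create today's index with a single shard."""
    action = CreateIndex(
        client, name='logs-%Y.%m.%d',
        extra_settings={'settings': {'number_of_shards': 1, 'number_of_replicas': 0}},
        ignore_existing=True  # treat resource_already_exists_exception as success
    )
    action.do_action()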

class DeleteIndices(object):
    """Delete Indices Action Class"""
    def __init__(self, ilo, master_timeout=30):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg master_timeout: Number of seconds to wait for master node response
        """
        utils.verify_index_list(ilo)
        if not isinstance(master_timeout, int):
            raise TypeError(
                'Incorrect type for "master_timeout": {0}. '
                'Should be integer value.'.format(type(master_timeout))
            )
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        #: Instance variable.
        #: String value of `master_timeout` + 's', for seconds.
        self.master_timeout = str(master_timeout) + 's'
        self.loggit = logging.getLogger('curator.actions.delete_indices')
        self.loggit.debug('master_timeout value: {0}'.format(
            self.master_timeout))

    def _verify_result(self, result, count):
        """
        Breakout method to aid readability
        :arg result: A list of indices from `_get_result_list`
        :arg count: The number of tries that have occurred
        :rtype: bool
        """
        if isinstance(result, list) and result:
            self.loggit.error(
                'The following indices failed to delete on try '
                '#{0}:'.format(count)
            )
            for idx in result:
                self.loggit.error("---{0}".format(idx))
            retval = False
        else:
            self.loggit.debug(
                'Successfully deleted all indices on try #{0}'.format(count)
            )
            retval = True
        return retval

    def __chunk_loop(self, chunk_list):
        """
        Loop through deletes 3 times to ensure they complete
        :arg chunk_list: A list of indices pre-chunked so it won't overload the
            URL size limit.
        """
        working_list = chunk_list
        for count in range(1, 4): # Try 3 times
            for i in working_list:
                self.loggit.info("---deleting index {0}".format(i))
            self.client.indices.delete(
                index=utils.to_csv(working_list), master_timeout=self.master_timeout)
            result = [i for i in working_list if i in utils.get_indices(self.client)]
            if self._verify_result(result, count):
                return
            else:
                working_list = result
        self.loggit.error(
            'Unable to delete the following indices after 3 attempts: '
            '{0}'.format(result)
        )

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(self.index_list, 'delete_indices')

    def do_action(self):
        """
        Delete indices in `index_list.indices`
        """
        self.index_list.empty_list_check()
        self.loggit.info(
            'Deleting {0} selected indices: {1}'.format(
                len(self.index_list.indices), self.index_list.indices
            )
        )
        try:
            index_lists = utils.chunk_index_list(self.index_list.indices)
            for lst in index_lists:
                self.__chunk_loop(lst)
        except Exception as err:
            utils.report_failure(err)
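
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): age-based pruning.
# The 'age' filter dict is hedged from Curator's documented filter schema;
# the 30-day cutoff and the 'logstash-' prefix are hypothetical.
def _example_delete_indices_usage(client):
    """Sketch: delete indices whose names date them older than 30 days."""
    from curator.indexlist import IndexList
    ilo = IndexList(client)
    ilo.iterate_filters({'filters': [
        {'filtertype': 'pattern', 'kind': 'prefix', 'value': 'logstash-'},
        {'filtertype': 'age', 'source': 'name', 'direction': 'older',
         'timestring': '%Y.%m.%d', 'unit': 'days', 'unit_count': 30},
    ]})
    DeleteIndices(ilo, master_timeout=30).do_action()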

class ForceMerge(object):
    """ForceMerge Action Class"""
    def __init__(self, ilo, max_num_segments=None, delay=0):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg max_num_segments: Number of segments per shard to forceMerge
        :arg delay: Number of seconds to delay between forceMerge operations
        """
        utils.verify_index_list(ilo)
        if not max_num_segments:
            raise exceptions.MissingArgument('Missing value for "max_num_segments"')
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: Internally accessible copy of `max_num_segments`
        self.max_num_segments = max_num_segments
        #: Instance variable.
        #: Internally accessible copy of `delay`
        self.delay = delay
        self.loggit = logging.getLogger('curator.actions.forcemerge')

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(
            self.index_list, 'forcemerge',
            max_num_segments=self.max_num_segments,
            delay=self.delay,
        )

    def do_action(self):
        """
        forcemerge indices in `index_list.indices`
        """
        self.index_list.filter_closed()
        self.index_list.filter_forceMerged(
            max_num_segments=self.max_num_segments)
        self.index_list.empty_list_check()
        self.loggit.info(
            'forceMerging {0} selected indices: {1}'.format(
                len(self.index_list.indices), self.index_list.indices
            )
        )
        try:
            for index_name in self.index_list.indices:
                self.loggit.info(
                    'forceMerging index {0} to {1} segments per shard. '
                    'Please wait...'.format(index_name, self.max_num_segments)
                )
                self.client.indices.forcemerge(
                    index=index_name, max_num_segments=self.max_num_segments)
                if self.delay > 0:
                    self.loggit.info(
                        'Pausing for {0} seconds before continuing...'.format(self.delay))
                    time.sleep(self.delay)
        except Exception as err:
            utils.report_failure(err)
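
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): merging read-only
# indices down to one segment per shard, with a pause between indices so the
# cluster can absorb the merge I/O. `ilo` is assumed to hold only indices
# that are no longer being written to; the 120-second delay is hypothetical.
def _example_forcemerge_usage(ilo):
    """Sketch: forceMerge each selected index to 1 segment per shard."""
    ForceMerge(ilo, max_num_segments=1, delay=120).do_action()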

class IndexSettings(object):
    """Index Settings Action Class"""
    def __init__(
        self, ilo, index_settings={}, ignore_unavailable=False, preserve_existing=False):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg index_settings: A dictionary structure with one or more index
            settings to change.
        :arg ignore_unavailable: Whether specified concrete indices should be
            ignored when unavailable (missing or closed)
        :arg preserve_existing: Whether to update existing settings. If set to
            ``True`` existing settings on an index remain unchanged. The default
            is ``False``
        """
        utils.verify_index_list(ilo)
        if not index_settings:
            raise exceptions.MissingArgument('Missing value for "index_settings"')
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: Internal reference to `index_settings`
        self.body = index_settings
        #: Instance variable.
        #: Internal reference to `ignore_unavailable`
        self.ignore_unavailable = ignore_unavailable
        #: Instance variable.
        #: Internal reference to `preserve_existing`
        self.preserve_existing = preserve_existing

        self.loggit = logging.getLogger('curator.actions.index_settings')
        self._body_check()

    def _body_check(self):
        # The body only passes the skimpiest of requirements by having 'index'
        # as the only root-level key, and having a 'dict' as its value
        if len(self.body) == 1:
            if 'index' in self.body:
                if isinstance(self.body['index'], dict):
                    return True
        raise exceptions.ConfigurationError(
            'Bad value for "index_settings": {0}'.format(self.body))

    def _static_settings(self):
        return [
            'number_of_shards',
            'shard',
            'codec',
            'routing_partition_size',
        ]

    def _dynamic_settings(self):
        return [
            'number_of_replicas',
            'auto_expand_replicas',
            'refresh_interval',
            'max_result_window',
            'max_rescore_window',
            'blocks',
            'max_refresh_listeners',
            'mapping',
            'merge',
            'translog',
        ]

    def _settings_check(self):
        # Detect if even one index is open. Save all found to open_index_list.
        open_index_list = []
        open_indices = False
        for idx in self.index_list.indices:
            if self.index_list.index_info[idx]['state'] == 'open':
                open_index_list.append(idx)
                open_indices = True
        for k in self.body['index']:
            if k in self._static_settings():
                if not self.ignore_unavailable:
                    if open_indices:
                        raise exceptions.ActionError(
                            'Static Setting "{0}" detected with open indices: '
                            '{1}. Static settings can only be used with closed '
                            'indices. Recommend filtering out open indices, '
                            'or setting ignore_unavailable to True'.format(
                                k, open_index_list
                            )
                        )
            elif k in self._dynamic_settings():
                # Dynamic settings should be applicable to open or closed indices
                # Act here if the case is different for some settings.
                pass
            else:
                self.loggit.warning(
                    '"{0}" is not a setting Curator recognizes and may or may '
                    'not work.'.format(k)
                )

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(self.index_list, 'indexsettings', **self.body)

    def do_action(self):
        """Actually do the action"""
        self._settings_check()
        # Ensure that the open indices filter applied in _settings_check()
        # didn't result in an empty list (or otherwise empty)
        self.index_list.empty_list_check()
        self.loggit.info(
            'Applying index settings to {0} indices: '
            '{1}'.format(len(self.index_list.indices), self.index_list.indices)
        )
        try:
            index_lists = utils.chunk_index_list(self.index_list.indices)
            for lst in index_lists:
                response = self.client.indices.put_settings(
                    index=utils.to_csv(lst), body=self.body,
                    ignore_unavailable=self.ignore_unavailable,
                    preserve_existing=self.preserve_existing
                )
                self.loggit.debug('PUT SETTINGS RESPONSE: {0}'.format(response))
        except Exception as err:
            utils.report_failure(err)
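
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): applying a dynamic
# setting. Note the required shape enforced by _body_check() above: a single
# root-level 'index' key whose value is a dict. The 30s refresh interval is
# hypothetical.
def _example_index_settings_usage(ilo):
    """Sketch: relax the refresh interval on the selected indices."""
    action = IndexSettings(
        ilo, index_settings={'index': {'refresh_interval': '30s'}},
        ignore_unavailable=False, preserve_existing=False
    )
    action.do_action()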

class Open(object):
    """Open Action Class"""
    def __init__(self, ilo):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        """
        utils.verify_index_list(ilo)
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        self.loggit = logging.getLogger('curator.actions.open')

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(self.index_list, 'open')

    def do_action(self):
        """
        Open closed indices in `index_list.indices`
        """
        self.index_list.empty_list_check()
        self.loggit.info(
            'Opening {0} selected indices: {1}'.format(
                len(self.index_list.indices),
                self.index_list.indices
            )
        )
        try:
            index_lists = utils.chunk_index_list(self.index_list.indices)
            for lst in index_lists:
                self.client.indices.open(index=utils.to_csv(lst))
        except Exception as err:
            utils.report_failure(err)

class Replicas(object):
    """Replica Action Class"""
    def __init__(
        self, ilo, count=None, wait_for_completion=False, wait_interval=9, max_wait=-1):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg count: The count of replicas per shard
        :arg wait_for_completion: Wait (or not) for the operation
            to complete before returning. (default: `False`)
        :type wait_for_completion: bool
        :arg wait_interval: How long in seconds to wait between checks for
            completion.
        :arg max_wait: Maximum number of seconds to `wait_for_completion`
        """
        utils.verify_index_list(ilo)
        # It's okay for count to be zero
        if count == 0:
            pass
        elif not count:
            raise exceptions.MissingArgument('Missing value for "count"')
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: Internally accessible copy of `count`
        self.count = count
        #: Instance variable.
        #: Internal reference to `wait_for_completion`
        self.wfc = wait_for_completion
        #: Instance variable
        #: How many seconds to wait between checks for completion.
        self.wait_interval = wait_interval
        #: Instance variable.
        #: How long in seconds to `wait_for_completion` before returning with an
        #: exception. A value of -1 means wait forever.
        self.max_wait = max_wait
        self.loggit = logging.getLogger('curator.actions.replicas')

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        utils.show_dry_run(self.index_list, 'replicas', count=self.count)

    def do_action(self):
        """
        Update the replica count of indices in `index_list.indices`
        """
        self.loggit.debug(
            'Cannot update the replica count of closed indices. '
            'Omitting any closed indices.'
        )
        self.index_list.filter_closed()
        self.index_list.empty_list_check()
        self.loggit.info(
            'Setting the replica count to {0} for {1} indices: '
            '{2}'.format(self.count, len(self.index_list.indices), self.index_list.indices)
        )
        try:
            index_lists = utils.chunk_index_list(self.index_list.indices)
            for lst in index_lists:
                self.client.indices.put_settings(
                    index=utils.to_csv(lst),
                    body={'number_of_replicas': self.count}
                )
                if self.wfc and self.count > 0:
                    self.loggit.debug(
                        'Waiting for shards to complete replication for '
                        'indices: {0}'.format(utils.to_csv(lst))
                    )
                    utils.wait_for_it(
                        self.client, 'replicas',
                        wait_interval=self.wait_interval, max_wait=self.max_wait
                    )
        except Exception as err:
            utils.report_failure(err)
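
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): count=0 is valid
# here (the __init__ above special-cases it), which makes this action handy
# for dropping replicas on indices about to be archived or snapshotted.
def _example_replicas_usage(ilo):
    """Sketch: drop all replicas from the selected indices."""
    Replicas(ilo, count=0, wait_for_completion=False).do_action()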

class Rollover(object):
    """Rollover Action Class"""
    def __init__(
        self, client, name, conditions, new_index=None, extra_settings=None,
        wait_for_active_shards=1
    ):
        """
        :arg client: An :class:`elasticsearch7.Elasticsearch` client object
        :arg name: The name of the single-index-mapped alias to test for
            rollover conditions.
        :arg new_index: The new index name
        :arg conditions: A dictionary of conditions to test
        :arg extra_settings: Must be either `None`, or a dictionary of settings
            to apply to the new index on rollover. This is used in place of
            `settings` in the Rollover API, mostly because it's already existent
            in other places here in Curator
        :arg wait_for_active_shards: The number of shards expected to be active
            before returning.
        """
        self.loggit = logging.getLogger('curator.actions.rollover')
        if not isinstance(conditions, dict):
            raise exceptions.ConfigurationError('"conditions" must be a dictionary')
        else:
            self.loggit.debug('"conditions" is {0}'.format(conditions))
        if not isinstance(extra_settings, dict) and extra_settings is not None:
            raise exceptions.ConfigurationError(
                '"extra_settings" must be a dictionary or None')
        utils.verify_client_object(client)
        #: Instance variable.
        #: The Elasticsearch Client object
        self.client = client
        #: Instance variable.
        #: Internal reference to `conditions`
        self.conditions = self._check_max_size(conditions)
        #: Instance variable.
        #: Internal reference to `extra_settings`
        self.settings = extra_settings
        #: Instance variable.
        #: Internal reference to `new_index`
        self.new_index = utils.parse_date_pattern(new_index) if new_index else new_index
        #: Instance variable.
        #: Internal reference to `wait_for_active_shards`
        self.wait_for_active_shards = wait_for_active_shards

        # Verify that `conditions` and `settings` are good?
        # Verify that `name` is an alias, and is only mapped to one index.
        if utils.rollable_alias(client, name):
            self.name = name
        else:
            raise ValueError(
                'Unable to perform index rollover with alias '
                '"{0}". See previous logs for more details.'.format(name)
            )

    def _check_max_size(self, conditions):
        """
        Ensure that, if ``max_size`` is specified, ``self.client`` is running
        6.1 or higher.
        """
        if 'max_size' in conditions:
            version = utils.get_version(self.client)
            if version < (6, 1, 0):
                raise exceptions.ConfigurationError(
                    'Your version of elasticsearch ({0}) does not support '
                    'the max_size rollover condition. It is only supported '
                    'in versions 6.1.0 and up.'.format(version)
                )
        return conditions

    def body(self):
        """
        Create a body from conditions and settings
        """
        retval = {}
        retval['conditions'] = self.conditions
        if self.settings:
            retval['settings'] = self.settings
        return retval

    def log_result(self, result):
        """
        Log the results based on whether the index rolled over or not
        """
        dryrun_string = ''
        if result['dry_run']:
            dryrun_string = 'DRY-RUN: '
        self.loggit.debug('{0}Result: {1}'.format(dryrun_string, result))
        rollover_string = '{0}Old index {1} rolled over to new index {2}'.format(
            dryrun_string,
            result['old_index'],
            result['new_index']
        )
        # Success is determined by at least one condition being True
        success = False
        for k in list(result['conditions'].keys()):
            if result['conditions'][k]:
                success = True
        if result['dry_run'] and success: # log "successful" dry-run
            self.loggit.info(rollover_string)
        elif result['rolled_over']:
            self.loggit.info(rollover_string)
        else:
            self.loggit.info(
                '{0}Rollover conditions not met. Index {1} not rolled over.'.format(
                    dryrun_string,
                    result['old_index'])
            )

    def doit(self, dry_run=False):
        """
        This exists solely to prevent having to have duplicate code in both
        `do_dry_run` and `do_action`
        """
        return self.client.indices.rollover(
            alias=self.name,
            new_index=self.new_index,
            body=self.body(),
            dry_run=dry_run,
            wait_for_active_shards=self.wait_for_active_shards,
        )

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        self.loggit.info('DRY-RUN MODE. No changes will be made.')
        self.log_result(self.doit(dry_run=True))

    def do_action(self):
        """
        Rollover the index referenced by alias `name`
        """
        self.loggit.info('Performing index rollover')
        try:
            self.log_result(self.doit())
        except Exception as err:
            utils.report_failure(err)
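
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): rollover driven by
# age and document count. The 'logs-write' alias name is hypothetical and must
# map to exactly one index (see the rollable_alias check in __init__ above);
# the condition keys are the ones Elasticsearch's rollover API accepts.
def _example_rollover_usage(client):
    """Sketch: roll 'logs-write' once it is a day old or holds 10M docs."""
    action = Rollover(
        client, name='logs-write',
        conditions={'max_age': '1d', 'max_docs': 10000000}
    )
    action.do_action()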

class DeleteSnapshots(object):
    """Delete Snapshots Action Class"""
    def __init__(self, slo, retry_interval=120, retry_count=3):
        """
        :arg slo: A :class:`curator.snapshotlist.SnapshotList` object
        :arg retry_interval: Number of seconds to delay between retries. Default:
            120 (seconds)
        :arg retry_count: Number of attempts to make. Default: 3
        """
        utils.verify_snapshot_list(slo)
        #: Instance variable.
        #: The Elasticsearch Client object derived from `slo`
        self.client = slo.client
        #: Instance variable.
        #: Internally accessible copy of `retry_interval`
        self.retry_interval = retry_interval
        #: Instance variable.
        #: Internally accessible copy of `retry_count`
        self.retry_count = retry_count
        #: Instance variable.
        #: Internal reference to `slo`
        self.snapshot_list = slo
        #: Instance variable.
        #: The repository name derived from `slo`
        self.repository = slo.repository
        self.loggit = logging.getLogger('curator.actions.delete_snapshots')

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        self.loggit.info('DRY-RUN MODE. No changes will be made.')
        mykwargs = {
            'repository' : self.repository,
            'retry_interval' : self.retry_interval,
            'retry_count' : self.retry_count,
        }
        for snap in self.snapshot_list.snapshots:
            self.loggit.info(
                'DRY-RUN: delete_snapshot: {0} with arguments: {1}'.format(snap, mykwargs))

    def do_action(self):
        """
        Delete snapshots in `slo`. Retry up to `retry_count` times, pausing
        `retry_interval` seconds between retries.
        """
        self.snapshot_list.empty_list_check()
        self.loggit.info(
            'Deleting {0} selected snapshots: {1}'.format(
                len(self.snapshot_list.snapshots),
                self.snapshot_list.snapshots
            )
        )
        if not utils.safe_to_snap(
                self.client, repository=self.repository,
                retry_interval=self.retry_interval, retry_count=self.retry_count
        ):
            raise exceptions.FailedExecution(
                'Unable to delete snapshot(s) because a snapshot is in '
                'state "IN_PROGRESS"')
        try:
            for snap in self.snapshot_list.snapshots:
                self.loggit.info('Deleting snapshot {0}...'.format(snap))
                self.client.snapshot.delete(
                    repository=self.repository, snapshot=snap)
        except Exception as err:
            utils.report_failure(err)
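
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): pruning snapshots.
# SnapshotList is the snapshot-side analogue of IndexList; the repository
# name and the count-based filter values here are hypothetical.
def _example_delete_snapshots_usage(client):
    """Sketch: keep only the newest 14 snapshots in a repository."""
    from curator.snapshotlist import SnapshotList
    slo = SnapshotList(client, repository='my_backup_repo')
    slo.iterate_filters({'filters': [
        {'filtertype': 'count', 'count': 14}
    ]})
    DeleteSnapshots(slo, retry_interval=120, retry_count=3).do_action()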

class Reindex(object):
    """Reindex Action Class"""
    def __init__(
        self, ilo, request_body, refresh=True, requests_per_second=-1, slices=1, timeout=60,
        wait_for_active_shards=1, wait_for_completion=True, max_wait=-1, wait_interval=9,
        remote_url_prefix=None, remote_ssl_no_validate=None, remote_certificate=None,
        remote_client_cert=None, remote_client_key=None, remote_aws_key=None,
        remote_aws_secret_key=None, remote_aws_region=None, remote_filters={},
        migration_prefix='', migration_suffix=''
    ):
        """
        :arg ilo: A :class:`curator.indexlist.IndexList` object
        :arg request_body: The body to send to
            :py:meth:`elasticsearch7.Elasticsearch.reindex`, which must be complete and
            usable, as Curator will do no vetting of the request_body. If it
            fails to function, Curator will return an exception.
        :arg refresh: Whether to refresh the entire target index after the
            operation is complete. (default: `True`)
        :type refresh: bool
        :arg requests_per_second: The throttle to set on this request in
            sub-requests per second. ``-1`` means no throttle, as does
            ``unlimited``, which is the only non-float value this accepts.
            (default: ``-1``)
        :arg slices: The number of slices this task should be divided into. 1
            means the task will not be sliced into subtasks. (default: ``1``)
        :arg timeout: The length in seconds each individual bulk request should
            wait for shards that are unavailable. (default: ``60``)
        :arg wait_for_active_shards: Sets the number of shard copies that must
            be active before proceeding with the reindex operation. (default:
            ``1``) means the primary shard only. Set to ``all`` for all shard
            copies, otherwise set to any non-negative value less than or equal
            to the total number of copies for the shard (number of replicas + 1)
        :arg wait_for_completion: Wait (or not) for the operation
            to complete before returning. (default: `True`)
        :type wait_for_completion: bool
        :arg wait_interval: How long in seconds to wait between checks for
            completion.
        :arg max_wait: Maximum number of seconds to `wait_for_completion`
        :arg remote_url_prefix: `Optional` url prefix, if needed to reach the
            Elasticsearch API (i.e., it's not at the root level)
        :type remote_url_prefix: str
        :arg remote_ssl_no_validate: If `True`, do not validate the certificate
            chain. This is an insecure option and you will see warnings in the
            log output.
        :type remote_ssl_no_validate: bool
        :arg remote_certificate: Path to SSL/TLS certificate
        :arg remote_client_cert: Path to SSL/TLS client certificate (public key)
        :arg remote_client_key: Path to SSL/TLS private key
        :arg remote_aws_key: AWS IAM Access Key (Only used if the
            :mod:`requests-aws4auth` python module is installed)
        :arg remote_aws_secret_key: AWS IAM Secret Access Key (Only used if the
            :mod:`requests-aws4auth` python module is installed)
        :arg remote_aws_region: AWS Region (Only used if the
            :mod:`requests-aws4auth` python module is installed)
        :arg remote_filters: Apply these filters to the remote client for
            remote index selection.
        :arg migration_prefix: When migrating, prepend this value to the index
            name.
        :arg migration_suffix: When migrating, append this value to the index
            name.
        """
        self.loggit = logging.getLogger('curator.actions.reindex')
        utils.verify_index_list(ilo)
        # Normally, we'd check for an empty list here. But since we can reindex
        # from remote, we might just be starting with an empty one.
        # ilo.empty_list_check()
        if not isinstance(request_body, dict):
            raise exceptions.ConfigurationError('"request_body" is not of type dictionary')
        #: Instance variable.
        #: Internal reference to `request_body`
        self.body = request_body
        self.loggit.debug('REQUEST_BODY = {0}'.format(request_body))
        #: Instance variable.
        #: The Elasticsearch Client object derived from `ilo`
        self.client = ilo.client
        #: Instance variable.
        #: Internal reference to `ilo`
        self.index_list = ilo
        #: Instance variable.
        #: Internal reference to `refresh`
        self.refresh = refresh
        #: Instance variable.
        #: Internal reference to `requests_per_second`
        self.requests_per_second = requests_per_second
        #: Instance variable.
        #: Internal reference to `slices`
        self.slices = slices
        #: Instance variable.
        #: Internal reference to `timeout`, with "s" appended for seconds.
        self.timeout = '{0}s'.format(timeout)
        #: Instance variable.
        #: Internal reference to `wait_for_active_shards`
        self.wait_for_active_shards = wait_for_active_shards
        #: Instance variable.
        #: Internal reference to `wait_for_completion`
        self.wfc = wait_for_completion
        #: Instance variable
        #: How many seconds to wait between checks for completion.
        self.wait_interval = wait_interval
        #: Instance variable.
        #: How long in seconds to `wait_for_completion` before returning with an
        #: exception. A value of -1 means wait forever.
        self.max_wait = max_wait
        #: Instance variable.
        #: Internal reference to `migration_prefix`
        self.mpfx = migration_prefix
        #: Instance variable.
        #: Internal reference to `migration_suffix`
        self.msfx = migration_suffix

1225 self.remote = False 

1226 if 'remote' in self.body['source']: 

1227 self.remote = True 

1228 

1229 self.migration = False 

1230 if self.body['dest']['index'] == 'MIGRATION': 

1231 self.migration = True 

1232 

1233 if self.migration: 

1234 if not self.remote and not self.mpfx and not self.msfx: 

1235 raise exceptions.ConfigurationError( 

1236 'MIGRATION can only be used locally with one or both of ' 

1237 'migration_prefix or migration_suffix.' 

1238 ) 

1239 

1240 # REINDEX_SELECTION is the designated token. If you use this for the 

1241 # source "index," it will be replaced with the list of indices from the 

1242 # provided 'ilo' (index list object). 

1243 if self.body['source']['index'] == 'REINDEX_SELECTION' \ 

1244 and not self.remote: 

1245 self.body['source']['index'] = self.index_list.indices 

1246 

1247 # Remote section 

1248 elif self.remote: 

1249 self.loggit.debug('Remote reindex request detected') 

1250 if 'host' not in self.body['source']['remote']: 

1251 raise exceptions.ConfigurationError('Missing remote "host"') 

1252 rclient_info = {} 

1253 for k in ['host', 'username', 'password']: 

1254 rclient_info[k] = self.body['source']['remote'][k] \ 

1255 if k in self.body['source']['remote'] else None 

1256 rhost = rclient_info['host'] 

1257 try: 

1258 # Save these for logging later 

1259 _ = rhost.split(':') 

1260 self.remote_port = _[2] 

1261 self.remote_host = _[1][2:] 

1262 except Exception as err: 

1263 raise exceptions.ConfigurationError( 

1264 'Host must be in the form [scheme]://[host]:[port] but ' 

1265 'was [{0}]'.format(rhost) 

1266 ) 

1267 rhttp_auth = '{0}:{1}'.format( 

1268 rclient_info['username'], rclient_info['password']) \ 

1269 if (rclient_info['username'] and rclient_info['password']) else None 

1270 if rhost[:5] == 'http:': 

1271 use_ssl = False 

1272 elif rhost[:5] == 'https': 

1273 use_ssl = True 

1274 else: 

1275 raise exceptions.ConfigurationError( 

1276 'Host must be in URL format. You provided: ' 

1277 '{0}'.format(rclient_info['host']) 

1278 ) 

1279 

1280 # Let's set a decent remote timeout for initially reading 

1281 # the indices on the other side, and collecting their metadata 

1282 remote_timeout = 180 

1283 

1284 # The rest only applies if using filters for remote indices 

1285 if self.body['source']['index'] == 'REINDEX_SELECTION': 

1286 self.loggit.debug('Filtering indices from remote') 

1287 from .indexlist import IndexList 

1288 self.loggit.debug( 

1289 'Remote client args: ' 

1290 'host={0} ' 

1291 'http_auth={1} ' 

1292 'url_prefix={2} ' 

1293 'use_ssl={3} ' 

1294 'ssl_no_validate={4} ' 

1295 'certificate={5} ' 

1296 'client_cert={6} ' 

1297 'client_key={7} ' 

1298 'aws_key={8} ' 

1299 'aws_secret_key={9} ' 

1300 'aws_region={10} ' 

1301 'timeout={11} ' 

1302 'skip_version_test=True'.format( 

1303 rhost, 

1304 rhttp_auth, 

1305 remote_url_prefix, 

1306 use_ssl, 

1307 remote_ssl_no_validate, 

1308 remote_certificate, 

1309 remote_client_cert, 

1310 remote_client_key, 

1311 remote_aws_key, 

1312 remote_aws_secret_key, 

1313 remote_aws_region, 

1314 remote_timeout 

1315 ) 

1316 ) 

1317 

1318 try: # let's try to build a remote connection with these! 

1319 rclient = utils.get_client( 

1320 host=rhost, 

1321 http_auth=rhttp_auth, 

1322 url_prefix=remote_url_prefix, 

1323 use_ssl=use_ssl, 

1324 ssl_no_validate=remote_ssl_no_validate, 

1325 certificate=remote_certificate, 

1326 client_cert=remote_client_cert, 

1327 client_key=remote_client_key, 

1328 aws_key=remote_aws_key, 

1329 aws_secret_key=remote_aws_secret_key, 

1330 aws_region=remote_aws_region, 

1331 skip_version_test=True, 

1332 timeout=remote_timeout 

1333 ) 

1334 except Exception as err: 

1335 self.loggit.error( 

1336 'Unable to establish connection to remote Elasticsearch' 

1337 ' with provided credentials/certificates/settings.' 

1338 ) 

1339 utils.report_failure(err) 

1340 try: 

1341 rio = IndexList(rclient) 

1342 rio.iterate_filters({'filters': remote_filters}) 

1343 try: 

1344 rio.empty_list_check() 

1345 except exceptions.NoIndices: 

1346 raise exceptions.FailedExecution( 

1347 'No actionable remote indices selected after ' 

1348 'applying filters.' 

1349 ) 

1350 self.body['source']['index'] = rio.indices 

1351 except Exception as err: 

1352 self.loggit.error( 

1353 'Unable to get/filter list of remote indices.' 

1354 ) 

1355 utils.report_failure(err) 

1356 

1357 self.loggit.debug( 

1358 'Reindexing indices: {0}'.format(self.body['source']['index'])) 

1359 

    def _get_request_body(self, source, dest):
        body = deepcopy(self.body)
        body['source']['index'] = source
        body['dest']['index'] = dest
        return body

    def _get_reindex_args(self, source, dest):
        # Always set wait_for_completion to False. Let 'utils.wait_for_it' do its
        # thing if wait_for_completion is set to True. Report the task_id
        # either way.
        reindex_args = {
            'body': self._get_request_body(source, dest), 'refresh': self.refresh,
            'requests_per_second': self.requests_per_second,
            'timeout': self.timeout,
            'wait_for_active_shards': self.wait_for_active_shards,
            'wait_for_completion': False,
            'slices': self.slices
        }
        version = utils.get_version(self.client)
        if version < (5, 1, 0):
            self.loggit.info(
                'Your version of elasticsearch ({0}) does not support '
                'sliced scroll for reindex, so that setting will not be '
                'used'.format(version)
            )
            del reindex_args['slices']
        return reindex_args

    def get_processed_items(self, task_id):
        """
        This function calls client.tasks.get with the provided `task_id`. It will get the value
        from ``'response.total'`` as the total number of elements processed during reindexing.
        If the value is not found, it will return -1

        :arg task_id: A task_id which ostensibly matches a task searchable in the
            tasks API.
        """
        try:
            task_data = self.client.tasks.get(task_id=task_id)
        except Exception as err:
            raise exceptions.CuratorException(
                'Unable to obtain task information for task_id "{0}". Exception '
                '{1}'.format(task_id, err)
            )
        total_processed_items = -1
        task = task_data['task']
        if task['action'] == 'indices:data/write/reindex':
            self.loggit.debug('It\'s a REINDEX TASK')
            self.loggit.debug('TASK_DATA: {0}'.format(task_data))
            self.loggit.debug('TASK_DATA keys: {0}'.format(list(task_data.keys())))
            if 'response' in task_data:
                response = task_data['response']
                total_processed_items = response['total']
                self.loggit.debug('total_processed_items = {0}'.format(total_processed_items))

        return total_processed_items

    def _post_run_quick_check(self, index_name, task_id):
        # Check whether any documents were processed;
        # if no documents were processed, the target index "dest" won't exist
        processed_items = self.get_processed_items(task_id)
        if processed_items == 0:
            self.loggit.info(
                'No items were processed. Will not check if target index "{0}" '
                'exists'.format(index_name)
            )
        else:
            # Verify the destination index is there after the fact
            index_exists = self.client.indices.exists(index=index_name)
            alias_instead = self.client.indices.exists_alias(name=index_name)
            if not index_exists and not alias_instead:
                self.loggit.error(
                    'The index described as "{0}" was not found after the reindex '
                    'operation. Check Elasticsearch logs for more '
                    'information.'.format(index_name)
                )
                if self.remote:
                    self.loggit.error(
                        'Did you forget to add "reindex.remote.whitelist: '
                        '{0}:{1}" to the elasticsearch.yml file on the '
                        '"dest" node?'.format(
                            self.remote_host, self.remote_port
                        )
                    )
                raise exceptions.FailedExecution(
                    'Reindex failed. The index or alias identified by "{0}" was '
                    'not found.'.format(index_name)
                )

    def sources(self):
        """Generator for sources & dests"""
        dest = self.body['dest']['index']
        source_list = utils.ensure_list(self.body['source']['index'])
        self.loggit.debug('source_list: {0}'.format(source_list))
        if not source_list or source_list == ['REINDEX_SELECTION']: # Empty list
            raise exceptions.NoIndices
        if not self.migration:
            yield self.body['source']['index'], dest

        # Loop over all sources (default will only be one)
        else:
            for source in source_list:
                if self.migration:
                    dest = self.mpfx + source + self.msfx
                yield source, dest

    def show_run_args(self, source, dest):
        """
        Show what will run
        """
        return (
            'request body: {0} with arguments: '
            'refresh={1} '
            'requests_per_second={2} '
            'slices={3} '
            'timeout={4} '
            'wait_for_active_shards={5} '
            'wait_for_completion={6}'.format(
                self._get_request_body(source, dest),
                self.refresh,
                self.requests_per_second,
                self.slices,
                self.timeout,
                self.wait_for_active_shards,
                self.wfc
            )
        )

    def do_dry_run(self):
        """
        Log what the output would be, but take no action.
        """
        self.loggit.info('DRY-RUN MODE. No changes will be made.')
        for source, dest in self.sources():
            self.loggit.info(
                'DRY-RUN: REINDEX: {0}'.format(self.show_run_args(source, dest))
            )

1499 def do_action(self): 

1500 """ 

1501 Execute :py:meth:`elasticsearch7.Elasticsearch.reindex` operation with the 

1502 provided request_body and arguments. 

1503 """ 

1504 try: 

1505 # Loop over all sources (default will only be one) 

1506 for source, dest in self.sources(): 

1507 self.loggit.info('Commencing reindex operation') 

1508 self.loggit.debug( 

1509 'REINDEX: {0}'.format(self.show_run_args(source, dest))) 

1510 response = self.client.reindex(**self._get_reindex_args(source, dest)) 

1511 

1512 self.loggit.debug('TASK ID = {0}'.format(response['task'])) 

1513 if self.wfc: 

1514 utils.wait_for_it( 

1515 self.client, 'reindex', task_id=response['task'], 

1516 wait_interval=self.wait_interval, max_wait=self.max_wait 

1517 ) 

1518 self._post_run_quick_check(dest, response['task']) 

1519 

1520 else: 

1521 self.loggit.warning( 

1522 '"wait_for_completion" set to {0}. Remember ' 

1523 'to check task_id "{1}" for successful completion ' 

1524 'manually.'.format(self.wfc, response['task']) 

1525 ) 

1526 except exceptions.NoIndices: 

1527 raise exceptions.NoIndices( 

1528 'Source index must be a list of actual indices. ' 

1529 'It must not be an empty list.') 

1530 except Exception as err: 

1531 utils.report_failure(err) 

1532 

1533 
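# --- Illustrative sketch of the pattern do_action() follows: start the
# --- reindex asynchronously, keep the task id, and poll until completion.
# --- The request body, host, and 9-second interval are assumptions.
import time
from elasticsearch7 import Elasticsearch

client = Elasticsearch()
body = {'source': {'index': 'source-index'}, 'dest': {'index': 'dest-index'}}
response = client.reindex(body=body, refresh=True, wait_for_completion=False)
task_id = response['task']
while not client.tasks.get(task_id=task_id).get('completed'):
    time.sleep(9)  # mirrors the default wait_interval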

1534class Snapshot(object): 

1535 """Snapshot Action Class""" 

1536 def __init__( 

1537 self, ilo, repository=None, name=None, ignore_unavailable=False, 

1538 include_global_state=True, partial=False, wait_for_completion=True, wait_interval=9, 

1539 max_wait=-1, skip_repo_fs_check=False 

1540 ): 

1541 """ 

1542 :arg ilo: A :class:`curator.indexlist.IndexList` object 

1543 :arg repository: The Elasticsearch snapshot repository to use 

1544 :arg name: What to name the snapshot. 

1545 :arg wait_for_completion: Wait (or not) for the operation 

1546 to complete before returning. (default: `True`) 

1547 :type wait_for_completion: bool 

1548 :arg wait_interval: How long in seconds to wait between checks for 

1549 completion. 

1550 :arg max_wait: Maximum number of seconds to `wait_for_completion` 

1551 :arg ignore_unavailable: Ignore unavailable shards/indices. 

1552 (default: `False`) 

1553 :type ignore_unavailable: bool 

1554 :arg include_global_state: Store cluster global state with snapshot. 

1555 (default: `True`) 

1556 :type include_global_state: bool 

1557 :arg partial: Do not fail if primary shard is unavailable. (default: 

1558 `False`) 

1559 :type partial: bool 

1560 :arg skip_repo_fs_check: Do not validate write access to repository on 

1561 all cluster nodes before proceeding. (default: `False`). Useful for 

1562 shared filesystems where intermittent timeouts can affect 

1563 validation, but are unlikely to affect snapshot success. 

1564 :type skip_repo_fs_check: bool 

1565 """ 

1566 utils.verify_index_list(ilo) 

1567 # Check here and don't bother with the rest of this if there are no 

1568 # indices in the index list. 

1569 ilo.empty_list_check() 

1570 if not utils.repository_exists(ilo.client, repository=repository): 

1571 raise exceptions.ActionError( 

1572 'Cannot snapshot indices to missing repository: ' 

1573 '{0}'.format(repository) 

1574 ) 

1575 if not name: 

1576 raise exceptions.MissingArgument('No value for "name" provided.') 

1577 #: Instance variable. 

1578 #: The Elasticsearch Client object derived from `ilo` 

1579 self.client = ilo.client 

1580 #: Instance variable. 

1581 #: The parsed version of `name` 

1582 self.name = utils.parse_datemath(self.client, utils.parse_date_pattern(name)) 

1583 #: Instance variable. 

1584 #: Internal reference to `ilo` 

1585 self.index_list = ilo 

1586 #: Instance variable. 

1587 #: Internally accessible copy of `repository` 

1588 self.repository = repository 

1589 #: Instance variable. 

1590 #: Internally accessible copy of `wait_for_completion` 

1591 self.wait_for_completion = wait_for_completion 

1592 #: Instance variable 

1593 #: How many seconds to wait between checks for completion. 

1594 self.wait_interval = wait_interval 

1595 #: Instance variable. 

1596 #: How long in seconds to `wait_for_completion` before returning with an 

1597 #: exception. A value of -1 means wait forever. 

1598 self.max_wait = max_wait 

1599 #: Instance variable. 

1600 #: Internally accessible copy of `skip_repo_fs_check` 

1601 self.skip_repo_fs_check = skip_repo_fs_check 

1602 self.state = None 

1603 

1604 #: Instance variable. 

1605 #: Populated at instance creation time by calling 

1606 #: :mod:`curator.utils.create_snapshot_body` with `ilo.indices` and the 

1607 #: provided arguments: `ignore_unavailable`, `include_global_state`, 

1608 #: `partial` 

1609 self.body = utils.create_snapshot_body( 

1610 ilo.indices, 

1611 ignore_unavailable=ignore_unavailable, 

1612 include_global_state=include_global_state, 

1613 partial=partial 

1614 ) 

1615 

1616 self.loggit = logging.getLogger('curator.actions.snapshot') 

1617 
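# --- Illustrative sketch: the general shape of the request body built by
# --- utils.create_snapshot_body (an assumption based on the arguments
# --- above, not a verbatim dump of its output).
body = {
    'indices': 'index-2023.08.01,index-2023.08.02',
    'ignore_unavailable': False,
    'include_global_state': True,
    'partial': False,
}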

1618 def get_state(self): 

1619 """ 

1620 Get the state of the snapshot 

1621 """ 

1622 try: 

1623 self.state = self.client.snapshot.get( 

1624 repository=self.repository, 

1625 snapshot=self.name)['snapshots'][0]['state'] 

1626 return self.state 

1627 except IndexError: 

1628 raise exceptions.CuratorException( 

1629 'Snapshot "{0}" not found in repository ' 

1630 '"{1}"'.format(self.name, self.repository) 

1631 ) 

1632 

1633 def report_state(self): 

1634 """ 

1635 Log the state of the snapshot and raise an exception if the state is 

1636 not ``SUCCESS`` 

1637 """ 

1638 self.get_state() 

1639 if self.state == 'SUCCESS': 

1640 self.loggit.info('Snapshot {0} successfully completed.'.format(self.name)) 

1641 else: 

1642 msg = 'Snapshot {0} completed with state: {1}'.format(self.name, self.state) 

1643 self.loggit.error(msg) 

1644 raise exceptions.FailedSnapshot(msg) 

1645 
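# --- Illustrative sketch of the response get_state() reads. Repository and
# --- snapshot names are placeholders.
from elasticsearch7 import Elasticsearch

client = Elasticsearch()
resp = client.snapshot.get(repository='my_repo', snapshot='snap-2023.08.16')
state = resp['snapshots'][0]['state']  # e.g. 'SUCCESS', 'PARTIAL', 'FAILED'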

1646 def do_dry_run(self): 

1647 """ 

1648 Log what the output would be, but take no action. 

1649 """ 

1650 self.loggit.info('DRY-RUN MODE. No changes will be made.') 

1651 self.loggit.info( 

1652 'DRY-RUN: snapshot: {0} in repository {1} with arguments: ' 

1653 '{2}'.format(self.name, self.repository, self.body) 

1654 ) 

1655 

1656 def do_action(self): 

1657 """ 

1658 Snapshot indices in `index_list.indices`, with options passed. 

1659 """ 

1660 if not self.skip_repo_fs_check: 

1661 utils.test_repo_fs(self.client, self.repository) 

1662 if utils.snapshot_running(self.client): 

1663 raise exceptions.SnapshotInProgress('Snapshot already in progress.') 

1664 try: 

1665 self.loggit.info( 

1666 'Creating snapshot "{0}" from indices: {1}'.format( 

1667 self.name, self.index_list.indices 

1668 ) 

1669 ) 

1670 # Always set wait_for_completion to False. Let 'utils.wait_for_it' do its 

1671 # thing if wait_for_completion is set to True. Report the task_id 

1672 # either way. 

1673 self.client.snapshot.create( 

1674 repository=self.repository, snapshot=self.name, body=self.body, 

1675 wait_for_completion=False 

1676 ) 

1677 if self.wait_for_completion: 

1678 utils.wait_for_it( 

1679 self.client, 'snapshot', snapshot=self.name, 

1680 repository=self.repository, 

1681 wait_interval=self.wait_interval, max_wait=self.max_wait 

1682 ) 

1683 self.report_state() 

1684 else: 

1685 self.loggit.warning( 

1686 '"wait_for_completion" set to {0}.' 

1687 'Remember to check for successful completion ' 

1688 'manually.'.format(self.wait_for_completion) 

1689 ) 

1690 except Exception as err: 

1691 utils.report_failure(err) 

1692 
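# --- Illustrative sketch (assumptions: a reachable cluster, an existing
# --- repository named 'my_repo', and curator's IndexList filter API): how a
# --- caller might drive the Snapshot action defined above.
from elasticsearch7 import Elasticsearch
from curator.indexlist import IndexList

client = Elasticsearch()
ilo = IndexList(client)
ilo.filter_by_regex(kind='prefix', value='logs-')
snap = Snapshot(ilo, repository='my_repo', name='snapshot-%Y.%m.%d')
snap.do_dry_run()  # log what would happen
snap.do_action()   # create the snapshot and wait for completion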

1693class Restore(object): 

1694 """Restore Action Class""" 

1695 def __init__( 

1696 self, slo, name=None, indices=None, include_aliases=False, ignore_unavailable=False, 

1697 include_global_state=False, partial=False, rename_pattern=None, 

1698 rename_replacement=None, extra_settings={}, wait_for_completion=True, wait_interval=9, 

1699 max_wait=-1, skip_repo_fs_check=False 

1700 ): 

1701 """ 

1702 :arg slo: A :class:`curator.snapshotlist.SnapshotList` object 

1703 :arg name: Name of the snapshot to restore. If no name is provided, it 

1704 will restore the most recent snapshot by age. 

1705 :type name: str 

1706 :arg indices: A list of indices to restore. If no indices are provided, 

1707 it will restore all indices in the snapshot. 

1708 :type indices: list 

1709 :arg include_aliases: If set to `True`, restore aliases with the 

1710 indices. (default: `False`) 

1711 :type include_aliases: bool 

1712 :arg ignore_unavailable: Ignore unavailable shards/indices. 

1713 (default: `False`) 

1714 :type ignore_unavailable: bool 

1715 :arg include_global_state: Restore cluster global state with snapshot. 

1716 (default: `False`) 

1717 :type include_global_state: bool 

1718 :arg partial: Do not fail if primary shard is unavailable. (default: 

1719 `False`) 

1720 :type partial: bool 

1721 :arg rename_pattern: A regular expression pattern with one or more 

1722 captures, e.g. ``index_(.+)`` 

1723 :type rename_pattern: str 

1724 :arg rename_replacement: A target index name pattern with `$#` numbered 

1725 references to the captures in ``rename_pattern``, e.g. 

1726 ``restored_index_$1`` 

1727 :type rename_replacement: str 

1728 :arg extra_settings: Extra settings, including shard count and settings 

1729 to omit. For more information see 

1730 https://www.elastic.co/guide/en/elasticsearch/reference/6.8/snapshots-restore-snapshot.html#change-index-settings-during-restore 

1731 :type extra_settings: dict, representing the settings. 

1732 :arg wait_for_completion: Wait (or not) for the operation 

1733 to complete before returning. (default: `True`) 

1734 :arg wait_interval: How long in seconds to wait between checks for 

1735 completion. 

1736 :arg max_wait: Maximum number of seconds to `wait_for_completion` 

1737 :type wait_for_completion: bool 

1738 

1739 :arg skip_repo_fs_check: Do not validate write access to repository on 

1740 all cluster nodes before proceeding. (default: `False`). Useful for 

1741 shared filesystems where intermittent timeouts can affect 

1742 validation, but are unlikely to affect snapshot success. 

1743 :type skip_repo_fs_check: bool 

1744 """ 

1745 self.loggit = logging.getLogger('curator.actions.snapshot') 

1746 utils.verify_snapshot_list(slo) 

1747 # Get the most recent snapshot. 

1748 most_recent = slo.most_recent() 

1749 self.loggit.debug('"most_recent" snapshot: {0}'.format(most_recent)) 

1750 #: Instance variable. 

1751 #: Will use a provided snapshot name, or the most recent snapshot in slo 

1752 self.name = name if name else most_recent 

1753 # Stop here now, if it's not a successful snapshot. 

1754 if slo.snapshot_info[self.name]['state'] == 'PARTIAL' and partial: 

1755 self.loggit.warning( 

1756 'Performing restore of snapshot in state PARTIAL.') 

1757 elif slo.snapshot_info[self.name]['state'] != 'SUCCESS': 

1758 raise exceptions.CuratorException( 

1759 'Restore operation can only be performed on snapshots with ' 

1760 'state "SUCCESS", or "PARTIAL" if partial=True.' 

1761 ) 

1762 #: Instance variable. 

1763 #: The Elasticsearch Client object derived from `slo` 

1764 self.client = slo.client 

1765 #: Instance variable. 

1766 #: Internal reference to `slo` 

1767 self.snapshot_list = slo 

1768 #: Instance variable. 

1769 #: `repository` derived from `slo` 

1770 self.repository = slo.repository 

1771 

1772 if indices: 

1773 self.indices = utils.ensure_list(indices) 

1774 else: 

1775 self.indices = slo.snapshot_info[self.name]['indices'] 

1776 self.wfc = wait_for_completion 

1777 #: Instance variable 

1778 #: How many seconds to wait between checks for completion. 

1779 self.wait_interval = wait_interval 

1780 #: Instance variable. 

1781 #: How long in seconds to `wait_for_completion` before returning with an 

1782 #: exception. A value of -1 means wait forever. 

1783 self.max_wait = max_wait 

1784 #: Instance variable version of ``rename_pattern`` 

1785 self.rename_pattern = rename_pattern if rename_pattern is not None \ 

1786 else '' 

1787 #: Instance variable version of ``rename_replacement`` 

1788 self.rename_replacement = rename_replacement if rename_replacement \ 

1789 is not None else '' 

1790 #: Also an instance variable version of ``rename_replacement`` 

1791 #: but with Java regex group designations of ``$#`` 

1792 #: converted to Python's ``\\#`` style. 

1793 self.py_rename_replacement = self.rename_replacement.replace('$', '\\') 

1794 #: Instance variable. 

1795 #: Internally accessible copy of `skip_repo_fs_check` 

1796 self.skip_repo_fs_check = skip_repo_fs_check 

1797 

1798 #: Instance variable. 

1799 #: Populated at instance creation time from the other options 

1800 self.body = { 

1801 'indices' : self.indices, 

1802 'include_aliases' : include_aliases, 

1803 'ignore_unavailable' : ignore_unavailable, 

1804 'include_global_state' : include_global_state, 

1805 'partial' : partial, 

1806 'rename_pattern' : self.rename_pattern, 

1807 'rename_replacement' : self.rename_replacement, 

1808 } 

1809 if extra_settings: 

1810 self.loggit.debug( 

1811 'Adding extra_settings to restore body: ' 

1812 '{0}'.format(extra_settings) 

1813 ) 

1814 try: 

1815 self.body.update(extra_settings) 

1816 except Exception as err: 

1817 self.loggit.error( 

1818 'Unable to apply extra settings to restore body: {0}'.format(err)) 

1819 self.loggit.debug('REPOSITORY: {0}'.format(self.repository)) 

1820 self.loggit.debug('WAIT_FOR_COMPLETION: {0}'.format(self.wfc)) 

1821 self.loggit.debug( 

1822 'SKIP_REPO_FS_CHECK: {0}'.format(self.skip_repo_fs_check)) 

1823 self.loggit.debug('BODY: {0}'.format(self.body)) 

1824 # Populate the expected output index list. 

1825 self._get_expected_output() 

1826 

1827 def _get_expected_output(self): 

1828 if not self.rename_pattern and not self.rename_replacement: 

1829 self.expected_output = self.indices 

1830 return # Don't stick around if we're not replacing anything 

1831 self.expected_output = [] 

1832 for index in self.indices: 

1833 self.expected_output.append( 

1834 re.sub( 

1835 self.rename_pattern, 

1836 self.py_rename_replacement, 

1837 index 

1838 ) 

1839 ) 

1840 self.loggit.debug('index: {0} replacement: {1}'.format(index, self.expected_output[-1])) 

1841 
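# --- Illustrative sketch: applying the Java-style rename_replacement after
# --- the '$' -> '\\' conversion. Pattern and names are the docstring's own
# --- examples.
import re

rename_pattern = 'index_(.+)'
py_rename_replacement = 'restored_index_$1'.replace('$', '\\')
print(re.sub(rename_pattern, py_rename_replacement, 'index_2023.08.16'))
# -> restored_index_2023.08.16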

1842 def report_state(self): 

1843 """ 

1844 Log the state of the restore 

1845 This should only be done if ``wait_for_completion`` is `True`, and only 

1846 after completing the restore. 

1847 """ 

1848 all_indices = utils.get_indices(self.client) 

1849 found_count = 0 

1850 missing = [] 

1851 for index in self.expected_output: 

1852 if index in all_indices: 

1853 found_count += 1 

1854 self.loggit.info('Found restored index {0}'.format(index)) 

1855 else: 

1856 missing.append(index) 

1857 if found_count == len(self.expected_output): 

1858 self.loggit.info('All indices appear to have been restored.') 

1859 else: 

1860 msg = ( 

1861 'Some of the indices do not appear to have been restored. Missing: ' 

1862 '{0}'.format(missing) 

1863 ) 

1864 self.loggit.error(msg) 

1865 raise exceptions.FailedRestore(msg) 

1866 

1867 def do_dry_run(self): 

1868 """ 

1869 Log what the output would be, but take no action. 

1870 """ 

1871 self.loggit.info('DRY-RUN MODE. No changes will be made.') 

1872 self.loggit.info( 

1873 'DRY-RUN: restore: Repository: {0} Snapshot name: {1} Arguments: ' 

1874 '{2}'.format( 

1875 self.repository, self.name, 

1876 {'wait_for_completion' : self.wfc, 'body' : self.body} 

1877 ) 

1878 ) 

1879 

1880 for index in self.indices: 

1881 if self.rename_pattern and self.rename_replacement: 

1882 replacement_msg = 'as {0}'.format( 

1883 re.sub( 

1884 self.rename_pattern, 

1885 self.py_rename_replacement, 

1886 index 

1887 ) 

1888 ) 

1889 else: 

1890 replacement_msg = '' 

1891 self.loggit.info( 

1892 'DRY-RUN: restore: Index {0} {1}'.format(index, replacement_msg) 

1893 ) 

1894 

1895 def do_action(self): 

1896 """ 

1897 Restore indices with options passed. 

1898 """ 

1899 if not self.skip_repo_fs_check: 

1900 utils.test_repo_fs(self.client, self.repository) 

1901 if utils.snapshot_running(self.client): 

1902 raise exceptions.SnapshotInProgress('Cannot restore while a snapshot is in progress.') 

1903 try: 

1904 self.loggit.info( 

1905 'Restoring indices "{0}" from snapshot: {1}'.format(self.indices, self.name) 

1906 ) 

1907 # Always set wait_for_completion to False. Let 'utils.wait_for_it' do its 

1908 # thing if wait_for_completion is set to True. Report the task_id 

1909 # either way. 

1910 self.client.snapshot.restore( 

1911 repository=self.repository, snapshot=self.name, body=self.body, 

1912 wait_for_completion=False 

1913 ) 

1914 if self.wfc: 

1915 utils.wait_for_it( 

1916 self.client, 'restore', index_list=self.expected_output, 

1917 wait_interval=self.wait_interval, max_wait=self.max_wait 

1918 ) 

1919 self.report_state() 

1920 else: 

1921 self.loggit.warning( 

1922 '"wait_for_completion" set to {0}. ' 

1923 'Remember to check for successful completion ' 

1924 'manually.'.format(self.wfc) 

1925 ) 

1926 except Exception as err: 

1927 utils.report_failure(err) 

1928 
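# --- Illustrative sketch (assumptions: a reachable cluster, repository
# --- 'my_repo', and curator's SnapshotList API): restoring the most recent
# --- snapshot with the rename options documented above.
from elasticsearch7 import Elasticsearch
from curator.snapshotlist import SnapshotList

client = Elasticsearch()
slo = SnapshotList(client, repository='my_repo')
restore = Restore(
    slo,  # name=None, so the most recent snapshot is used
    rename_pattern='index_(.+)',
    rename_replacement='restored_index_$1',
)
restore.do_dry_run()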

1929class Shrink(object): 

1930 """Shrink Action Class""" 

1931 def __init__( 

1932 self, ilo, shrink_node='DETERMINISTIC', node_filters={}, number_of_shards=1, 

1933 number_of_replicas=1, shrink_prefix='', shrink_suffix='-shrink', copy_aliases=False, 

1934 delete_after=True, post_allocation={}, wait_for_active_shards=1, 

1935 wait_for_rebalance=True, extra_settings={}, wait_for_completion=True, wait_interval=9, 

1936 max_wait=-1 

1937 ): 

1938 """ 

1939 :arg ilo: A :class:`curator.indexlist.IndexList` object 

1940 :arg shrink_node: The node name to use as the shrink target, or 

1941 ``DETERMINISTIC``, which will use the values in ``node_filters`` to 

1942 determine which node will be the shrink node. 

1943 :arg node_filters: If the value of ``shrink_node`` is ``DETERMINISTIC``, 

1944 the values in ``node_filters`` will be used while determining which 

1945 node to allocate the shards on before performing the shrink. 

1946 :type node_filters: dict, representing the filters 

1947 :arg number_of_shards: The number of shards the shrunk index should have 

1948 :arg number_of_replicas: The number of replicas for the shrunk index 

1949 :arg shrink_prefix: Prepend the shrunk index with this value 

1950 :arg shrink_suffix: Append the value to the shrunk index (default: `-shrink`) 

1951 :arg copy_aliases: Whether to copy each source index's aliases to the target 

1952 index after shrinking. The aliases are added to the target index and removed 

1953 from the source index at the same time. (default: `False`) 

1954 :type copy_aliases: bool 

1955 :arg delete_after: Whether to delete each index after shrinking. (default: `True`) 

1956 :type delete_after: bool 

1957 :arg post_allocation: If populated, the `allocation_type`, `key`, and 

1958 `value` will be applied to the shrunk index to re-route it. 

1959 :type post_allocation: dict, with keys `allocation_type`, `key`, and `value` 

1960 :arg wait_for_active_shards: The number of shards expected to be active before returning. 

1961 :arg extra_settings: Permitted root keys are `settings` and `aliases`. 

1962 :type extra_settings: dict 

1963 :arg wait_for_rebalance: Wait for rebalance. (default: `True`) 

1964 :type wait_for_rebalance: bool 

1965 :type wait_for_active_shards: int 

1966 :arg wait_for_completion: Wait (or not) for the operation 

1967 to complete before returning. You should not normally change this, 

1968 ever. (default: `True`) 

1969 :arg wait_interval: How long in seconds to wait between checks for 

1970 completion. 

1971 :arg max_wait: Maximum number of seconds to `wait_for_completion` 

1972 :type wait_for_completion: bool 

1973 """ 

1974 self.loggit = logging.getLogger('curator.actions.shrink') 

1975 utils.verify_index_list(ilo) 

1976 if 'permit_masters' not in node_filters: 

1977 node_filters['permit_masters'] = False 

1978 #: Instance variable. The Elasticsearch Client object derived from `ilo` 

1979 self.client = ilo.client 

1980 #: Instance variable. Internal reference to `ilo` 

1981 self.index_list = ilo 

1982 #: Instance variable. Internal reference to `shrink_node` 

1983 self.shrink_node = shrink_node 

1984 #: Instance variable. Internal reference to `node_filters` 

1985 self.node_filters = node_filters 

1986 #: Instance variable. Internal reference to `shrink_prefix` 

1987 self.shrink_prefix = shrink_prefix 

1988 #: Instance variable. Internal reference to `shrink_suffix` 

1989 self.shrink_suffix = shrink_suffix 

1990 #: Instance variable. Internal reference to `copy_aliases` 

1991 self.copy_aliases = copy_aliases 

1992 #: Instance variable. Internal reference to `delete_after` 

1993 self.delete_after = delete_after 

1994 #: Instance variable. Internal reference to `post_allocation` 

1995 self.post_allocation = post_allocation 

1996 #: Instance variable. Internal reference to `wait_for_rebalance` 

1997 self.wait_for_rebalance = wait_for_rebalance 

1998 #: Instance variable. Internal reference to `wait_for_completion` 

1999 self.wfc = wait_for_completion 

2000 #: Instance variable. How many seconds to wait between checks for completion. 

2001 self.wait_interval = wait_interval 

2002 #: Instance variable. How long in seconds to `wait_for_completion` before returning with an 

2003 #: exception. A value of -1 means wait forever. 

2004 self.max_wait = max_wait 

2005 #: Instance variable. Internal reference to `number_of_shards` 

2006 self.number_of_shards = number_of_shards 

2007 self.wait_for_active_shards = wait_for_active_shards 

2008 self.shrink_node_name = None 

2009 

2010 self.body = { 

2011 'settings': { 

2012 'index.number_of_shards' : number_of_shards, 

2013 'index.number_of_replicas' : number_of_replicas, 

2014 } 

2015 } 

2016 

2017 if extra_settings: 

2018 self._merge_extra_settings(extra_settings) 

2019 

2020 if utils.get_version(self.client) >= (6, 1, 0): 

2021 self._merge_extra_settings({ 

2022 'settings': { 

2023 'index.routing.allocation.require._name': None, 

2024 'index.blocks.write': None 

2025 }}) 

2026 

2027 def _merge_extra_settings(self, extra_settings): 

2028 self.loggit.debug( 

2029 'Adding extra_settings to shrink body: ' 

2030 '{0}'.format(extra_settings) 

2031 ) 

2032 # Pop these here, otherwise we could overwrite our default number of 

2033 # shards and replicas 

2034 if 'settings' in extra_settings: 

2035 settings = extra_settings.pop('settings') 

2036 try: 

2037 self.body['settings'].update(settings) 

2038 except Exception as err: 

2039 raise exceptions.ConfigurationError( 

2040 'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format( 

2041 {'settings':settings}, err 

2042 ) 

2043 ) 

2044 if extra_settings: 

2045 try: # Apply any remaining keys, should there be any. 

2046 self.body.update(extra_settings) 

2047 except Exception as err: 

2048 raise exceptions.ConfigurationError( 

2049 'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format( 

2050 extra_settings, err 

2051 ) 

2052 ) 

2053 
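# --- Illustrative sketch of the merge _merge_extra_settings performs; the
# --- extra settings shown are made-up examples.
body = {'settings': {'index.number_of_shards': 1, 'index.number_of_replicas': 1}}
extra = {'settings': {'index.codec': 'best_compression'}, 'aliases': {'my-alias': {}}}
body['settings'].update(extra.pop('settings'))  # merge without clobbering defaults
body.update(extra)                              # apply remaining root keys (aliases)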

2054 def _data_node(self, node_id): 

2055 roles = utils.node_roles(self.client, node_id) 

2056 name = utils.node_id_to_name(self.client, node_id) 

2057 if 'data' not in roles: 

2058 self.loggit.info('Skipping node "{0}": non-data node'.format(name)) 

2059 return False 

2060 if 'master' in roles and not self.node_filters['permit_masters']: 

2061 self.loggit.info('Skipping node "{0}": master node'.format(name)) 

2062 return False 

2063 elif 'master' in roles and self.node_filters['permit_masters']: 

2064 self.loggit.warning( 

2065 'Not skipping node "{0}" which is a master node (not recommended), but ' 

2066 'permit_masters is True'.format(name) 

2067 ) 

2068 return True 

2069 else: # It does have `data` as a role. 

2070 return True 

2071 

2072 def _exclude_node(self, name): 

2073 if 'exclude_nodes' in self.node_filters: 

2074 if name in self.node_filters['exclude_nodes']: 

2075 self.loggit.info('Excluding node "{0}" due to node_filters'.format(name)) 

2076 return True 

2077 return False 

2078 

2079 def _shrink_target(self, name): 

2080 return '{0}{1}{2}'.format(self.shrink_prefix, name, self.shrink_suffix) 

2081 

2082 def qualify_single_node(self): 

2083 """Qualify a single node as a shrink target""" 

2084 node_id = utils.name_to_node_id(self.client, self.shrink_node) 

2085 if node_id: 

2086 self.shrink_node_id = node_id 

2087 self.shrink_node_name = self.shrink_node 

2088 else: 

2089 raise exceptions.ConfigurationError( 

2090 'Unable to find node named: "{0}"'.format(self.shrink_node)) 

2091 if self._exclude_node(self.shrink_node): 

2092 raise exceptions.ConfigurationError( 

2093 'Node "{0}" listed for exclusion'.format(self.shrink_node)) 

2094 if not self._data_node(node_id): 

2095 raise exceptions.ActionError( 

2096 'Node "{0}" is not usable as a shrink node'.format(self.shrink_node)) 

2097 self.shrink_node_avail = ( 

2098 self.client.nodes.stats()['nodes'][node_id]['fs']['total']['available_in_bytes'] 

2099 ) 

2100 

2101 def most_available_node(self): 

2102 """ 

2103 Determine which data node name has the most available free space, and 

2104 meets the other node filter settings. 

2105 

2106 Operates on ``self.client``; no arguments are required. 

2107 """ 

2108 mvn_avail = 0 

2109 # mvn_total = 0 

2110 mvn_name = None 

2111 mvn_id = None 

2112 nodes = self.client.nodes.stats()['nodes'] 

2113 for node_id in nodes: 

2114 name = nodes[node_id]['name'] 

2115 if self._exclude_node(name): 

2116 self.loggit.debug('Node "{0}" excluded by node filters'.format(name)) 

2117 continue 

2118 if not self._data_node(node_id): 

2119 self.loggit.debug('Node "{0}" is not a data node'.format(name)) 

2120 continue 

2121 value = nodes[node_id]['fs']['total']['available_in_bytes'] 

2122 if value > mvn_avail: 

2123 mvn_name = name 

2124 mvn_id = node_id 

2125 mvn_avail = value 

2126 # mvn_total = nodes[node_id]['fs']['total']['total_in_bytes'] 

2127 self.shrink_node_name = mvn_name 

2128 self.shrink_node_id = mvn_id 

2129 self.shrink_node_avail = mvn_avail 

2130 # self.shrink_node_total = mvn_total 

2131 

2132 def route_index(self, idx, allocation_type, key, value): 

2133 """Apply the indicated shard routing allocation""" 

2134 bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key) 

2135 routing = {bkey : value} 

2136 try: 

2137 self.client.indices.put_settings(index=idx, body=routing) 

2138 if self.wait_for_rebalance: 

2139 utils.wait_for_it( 

2140 self.client, 'allocation', wait_interval=self.wait_interval, 

2141 max_wait=self.max_wait 

2142 ) 

2143 else: 

2144 utils.wait_for_it( 

2145 self.client, 'relocate', index=idx, wait_interval=self.wait_interval, 

2146 max_wait=self.max_wait 

2147 ) 

2148 except Exception as err: 

2149 utils.report_failure(err) 

2150 
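# --- Illustrative sketch: the settings body route_index() builds for
# --- allocation_type='require' and key='_name'. The node name is a
# --- placeholder.
routing = {'index.routing.allocation.require._name': 'data-node-1'}
# applied with: client.indices.put_settings(index=idx, body=routing)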

2151 def __log_action(self, error_msg, dry_run=False): 

2152 if dry_run: 

2153 self.loggit.warning('DRY-RUN: {0}'.format(error_msg)) 

2154 else: 

2155 raise exceptions.ActionError(error_msg) 

2156 

2157 def _block_writes(self, idx): 

2158 block = {'index.blocks.write': True} 

2159 self.client.indices.put_settings(index=idx, body=block) 

2160 

2161 def _unblock_writes(self, idx): 

2162 unblock = {'index.blocks.write': False} 

2163 self.client.indices.put_settings(index=idx, body=unblock) 

2164 

2165 def _check_space(self, idx, dry_run=False): 

2166 # Disk watermark calculation is already baked into `available_in_bytes` 

2167 size = utils.index_size(self.client, idx, value='primaries') 

2168 padded = (size * 2) + (32 * 1024) 

2169 if padded < self.shrink_node_avail: 

2170 self.loggit.debug( 

2171 'Sufficient space available for 2x the size of index "{0}". Required: {1}, ' 

2172 'available: {2}'.format(idx, padded, self.shrink_node_avail) 

2173 ) 

2174 else: 

2175 error_msg = ( 

2176 'Insufficient space available for 2x the size of index "{0}", shrinking will ' 

2177 'exceed space available. Required: {1}, available: {2}'.format( 

2178 idx, padded, self.shrink_node_avail 

2179 ) 

2180 ) 

2181 self.__log_action(error_msg, dry_run) 

2182 

2183 def _check_node(self): 

2184 if self.shrink_node != 'DETERMINISTIC': 

2185 if not self.shrink_node_name: 

2186 self.qualify_single_node() 

2187 else: 

2188 self.most_available_node() 

2189 # At this point, we should have the three shrink-node identifying 

2190 # instance variables: 

2191 # - self.shrink_node_name 

2192 # - self.shrink_node_id 

2193 # - self.shrink_node_avail 

2194 # # - self.shrink_node_total - only if needed in the future 

2195 

2196 def _check_target_exists(self, idx, dry_run=False): 

2197 target = self._shrink_target(idx) 

2198 if self.client.indices.exists(index=target): 

2199 error_msg = 'Target index "{0}" already exists'.format(target) 

2200 self.__log_action(error_msg, dry_run) 

2201 

2202 def _check_doc_count(self, idx, dry_run=False): 

2203 max_docs = 2147483519 # Lucene's hard per-shard document limit 

2204 doc_count = self.client.indices.stats(index=idx)['indices'][idx]['primaries']['docs']['count'] 

2205 if doc_count > (max_docs * self.number_of_shards): 

2206 error_msg = ( 

2207 'Too many documents ({0}) to fit in {1} shard(s). Maximum number of docs per ' 

2208 'shard is {2}'.format(doc_count, self.number_of_shards, max_docs) 

2209 ) 

2210 self.__log_action(error_msg, dry_run) 

2211 

2212 def _check_shard_count(self, idx, src_shards, dry_run=False): 

2213 if self.number_of_shards >= src_shards: 

2214 error_msg = ( 

2215 'Target number of shards ({0}) must be less than current number of shards ({1}) ' 

2216 'in index "{2}"'.format(self.number_of_shards, src_shards, idx) 

2217 ) 

2218 self.__log_action(error_msg, dry_run) 

2219 

2220 def _check_shard_factor(self, idx, src_shards, dry_run=False): 

2221 # Find the list of factors of src_shards 

2222 factors = [x for x in range(1, src_shards+1) if src_shards % x == 0] 

2223 # Pop the last one, because it will be the value of src_shards 

2224 factors.pop() 

2225 if self.number_of_shards not in factors: 

2226 error_msg = ( 

2227 '"{0}" is not a valid factor of {1} shards. Valid values are ' 

2228 '{2}'.format(self.number_of_shards, src_shards, factors) 

2229 ) 

2230 self.__log_action(error_msg, dry_run) 

2231 
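# --- Illustrative sketch: valid shrink targets for an 8-shard source index.
src_shards = 8
factors = [x for x in range(1, src_shards + 1) if src_shards % x == 0]
factors.pop()   # drop src_shards itself; an equal shard count is not a shrink
print(factors)  # [1, 2, 4]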

2232 def _check_all_shards(self, idx): 

2233 shards = self.client.cluster.state(index=idx)['routing_table']['indices'][idx]['shards'] 

2234 found = [] 

2235 for shardnum in shards: 

2236 for shard_idx in range(0, len(shards[shardnum])): 

2237 if shards[shardnum][shard_idx]['node'] == self.shrink_node_id: 

2238 found.append( 

2239 {'shard': shardnum, 'primary': shards[shardnum][shard_idx]['primary']}) 

2240 if len(shards) != len(found): 

2241 self.loggit.debug( 

2242 'Found these shards on node "{0}": {1}'.format(self.shrink_node_name, found)) 

2243 raise exceptions.ActionError( 

2244 'Unable to shrink index "{0}" as not all shards were found on the designated ' 

2245 'shrink node ({1}): {2}'.format(idx, self.shrink_node_name, found) 

2246 ) 

2247 

2248 def pre_shrink_check(self, idx, dry_run=False): 

2249 """Do a shrink preflight check""" 

2250 self.loggit.debug('BEGIN PRE_SHRINK_CHECK') 

2251 self.loggit.debug('Check that target exists') 

2252 self._check_target_exists(idx, dry_run) 

2253 self.loggit.debug('Check doc count constraints') 

2254 self._check_doc_count(idx, dry_run) 

2255 self.loggit.debug('Check shard count') 

2256 src_shards = int(self.client.indices.get(index=idx)[idx]['settings']['index']['number_of_shards']) 

2257 self._check_shard_count(idx, src_shards, dry_run) 

2258 self.loggit.debug('Check shard factor') 

2259 self._check_shard_factor(idx, src_shards, dry_run) 

2260 self.loggit.debug('Check node availability') 

2261 self._check_node() 

2262 self.loggit.debug('Check available disk space') 

2263 self._check_space(idx, dry_run) 

2264 self.loggit.debug('FINISH PRE_SHRINK_CHECK') 

2265 

2266 def do_copy_aliases(self, source_idx, target_idx): 

2267 """Copy the aliases to the shrunk index""" 

2268 alias_actions = [] 

2269 aliases = self.client.indices.get_alias(index=source_idx) 

2270 for alias in aliases[source_idx]['aliases']: 

2271 self.loggit.debug('alias: {0}'.format(alias)) 

2272 alias_actions.append( 

2273 {'remove': {'index': source_idx, 'alias': alias}}) 

2274 alias_actions.append( 

2275 {'add': {'index': target_idx, 'alias': alias}}) 

2276 if alias_actions: 

2277 self.loggit.info('Copy alias actions: {0}'.format(alias_actions)) 

2278 self.client.indices.update_aliases(body={'actions': alias_actions}) 

2279 
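# --- Illustrative sketch: the atomic alias swap do_copy_aliases() submits.
# --- Index and alias names are placeholders.
alias_actions = [
    {'remove': {'index': 'logs-2023.08', 'alias': 'logs-current'}},
    {'add': {'index': 'logs-2023.08-shrink', 'alias': 'logs-current'}},
]
# submitted via: client.indices.update_aliases(body={'actions': alias_actions})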

2280 def do_dry_run(self): 

2281 """ 

2282 Show what a regular run would do, but don't actually do it. 

2283 """ 

2284 self.index_list.filter_closed() 

2285 self.index_list.filter_by_shards(number_of_shards=self.number_of_shards) 

2286 self.index_list.empty_list_check() 

2287 try: 

2288 index_lists = utils.chunk_index_list(self.index_list.indices) 

2289 for lst in index_lists: 

2290 for idx in lst: # Shrink can only be done one at a time... 

2291 target = self._shrink_target(idx) 

2292 self.pre_shrink_check(idx, dry_run=True) 

2293 self.loggit.info( 

2294 'DRY-RUN: Moving shards to shrink node: "{0}"'.format( 

2295 self.shrink_node_name 

2296 ) 

2297 ) 

2298 self.loggit.info( 

2299 'DRY-RUN: Shrinking index "{0}" to "{1}" with settings: {2}, ' 

2300 'wait_for_active_shards={3}'.format( 

2301 idx, target, self.body, self.wait_for_active_shards 

2302 ) 

2303 ) 

2304 if self.post_allocation: 

2305 self.loggit.info( 

2306 'DRY-RUN: Applying post-shrink allocation rule "{0}" to index ' 

2307 '"{1}"'.format( 

2308 'index.routing.allocation.{0}.{1}:{2}'.format( 

2309 self.post_allocation['allocation_type'], 

2310 self.post_allocation['key'], self.post_allocation['value'] 

2311 ), target 

2312 ) 

2313 ) 

2314 if self.copy_aliases: 

2315 self.loggit.info( 

2316 'DRY-RUN: Copy source index aliases "{0}"'.format( 

2317 self.client.indices.get_alias(index=idx) 

2318 ) 

2319 ) 

2320 #self.do_copy_aliases(idx, target) 

2321 if self.delete_after: 

2322 self.loggit.info('DRY-RUN: Deleting source index "{0}"'.format(idx)) 

2323 except Exception as err: 

2324 utils.report_failure(err) 

2325 

2326 def do_action(self): 

2327 """Actually do the action""" 

2328 self.index_list.filter_closed() 

2329 self.index_list.filter_by_shards(number_of_shards=self.number_of_shards) 

2330 self.index_list.empty_list_check() 

2331 self.loggit.info( 

2332 'Shrinking {0} selected indices: {1}'.format( 

2333 len(self.index_list.indices), self.index_list.indices 

2334 ) 

2335 ) 

2336 try: 

2337 index_lists = utils.chunk_index_list(self.index_list.indices) 

2338 for lst in index_lists: 

2339 for idx in lst: # Shrink can only be done one at a time... 

2340 target = self._shrink_target(idx) 

2341 self.loggit.info('Source index: {0} -- Target index: {1}'.format(idx, target)) 

2342 # Pre-check ensures disk space available for each pass of the loop 

2343 self.pre_shrink_check(idx) 

2344 # Route the index to the shrink node 

2345 self.loggit.info( 

2346 'Moving shards to shrink node: "{0}"'.format(self.shrink_node_name)) 

2347 self.route_index(idx, 'require', '_name', self.shrink_node_name) 

2348 # Ensure a copy of each shard is present 

2349 self._check_all_shards(idx) 

2350 # Block writes on index 

2351 self._block_writes(idx) 

2352 # Do final health check 

2353 if not utils.health_check(self.client, status='green'): 

2354 raise exceptions.ActionError( 

2355 'Unable to proceed with shrink action. Cluster health is not "green"') 

2356 # Do the shrink 

2357 self.loggit.info( 

2358 'Shrinking index "{0}" to "{1}" with settings: {2}, wait_for_active_shards' 

2359 '={3}'.format(idx, target, self.body, self.wait_for_active_shards) 

2360 ) 

2361 try: 

2362 self.client.indices.shrink( 

2363 index=idx, target=target, body=self.body, 

2364 wait_for_active_shards=self.wait_for_active_shards 

2365 ) 

2366 # Wait for it to complete 

2367 if self.wfc: 

2368 self.loggit.debug( 

2369 'Wait for shards to complete allocation for index: ' 

2370 '{0}'.format(target) 

2371 ) 

2372 if self.wait_for_rebalance: 

2373 utils.wait_for_it( 

2374 self.client, 'shrink', wait_interval=self.wait_interval, 

2375 max_wait=self.max_wait 

2376 ) 

2377 else: 

2378 utils.wait_for_it( 

2379 self.client, 'relocate', index=target, 

2380 wait_interval=self.wait_interval, max_wait=self.max_wait 

2381 ) 

2382 except Exception as err: 

2383 if self.client.indices.exists(index=target): 

2384 self.loggit.error( 

2385 'Deleting target index "{0}" due to failure to complete ' 

2386 'shrink'.format(target) 

2387 ) 

2388 self.client.indices.delete(index=target) 

2389 raise exceptions.ActionError( 

2390 'Unable to shrink index "{0}" -- Error: {1}'.format(idx, err)) 

2391 self.loggit.info('Index "{0}" successfully shrunk to "{1}"'.format(idx, target)) 

2392 # Do post-shrink steps 

2393 # Unblock writes on index (just in case) 

2394 self._unblock_writes(idx) 

2395 ## Post-allocation, if enabled 

2396 if self.post_allocation: 

2397 self.loggit.info( 

2398 'Applying post-shrink allocation rule "{0}" to index "{1}"'.format( 

2399 'index.routing.allocation.{0}.{1}:{2}'.format( 

2400 self.post_allocation['allocation_type'], 

2401 self.post_allocation['key'], self.post_allocation['value'] 

2402 ), target 

2403 ) 

2404 ) 

2405 self.route_index( 

2406 target, self.post_allocation['allocation_type'], 

2407 self.post_allocation['key'], self.post_allocation['value'] 

2408 ) 

2409 ## Copy aliases, if flagged 

2410 if self.copy_aliases: 

2411 self.loggit.info('Copy source index aliases "{0}"'.format(idx)) 

2412 self.do_copy_aliases(idx, target) 

2413 ## Delete, if flagged 

2414 if self.delete_after: 

2415 self.loggit.info('Deleting source index "{0}"'.format(idx)) 

2416 self.client.indices.delete(index=idx) 

2417 else: # Let's unset the routing we applied here. 

2418 self.loggit.info('Unassigning routing for source index: "{0}"'.format(idx)) 

2419 self.route_index(idx, 'require', '_name', '') 

2420 

2421 except Exception as err: 

2422 # If the failure happened after writes were blocked, unblock them 

2423 self._unblock_writes(idx) 

2424 utils.report_failure(err)
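# --- Illustrative sketch (assumptions: a reachable cluster and curator's
# --- IndexList filter API): a minimal Shrink run that lets DETERMINISTIC
# --- pick the data node with the most free space.
from elasticsearch7 import Elasticsearch
from curator.indexlist import IndexList

client = Elasticsearch()
ilo = IndexList(client)
ilo.filter_by_regex(kind='prefix', value='logs-')
shrink = Shrink(
    ilo,
    number_of_shards=1,
    node_filters={'permit_masters': False},
    delete_after=True,
)
shrink.do_dry_run()  # run every preflight check without shrinking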