From b16473a4afbf72a9a01fcf94a22be75a63c6ea20 Mon Sep 17 00:00:00 2001 From: Matus Kosut Date: Wed, 24 Oct 2018 14:56:04 +0200 Subject: [PATCH] jujuna 0.1.0 --- CHANGELOG | 14 ++ LICENSE | 201 ++++++++++++++++ VERSION | 1 + jujuna/__init__.py | 5 + jujuna/__main__.py | 176 ++++++++++++++ jujuna/brokers/__init__.py | 33 +++ jujuna/brokers/api.py | 10 + jujuna/brokers/file.py | 29 +++ jujuna/brokers/mount.py | 33 +++ jujuna/brokers/network.py | 27 +++ jujuna/brokers/package.py | 24 ++ jujuna/brokers/process.py | 23 ++ jujuna/brokers/service.py | 29 +++ jujuna/brokers/user.py | 27 +++ jujuna/clean.py | 110 +++++++++ jujuna/deploy.py | 113 +++++++++ jujuna/exporters/__init__.py | 60 +++++ jujuna/exporters/file.py | 45 ++++ jujuna/exporters/mount.py | 30 +++ jujuna/exporters/network.py | 53 +++++ jujuna/exporters/package.py | 31 +++ jujuna/exporters/process.py | 24 ++ jujuna/exporters/service.py | 38 +++ jujuna/exporters/user.py | 28 +++ jujuna/helper.py | 50 ++++ jujuna/settings.py | 74 ++++++ jujuna/tests.py | 154 +++++++++++++ jujuna/upgrade.py | 435 +++++++++++++++++++++++++++++++++++ requirements.txt | 7 + setup.py | 46 ++++ tests/__init__.py | 0 tests/unit/__init__.py | 0 tests/unit/test_args.py | 36 +++ tox.ini | 71 ++++++ 34 files changed, 2037 insertions(+) create mode 100644 CHANGELOG create mode 100644 LICENSE create mode 100644 VERSION create mode 100644 jujuna/__init__.py create mode 100644 jujuna/__main__.py create mode 100644 jujuna/brokers/__init__.py create mode 100644 jujuna/brokers/api.py create mode 100644 jujuna/brokers/file.py create mode 100644 jujuna/brokers/mount.py create mode 100644 jujuna/brokers/network.py create mode 100644 jujuna/brokers/package.py create mode 100644 jujuna/brokers/process.py create mode 100644 jujuna/brokers/service.py create mode 100644 jujuna/brokers/user.py create mode 100644 jujuna/clean.py create mode 100644 jujuna/deploy.py create mode 100644 jujuna/exporters/__init__.py create mode 100644 jujuna/exporters/file.py 
create mode 100644 jujuna/exporters/mount.py create mode 100644 jujuna/exporters/network.py create mode 100644 jujuna/exporters/package.py create mode 100755 jujuna/exporters/process.py create mode 100644 jujuna/exporters/service.py create mode 100644 jujuna/exporters/user.py create mode 100644 jujuna/helper.py create mode 100644 jujuna/settings.py create mode 100644 jujuna/tests.py create mode 100644 jujuna/upgrade.py create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 tests/__init__.py create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/test_args.py create mode 100644 tox.ini diff --git a/CHANGELOG b/CHANGELOG new file mode 100644 index 0000000..696bd38 --- /dev/null +++ b/CHANGELOG @@ -0,0 +1,14 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +### Changed +- List yet unreleased changes. + +## [0.1.0] - 2018-10-24 +### Added +- Initial opensourcing commit + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..04e3de7 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2018 Matus Kosut + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..6e8bf73 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/jujuna/__init__.py b/jujuna/__init__.py new file mode 100644 index 0000000..fc9c97f --- /dev/null +++ b/jujuna/__init__.py @@ -0,0 +1,5 @@ +""" +Jujuna init file. +""" + +name = "jujuna" diff --git a/jujuna/__main__.py b/jujuna/__main__.py new file mode 100644 index 0000000..afbf344 --- /dev/null +++ b/jujuna/__main__.py @@ -0,0 +1,176 @@ +#!/usr/bin/python3 + +import sys +import logging +import argparse +import argcomplete +import async_timeout + +from juju import loop + +from jujuna.deploy import deploy # noqa +from jujuna.upgrade import upgrade # noqa +from jujuna.tests import test # noqa +from jujuna.clean import clean # noqa + + +logger = logging.getLogger('jujuna') +logger.setLevel(logging.DEBUG) + +# Double logging cleanup +while logger.handlers: + logger.handlers.pop() + +logHandler = logging.StreamHandler(sys.stdout) +logHandler.setLevel(logging.DEBUG) +logFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +logHandler.setFormatter(logFormatter) +logger.addHandler(logHandler) + + +def get_parser(): + parser = argparse.ArgumentParser( + # formatter_class=utils.ParagraphDescriptionFormatter, + description="Deploy a local bundle, execute upgrade procedure, " + "run the deployment through a suite of tests to ensure " + "that it 
can handle the types of operations and failures " + "that are common for all deployments.", + ) + + subparsers = parser.add_subparsers(dest='action', help='Action to be executed') + subparsers.required = True + + p_deploy = subparsers.add_parser('deploy', help="Deploy a local bundle to the current or selected model") + p_deploy.add_argument('bundle_file', type=argparse.FileType('r'), + help="Path to bundle file (i.e. ceph/bundle.yaml)") + p_deploy.add_argument("-c", "--controller", default=None, dest="ctrl_name", help="Controller (def: current)") + p_deploy.add_argument("-m", "--model", default=None, dest="model_name", help="Model to use instead of current") + p_deploy.add_argument("-w", "--wait", action='store_true', help="Wait for deploy to finish") + p_deploy.add_argument("-t", "--timeout", default=0, type=int, help="Timeout after N seconds.") + p_deploy.add_argument("--endpoint", default=None, dest="endpoint", + help="Juju endpoint (requires model uuid instead of name)") + p_deploy.add_argument("--username", default=None, dest="username", help="Juju username") + p_deploy.add_argument("--password", default=None, dest="password", help="Juju password") + + p_upgrade = subparsers.add_parser('upgrade', help="Upgrade applications deployed in the current or selected model") + p_upgrade.add_argument("-c", "--controller", default=None, dest="ctrl_name", help="Controller (def: current)") + p_upgrade.add_argument("-m", "--model", default=None, dest="model_name", help="Model to use instead of current") + p_upgrade.add_argument("-o", "--origin", default='cloud:xenial-ocata', dest="origin", + help="""Openstack origin: + cloud:xenial-newton, + cloud:xenial-ocata (default), + cloud:xenial-pike, + cloud:xenial-queens, + cloud:bionic-rocky + """) + p_upgrade.add_argument("-a", "--apps", nargs='*', default=[], help="Apps to be upgraded (ordered)") + p_upgrade.add_argument("-i", "--ignore-errors", action='store_true', dest='ignore_errors', + help="Ignore errors during charms 
upgrade and continue with upgrade procedure") + p_upgrade.add_argument("-p", "--pause", action='store_true', help="Pause unit before upgrade (incl. HA)") + p_upgrade.add_argument("-e", "--evacuate", action='store_true', help="Evacuate nova-compute nodes during upgrade") + p_upgrade.add_argument("--upgrade-only", action='store_true', dest="upgrade_only", + help="Upgrade using upgrade hooks without changing the revision") + p_upgrade.add_argument("--charms-only", action='store_true', + help="Upgrade only charms without running upgrade hooks") + p_upgrade.add_argument("--dry-run", action='store_true', dest="dry_run", + help="Dry run - only show changes without upgrading") + p_upgrade.add_argument("-t", "--timeout", default=0, type=int, help="Timeout after N seconds.") + p_upgrade.add_argument("--endpoint", default=None, dest="endpoint", + help="Juju endpoint (requires model uuid instead of name)") + p_upgrade.add_argument("--username", default=None, dest="username", help="Juju username") + p_upgrade.add_argument("--password", default=None, dest="password", help="Juju password") + + p_test = subparsers.add_parser('test', help="Test applications in the current or selected model") + p_test.add_argument("test_suite", type=argparse.FileType('r'), + help="Path to test suite (i.e. 
ceph/suite.yaml)") + p_test.add_argument("-c", "--controller", default=None, dest="ctrl_name", help="Controller (def: current)") + p_test.add_argument("-m", "--model", default=None, dest="model_name", help="Model to use instead of current") + p_test.add_argument("-t", "--timeout", default=0, type=int, help="Timeout after N seconds.") + p_test.add_argument("--endpoint", default=None, dest="endpoint", + help="Juju endpoint (requires model uuid instead of name)") + p_test.add_argument("--username", default=None, dest="username", help="Juju username") + p_test.add_argument("--password", default=None, dest="password", help="Juju password") + + p_clean = subparsers.add_parser( + 'clean', + help="Clean the model by removing all applications present in the current or selected model" + ) + p_clean.add_argument("-c", "--controller", default=None, dest="ctrl_name", help="Controller (def: current)") + p_clean.add_argument("-m", "--model", default=None, dest="model_name", help="Model to use instead of current") + p_clean.add_argument("-w", "--wait", action='store_true', help="Wait for deploy to finish") + p_clean.add_argument("-f", "--force", action='store_true', help="Force cleanup (remove all machines in the model).") + p_clean.add_argument("-i", "--ignore", nargs='*', default=[], help="Apps to be ignored during removal") + p_clean.add_argument("--dry-run", action='store_true', dest="dry_run", + help="Dry run - only show changes without removing applications") + p_clean.add_argument("-t", "--timeout", default=0, type=int, help="Timeout after N seconds.") + p_clean.add_argument("--endpoint", default=None, dest="endpoint", + help="Juju endpoint (requires model uuid instead of name)") + p_clean.add_argument("--username", default=None, dest="username", help="Juju username") + p_clean.add_argument("--password", default=None, dest="password", help="Juju password") + + argcomplete.autocomplete(parser) + # options = add_bundle_opts(options, parser) + + # if not 
utils.valid_bundle_or_spell(options.path): + # parser.error('Invalid bundle directory: %s' % options.path) + + # configLogging(options) + return parser + + +doc_arg_parser = get_parser() # To be included in documentation + + +async def run_action(action, timeout, args): + """Run request action. + + """ + selected_action = globals()[action] + + # Remove used vars + del vars(args)['action'] + if 'timeout' in vars(args): + del vars(args)['timeout'] + + if timeout: + at = None + try: + async with async_timeout.timeout(timeout) as at: + return await selected_action(**vars(args)) + except Exception as e: + # Exit with timeout code if expired + if at and at.expired: + logger.warn('Operation timed out!') + return 124 + else: + raise + else: + return await selected_action(**vars(args)) + + +def action(args): + try: + if hasattr(args, 'action'): + return loop.run(run_action(args.action, args.timeout, args)) + except Exception as e: + logger.warn(e) + return 1 + + return 255 + + +def parse_args(args): + parser = get_parser() + return parser.parse_args(args) + + +def main(): + args = parse_args(sys.argv[1:]) + + ret = action(args) + + sys.exit(ret) + + +if __name__ == '__main__': + main() diff --git a/jujuna/brokers/__init__.py b/jujuna/brokers/__init__.py new file mode 100644 index 0000000..e895f78 --- /dev/null +++ b/jujuna/brokers/__init__.py @@ -0,0 +1,33 @@ +"""Broker abstract class.""" +import json + + +class Broker(): + """Broker abstract class.""" + + def __init__(self): + """Init abstract class.""" + self.named = self.__class__.__name__.lower() + + async def run(self, *args, **kwargs): + """Run method have to be overriden.""" + print("Run method for '{}' not implemented".format(self.named)) + return [] + + +def python3(file, args=[]): + if args: + return 'python3 {} {}'.format(file, ' '.join([str(arg) for arg in args])) + else: + return 'python3 {}'.format(file) + + +def load_output(results): + if results['Code'] == '0': + try: + var = json.loads(results['Stdout']) + 
except Exception as e: + raise Exception('JSON load failed: {}'.format(e)) + else: + raise Exception('Operation failed: {}'.format(results['Stderr'])) + return var diff --git a/jujuna/brokers/api.py b/jujuna/brokers/api.py new file mode 100644 index 0000000..082e9c8 --- /dev/null +++ b/jujuna/brokers/api.py @@ -0,0 +1,10 @@ +"""API broker.""" +from . import Broker + + +class Api(Broker): + """API broker.""" + + def __init__(self): + """Init broker.""" + super().__init__() diff --git a/jujuna/brokers/file.py b/jujuna/brokers/file.py new file mode 100644 index 0000000..24c4fa8 --- /dev/null +++ b/jujuna/brokers/file.py @@ -0,0 +1,29 @@ +"""File broker.""" +from . import Broker, python3, load_output +from jujuna.exporters import Exporter + + +class File(Broker): + """File broker.""" + + def __init__(self): + """Init broker.""" + super().__init__() + + async def run(self, test_data, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + files = test_data + # print(test_data) + + for file, params in files.items(): + act = await unit.run(python3(exporter, args=[file]), timeout=10) + results = load_output(act.data['results']) + # print('Expect: ', files[file]) + # print(results) + for param, value in params.items(): + res = results[param] == value + rows.append((idx, '{}.{} == {}'.format(file, param, value), res), ) + + return rows diff --git a/jujuna/brokers/mount.py b/jujuna/brokers/mount.py new file mode 100644 index 0000000..f7a2477 --- /dev/null +++ b/jujuna/brokers/mount.py @@ -0,0 +1,33 @@ +"""Mount broker.""" +from . 
import Broker, python3, load_output +from jujuna.exporters import Exporter +import re + + +class Mount(Broker): + """Mount broker.""" + + def __init__(self): + """Init broker.""" + super().__init__() + + async def run(self, test_case, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + act = await unit.run(python3(exporter), timeout=10) + results = load_output(act.data['results']) + # print(results.keys()) + if 'regex' in test_case: + for condition in test_case['regex']: + prog = re.compile(condition) + mounts = '' + + for result in results: + var = prog.search(result) + if var: + mounts = var.group(0) + + rows.append((idx, '{} == {}'.format(mounts, 'mounted'), True if mounts else False), ) + + return rows diff --git a/jujuna/brokers/network.py b/jujuna/brokers/network.py new file mode 100644 index 0000000..909ba68 --- /dev/null +++ b/jujuna/brokers/network.py @@ -0,0 +1,27 @@ +"""Network broker.""" +from . import Broker, python3, load_output +from jujuna.exporters import Exporter + + +class Network(Broker): + """Network broker.""" + + def __init__(self): + """Init broker.""" + super().__init__() + + async def run(self, test_case, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + if 'port' in test_case: + test_data = await unit.run(python3(exporter), timeout=10) + results = load_output(test_data.data['results']) + # print(results) + + local_ports = [s['local_port'] for s in results['sockets']] + + for port in test_case['port']: + rows.append((idx, '{}.{} == {}'.format('port', port, 'open'), port in local_ports), ) + + return rows diff --git a/jujuna/brokers/package.py b/jujuna/brokers/package.py new file mode 100644 index 0000000..062278b --- /dev/null +++ b/jujuna/brokers/package.py @@ -0,0 +1,24 @@ +"""Package broker.""" +from . 
import Broker, python3, load_output +from jujuna.exporters import Exporter + + +class Package(Broker): + """Mount broker.""" + + def __init__(self): + """Init broker.""" + super().__init__() + + async def run(self, test_case, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + act = await unit.run(python3(exporter), timeout=10) + results = load_output(act.data['results']) + # print(results['installed'].keys()) + if 'installed' in test_case: + for condition in test_case['installed']: + rows.append((idx, '{} == {}'.format(condition, 'installed'), condition in results['installed']), ) + + return rows diff --git a/jujuna/brokers/process.py b/jujuna/brokers/process.py new file mode 100644 index 0000000..e0494d4 --- /dev/null +++ b/jujuna/brokers/process.py @@ -0,0 +1,23 @@ +"""Process broker.""" +from . import Broker, python3, load_output +from jujuna.exporters import Exporter + + +class Process(Broker): + """Process broker.""" + + def __init__(self): + """Init Process broker.""" + super().__init__() + + async def run(self, test_case, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + act = await unit.run(python3(exporter), timeout=10) + results = load_output(act.data['results']) + # print(results) + for condition in test_case: + rows.append((idx, '{} == present'.format(condition), condition in results), ) + + return rows diff --git a/jujuna/brokers/service.py b/jujuna/brokers/service.py new file mode 100644 index 0000000..2d92287 --- /dev/null +++ b/jujuna/brokers/service.py @@ -0,0 +1,29 @@ +"""Service broker.""" +from . 
import Broker, python3, load_output +from jujuna.exporters import Exporter + + +class Service(Broker): + """Service broker.""" + + def __init__(self): + """Init service broker.""" + super().__init__() + + async def run(self, test_case, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + act = await unit.run(python3(exporter), timeout=10) + results = load_output(act.data['results']) + # print(results['services']) + # print(test_case) + for service, condition in test_case.items(): + rows.append((idx, '{} == {}'.format(service, 'exists'), service in results['services']), ) + for c, v in condition.items(): + rows.append(( + idx, '{}.{} == {}'.format(service, c, v), + service in results['services'] and results['services'][service][c] == v + ), ) + + return rows diff --git a/jujuna/brokers/user.py b/jujuna/brokers/user.py new file mode 100644 index 0000000..fa9485d --- /dev/null +++ b/jujuna/brokers/user.py @@ -0,0 +1,27 @@ +"""User broker.""" +from . 
import Broker, python3, load_output +from jujuna.exporters import Exporter + + +class User(Broker): + """User broker.""" + + def __init__(self): + """Init User broker.""" + super().__init__() + + async def run(self, test_case, unit, idx): + """Run tests.""" + rows = [] + async with Exporter(unit, self.named) as exporter: + act = await unit.run(python3(exporter), timeout=10) + user_data = load_output(act.data['results']) + # print(user_data) + for condition, test_item in test_case.items(): + test_res = False + # if subitem in test_item.items(): + if all(user_data[condition][key] == value for key, value in test_item.items()): + test_res = True + rows.append((idx, '{} == {}'.format(condition, 'present'), test_res), ) + + return rows diff --git a/jujuna/clean.py b/jujuna/clean.py new file mode 100644 index 0000000..f3db4bb --- /dev/null +++ b/jujuna/clean.py @@ -0,0 +1,110 @@ +#!/usr/bin/python3 + +import asyncio +import websockets +import logging +from jujuna.helper import connect_juju +from juju.errors import JujuError + + +# create logger +log = logging.getLogger('jujuna.clean') + + +async def wait_until(model, *conditions, log_time=5, timeout=None, wait_period=0.5, loop=None): + """Return only after all conditions are true. 
+ + """ + log_count = 0 + + def _disconnected(): + return not (model.is_connected() and model.connection().is_open) + + async def _block(log_count): + while not _disconnected() and not all(c() for c in conditions): + await asyncio.sleep(wait_period, loop=loop) + log_count += 0.5 + if log_count % log_time == 0: + log.info('[RUNNING] Machines: {} {} Apps: {}'.format( + len(model.machines), + ', '.join(model.machines.keys()), + len(model.applications) + )) + await asyncio.wait_for(_block(log_count), timeout, loop=loop) + + if _disconnected(): + raise websockets.ConnectionClosed(1006, 'no reason') + + log.info('[DONE] Machines: {} Apps: {}'.format( + len(model.machines), + len(model.applications) + )) + + +async def clean( + ctrl_name=None, + model_name=None, + ignore=[], + wait=False, + force=False, + dry_run=False, + endpoint=None, + username=None, + password=None +): + """Destroy applications present in the current or selected model. + + Connection requires juju client configs to be present locally or specification of credentials: + endpoint (e.g. 127.0.0.1:17070), username, password, and model uuid as model_name. 
+ + :param ctrl_name: juju controller + :param model_name: juju model name or uuid + :param ignore: list of application names + :param wait: boolean + :param force: boolean + :param dry_run: boolean + :param endpoint: string + :param username: string + :param password: string + """ + controller, model = await connect_juju( + ctrl_name, + model_name, + endpoint=endpoint, + username=username, + password=password + ) + + try: + # Remove all the apps + for app in model.applications: + if app in ignore: + log.info('Ignoring {}'.format(app)) + else: + log.info('Remove {} from model'.format(app)) + if not dry_run: + await model.applications[app].destroy() + + if not ignore and force: + machines = [m for m in model.machines.values() if 'arch' in m.safe_data['hardware-characteristics']] + for machine in machines: + log.info('Remove machine {} from model'.format(machine.entity_id)) + if not dry_run: + try: + await machine.destroy(force=True) + except Exception as e: + log.warn('ERROR: {}'.format(e)) + + if wait and not ignore and not dry_run: + await wait_until( + model, + lambda: model.applications == {} and model.machines == {}, + loop=model.loop + ) + + except JujuError as e: + log.info(e.message) + finally: + # Disconnect from the api server and cleanup. + await model.disconnect() + await controller.disconnect() diff --git a/jujuna/deploy.py b/jujuna/deploy.py new file mode 100644 index 0000000..f5b6208 --- /dev/null +++ b/jujuna/deploy.py @@ -0,0 +1,113 @@ +#!/usr/bin/python3 + +import asyncio +import websockets +import logging +from collections import defaultdict +from jujuna.helper import connect_juju +from juju.errors import JujuError + + +# create logger +log = logging.getLogger('jujuna.deploy') + + +async def deploy( + bundle_file, + ctrl_name=None, + model_name=None, + wait=False, + endpoint=None, + username=None, + password=None +): + """Deploy a local juju bundle. + + Handles deployment of a bundle file to the current or selected model. 
+ + Connection requires juju client configs to be present locally or specification of credentialls: + endpoint (e.g. 127.0.0.1:17070), username, password, and model uuid as model_name. + + :param bundle_file: juju bundle file + :param ctrl_name: juju controller + :param model_name: juju model name or uuid + :param wait: boolean + :param endpoint: string + :param username: string + :param password: string + """ + log.info('Reading bundle: {}'.format(bundle_file.name)) + entity_url = 'local:' + bundle_file.name.replace('/bundle.yaml', '') + + controller, model = await connect_juju( + ctrl_name, + model_name, + endpoint=endpoint, + username=username, + password=password + ) + + try: + # Deploy a bundle + log.info("Deploy: {}.".format(entity_url)) + deployed_app = await model.deploy( + entity_url + ) + + if wait: + await wait_until( + model, + deployed_app, + loop=model.loop + ) + except JujuError as e: + log.info(str(e)) + finally: + # Disconnect from the api server and cleanup. + await model.disconnect() + await controller.disconnect() + + +async def wait_until(model, deployed_app, log_time=5, timeout=None, wait_period=0.5, loop=None): + """Blocking with logs. + + Return only after all conditions are true. 
+ + :param model: juju model + :param deployed_app: juju application + :param log_time: logging frequency (s) + :param timeout: blocking timeout (s) + :param wait_period: waiting time between checks (s) + :param loop: asyncio event loop + """ + log_count = 0 + + def _disconnected(): + return not (model.is_connected() and model.connection().is_open) + + async def _block(log_count): + while not _disconnected() and not all(a.status == 'active' for a in deployed_app): + await asyncio.sleep(wait_period, loop=loop) + log_count += 0.5 + if log_count % log_time == 0: + d = defaultdict(int) + for a in deployed_app: + d[a.status] += 1 + log.info('[RUNNING] Machines: {} Apps: {} Stats: {}'.format( + len(model.machines), + len(model.applications), + dict(d) + )) + await asyncio.wait_for(_block(log_count), timeout, loop=loop) + + if _disconnected(): + raise websockets.ConnectionClosed(1006, 'no reason') + + d = defaultdict(int) + for a in deployed_app: + d[a.status] += 1 + log.info('[DONE] Machines: {} Apps: {} Stats: {}'.format( + len(model.machines), + len(model.applications), + dict(d) + )) diff --git a/jujuna/exporters/__init__.py b/jujuna/exporters/__init__.py new file mode 100644 index 0000000..16abbf3 --- /dev/null +++ b/jujuna/exporters/__init__.py @@ -0,0 +1,60 @@ +"""Exporter management.""" + +import os +import uuid +import async_timeout + + +class Exporter(): + """Exporter context manager.""" + + def __init__(self, unit, exporter_name): + """Init exporter.""" + if exporter_name.endswith('.py'): + raise Exception('Do not provide an extension') + self.unit = unit + self.full_path = os.path.dirname(os.path.realpath(__file__)) + self.exporter_local = os.path.join(self.full_path, exporter_name + '.py') + if not os.path.isfile(self.exporter_local): + raise Exception('Exporter: {}.py not found'.format(exporter_name)) + self.exporter_remote = os.path.join('/tmp', str(uuid.uuid4())[:9] + exporter_name + '.py') + + async def __aenter__(self): + """Upload exporter and return 
remote path.""" + # print('Load: ', self.exporter_local) + await self._load_exporter(self.unit, self.exporter_local, self.exporter_remote) + return self.exporter_remote + + async def __aexit__(self, type, value, traceback): + """Dispose remote exporter.""" + # print('Unload: ', self.exporter_remote) + await self._unload_exporter(self.unit, self.exporter_remote) + + async def _load_exporter(self, unit, source, target, user='ubuntu'): + """Upload exporter.""" + async with async_timeout.timeout(15): + await unit.scp_to( + source, + target, + user=user + ) + + async def _unload_exporter(self, unit, target, user='ubuntu'): + """Dispose of remote exporter.""" + for i in range(3): + try: + async with async_timeout.timeout(15): + ret = await unit.run('rm {}'.format(target), timeout=10) + break + except Exception as e: + # Try 3 times, if it fails raise on last + if i == 2 and 'No such file' not in str(e): + raise e + # ret = await unit.run('rm /tmp/*.py', timeout=3) + if ( + ret and ret.data and + 'results' in ret.data and + ret.data['results']['Code'] != '0' and + 'No such file' not in ret.data['results']['Stderr'] + ): + raise Exception('Unload failed: ', ret.data['results']['Stderr']) diff --git a/jujuna/exporters/file.py b/jujuna/exporters/file.py new file mode 100644 index 0000000..f1ca374 --- /dev/null +++ b/jujuna/exporters/file.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +import json +import sys +import os +import stat + + +def main(): + if len(sys.argv) != 2: + raise Exception('Specify exactly one filepath.') + filepath = sys.argv[1] + if not os.path.exists(filepath): + raise Exception("Path '{}' does not exist.".format(filepath)) + + file_stat = os.stat(filepath) + + file_vars = { + 'st_mode': file_stat.st_mode, + 'st_ino': file_stat.st_ino, + 'st_dev': file_stat.st_dev, + 'st_nlink': file_stat.st_nlink, + 'st_uid': file_stat.st_uid, + 'st_gid': file_stat.st_gid, + 'st_size': file_stat.st_size, + 'st_atime': file_stat.st_atime, + 'st_mtime': file_stat.st_mtime, 
+ 'st_ctime': file_stat.st_ctime, + 'is_dir': stat.S_ISDIR(file_stat.st_mode), + 'is_chr': stat.S_ISCHR(file_stat.st_mode), + 'is_blk': stat.S_ISBLK(file_stat.st_mode), + 'is_reg': stat.S_ISREG(file_stat.st_mode), + 'is_fifo': stat.S_ISFIFO(file_stat.st_mode), + 'is_lnk': stat.S_ISLNK(file_stat.st_mode), + 'is_sock': stat.S_ISSOCK(file_stat.st_mode), + 'imode': stat.S_IMODE(file_stat.st_mode), + 'ifmt': stat.S_IFMT(file_stat.st_mode), + } + + print(json.dumps(file_vars)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/jujuna/exporters/mount.py b/jujuna/exporters/mount.py new file mode 100644 index 0000000..88e275d --- /dev/null +++ b/jujuna/exporters/mount.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 + +import json +import sys + + +def main(): + mount_data = {} + with open('/proc/mounts', 'r') as mountfile: + mount_lines = mountfile.read().split('\n')[:-1] + for mount_line in mount_lines: + mount_items = mount_line.split(' ') + mount_parsed = { + 'mountpoint': mount_items[1], + 'fs': mount_items[2], + 'params': mount_items[3].split(','), + } + if '/' not in mount_items[0]: + if mount_items[0] not in mount_data: + mount_data[mount_items[0]] = [] + mount_data[mount_items[0]].append(mount_parsed) + else: + mount_data[mount_items[0]] = mount_parsed + + print(json.dumps(mount_data)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/jujuna/exporters/network.py b/jujuna/exporters/network.py new file mode 100644 index 0000000..eb9e1e5 --- /dev/null +++ b/jujuna/exporters/network.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +import re +import sys +import json +import base64 + + +def decode_address(address): + hex_ip, hex_port = address.split(':') + ip = '.'.join([str(item) for item in reversed(list(base64.b16decode(hex_ip)))]) + port = int.from_bytes(base64.b16decode(hex_port), byteorder='big') + return ip, port + + +def sockets(): + with open('/proc/net/tcp', 'r') as f: + lines = [] + for line in f: + line = line.strip() + line = 
re.sub(' +', ' ', line) + lines.append(line.split()) + + sock_list = [] + header = lines[0] + + for line in lines[1:]: + line_struct = {} + for idx, head in enumerate(header): + line_struct[head] = line[idx] + sock_list.append(line_struct) + + data_list = [] + for item in sock_list: + data_item = {} + data_item['local_ip'], data_item['local_port'] = decode_address(item['local_address']) + data_item['rem_ip'], data_item['rem_port'] = decode_address(item['rem_address']) + data_list.append(data_item) + + return data_list + + +def main(): + data = { + 'sockets': sockets(), + } + + print(json.dumps(data)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/jujuna/exporters/package.py b/jujuna/exporters/package.py new file mode 100644 index 0000000..39d5be1 --- /dev/null +++ b/jujuna/exporters/package.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 + +import json +import sys +import apt + + +def main(): + pkg_data = {} + + cache = apt.Cache() + installed = {} + for mypkg in apt.Cache(): + if cache[mypkg.name].is_installed: + installed[mypkg.name] = { + 'id': mypkg.id, + 'name': mypkg.name, + 'shortname': mypkg.shortname, + 'versions': list(dict(mypkg.versions).keys()) + } + + pkg_data = { + 'installed': installed + } + + print(json.dumps(pkg_data)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/jujuna/exporters/process.py b/jujuna/exporters/process.py new file mode 100755 index 0000000..3eb8c98 --- /dev/null +++ b/jujuna/exporters/process.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 + +import os +import json +import sys + + +def main(): + pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] + pnames = {} + for pid in pids: + pname_raw = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() + pname = pname_raw.decode('utf-8').replace('\x00', ' ').strip().split(' ') + pnames[pname[0]] = { + 'pid': pid, + 'params': pname[1:] if len(pname) > 1 else [], + } + + print(json.dumps(pnames)) + sys.exit(0) + + +if __name__ == "__main__": + 
main() diff --git a/jujuna/exporters/service.py b/jujuna/exporters/service.py new file mode 100644 index 0000000..c5abe2f --- /dev/null +++ b/jujuna/exporters/service.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 + +import json +import sys +import dbus + + +def main(): + bus = dbus.SystemBus() + systemd1 = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1') + manager = dbus.Interface(systemd1, dbus_interface='org.freedesktop.systemd1.Manager') + + units_list = manager.ListUnits() + + services = {} + + for unit in units_list: + if unit[0].endswith('.service'): + unit_name = unit[0].replace('.service', '') + + services[unit_name] = { + 'name': unit[0], + 'description': unit[1], + 'loaded': unit[2], + 'active': unit[3], + 'status': unit[4], + } + + systemd_data = { + 'services': services, + } + + print(json.dumps(systemd_data)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/jujuna/exporters/user.py b/jujuna/exporters/user.py new file mode 100644 index 0000000..31ac155 --- /dev/null +++ b/jujuna/exporters/user.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 + +import json +import sys +import pwd +import grp + + +def main(): + + user_data = {} + + for user in pwd.getpwall(): + user_data[user[0]] = { + 'uid': user[2], + 'gid': user[3], + 'group': grp.getgrgid(user[3])[0], + 'gecos': user[4], + 'dir': user[5], + 'shell': user[6], + } + + print(json.dumps(user_data)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/jujuna/helper.py b/jujuna/helper.py new file mode 100644 index 0000000..c6296b2 --- /dev/null +++ b/jujuna/helper.py @@ -0,0 +1,50 @@ + +from jujuna.settings import MAX_FRAME_SIZE + +from juju.controller import Controller +from juju.model import Model + +from theblues.charmstore import CharmStore + + +def cs_name_parse(name, series=None): + """Charm store name parse. 
+ + """ + try: + cut = name.split(':', 1)[1] + arr = cut.split('/') if '/' in cut else ['', cut] + return { + 'series': arr[0] if arr[0] or not series else series, + 'charm': '-'.join(arr[1].split('-')[:-1]), + 'revision': int(arr[1].split('-')[-1]) + } + except: # noqa + print(name) + return {} + + +async def connect_juju(ctrl_name=None, model_name=None, endpoint=None, username=None, password=None): + controller = Controller(max_frame_size=MAX_FRAME_SIZE) # noqa + + if ctrl_name: + if endpoint: + await controller.connect(endpoint=endpoint, username=username, password=password) + else: + await controller.connect(ctrl_name) + else: + await controller.connect_current() + + if endpoint: + model = Model(max_frame_size=MAX_FRAME_SIZE) + await model.connect(uuid=model_name, endpoint=endpoint, username=username, password=password) + elif model_name: + model = await controller.get_model(model_name) + else: + model = Model(max_frame_size=MAX_FRAME_SIZE) # noqa + await model.connect_current() + + # HACK low unsettable timeout in the model + model.charmstore._cs = CharmStore(timeout=60) + + return controller, model diff --git a/jujuna/settings.py b/jujuna/settings.py new file mode 100644 index 0000000..bb40fa8 --- /dev/null +++ b/jujuna/settings.py @@ -0,0 +1,74 @@ + +# Juju requires higher frame size for large models +MAX_FRAME_SIZE = 2**26 + +# Not all charms use the openstack-origin. The openstack specific +# charms do, but some of the others use an alternate origin key +# depending on who the author was. 
+ORIGIN_KEYS = { + 'ceph': 'source', + 'ceph-osd': 'source', + 'ceph-mon': 'source', + 'ceph-radosgw': 'source', +} + +# Default list of services, used in upgrade if apps not specified in params +# Services are upgraded in the order specified +SERVICES = [ + # Ceph + 'ceph-mon', + 'ceph-mon-nrpe', + 'ceph-osd', + 'ceph-osd-nrpe', + 'ceph-radosgw', + 'ceph-radosgw-hacluster', + 'ceph-radosgw-nrpe', + + # Identity and Image + 'keystone', + 'keystone-hacluster', + 'keystone-nrpe', + + 'glance', + 'glance-hacluster', + 'glance-nrpe', + + # Upgrade nova + 'nova-cloud-controller', + 'nova-cloud-controller-hacluster', + 'nova-cloud-controller-nrpe', + + 'nova-compute', + 'nova-compute-nrpe', + + # Neutron upgrades + 'neutron-api', + 'neutron-api-hacluster', + 'neutron-api-nrpe', + + 'neutron-gateway', + 'neutron-gateway-nrpe', + + 'neutron-openvswitch', + + # Backend block-storage upgrade. + # Note: just upgrade cinder service. + 'cinder', + 'cinder-hacluster', + 'cinder-warmceph', + 'cinder-nrpe', + + # Upgrade dashboard + 'openstack-dashboard', + 'openstack-dashboard-hacluster', + 'openstack-dashboard-nrpe', + + 'rabbitmq-server', + 'rabbitmq-server-nrpe', + + 'ntp', + + 'mysql', + 'mysql-hacluster', + 'mysql-nrpe' +] diff --git a/jujuna/tests.py b/jujuna/tests.py new file mode 100644 index 0000000..05732f5 --- /dev/null +++ b/jujuna/tests.py @@ -0,0 +1,154 @@ +import time # noqa +import yaml +import json +import logging +import async_timeout +from collections import defaultdict +from juju.errors import JujuError +from jujuna.helper import connect_juju + +from jujuna.brokers.api import Api as ApiBroker +from jujuna.brokers.file import File as FileBroker +from jujuna.brokers.mount import Mount as MountBroker +from jujuna.brokers.network import Network as NetworkBroker +from jujuna.brokers.package import Package as PackageBroker +from jujuna.brokers.process import Process as ProcessBroker +from jujuna.brokers.service import Service as ServiceBroker +from 
jujuna.brokers.user import User as UserBroker + + +# create logger +log = logging.getLogger('jujuna.tests') + + +async def test( + test_suite=None, + ctrl_name=None, + model_name=None, + endpoint=None, + username=None, + password=None +): + """Run a test suite against applications deployed in the current or selected model. + + Applications are tested with declarative parameters specified in the test suite using the available brokers. + + Connection requires juju client configs to be present locally or specification of credentialls: + endpoint (e.g. 127.0.0.1:17070), username, password, and model uuid as model_name. + + :param test_suite: suite file (Yaml) + :param ctrl_name: juju controller + :param model_name: juju model name or uuid + :param endpoint: string + :param username: string + :param password: string + """ + if test_suite: + with open(test_suite.name, 'r') as stream: + try: + suite = yaml.load(stream) + except yaml.YAMLError as exc: + log.error(exc) + + controller, model = await connect_juju( + ctrl_name, + model_name, + endpoint=endpoint, + username=username, + password=password + ) + + model_passed, model_failed = 0, 0 + failed_units = set() + + try: + for app_name, app in model.applications.items(): + if suite and app_name in suite: + app_passed, app_failed = 0, 0 + log.info('[{}]: {} {} [{}]'.format(app_name, app.status, app.alive, len(app.units))) + for idx, unit in enumerate(app.units): + async with async_timeout.timeout(60): + passed, failed = await execute_brokers(suite[app_name], unit, idx) + app_passed += passed + app_failed += failed + if app.status in ['error', 'maintenance', 'blocked'] or failed: + failed_units.add(unit.name) + model_passed += app_passed + model_failed += app_failed + log.info('[{}]: Passed: {} Failed: {}'.format(app_name, app_passed, app_failed)) + + alive = defaultdict(int) + status = defaultdict(int) + for app in model.applications.values(): + alive[app.alive] += 1 + status[app.status] += 1 + + if ( + False not in alive 
and + all(s in ['active', 'waiting'] for s in status.keys()) + ): + log.info('All juju apps state to be alive and active.') + else: + log.info('Finished with errors: Alive: {} Status: {}'.format(dict(alive), dict(status))) + + log.info('[FINISHED] Passed tests: {} Failed tests: {}'.format(model_passed, model_failed)) + + except JujuError as e: + log.error(e.message) + finally: + # Disconnect from the api server and cleanup. + await model.disconnect() + await controller.disconnect() + + if failed_units: + log.error('Failed units: {}'.format(', '.join(sorted(failed_units)))) + return 1 + + +def python3(file): + return 'python3 {}'.format(file) + + +def load_output(results): + """Load json result from broker. + + """ + if results['Code'] == '0': + try: + var = json.loads(results['Stdout']) + except Exception as e: + raise Exception('JSON load failed: {}'.format(e)) + else: + raise Exception('Operation failed: {}'.format(results['Stderr'])) + return var + + +async def execute_brokers(app_test_suite, unit, idx): + """Iterate brokers and acquire results. 
+ + """ + broker_map = { + 'api': ApiBroker, + 'file': FileBroker, + 'mount': MountBroker, + 'network': NetworkBroker, + 'package': PackageBroker, + 'process': ProcessBroker, + 'service': ServiceBroker, + 'user': UserBroker, + } + passed = 0 + failed = 0 + + for test_case in app_test_suite.keys(): + if test_case in broker_map.keys(): + rows = await broker_map[test_case]().run(app_test_suite[test_case], unit, idx) + for row in rows: + log.info('[{}]: {} {} [{}]'.format(unit.entity_id, test_case, row[1], "Pass" if row[2] else "Fail")) + if row[2]: + passed += 1 + else: + failed += 1 + else: + log.info("TEST: Skipped (Broker '{}' not registered)".format(test_case)) + return passed, failed diff --git a/jujuna/upgrade.py b/jujuna/upgrade.py new file mode 100644 index 0000000..903f4d0 --- /dev/null +++ b/jujuna/upgrade.py @@ -0,0 +1,435 @@ +import asyncio +import logging +import websockets +import async_timeout +from collections import defaultdict +from juju.errors import JujuError +from jujuna.helper import cs_name_parse, connect_juju +from jujuna.settings import ORIGIN_KEYS, SERVICES + + +# create logger +log = logging.getLogger('jujuna.upgrade') + + +async def upgrade( + ctrl_name=None, + model_name=None, + apps=[], + origin='cloud:xenial-ocata', + ignore_errors=False, + pause=False, + evacuate=False, + charms_only=False, + upgrade_only=False, + dry_run=False, + endpoint=None, + username=None, + password=None +): + """Upgrade applications deployed in the model. + + Handles upgrade of application deployed in the specified model. Focused on openstack upgrade procedures. + + Connection requires juju client configs to be present locally or specification of credentialls: + endpoint (e.g. 127.0.0.1:17070), username, password, and model uuid as model_name. 
+ + :param ctrl_name: juju controller + :param model_name: juju model name or uuid + :param apps: ordered list of application names + :param origin: target openstack version string + :param ignore_errors: boolean + :param pause: boolean + :param evacuate: boolean + :param charms_only: boolean + :param upgrade_only: boolean + :param dry_run: boolean + :param endpoint: string + :param username: string + :param password: string + """ + + controller, model = await connect_juju( + ctrl_name, + model_name, + endpoint=endpoint, + username=username, + password=password + ) + + try: + log.info('Applications present in the current model: {}'.format(', '.join(list(model.applications.keys())))) + + log.info('Upgrading charms') + + # If apps are not specified in the order use configuration from settings + if not apps: + log.info('Apps not specified, using default upgrade configuration: {}'.format(SERVICES)) + apps = SERVICES + + upgraded = [] + latest_charms = [] + + for app_name in apps: + if app_name not in model.applications: + log.warn('Unable to find application: {}'.format(app_name)) + continue + + # Get charm url and parse current revision + charm_url = model.applications[app_name].data['charm-url'] + parse = cs_name_parse(charm_url) + + # Charmstore get latest revision + try: + charmstore_entity = await model.charmstore.entity(charm_url, include_stats=False) + latest = charmstore_entity['Meta']['revision-info']['Revisions'][0] + latest_revision = cs_name_parse(latest) + attemp_update = False + except Exception as e: + log.warn('Failed loading information from charmstore: {}'.format(charm_url)) + latest_revision = {'revision': 0} + attemp_update = True + + # Update if not newest or attempt to update if failed to find charmstore latest revision + if ( + not upgrade_only and + parse['revision'] < latest_revision['revision'] or + attemp_update + ): + log.info('Upgrade {} from: {} to: {}'.format(app_name, parse['revision'], latest_revision['revision'])) + try: + if not 
dry_run: + await model.applications[app_name].upgrade_charm() + await asyncio.sleep(30) + upgraded.append(app_name) + except JujuError: + log.warn('Not upgrading: {}'.format(app_name)) + else: + latest_charms.append(app_name) + + log.info('Upgraded: {} charms'.format(len(upgraded))) + + await wait_until( + model, + model.applications.values(), + timeout=1800, + loop=model.loop + ) + + log.info('Collecting final workload status') + if not dry_run and upgraded: + await asyncio.sleep(30) + + if charms_only: + upgraded = [] + else: + upgraded = apps + + wss = defaultdict(int) + wsm = defaultdict(int) + for app in model.applications.values(): + for unit in app.units: + wss[unit.workload_status] += 1 + if 'ready' not in unit.workload_status_message: + wsm[unit.workload_status_message] += 1 + log.info('Status of units after upgrade: {}'.format(dict(wss))) + log.info('Workload messages: {}'.format(dict(wsm))) + + if not ignore_errors and 'error' in wss.keys(): + raise Exception('Errors during upgrading charms to latest revision') + + if upgraded and origin == 'cloud:xenial-ocata' and 'nova-compute' in apps and 'cinder-warmceph' in apps: + log.info('Adding relation between nova-compute:ceph-access and cinder-warmceph:ceph-access') + if not dry_run: + try: + await model.add_relation('nova-compute:ceph-access', 'cinder-warmceph:ceph-access') + # TODO add completion check + await asyncio.sleep(120) + log.info('Completed addition of relation') + except Exception as e: + if 'already exists' not in str(e): + raise e + else: + log.warn('Ignored: relation already exists') + + await wait_until( + model, + model.applications.values(), + timeout=1800, + loop=model.loop + ) + + log.info('Charms with latest revision: {}'.format(', '.join(latest_charms) if latest_charms else 'None')) + + log.info('Upgrading services') + + s_upgrade = 0 + for app_name in upgraded: + if await is_rollable(model.applications[app_name]): + await perform_rolling_upgrade( + model.applications[app_name], + 
origin=origin, + pause=pause, + dry_run=dry_run + ) + s_upgrade += 1 + else: + await perform_bigbang_upgrade( + model.applications[app_name], + origin=origin, + dry_run=dry_run + ) + s_upgrade += 1 + + await wait_until( + model, + model.applications.values(), + timeout=1800, + loop=model.loop + ) + + log.info('Upgrade finished ({} upgraded services)'.format(s_upgrade)) + + d = defaultdict(int) + for a in model.applications.values(): + d[a.status] += 1 + log.info('[STATUS] {}'.format(dict(d))) + + finally: + await model.disconnect() + await controller.disconnect() + + +async def wait_until(model, apps, log_time=10, timeout=None, wait_period=0.5, loop=None): + """Blocking with logs. + + Return only after all conditions are true. + Waiting for maintenance units to become active. + + :param model: juju model + :param apps: list of juju applications + :param log_time: logging frequency (s) + :param timeout: blocking timeout (s) + :param wait_period: waiting time between checks (s) + :param loop: asyncio event loop + """ + log_count = 0 + + def _disconnected(): + return not (model.is_connected() and model.connection().is_open) + + async def _block(log_count): + blockable = ['maintenance', 'blocked', 'waiting', 'error'] + while not _disconnected() and any(u.workload_status in blockable for a in apps for u in a.units): + await asyncio.sleep(wait_period, loop=loop) + log_count += 0.5 + if log_count % log_time == 0: + wss = defaultdict(int) + for app in model.applications.values(): + for unit in app.units: + wss[unit.workload_status] += 1 + log.info('[WAITING] Charm workload status: {}'.format(dict(wss))) + await asyncio.wait_for(_block(log_count), timeout, loop=loop) + + if _disconnected(): + raise websockets.ConnectionClosed(1006, 'no reason') + + +async def is_rollable(application): + """Define whether the application is rollable. 
+ + Application is considered rollable if it provides openstack-upgrade action, is deployed with more than 1 unit, + is not ceph and successfully applies action-managed-upgrade config. + + :param application: juju application + """ + actions = await enumerate_actions(application) + + if 'openstack-upgrade' not in actions: + return False + + if len(application.units) <= 1: + return False + + if application.name.lower().count('ceph') > 0: + log.info('Ceph is not rollable.') + return False + + if not await application.set_config({'action-managed-upgrade': 'True'}): + log.warn('Failed to enable action-managed-upgrade mode.') + return False + + return True + + +def get_hacluster_subordinate_pairs(application): + """Get hacluster subordinate pairs. + + Match hacluster subordinates into unit pairs, as they are respectively deployed. + + :param application: juju application + """ + # TODO get app or unit? How are the relations described? + # unit to unit relation missing in libjuju?! + for relation in application.relations: + if relation.is_subordinate and relation.provides.interface == 'hacluster': + sub = relation.provides.application + sub_pairs = {} + # HACK (matus) only matching relation is IP address of unit and its subordinate + # TODO machine id is not provided for subordinates in libjuju + for unit in application.units: + for sub_unit in sub.units: + if unit.data['public-address'] == sub_unit.data['public-address']: + sub_pairs[unit.name] = sub_unit + return sub_pairs + + return None + + +async def enumerate_actions(application): + """Enumerate available actions for the applications. + + Returns a list of actions. + + :param application: juju application + """ + actions = await application.get_actions() + return actions.keys() + + +async def order_units(name, units): + """Order units so that the leader unit comes first. + + Returns a list of units, leader first. 
+ + :param application: juju application + """ + log.info('Determining ordering for service: %s' % name) + ordered = [] + + is_leader_data = [] + for unit in units: + is_leader_data.append(await unit.run('is-leader')) + + leader_info = filter(lambda u: u.data['results']['Stdout'].strip() == 'True', is_leader_data) + leader_unit = [x.data['receiver'] for x in leader_info][0] + + for unit in units: + if unit.name == leader_unit: + ordered.insert(0, unit) + else: + ordered.append(unit) + + log.info('Upgrade order is: %s' % [unit.name for unit in ordered]) + return ordered + + +async def perform_rolling_upgrade( + application, + dry_run=False, + evacuate=False, + pause=False, + origin='cloud:xenial-ocata' +): + """Perform rolling upgrade. + + Rolling upgrade is performed on the rollable application. + + :param application: juju application + :param dry_run: boolean + :param evacuate: boolean + :param pause: boolean + :param origin: origin string + """ + + actions = await enumerate_actions(application) + if not dry_run: + config_key = ORIGIN_KEYS.get(application.name, 'openstack-origin') + await application.set_config({config_key: origin}) + + ordered_units = await order_units(application.name, application.units) + hacluster_pairs = get_hacluster_subordinate_pairs(application) # TODO see fx + + for unit in ordered_units: + if hacluster_pairs: + hacluster_unit = hacluster_pairs.get(unit.name, None) + else: + hacluster_unit = None + + if evacuate and application.name == 'nova-compute': + # NOT IMPLEMENTED + log.warn('Nova evacuation is not implemented and will be skipped') + + if pause and hacluster_unit: + # TODO this will pause all the units for hacluster subordinates + log.info('Pausing service on hacluster subordinate: {}'.format(hacluster_unit.name)) + if not dry_run: + async with async_timeout.timeout(300): + action = await hacluster_unit.run_action('pause') + await application.model.wait_for_action(action.entity_id) + log.info('Service on hacluster subordinate {} 
is paused'.format(hacluster_unit.name)) + + if pause and 'pause' in actions: + log.info('Pausing service on unit: {}'.format(unit.name)) + if not dry_run: + async with async_timeout.timeout(300): + action = await unit.run_action('pause') + await application.model.wait_for_action(action.entity_id) + log.info('Service on unit {} is paused'.format(unit.name)) + + if 'openstack-upgrade' in actions: + log.info('Upgrading OpenStack for unit: {}'.format(unit.name)) + if not dry_run: + action = await unit.run_action('openstack-upgrade') + await application.model.wait_for_action(action.entity_id) + log.info('Completed upgrade for unit: {}'.format(unit.name)) + + if pause and 'resume' in actions: + log.info('Resuming service on unit: {}'.format(unit.name)) + if not dry_run: + async with async_timeout.timeout(300): + action = await unit.run_action('resume') + await application.model.wait_for_action(action.entity_id) + log.info('Service on unit {} has resumed'.format(unit.name)) + + if pause and hacluster_unit: + # TODO this will resume all the units for hacluster subordinates + log.info('Resuming service on hacluster subordinate: {}'.format(hacluster_unit.name)) + if not dry_run: + async with async_timeout.timeout(300): + action = await hacluster_unit.run_action('resume') + await application.model.wait_for_action(action.entity_id) + log.info('Service on hacluster subordinate {} has resumed'.format(hacluster_unit.name)) + + log.info('Unit {} has finished the upgrade'.format(unit.name)) + + +async def perform_bigbang_upgrade(application, dry_run=False, pause=False, origin='cloud:xenial-ocata'): + """Perform bigbang upgrade. + + Bigbang upgrade is performed on units of the application at once. 
+ + :param application: juju application + :param dry_run: boolean + :param pause: boolean + :param origin: origin string + """ + log.info('Performing a big-bang upgrade for service: {}'.format(application.name)) + if not dry_run: + config_key = ORIGIN_KEYS.get(application.name, 'openstack-origin') + if config_key not in await application.get_config(): + log.warn('Unable to set source/origin during big-bang upgrade for service: {}'.format(application.name)) + log.info('Skipping upgrade for service: {}'.format(application.name)) + return + + await application.set_config({config_key: origin}) + await asyncio.sleep(15) + + upgrade_in_progress = True + while upgrade_in_progress: + # service = Juju.current().get_service(service.name) + # unit_uip = [u.is_upgrading() for u in service.units()] + unit_uip = [u.workload_status.lower().find('upgrad') >= 0 for u in application.units] + upgrade_in_progress = any(unit_uip) + if upgrade_in_progress: + await asyncio.sleep(5) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..22212a8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +pip>=10 +pyyaml +# xenial comes with python3.5 which is on 3.5.2-2ubuntu0~16.04.4 should work +async-timeout==2.0.1 # until xenial updates from python 3.5.1 to python 3.5.2 at least, then use 3.0.0 +tox>=3.3 +juju>=0.10.2 +argcomplete diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..9a27f42 --- /dev/null +++ b/setup.py @@ -0,0 +1,46 @@ +from pathlib import Path +from setuptools import setup, find_packages + +here = Path(__file__).absolute().parent +readme = here / 'README.md' +changelog = here / 'CHANGELOG' +reqs_file = here / 'requirements.txt' + +with open(reqs_file) as f: + reqs = [line for line in f.read().splitlines() + if not line.startswith('--')] +version = here / 'VERSION' + +SETUP = { + 'name': "jujuna", + 'packages': find_packages( + exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), + 'version': version.read_text().strip(), + 
    'author': "Matus Kosut",
    'author_email': "matuskosut@gmail.com",
    'maintainer': 'HUNT Data Center',
    'maintainer_email': 'cloud@hunt.ntnu.no',
    'url': "https://github.com/huntdatacenter/jujuna",
    'long_description': open('README.md').read(),
    'entry_points': {
        'console_scripts': [
            # Script invocation:
            'jujuna = jujuna.__main__:main',
        ]
    },
    'license': 'Apache 2',
    'classifiers': [
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Environment :: Console"
    ],
    'install_requires': reqs,
}


if __name__ == '__main__':
    setup(**SETUP)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py
new file mode 100644
index 0000000..d499fca
--- /dev/null
+++ b/tests/unit/test_args.py
@@ -0,0 +1,36 @@
+"""
+Tests for argparse configuration.
+
+"""
+
+from jujuna.__main__ import parse_args
+import unittest
+
+
+class TestArguments(unittest.TestCase):
+    """Test argument parsing.
+ + """ + + def test_args_upgrade(self): + """Testing upgrade parser.""" + args = parse_args(['upgrade', '--dry-run']) + self.assertEqual(args.action, 'upgrade') + + self.assertFalse(args.upgrade_only) + self.assertFalse(args.charms_only) + self.assertTrue(args.dry_run) + + self.assertEqual(args.model_name, None) + self.assertEqual(args.ctrl_name, None) + + def test_args_clean(self): + """Testing clean parser.""" + args = parse_args(['clean', '--model', 'cloud', '-w']) + self.assertEqual(args.action, 'clean') + + self.assertFalse(args.dry_run) + self.assertTrue(args.wait) + + self.assertEqual(args.model_name, 'cloud') + self.assertEqual(args.ctrl_name, None) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..d12cfd4 --- /dev/null +++ b/tox.ini @@ -0,0 +1,71 @@ +[general] +install_requires = + pytest + flake8 + pip>=10 +name = jujuna + +[tox] +minversion=3.3.0 +skipsdist = True +envlist = py35, py36, py37, lint, docs + +[testenv] +deps = + -rrequirements.txt + {[general]install_requires} +commands = + pytest --tb native -ra -v -s + +[testenv:doc8] +basepython = python3.6 +skip_install = true +deps = + doc8 +commands = + doc8 docs/source/ + +[testenv:lint] +maxLineLength = 120 +deps = + {[testenv:doc8]deps} + flake8 +commands = + # flake8 --exclude=.env,.tox --ignore=E402 --max-line-length=120 + {[testenv:doc8]commands} + flake8 + +# Documentation +[testenv:docs] +basepython = python3 +deps = + sphinx + sphinx_rtd_theme + sphinx-argparse + . 
+commands = + sphinx-build -E -W -c docs/source/ -b html docs/source/ docs/build/html + ; sphinx-build -E -W -c docs/source/ -b man docs/source/ docs/build/man + ; sphinx-build -b html -a -E doc ./docs/html/ + +[flake8] +exclude = + .tox, + .git, + __pycache__, + docs/source/conf.py, + build, + dist, + tests/fixtures/*, + .env/*, + *.pyc, + *.egg-info, + .cache, + .eggs, + get-pip.py +ignore = D203, D100, D103, D202, D200 +import-order-style = google +max-line-length = 120 +statistics = True +application-import-names = flake8 +format = ${cyan}%(path)s${reset}:${yellow_bold}%(row)d${reset}:${green_bold}%(col)d${reset}: ${red_bold}%(code)s${reset} %(text)s