From 9fe9866fc219c5a0e3ca1f4f62da13f5faceed18 Mon Sep 17 00:00:00 2001
From: Alan Rominger
Date: Tue, 6 Sep 2016 12:47:49 -0400
Subject: [PATCH 01/77] remove stray print statement

---
 awx/main/access.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/awx/main/access.py b/awx/main/access.py
index 5fa3b76274..734dce5ecc 100644
--- a/awx/main/access.py
+++ b/awx/main/access.py
@@ -661,7 +661,6 @@ class CredentialAccess(BaseAccess):
            or (not organization_pk and obj.organization):
             return False
 
-        print(self.user in obj.admin_role)
         return self.user in obj.admin_role
 
     def can_delete(self, obj):

From 646a9f1570e85e0cfa31141b5e0e544c4f711a37 Mon Sep 17 00:00:00 2001
From: Jared Tabor
Date: Wed, 7 Sep 2016 15:53:41 -0700
Subject: [PATCH 02/77] Update Pendo service for 3.0 changes to retrieving the
 config endpoint

---
 .../authenticationServices/pendo.service.js | 63 ++++---------------
 1 file changed, 13 insertions(+), 50 deletions(-)

diff --git a/awx/ui/client/src/login/authenticationServices/pendo.service.js b/awx/ui/client/src/login/authenticationServices/pendo.service.js
index 5e034eaa87..dbfe407820 100644
--- a/awx/ui/client/src/login/authenticationServices/pendo.service.js
+++ b/awx/ui/client/src/login/authenticationServices/pendo.service.js
@@ -92,58 +92,21 @@ export default
             return deferred.promise;
         },
-        getConfig: function () {
-            var config = ConfigService.get(),
-            deferred = $q.defer();
-            if(_.isEmpty(config)){
-                var url = GetBasePath('config');
-                Rest.setUrl(url);
-                var promise = Rest.get();
-                promise.then(function (response) {
-                    config = response.data.license_info;
-                    config.analytics_status = response.data.analytics_status;
-                    config.version = response.data.version;
-                    config.ansible_version = response.data.ansible_version;
-                    if(config.analytics_status === 'detailed' || config.analytics_status === 'anonymous'){
-                        $pendolytics.bootstrap();
-                        deferred.resolve(config);
-                    }
-                    else {
-                        deferred.reject('Pendo is turned off.');
-                    }
-                });
-                promise.catch(function (response) {
-                    ProcessErrors($rootScope, response.data, response.status, null, {
-                        hdr: 'Error!',
-                        msg: 'Failed to get inventory name. GET returned status: ' +
-                        response.status });
-                    deferred.reject('Could not resolve pendo config.');
-                });
-            }
-            else if(config.analytics_status === 'detailed' || config.analytics_status === 'anonymous'){
-                $pendolytics.bootstrap();
-                deferred.resolve(config);
-            }
-            else {
-                deferred.reject('Pendo is turned off.');
-            }
-            return deferred.promise;
-        },
         issuePendoIdentity: function () {
-            var that = this;
-            this.getConfig().then(function(config){
-                var options = that.setPendoOptions(config);
-                that.setRole(options).then(function(options){
-                    $log.debug('Pendo status is '+ config.analytics_status + '. Object below:');
-                    $log.debug(options);
-                    $pendolytics.identify(options);
-                }, function(reason){
-                    // reject function for setRole
-                    $log.debug(reason);
-                });
+            var config,
+                options,
+                c = ConfigService.get(),
+                config = c.license_info;
+            config.analytics_status = c.analytics_status;
+            config.version = c.version;
+            config.ansible_version = c.ansible_version;
+            options = this.setPendoOptions(config);
+            this.setRole(options).then(function(options){
+                $log.debug('Pendo status is '+ config.analytics_status + '. Object below:');
+                $log.debug(options);
+                $pendolytics.identify(options);
             }, function(reason){
-                // reject function for getConfig
+                // reject function for setRole
                 $log.debug(reason);
             });
         }

From 56d42100476955a2c42ab156c75844f84e390783 Mon Sep 17 00:00:00 2001
From: Matthew Jones
Date: Wed, 14 Sep 2016 16:06:13 -0400
Subject: [PATCH 03/77] Implement Performance improvements shown in #3492

* Make callback workers tunable
* Disable callback worker recycling - doesn't appear to be needed anymore
* Tweak pexpect behavior by limiting its read buffer size and search window
* Use copy instead of deepcopy for job event callback emitter censor
---
 .../management/commands/run_callback_receiver.py | 13 +++----------
 awx/main/tasks.py                                |  2 +-
 awx/plugins/callback/job_event_callback.py       |  4 ++--
 awx/settings/defaults.py                         |  3 +++
 4 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/awx/main/management/commands/run_callback_receiver.py b/awx/main/management/commands/run_callback_receiver.py
index e6080fa419..c31b3dffe2 100644
--- a/awx/main/management/commands/run_callback_receiver.py
+++ b/awx/main/management/commands/run_callback_receiver.py
@@ -25,8 +25,6 @@ from awx.main.socket import Socket
 
 logger = logging.getLogger('awx.main.commands.run_callback_receiver')
 
-WORKERS = 4
-
 class CallbackReceiver(object):
     def __init__(self):
         self.parent_mappings = {}
@@ -54,7 +52,7 @@ class CallbackReceiver(object):
 
         if use_workers:
             connection.close()
-            for idx in range(WORKERS):
+            for idx in range(settings.JOB_EVENT_WORKERS):
                 queue_actual = Queue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
                 w = Process(target=self.callback_worker, args=(queue_actual, idx,))
                 w.start()
@@ -99,7 +97,7 @@ class CallbackReceiver(object):
             time.sleep(0.1)
 
     def write_queue_worker(self, preferred_queue, worker_queues, message):
-        queue_order = sorted(range(WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
+        queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
         for queue_actual in queue_order:
             try:
                 worker_actual = worker_queues[queue_actual]
@@ -153,7 +151,7 @@ class CallbackReceiver(object):
                     if message['event'] == 'playbook_on_stats':
                         job_parent_events = {}
 
-                actual_queue = self.write_queue_worker(total_messages % WORKERS, worker_queues, message)
+                actual_queue = self.write_queue_worker(total_messages % settings.JOB_EVENT_WORKERS, worker_queues, message)
                 # NOTE: It might be better to recycle the entire callback receiver process if one or more of the queues are too full
                 #       the drawback is that if we under extremely high load we may be legitimately taking a while to process messages
                 if actual_queue is None:
@@ -282,7 +280,6 @@ class CallbackReceiver(object):
             return None
 
     def callback_worker(self, queue_actual, idx):
-        messages_processed = 0
         while True:
             try:
                 message = queue_actual.get(block=True, timeout=1)
@@ -292,10 +289,6 @@ class CallbackReceiver(object):
                 logger.error("Exception on listen socket, restarting: " + str(e))
                 break
             self.process_job_event(message)
-            messages_processed += 1
-            if messages_processed >= settings.JOB_EVENT_RECYCLE_THRESHOLD:
-                logger.info("Shutting down message receiver")
-                break
 
 class Command(NoArgsCommand):
     '''
diff --git a/awx/main/tasks.py b/awx/main/tasks.py
index c99f043e1a..20edd955ed 100644
--- a/awx/main/tasks.py
+++ b/awx/main/tasks.py
@@ -575,7 +575,7 @@ class BaseTask(Task):
             instance = self.update_model(instance.pk, status='running', output_replacements=output_replacements)
 
         while child.isalive():
-            result_id = child.expect(expect_list, timeout=pexpect_timeout)
+            result_id = child.expect(expect_list, timeout=pexpect_timeout, maxread=100, searchwindowsize=100)
             if result_id in expect_passwords:
                 child.sendline(expect_passwords[result_id])
             if logfile_pos != logfile.tell():
diff --git a/awx/plugins/callback/job_event_callback.py b/awx/plugins/callback/job_event_callback.py
index a9c5b712ed..2049edc4b8 100644
--- a/awx/plugins/callback/job_event_callback.py
+++ b/awx/plugins/callback/job_event_callback.py
@@ -38,7 +38,7 @@ import os
 import pwd
 import urlparse
 import re
-from copy import deepcopy
+from copy import copy
 
 # Requests
 import requests
@@ -228,7 +228,7 @@ class BaseCallbackModule(object):
 
     def _log_event(self, event, **event_data):
         if 'res' in event_data:
-            event_data['res'] = censor(deepcopy(event_data['res']))
+            event_data['res'] = censor(copy(event_data['res']))
         if self.callback_consumer_port:
             self._post_job_event_queue_msg(event, event_data)
diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py
index 2998d15bb7..e2995d0dbd 100644
--- a/awx/settings/defaults.py
+++ b/awx/settings/defaults.py
@@ -438,6 +438,9 @@ AWX_TASK_ENV = {}
 # before it recycles
 JOB_EVENT_RECYCLE_THRESHOLD = 3000
 
+# Number of workers used to process job events in parallel
+JOB_EVENT_WORKERS = 4
+
 # Maximum number of job events that can be waiting on a single worker queue before
 # it can be skipped as too busy
 JOB_EVENT_MAX_QUEUE_SIZE = 100
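A note on the pexpect tweak above, with a minimal standalone sketch (the command and patterns here are illustrative, not from the patch): expect() re-scans the accumulated output buffer against every pattern each time new data arrives, so on jobs that produce a lot of stdout the match cost grows with the buffer. Constraining the search window keeps that cost roughly constant:

    import pexpect

    # Spawn a chatty, long-running child process (any command works here).
    child = pexpect.spawn('ansible-playbook site.yml')
    while child.isalive():
        # searchwindowsize=100 means only the last 100 bytes of the buffer
        # are searched for a pattern match, instead of the whole backlog.
        # (maxread, by contrast, is a spawn()-time option in pexpect, which
        # is why PATCH 12 below drops it from the expect() call again.)
        idx = child.expect([pexpect.TIMEOUT, 'Password:'],
                           timeout=5, searchwindowsize=100)
        if idx == 1:
            child.sendline('example-password')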
From fb2ab05045a47c70b7ce2d643c59743a79de04b1 Mon Sep 17 00:00:00 2001
From: Jim Ladd
Date: Thu, 15 Sep 2016 16:58:50 -0400
Subject: [PATCH 04/77] Bump version for 3.0.3

---
 awx/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/awx/__init__.py b/awx/__init__.py
index bf3f75255c..fe3644cf53 100644
--- a/awx/__init__.py
+++ b/awx/__init__.py
@@ -5,7 +5,7 @@ import os
 import sys
 import warnings
 
-__version__ = '3.0.2'
+__version__ = '3.0.3'
 
 __all__ = ['__version__']
 

From c74880756f10fbcc69ca8431b631a45d9c6b7174 Mon Sep 17 00:00:00 2001
From: Jared Tabor
Date: Fri, 16 Sep 2016 14:13:53 -0700
Subject: [PATCH 05/77] Pendo should not issue identity when turned off

Fixes an issue where the identity was issued to the Pendo agent despite
an "analytics_status": "off"
---
 .../authenticationServices/pendo.service.js | 24 ++++++++++++-------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/awx/ui/client/src/login/authenticationServices/pendo.service.js b/awx/ui/client/src/login/authenticationServices/pendo.service.js
index dbfe407820..3eaf06e0b0 100644
--- a/awx/ui/client/src/login/authenticationServices/pendo.service.js
+++ b/awx/ui/client/src/login/authenticationServices/pendo.service.js
@@ -100,15 +100,21 @@ export default
             config.analytics_status = c.analytics_status;
             config.version = c.version;
             config.ansible_version = c.ansible_version;
-            options = this.setPendoOptions(config);
-            this.setRole(options).then(function(options){
-                $log.debug('Pendo status is '+ config.analytics_status + '. Object below:');
-                $log.debug(options);
-                $pendolytics.identify(options);
-            }, function(reason){
-                // reject function for setRole
-                $log.debug(reason);
-            });
+            if(config.analytics_status === 'detailed' || config.analytics_status === 'anonymous'){
+                $pendolytics.bootstrap();
+                options = this.setPendoOptions(config);
+                this.setRole(options).then(function(options){
+                    $log.debug('Pendo status is '+ config.analytics_status + '. Object below:');
+                    $log.debug(options);
+                    $pendolytics.identify(options);
+                }, function(reason){
+                    // reject function for setRole
+                    $log.debug(reason);
+                });
+            }
+            else {
+                $log.debug('Pendo is turned off.')
+            }
         }
     };
 }

From 654633f556ea4ad2f97f8258c917b4fbc276291a Mon Sep 17 00:00:00 2001
From: Jared Tabor
Date: Fri, 16 Sep 2016 14:53:58 -0700
Subject: [PATCH 06/77] Fix for Pendo if trial key/value pair is missing from
 the config endpoint -> license_info

---
 .../client/src/login/authenticationServices/pendo.service.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/awx/ui/client/src/login/authenticationServices/pendo.service.js b/awx/ui/client/src/login/authenticationServices/pendo.service.js
index 3eaf06e0b0..10cdbd33d8 100644
--- a/awx/ui/client/src/login/authenticationServices/pendo.service.js
+++ b/awx/ui/client/src/login/authenticationServices/pendo.service.js
@@ -13,6 +13,7 @@ export default
     return {
         setPendoOptions: function (config) {
             var tower_version = config.version.split('-')[0],
+                trial = (config.trial) ? config.trial : false,
                 options = {
                     visitor: {
                         id: null,
@@ -24,7 +25,7 @@ export default
                         planLevel: config.license_type,
                         planPrice: config.instance_count,
                         creationDate: config.license_date,
-                        trial: config.trial,
+                        trial: trial,
                         tower_version: tower_version,
                         ansible_version: config.ansible_version
                     }

From 1268c8e205486bb2647f9016a3cf1f926ca1d328 Mon Sep 17 00:00:00 2001
From: Wayne Witzel III
Date: Fri, 23 Sep 2016 08:22:26 -0400
Subject: [PATCH 07/77] update authentication class in postprocess and add
 unit test

---
 awx/main/tests/unit/test_settings.py | 11 +++++++++++
 awx/settings/postprocess.py          |  2 +-
 2 files changed, 12 insertions(+), 1 deletion(-)
 create mode 100644 awx/main/tests/unit/test_settings.py

diff --git a/awx/main/tests/unit/test_settings.py b/awx/main/tests/unit/test_settings.py
new file mode 100644
index 0000000000..2018771c63
--- /dev/null
+++ b/awx/main/tests/unit/test_settings.py
@@ -0,0 +1,11 @@
+from split_settings.tools import include
+
+def test_postprocess_auth_basic_enabled():
+    locals().update({'__file__': __file__})
+
+    include('../../../settings/defaults.py', scope=locals())
+    assert 'awx.api.authentication.LoggedBasicAuthentication' in locals()['REST_FRAMEWORK']['DEFAULT_AUTHENTICATION_CLASSES']
+
+    locals().update({'AUTH_BASIC_ENABLED': False})
+    include('../../../settings/postprocess.py', scope=locals())
+    assert 'awx.api.authentication.LoggedBasicAuthentication' not in locals()['REST_FRAMEWORK']['DEFAULT_AUTHENTICATION_CLASSES']
diff --git a/awx/settings/postprocess.py b/awx/settings/postprocess.py
index 544758e04f..fd54fd5050 100644
--- a/awx/settings/postprocess.py
+++ b/awx/settings/postprocess.py
@@ -31,4 +31,4 @@ if not all([SOCIAL_AUTH_SAML_SP_ENTITY_ID, SOCIAL_AUTH_SAML_SP_PUBLIC_CERT,
     AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.SAMLAuth']
 
 if not AUTH_BASIC_ENABLED:
-    REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = [x for x in REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] if x != 'rest_framework.authentication.BasicAuthentication']
+    REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = [x for x in REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] if x != 'awx.api.authentication.LoggedBasicAuthentication']

From 5fa1632d1e2cc0e065cbd6cd6ba7f31a1cd7b741 Mon Sep 17 00:00:00 2001
From: Wayne Witzel III
Date: Fri, 23 Sep 2016 08:41:13 -0400
Subject: [PATCH 08/77] flake8 drive-by

---
 awx/sso/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/awx/sso/__init__.py b/awx/sso/__init__.py
index 347aedfeee..6596e4bf78 100644
--- a/awx/sso/__init__.py
+++ b/awx/sso/__init__.py
@@ -8,7 +8,7 @@ import threading
 xmlsec_init_lock = threading.Lock()
 xmlsec_initialized = False
 
-import dm.xmlsec.binding
+import dm.xmlsec.binding  # noqa
 original_xmlsec_initialize = dm.xmlsec.binding.initialize
 
 def xmlsec_initialize(*args, **kwargs):

From fd18e2eebd678a0c2230593f28b4950474dadb5f Mon Sep 17 00:00:00 2001
From: Wayne Witzel III
Date: Fri, 23 Sep 2016 09:12:36 -0400
Subject: [PATCH 09/77] add host variable dash fix as a migration

---
 .../0033_v303_v245_host_variable_fix.py | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 awx/main/migrations/0033_v303_v245_host_variable_fix.py

diff --git a/awx/main/migrations/0033_v303_v245_host_variable_fix.py b/awx/main/migrations/0033_v303_v245_host_variable_fix.py
new file mode 100644
index 0000000000..fad3545b65
--- /dev/null
+++ b/awx/main/migrations/0033_v303_v245_host_variable_fix.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations
+from awx.main.migrations import _migration_utils as migration_utils
+
+
+def update_dashed_host_variables(apps, schema_editor):
+    Host = apps.get_model('main', 'Host')
+    for host in Host.objects.filter(variables='---'):
+        host.variables = ''
+        host.save()
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0032_v302_credential_permissions_update'),
+    ]
+
+    operations = [
+        migrations.RunPython(migration_utils.set_current_apps_for_migrations),
+        migrations.RunPython(update_dashed_host_variables),
+    ]
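Some context on what the migration above is cleaning up (an illustrative snippet, not part of the patch): '---' is an empty YAML document, so it deserializes to None rather than to an empty mapping, and every consumer of Host.variables had to special-case it. Normalizing the stored value to '' sidesteps that:

    import yaml

    print(yaml.safe_load('---'))        # -> None, not {}
    print(yaml.safe_load('') is None)   # -> True: '' behaves the same way
    # so consumers can uniformly do:
    variables = yaml.safe_load('---') or {}
    print(variables)                    # -> {}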
From a4adda1ae719149e21bcd9e7690a88646509561d Mon Sep 17 00:00:00 2001
From: Wayne Witzel III
Date: Fri, 23 Sep 2016 10:10:39 -0400
Subject: [PATCH 10/77] only allow superusers to start a job from a
 SystemJobTemplate

---
 awx/main/access.py                 |  4 +++-
 awx/main/tests/unit/test_access.py | 19 ++++++++++++++++++-
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/awx/main/access.py b/awx/main/access.py
index 734dce5ecc..72636e1776 100644
--- a/awx/main/access.py
+++ b/awx/main/access.py
@@ -1122,8 +1122,10 @@ class SystemJobTemplateAccess(BaseAccess):
 
     model = SystemJobTemplate
 
+    @check_superuser
     def can_start(self, obj):
-        return self.can_read(obj)
+        '''Only a superuser can start a job from a SystemJobTemplate'''
+        return False
 
 class SystemJobAccess(BaseAccess):
     '''
diff --git a/awx/main/tests/unit/test_access.py b/awx/main/tests/unit/test_access.py
index 000d91268c..0c2e6bb5be 100644
--- a/awx/main/tests/unit/test_access.py
+++ b/awx/main/tests/unit/test_access.py
@@ -8,8 +8,16 @@ from awx.main.access import (
     BaseAccess,
     check_superuser,
     JobTemplateAccess,
+    SystemJobTemplateAccess,
+)
+
+from awx.main.models import (
+    Credential,
+    Inventory,
+    Project,
+    Role,
+    Organization,
 )
-from awx.main.models import Credential, Inventory, Project, Role, Organization
 
 
 @pytest.fixture
@@ -110,3 +118,12 @@ def test_jt_can_add_bad_data(user_unit):
     access = JobTemplateAccess(user_unit)
     assert not access.can_add({'asdf': 'asdf'})
 
+def test_system_job_template_can_start(mocker):
+    user = mocker.MagicMock(spec=User, id=1, is_system_auditor=True, is_superuser=False)
+    assert user.is_system_auditor
+    access = SystemJobTemplateAccess(user)
+    assert not access.can_start(None)
+
+    user.is_superuser = True
+    access = SystemJobTemplateAccess(user)
+    assert access.can_start(None)
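The @check_superuser decorator used above short-circuits the access check for superusers; a rough sketch of the idea (the real decorator lives in awx/main/access.py and may differ in detail):

    def check_superuser(func):
        # Grant access outright to superusers; everyone else falls
        # through to the wrapped can_* method.
        def wrapper(self, *args, **kwargs):
            if self.user.is_superuser:
                return True
            return func(self, *args, **kwargs)
        return wrapper

Read together with the decorator, `return False` in can_start() means exactly what the commit message says: nobody but a superuser can start a system job.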
From 898cbbf26e927da26ec0058e711755a71dfead50 Mon Sep 17 00:00:00 2001
From: nitzmahone
Date: Thu, 15 Sep 2016 19:33:08 -0700
Subject: [PATCH 11/77] update azure_rm inventory script to 2.0.0rc5-target

---
 awx/plugins/inventory/azure_rm.ini.example |  7 +-
 awx/plugins/inventory/azure_rm.py          | 86 ++++++++++++----------
 2 files changed, 54 insertions(+), 39 deletions(-)

diff --git a/awx/plugins/inventory/azure_rm.ini.example b/awx/plugins/inventory/azure_rm.ini.example
index 6ea2688efa..816da16532 100644
--- a/awx/plugins/inventory/azure_rm.ini.example
+++ b/awx/plugins/inventory/azure_rm.ini.example
@@ -1,5 +1,5 @@
 #
-# Configuration file for azure_rm_invetory.py
+# Configuration file for azure_rm.py
 #
 [azure]
 # Control which resource groups are included. By default all resources groups are included.
@@ -9,11 +9,14 @@
 # Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
 #tags=
 
+# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
+#locations=
+
 # Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
 include_powerstate=yes
 
 # Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
 group_by_resource_group=yes
 group_by_location=yes
-group_by_security_group=no
+group_by_security_group=yes
 group_by_tag=yes
diff --git a/awx/plugins/inventory/azure_rm.py b/awx/plugins/inventory/azure_rm.py
index 0554ecf879..f3c9e7c28d 100755
--- a/awx/plugins/inventory/azure_rm.py
+++ b/awx/plugins/inventory/azure_rm.py
@@ -76,7 +76,7 @@ required. For a specific host, this script returns the following variables:
     "version": "latest"
   },
   "location": "westus",
-  "mac_address": "00-0D-3A-31-2C-EC",
+  "mac_address": "00-00-5E-00-53-FE",
  "name": "object-name",
   "network_interface": "interface-name",
   "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
@@ -115,7 +115,7 @@ When run in --list mode, instances are grouped by the following categories:
  - tag key
  - tag key_value
 
-Control groups using azure_rm_inventory.ini or set environment variables:
+Control groups using azure_rm.ini or set environment variables:
 
 AZURE_GROUP_BY_RESOURCE_GROUP=yes
 AZURE_GROUP_BY_LOCATION=yes
@@ -130,6 +130,10 @@ Select hosts for specific tag key by assigning a comma separated list of tag key
 
 AZURE_TAGS=key1,key2,key3
 
+Select hosts for specific locations:
+
+AZURE_LOCATIONS=eastus,westus,eastus2
+
 Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
 
 AZURE_TAGS=key1:value1,key2:value2
 
 If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
 AZURE_INCLUDE_POWERSTATE=no
 
-azure_rm_inventory.ini
-----------------------
+azure_rm.ini
+------------
+As mentioned above, you can control execution using environment variables or a .ini file. A sample
+azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
+'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
+a different path for the .ini file, define the AZURE_INI_PATH environment variable:
+
+  export AZURE_INI_PATH=/path/to/custom.ini
 
 Powerstate:
 -----------
 up. If the value is anything other than 'running', the machine is down, and will
 
 Examples:
 ---------
   Execute /bin/uname on all instances in the galaxy-qa resource group
-  $ ansible -i azure_rm_inventory.py galaxy-qa -m shell -a "/bin/uname -a"
+  $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
 
   Use the inventory script to print instance specific information
-  $ contrib/inventory/azure_rm_inventory.py --host my_instance_host_name --pretty
+  $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
 
   Use with a playbook
-  $ ansible-playbook -i contrib/inventory/azure_rm_inventory.py my_playbook.yml --limit galaxy-qa
+  $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
 
 
 Insecure Platform Warning
 
 Version: 1.0.0
 
 import argparse
 import ConfigParser
-import json
+import json
 import os
 import re
 import sys
 
+from distutils.version import LooseVersion
+
 from os.path import expanduser
 
 HAS_AZURE = True
 try:
     from azure.mgmt.compute import __version__ as azure_compute_version
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
     from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
-    from azure.mgmt.network.network_management_client import NetworkManagementClient,\
-        NetworkManagementClientConfiguration
-    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
-        ResourceManagementClientConfiguration
-    from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
-        ComputeManagementClientConfiguration
+    from azure.mgmt.network.network_management_client import NetworkManagementClient
+    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
+    from azure.mgmt.compute.compute_management_client import ComputeManagementClient
 except ImportError as exc:
     HAS_AZURE_EXC = exc
     HAS_AZURE = False
@@ -219,6 +224,7 @@ AZURE_CREDENTIAL_ENV_MAPPING = dict(
 AZURE_CONFIG_SETTINGS = dict(
     resource_groups='AZURE_RESOURCE_GROUPS',
     tags='AZURE_TAGS',
+    locations='AZURE_LOCATIONS',
     include_powerstate='AZURE_INCLUDE_POWERSTATE',
     group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
     group_by_location='AZURE_GROUP_BY_LOCATION',
@@ -226,7 +232,7 @@ AZURE_CONFIG_SETTINGS = dict(
     group_by_tag='AZURE_GROUP_BY_TAG'
 )
 
-AZURE_MIN_VERSION = "2016-03-30"
+AZURE_MIN_VERSION = "0.30.0rc5"
 
 
 def azure_id_to_dict(id):
@@ -362,8 +368,7 @@ class AzureRM(object):
     def network_client(self):
         self.log('Getting network client')
         if not self._network_client:
-            self._network_client = NetworkManagementClient(
-                NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id))
+            self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
         self._register('Microsoft.Network')
         return self._network_client
 
@@ -371,16 +376,14 @@ class AzureRM(object):
     def rm_client(self):
         self.log('Getting resource manager client')
         if not self._resource_client:
-            self._resource_client = ResourceManagementClient(
-                ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id))
+            self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
         return self._resource_client
 
     @property
     def compute_client(self):
         self.log('Getting compute client')
         if not self._compute_client:
-            self._compute_client = ComputeManagementClient(
-                ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id))
+            self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
         self._register('Microsoft.Compute')
         return self._compute_client
 
@@ -403,6 +406,7 @@ class AzureInventory(object):
 
         self.resource_groups = []
         self.tags = None
+        self.locations = None
         self.replace_dash_in_groups = False
         self.group_by_resource_group = True
         self.group_by_location = True
@@ -425,6 +429,9 @@ class AzureInventory(object):
         if self._args.tags:
             self.tags = self._args.tags.split(',')
 
+        if self._args.locations:
+            self.locations = self._args.locations.split(',')
+
         if self._args.no_powerstate:
             self.include_powerstate = False
 
@@ -462,6 +469,8 @@ class AzureInventory(object):
                             help='Return inventory for comma separated list of resource group names')
         parser.add_argument('--tags', action='store',
                             help='Return inventory for comma separated list of tag key:value pairs')
+        parser.add_argument('--locations', action='store',
+                            help='Return inventory for comma separated list of locations')
         parser.add_argument('--no-powerstate', action='store_true', default=False,
                             help='Do not include the power state of each virtual host')
         return parser.parse_args()
@@ -487,7 +496,7 @@ class AzureInventory(object):
         except Exception as exc:
             sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
 
-        if self._args.host or self.tags > 0:
+        if self._args.host or self.tags or self.locations:
             selected_machines = self._selected_machines(virtual_machines)
             self._load_machines(selected_machines)
         else:
@@ -524,7 +533,7 @@ class AzureInventory(object):
             resource_group=resource_group,
             mac_address=None,
             plan=(machine.plan.name if machine.plan else None),
-            virtual_machine_size=machine.hardware_profile.vm_size.value,
+            virtual_machine_size=machine.hardware_profile.vm_size,
             computer_name=machine.os_profile.computer_name,
             provisioning_state=machine.provisioning_state,
         )
@@ -576,7 +585,7 @@ class AzureInventory(object):
             host_vars['mac_address'] = network_interface.mac_address
             for ip_config in network_interface.ip_configurations:
                 host_vars['private_ip'] = ip_config.private_ip_address
-                host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method.value
+                host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
                 if ip_config.public_ip_address:
                     public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
                     public_ip_address = self._network_client.public_ip_addresses.get(
@@ -585,7 +594,7 @@ class AzureInventory(object):
                     host_vars['ansible_host'] = public_ip_address.ip_address
                     host_vars['public_ip'] = public_ip_address.ip_address
                     host_vars['public_ip_name'] = public_ip_address.name
-                    host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method.value
+                    host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
                     host_vars['public_ip_id'] = public_ip_address.id
                     if public_ip_address.dns_settings:
                         host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
@@ -599,6 +608,8 @@ class AzureInventory(object):
                 selected_machines.append(machine)
             if self.tags and self._tags_match(machine.tags, self.tags):
                 selected_machines.append(machine)
+            if self.locations and machine.location in self.locations:
+                selected_machines.append(machine)
         return selected_machines
 
     def _get_security_groups(self, resource_group):
@@ -676,17 +687,17 @@ class AzureInventory(object):
         file_settings = self._load_settings()
         if file_settings:
             for key in AZURE_CONFIG_SETTINGS:
-                if key in ('resource_groups', 'tags') and file_settings.get(key, None) is not None:
+                if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
                     values = file_settings.get(key).split(',')
                     if len(values) > 0:
                         setattr(self, key, values)
-                elif file_settings.get(key, None) is not None:
+                elif file_settings.get(key):
                     val = self._to_boolean(file_settings[key])
                     setattr(self, key, val)
         else:
             env_settings = self._get_env_settings()
             for key in AZURE_CONFIG_SETTINGS:
-                if key in('resource_groups', 'tags') and env_settings.get(key, None) is not None:
+                if key in('resource_groups', 'tags', 'locations') and env_settings.get(key):
                     values = env_settings.get(key).split(',')
                     if len(values) > 0:
                         setattr(self, key, values)
@@ -719,7 +730,8 @@ class AzureInventory(object):
 
     def _load_settings(self):
         basename = os.path.splitext(os.path.basename(__file__))[0]
-        path = basename + '.ini'
+        default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
+        path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
        config = None
         settings = None
         try:
@@ -774,11 +786,11 @@ class AzureInventory(object):
 
 def main():
     if not HAS_AZURE:
-        sys.exit("The Azure python sdk is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))
+        sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC))
 
-    if azure_compute_version < AZURE_MIN_VERSION:
-        sys.exit("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
-                 "Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
+    if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION):
+        sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
+                 "Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
 
     AzureInventory()
 
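A quick illustration of why the version check above switched to LooseVersion (standalone example, not from the patch): the old pin "2016-03-30" was compared lexicographically against the SDK's version string, which stopped meaning anything once the SDK began reporting versions like 0.30.0rc5:

    from distutils.version import LooseVersion

    # Plain string comparison is character-by-character:
    print('0.30.0rc5' < '2016-03-30')   # -> True, but tells us nothing useful

    # LooseVersion parses the numeric components and compares them properly,
    # and the != pin holds the SDK to exactly the rc5 release:
    print(LooseVersion('0.30.0rc5') == LooseVersion('0.30.0rc5'))   # -> True
    print(LooseVersion('0.30.0rc4') != LooseVersion('0.30.0rc5'))   # -> True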
From 162f0b81d1847fd025296326322a961d7b17a5b0 Mon Sep 17 00:00:00 2001
From: Matthew Jones
Date: Wed, 28 Sep 2016 13:15:53 -0400
Subject: [PATCH 12/77] Remove unneeded maxread

searchwindowsize was the important bit here
---
 awx/main/tasks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/awx/main/tasks.py b/awx/main/tasks.py
index 20edd955ed..c76107d601 100644
--- a/awx/main/tasks.py
+++ b/awx/main/tasks.py
@@ -575,7 +575,7 @@ class BaseTask(Task):
             instance = self.update_model(instance.pk, status='running', output_replacements=output_replacements)
 
         while child.isalive():
-            result_id = child.expect(expect_list, timeout=pexpect_timeout, maxread=100, searchwindowsize=100)
+            result_id = child.expect(expect_list, timeout=pexpect_timeout, searchwindowsize=100)
             if result_id in expect_passwords:
                 child.sendline(expect_passwords[result_id])
             if logfile_pos != logfile.tell():

From 839c2877c840314af4812b6e51b1c28a19b77dc6 Mon Sep 17 00:00:00 2001
From: nitzmahone
Date: Mon, 8 Aug 2016 10:45:01 -0700
Subject: [PATCH 13/77] update requirements versions

- current Ansible stuff (2.1.1) requires azure SDK 2.0.0rc5
- requests 2.10.0 is the published minimum version for pywinrm, as it
  contains a bugfix for catastrophic SSL tunnel failure on large payloads
  that pywinrm hits frequently, 2.11.0 is best tested.
---
 requirements/requirements_ansible.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements/requirements_ansible.txt b/requirements/requirements_ansible.txt
index b35cb6fcbb..e62e3e5ead 100644
--- a/requirements/requirements_ansible.txt
+++ b/requirements/requirements_ansible.txt
@@ -1,7 +1,7 @@
 anyjson==0.3.3
 apache-libcloud==0.20.1
 appdirs==1.4.0
-azure==2.0.0rc2
+azure==2.0.0rc5
 Babel==2.2.0
 boto==2.40.0
 cliff==1.15.0
@@ -69,7 +69,7 @@ rackspace-auth-openstack==1.3
 rackspace-novaclient==1.5
 rax-default-network-flags-python-novaclient-ext==0.3.2
 rax-scheduled-images-python-novaclient-ext==0.3.1
-requests==2.5.1
+requests==2.11.0
 requestsexceptions==1.1.1
 shade==1.4.0
 simplejson==3.8.1

From f30c8c0bb8006e156965eeaa468ec6183862cc0b Mon Sep 17 00:00:00 2001
From: Matthew Jones
Date: Thu, 29 Sep 2016 10:37:18 -0400
Subject: [PATCH 14/77] Update postgres yum/apt repo locations

*Thanks postgres team

(cherry picked from commit b398fcc5b97ed478273e3d124aaaaa3af2879965)
---
 tools/docker-compose/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/docker-compose/Dockerfile b/tools/docker-compose/Dockerfile
index 4e22788585..3acd687372 100644
--- a/tools/docker-compose/Dockerfile
+++ b/tools/docker-compose/Dockerfile
@@ -8,7 +8,7 @@ RUN apt-get update && apt-get install -y software-properties-common python-softw
 RUN add-apt-repository -y ppa:chris-lea/redis-server; add-apt-repository -y ppa:chris-lea/zeromq; add-apt-repository -y ppa:chris-lea/node.js; add-apt-repository -y ppa:ansible/ansible; add-apt-repository -y ppa:jal233/proot;
 RUN curl -sL https://deb.nodesource.com/setup_0.12 | bash -
 RUN curl -sL https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
-RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ trusty-pgdg main" | tee /etc/apt/sources.list.d/postgres-9.4.list
+RUN echo "deb http://download.postgresql.org/pub/repos/apt/dists/ trusty-pgdg main" | tee /etc/apt/sources.list.d/postgres-9.4.list
 RUN apt-get update && apt-get install -y openssh-server ansible mg vim tmux git mercurial subversion python-dev python-psycopg2 make postgresql-client libpq-dev nodejs python-psutil libxml2-dev libxslt-dev lib32z1-dev libsasl2-dev libldap2-dev libffi-dev libzmq-dev proot python-pip libxmlsec1-dev swig redis-server libgss-dev libkrb5-dev && apt-get autoremove --purge -y && rm -rf /var/lib/apt/lists/*
 RUN pip install flake8 pytest pytest-pythonpath pytest-django pytest-cov pytest-mock dateutils django-debug-toolbar==1.4 pyflakes==1.0.0 virtualenv
 RUN /usr/bin/ssh-keygen -q -t rsa -N "" -f /root/.ssh/id_rsa

From 4d22af0d3cc4d76154e05fb3a44fa7b348663c0d Mon Sep 17 00:00:00 2001
From: Matthew Jones
Date: Thu, 29 Sep 2016 13:16:58 -0400
Subject: [PATCH 15/77] Fix up ubuntu apt repo paths

---
 tools/docker-compose/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/docker-compose/Dockerfile b/tools/docker-compose/Dockerfile
index 3acd687372..8ec83450b5 100644
--- a/tools/docker-compose/Dockerfile
+++ b/tools/docker-compose/Dockerfile
@@ -8,7 +8,7 @@ RUN apt-get update && apt-get install -y software-properties-common python-softw
 RUN add-apt-repository -y ppa:chris-lea/redis-server; add-apt-repository -y ppa:chris-lea/zeromq; add-apt-repository -y ppa:chris-lea/node.js; add-apt-repository -y ppa:ansible/ansible; add-apt-repository -y ppa:jal233/proot;
 RUN curl -sL https://deb.nodesource.com/setup_0.12 | bash -
 RUN curl -sL https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
-RUN echo "deb http://download.postgresql.org/pub/repos/apt/dists/ trusty-pgdg main" | tee /etc/apt/sources.list.d/postgres-9.4.list
+RUN echo "deb http://download.postgresql.org/pub/repos/apt/ trusty-pgdg main" | tee /etc/apt/sources.list.d/postgres-9.4.list
 RUN apt-get update && apt-get install -y openssh-server ansible mg vim tmux git mercurial subversion python-dev python-psycopg2 make postgresql-client libpq-dev nodejs python-psutil libxml2-dev libxslt-dev lib32z1-dev libsasl2-dev libldap2-dev libffi-dev libzmq-dev proot python-pip libxmlsec1-dev swig redis-server libgss-dev libkrb5-dev && apt-get autoremove --purge -y && rm -rf /var/lib/apt/lists/*
 RUN pip install flake8 pytest pytest-pythonpath pytest-django pytest-cov pytest-mock dateutils django-debug-toolbar==1.4 pyflakes==1.0.0 virtualenv
 RUN /usr/bin/ssh-keygen -q -t rsa -N "" -f /root/.ssh/id_rsa

From f82b10f018cfc2a1f168a74d1ef5f9b05acbd879 Mon Sep 17 00:00:00 2001
From: AlanCoding
Date: Thu, 29 Sep 2016 13:35:47 -0400
Subject: [PATCH 16/77] Job relaunch permissions made consistent with JT
 prompts

---
 awx/main/access.py                          | 30 ++++++++--
 .../tests/functional/test_rbac_job_start.py | 56 +++++++++++++++++--
 2 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/awx/main/access.py b/awx/main/access.py
index 72636e1776..f3b8ef22e1 100644
--- a/awx/main/access.py
+++ b/awx/main/access.py
@@ -1094,17 +1094,35 @@ class JobAccess(BaseAccess):
 
         if self.user.is_superuser:
             return True
 
-        # If a user can launch the job template then they can relaunch a job from that
-        # job template
+        inventory_access = obj.inventory and self.user in obj.inventory.use_role
+        credential_access = obj.credential and self.user in obj.credential.use_role
+
+        # Check if JT execute access (and related prompts) is sufficient
         if obj.job_template is not None:
-            return self.user in obj.job_template.execute_role
+            prompts_access = True
+            job_fields = {}
+            for fd in obj.job_template._ask_for_vars_dict():
+                job_fields[fd] = getattr(obj, fd)
+            accepted_fields, ignored_fields = obj.job_template._accept_or_ignore_job_kwargs(**job_fields)
+            for fd in ignored_fields:
+                if fd == 'extra_vars':
+                    if ignored_fields[fd]:
+                        prompts_access = False
+                elif job_fields[fd] != getattr(obj.job_template, fd):
+                    # Job has field that is not promptable
+                    prompts_access = False
+            if obj.credential != obj.job_template.credential and not credential_access:
+                prompts_access = False
+            if obj.inventory != obj.job_template.inventory and not inventory_access:
+                prompts_access = False
+            if prompts_access and self.user in obj.job_template.execute_role:
+                return True
 
-        inventory_access = self.user in obj.inventory.use_role
-        credential_access = self.user in obj.credential.use_role
-        org_access = self.user in obj.inventory.organization.admin_role
+        org_access = obj.inventory and self.user in obj.inventory.organization.admin_role
         project_access = obj.project is None or self.user in obj.project.admin_role
 
+        # job can be relaunched if user could make an equivalent JT
         return inventory_access and credential_access and (org_access or project_access)
 
     def can_cancel(self, obj):
diff --git a/awx/main/tests/functional/test_rbac_job_start.py b/awx/main/tests/functional/test_rbac_job_start.py
index 18060126e1..00358d1c38 100644
--- a/awx/main/tests/functional/test_rbac_job_start.py
+++ b/awx/main/tests/functional/test_rbac_job_start.py
@@ -2,11 +2,7 @@ import pytest
 
 from awx.main.models.inventory import Inventory
 from awx.main.models.credential import Credential
-from awx.main.models.jobs import JobTemplate
-
-@pytest.fixture
-def machine_credential():
-    return Credential.objects.create(name='machine-cred', kind='ssh', username='test_user', password='pas4word')
+from awx.main.models.jobs import JobTemplate, Job
 
 @pytest.mark.django_db
 @pytest.mark.job_permissions
@@ -45,3 +41,53 @@ def test_inventory_use_access(inventory, user):
     inventory.use_role.members.add(common_user)
     assert common_user.can_access(Inventory, 'use', inventory)
 
+
+@pytest.mark.django_db
+class TestJobRelaunchAccess:
+    @pytest.fixture
+    def jt_no_prompts(self, machine_credential, inventory):
+        return JobTemplate.objects.create(name='test-job_template', credential=machine_credential, inventory=inventory)
+
+    @pytest.fixture
+    def jt_with_prompts(self, jt_no_prompts):
+        jt_no_prompts.update(
+            ask_tags_on_launch=True, ask_variables_on_launch=True, ask_skip_tags_on_launch=True,
+            ask_limit_on_launch=True, ask_job_type_on_launch=True, ask_inventory_on_launch=True,
+            ask_credential_on_launch=True)
+        return jt_no_prompts
+
+    @pytest.fixture
+    def job_no_prompts(self, jt_no_prompts):
+        return jt_no_prompts.create_unified_job()
+
+    @pytest.fixture
+    def job_with_prompts(self, jt_with_prompts, organization):
+        new_cred = Credential.objects.create(name='new-cred', kind='ssh', username='test_user', password='pas4word')
+        new_inv = Inventory.objects.create(name='new-inv', organization=organization)
+        return jt_with_prompts.create_unified_job(credential=new_cred, inventory=new_inv)
+
+    def test_no_relaunch_without_prompted_fields_access(self, job_with_prompts, rando):
+        "Has JT execute_role but no use_role on inventory & credential - deny relaunch"
+        job_with_prompts.job_template.execute_role.members.add(rando)
+        assert not rando.can_access(Job, 'start', job_with_prompts)
+
+    def test_can_relaunch_with_prompted_fields_access(self, job_with_prompts, rando):
+        "Has use_role on the prompted inventory & credential - allow relaunch"
+        job_with_prompts.job_template.execute_role.members.add(rando)
+        job_with_prompts.credential.use_role.members.add(rando)
+        job_with_prompts.inventory.use_role.members.add(rando)
+        assert rando.can_access(Job, 'start', job_with_prompts)
+
+    def test_no_relaunch_after_limit_change(self, job_no_prompts, rando):
+        "State of the job contradicts the JT state - deny relaunch"
+        job_no_prompts.job_template.execute_role.members.add(rando)
+        job_no_prompts.limit = 'webservers'
+        job_no_prompts.save()
+        assert not rando.can_access(Job, 'start', job_no_prompts)
+
+    def test_can_relaunch_if_limit_was_prompt(self, job_with_prompts, rando):
+        "Job state differs from JT, but only on prompted fields - allow relaunch"
+        job_with_prompts.job_template.execute_role.members.add(rando)
+        job_with_prompts.limit = 'webservers'
+        job_with_prompts.save()
+        assert rando.can_access(Job, 'start', job_with_prompts)
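The heart of the relaunch check introduced above, restated outside the access-class plumbing (a simplified sketch with illustrative field names; the real promptable-field list comes from the JobTemplate model):

    def diff_is_only_prompts(job_fields, jt_fields, promptable):
        # Allow relaunch only when every field where the job differs from
        # its template was a launch-time prompt on that template.
        for name, value in job_fields.items():
            if value == jt_fields.get(name):
                continue                 # unchanged fields are always fine
            if name not in promptable:
                return False             # job drifted on a non-promptable field
        return True

    # A job whose limit differs is relaunchable iff the limit was promptable:
    print(diff_is_only_prompts({'limit': 'webservers'}, {'limit': ''}, {'limit'}))  # -> True
    print(diff_is_only_prompts({'limit': 'webservers'}, {'limit': ''}, set()))      # -> False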
From 115dfac471169269689546d2f552faf88296082f Mon Sep 17 00:00:00 2001
From: AlanCoding
Date: Thu, 29 Sep 2016 16:42:41 -0400
Subject: [PATCH 17/77] do not introspect extra_vars, speed up tests, add base
 case test

---
 awx/main/access.py                          |  5 +---
 .../tests/functional/test_rbac_job_start.py | 25 +++++++++----------
 2 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/awx/main/access.py b/awx/main/access.py
index f3b8ef22e1..d122d48da8 100644
--- a/awx/main/access.py
+++ b/awx/main/access.py
@@ -1105,10 +1105,7 @@ class JobAccess(BaseAccess):
                 job_fields[fd] = getattr(obj, fd)
             accepted_fields, ignored_fields = obj.job_template._accept_or_ignore_job_kwargs(**job_fields)
             for fd in ignored_fields:
-                if fd == 'extra_vars':
-                    if ignored_fields[fd]:
-                        prompts_access = False
-                elif job_fields[fd] != getattr(obj.job_template, fd):
+                if fd != 'extra_vars' and job_fields[fd] != getattr(obj.job_template, fd):
                     # Job has field that is not promptable
                     prompts_access = False
             if obj.credential != obj.job_template.credential and not credential_access:
diff --git a/awx/main/tests/functional/test_rbac_job_start.py b/awx/main/tests/functional/test_rbac_job_start.py
index 00358d1c38..c934973cf4 100644
--- a/awx/main/tests/functional/test_rbac_job_start.py
+++ b/awx/main/tests/functional/test_rbac_job_start.py
@@ -45,26 +45,25 @@ def test_inventory_use_access(inventory, user):
 @pytest.mark.django_db
 class TestJobRelaunchAccess:
     @pytest.fixture
-    def jt_no_prompts(self, machine_credential, inventory):
-        return JobTemplate.objects.create(name='test-job_template', credential=machine_credential, inventory=inventory)
+    def job_no_prompts(self, machine_credential, inventory):
+        jt = JobTemplate.objects.create(name='test-job_template', credential=machine_credential, inventory=inventory)
+        return jt.create_unified_job()
 
     @pytest.fixture
-    def jt_with_prompts(self, jt_no_prompts):
-        jt_no_prompts.update(
+    def job_with_prompts(self, machine_credential, inventory, organization):
+        jt = JobTemplate.objects.create(
+            name='test-job-template-prompts', credential=machine_credential, inventory=inventory,
             ask_tags_on_launch=True, ask_variables_on_launch=True, ask_skip_tags_on_launch=True,
             ask_limit_on_launch=True, ask_job_type_on_launch=True, ask_inventory_on_launch=True,
             ask_credential_on_launch=True)
-        return jt_no_prompts
-
-    @pytest.fixture
-    def job_no_prompts(self, jt_no_prompts):
-        return jt_no_prompts.create_unified_job()
-
-    @pytest.fixture
-    def job_with_prompts(self, jt_with_prompts, organization):
         new_cred = Credential.objects.create(name='new-cred', kind='ssh', username='test_user', password='pas4word')
         new_inv = Inventory.objects.create(name='new-inv', organization=organization)
-        return jt_with_prompts.create_unified_job(credential=new_cred, inventory=new_inv)
+        return jt.create_unified_job(credential=new_cred, inventory=new_inv)
+
+    def test_normal_relaunch_via_job_template(self, job_no_prompts, rando):
+        "Has JT execute_role, job unchanged relative to JT"
+        job_no_prompts.job_template.execute_role.members.add(rando)
+        assert rando.can_access(Job, 'start', job_no_prompts)
 
     def test_no_relaunch_without_prompted_fields_access(self, job_with_prompts, rando):
         "Has JT execute_role but no use_role on inventory & credential - deny relaunch"

From e36a4ed47e515bca959e28256d589e0bf4f77edc Mon Sep 17 00:00:00 2001
From: mabashian
Date: Fri, 30 Sep 2016 10:14:08 -0400
Subject: [PATCH 18/77] Updated user's teams list empty text to read: "This
 user is not a member of any teams"

---
 awx/ui/client/src/forms/Users.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/awx/ui/client/src/forms/Users.js b/awx/ui/client/src/forms/Users.js
index ddcda6e296..2ce2f69712 100644
--- a/awx/ui/client/src/forms/Users.js
+++ b/awx/ui/client/src/forms/Users.js
@@ -141,7 +141,7 @@ export default
             open: false,
             index: false,
             actions: {},
-
+            emptyListText: 'This user is not a member of any teams',
             fields: {
                 name: {
                     key: true,

From 4ca8bc1eda6c5bc8059d8e29048c9a0f702b7f90 Mon Sep 17 00:00:00 2001
From: Graham Mainwaring
Date: Fri, 30 Sep 2016 13:28:14 -0400
Subject: [PATCH 19/77] Merge pull request #3630 from ghjm/restore_apache_fix

Use correct Apache service name when restoring on EL6

From b4c5e31a1dadf4ce8de4d30f2cc32ca7c6b07bea Mon Sep 17 00:00:00 2001
From: mabashian
Date: Mon, 3 Oct 2016 09:56:57 -0400
Subject: [PATCH 20/77] Stay on the projects/inv manage pages when syncs are
 launched from those pages. Otherwise, direct the user to the stdout pages
 for those individual updates.

---
 .../job-submission-factories/launchjob.factory.js | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/awx/ui/client/src/job-submission/job-submission-factories/launchjob.factory.js b/awx/ui/client/src/job-submission/job-submission-factories/launchjob.factory.js
index 3968eaf484..6c3d861afa 100644
--- a/awx/ui/client/src/job-submission/job-submission-factories/launchjob.factory.js
+++ b/awx/ui/client/src/job-submission/job-submission-factories/launchjob.factory.js
@@ -130,12 +130,20 @@ export default
                 goToJobDetails('managementJobStdout');
             }
             else if(_.has(data, 'project_update')) {
-                if($state.current.name !== 'projects') {
+                // If we are on the projects list or any child state of that list
+                // then we want to stay on that page.  Otherwise go to the stdout
+                // view.
+                if(!$state.includes('projects')) {
                     goToJobDetails('scmUpdateStdout');
                 }
             }
             else if(_.has(data, 'inventory_update')) {
-                goToJobDetails('inventorySyncStdout');
+                // If we are on the inventory manage page or any child state of that
+                // page then we want to stay on that page.  Otherwise go to the stdout
+                // view.
+                if(!$state.includes('inventoryManage')) {
+                    goToJobDetails('inventorySyncStdout');
+                }
             }
         }
         if(scope.clearDialog) {

From 16129b6e64b4a26068669979f42f7c3b1bff37af Mon Sep 17 00:00:00 2001
From: Shane McDonald
Date: Mon, 3 Oct 2016 09:36:33 -0400
Subject: [PATCH 21/77] Add pycparser to SRC_ONLY_PKGS

Fixes stuff due to https://github.com/pyca/cryptography/issues/3187
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 2ff93989dd..68f6faffa1 100644
--- a/Makefile
+++ b/Makefile
@@ -23,7 +23,7 @@ CLIENT_TEST_DIR ?= build_test
 
 # Python packages to install only from source (not from binary wheels)
 # Comma separated list
-SRC_ONLY_PKGS ?= cffi
+SRC_ONLY_PKGS ?= cffi,pycparser
 
 # Determine appropriate shasum command
 UNAME_S := $(shell uname -s)

From 6097bc2f176b4305aad81810b61c20b015a0489e Mon Sep 17 00:00:00 2001
From: mabashian
Date: Mon, 3 Oct 2016 11:34:27 -0400
Subject: [PATCH 22/77] Fixed bug where editing a host was showing oddly
 formatted YAML in the variables section

---
 .../src/inventories/manage/hosts/hosts-edit.controller.js | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js b/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js
index 9098e53333..bc96df0eaf 100644
--- a/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js
+++ b/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js
@@ -5,8 +5,8 @@
  *************************************************/
 
 export default
-    ['$state', '$stateParams', '$scope', 'HostForm', 'ParseTypeChange', 'GenerateForm', 'HostManageService', 'host',
-    function($state, $stateParams, $scope, HostForm, ParseTypeChange, GenerateForm, HostManageService, host){
+    ['$state', '$stateParams', '$scope', 'HostForm', 'ParseTypeChange', 'GenerateForm', 'HostManageService', 'host', 'ParseVariableString',
+    function($state, $stateParams, $scope, HostForm, ParseTypeChange, GenerateForm, HostManageService, host, ParseVariableString){
         var generator = GenerateForm,
             form = HostForm;
         $scope.parseType = 'yaml';
@@ -31,12 +31,12 @@
         var init = function(){
             $scope.host = host;
             generator.inject(form, {mode: 'edit', related: false, id: 'Inventory-hostManage--panel', scope: $scope});
-            $scope.variables = host.variables === '' ? '---' : host.variables;
+            $scope.variables = host.variables === '' ? '---' : ParseVariableString(host.variables);
             $scope.name = host.name;
            $scope.description = host.description;
             ParseTypeChange({
                 scope: $scope,
-                field_id: 'host_variables',
+                field_id: 'host_variables'
             });
         };
         init();

From 9fc30643cae55598c71e8c4aaace58fb19b09f45 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Mon, 3 Oct 2016 16:15:06 -0400
Subject: [PATCH 23/77] Prevent filtering on password fields.

---
 awx/api/filters.py                      |  7 +++++--
 awx/main/tests/unit/api/test_filters.py | 21 ++++++++++++++++++++-
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/awx/api/filters.py b/awx/api/filters.py
index 55155224c4..08a26735d2 100644
--- a/awx/api/filters.py
+++ b/awx/api/filters.py
@@ -14,7 +14,7 @@ from django.contrib.contenttypes.models import ContentType
 from django.utils.encoding import force_text
 
 # Django REST Framework
-from rest_framework.exceptions import ParseError
+from rest_framework.exceptions import ParseError, PermissionDenied
 from rest_framework.filters import BaseFilterBackend
 
 # Ansible Tower
@@ -97,7 +97,10 @@ class FieldLookupBackend(BaseFilterBackend):
 
                 new_parts.append(name)
 
-                if name == 'pk':
+                if name in getattr(model, 'PASSWORD_FIELDS', ()):
+                    raise PermissionDenied('Filtering on password fields is not allowed.')
+                elif name == 'pk':
                     field = model._meta.pk
                 else:
                     field = model._meta.get_field_by_name(name)[0]
diff --git a/awx/main/tests/unit/api/test_filters.py b/awx/main/tests/unit/api/test_filters.py
index 8f045db877..55ef257567 100644
--- a/awx/main/tests/unit/api/test_filters.py
+++ b/awx/main/tests/unit/api/test_filters.py
@@ -1,7 +1,8 @@
 import pytest
 
+from rest_framework.exceptions import PermissionDenied
 from awx.api.filters import FieldLookupBackend
-from awx.main.models import JobTemplate
+from awx.main.models import Credential, JobTemplate
 
 @pytest.mark.parametrize(u"empty_value", [u'', ''])
 def test_empty_in(empty_value):
@@ -15,3 +16,21 @@ def test_valid_in(valid_value):
     field_lookup = FieldLookupBackend()
     value, new_lookup = field_lookup.value_to_python(JobTemplate, 'project__in', valid_value)
     assert 'foo' in value
+
+@pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in'])
+@pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS)
+def test_filter_on_password_field(password_field, lookup_suffix):
+    field_lookup = FieldLookupBackend()
+    lookup = '__'.join(filter(None, [password_field, lookup_suffix]))
+    with pytest.raises(PermissionDenied) as excinfo:
+        field, new_lookup = field_lookup.get_field_from_lookup(Credential, lookup)
+    assert 'not allowed' in str(excinfo.value)
+
+@pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in'])
+@pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS)
+def test_filter_on_related_password_field(password_field, lookup_suffix):
+    field_lookup = FieldLookupBackend()
+    lookup = '__'.join(filter(None, ['credential', password_field, lookup_suffix]))
+    with pytest.raises(PermissionDenied) as excinfo:
+        field, new_lookup = field_lookup.get_field_from_lookup(JobTemplate, lookup)
+    assert 'not allowed' in str(excinfo.value)
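What the password-field guard above prevents, sketched as the attack it closes off (a hypothetical helper; count_matching stands in for any filterable API endpoint): without it, lookups like credential__password__startswith act as an oracle that leaks a stored secret one character per query:

    import string

    def guess_secret(count_matching):
        # count_matching(lookup, value) -> number of rows the API reports;
        # any nonzero count confirms the next character of the secret.
        prefix = ''
        while True:
            for ch in string.printable:
                if count_matching('password__startswith', prefix + ch):
                    prefix += ch
                    break
            else:
                return prefix  # no extension matched; the guess is complete

Raising PermissionDenied on any lookup that traverses a model's PASSWORD_FIELDS, including related lookups from JobTemplate through credential, removes the oracle entirely, which is what the new unit tests pin down.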
From dc9344c82240205d8ce9447e42c4948d1ab0ee44 Mon Sep 17 00:00:00 2001
From: Leigh Johnson
Date: Thu, 6 Oct 2016 11:53:58 -0400
Subject: [PATCH 24/77] Host event modal - wrap long strings.

---
 awx/ui/client/src/job-detail/host-event/host-event.block.less | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/awx/ui/client/src/job-detail/host-event/host-event.block.less b/awx/ui/client/src/job-detail/host-event/host-event.block.less
index 6edfc450ec..b22d52d36b 100644
--- a/awx/ui/client/src/job-detail/host-event/host-event.block.less
+++ b/awx/ui/client/src/job-detail/host-event/host-event.block.less
@@ -125,6 +125,8 @@
         .OnePlusTwo-left--detailsRow;
     }
     .HostEvent-field--content{
+        word-wrap: break-word;
+        max-width: 13em;
         flex: 0 1 13em;
     }
     .HostEvent-details--left, .HostEvent-details--right{
@@ -138,6 +140,7 @@
         flex: 0 1 25em;
     }
     .HostEvent-field--content{
+        max-width: 15em;
         flex: 0 1 15em;
         align-self: flex-end;
     }

From f35fcf86e73739b85a32a1ba18e68ac2b3036c98 Mon Sep 17 00:00:00 2001
From: John Mitchell
Date: Thu, 6 Oct 2016 16:13:38 -0400
Subject: [PATCH 25/77] fixed search pagination url issue

---
 awx/ui/client/src/helpers/refresh.js          | 22 ++++++++++++++++++-
 .../client/src/search/tagSearch.controller.js |  3 ++-
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/awx/ui/client/src/helpers/refresh.js b/awx/ui/client/src/helpers/refresh.js
index 5755cd934e..57c806f64a 100644
--- a/awx/ui/client/src/helpers/refresh.js
+++ b/awx/ui/client/src/helpers/refresh.js
@@ -71,7 +71,27 @@ export default
 
             // if you're editing an object, make sure you're on the right
             // page to display the element you are editing
-            if (scope.addedItem) {
+            if (params.fromSearch) {
+                var url = params.url;
+                // for a search, we want to make sure to get the first page of
+                // results
+                if (url.indexOf("page=") > -1) {
+                    // if the url includes a page, remove that part
+                    var urlArr = url.split("page=");
+                    var afterPageUrlArr = urlArr[1].split("&");
+
+                    if (afterPageUrlArr.length > 1) {
+                        // if there's stuff after the page part,
+                        // put that back in
+                        afterPageUrlArr.shift();
+                        url = urlArr[0] +
+                            afterPageUrlArr.join("&");
+                    } else {
+                        url = urlArr[0];
+                    }
+                }
+                getPage(url);
+            } else if (scope.addedItem) {
                 id = scope.addedItem + "";
                 delete scope.addedItem;
                 $rootScope.rowBeingEdited = id;
diff --git a/awx/ui/client/src/search/tagSearch.controller.js b/awx/ui/client/src/search/tagSearch.controller.js
index e50a25670e..0209329110 100644
--- a/awx/ui/client/src/search/tagSearch.controller.js
+++ b/awx/ui/client/src/search/tagSearch.controller.js
@@ -72,7 +72,8 @@ export default ['$scope', 'Refresh', 'tagSearchService', '$stateParams',
                 scope: listScope,
                 set: set,
                 iterator: iterator,
-                url: url
+                url: url,
+                fromSearch: true
             });
 
             listScope.$on('PostRefresh', function() {

From 58817367697bb6cdd595affefaf217ab6b4c2e5b Mon Sep 17 00:00:00 2001
From: James Laska
Date: Wed, 5 Oct 2016 10:48:47 -0400
Subject: [PATCH 26/77] Properly detect enabled foreman inventory

Related #3632
---
 awx/settings/defaults.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py
index e2995d0dbd..603a979cda 100644
--- a/awx/settings/defaults.py
+++ b/awx/settings/defaults.py
@@ -679,7 +679,7 @@ OPENSTACK_INSTANCE_ID_VAR = 'openstack.id'
 # ----- Foreman -----
 # ---------------------
 SATELLITE6_ENABLED_VAR = 'foreman.enabled'
-SATELLITE6_ENABLED_VALUE = 'true'
+SATELLITE6_ENABLED_VALUE = True
 SATELLITE6_GROUP_FILTER = r'^.+$'
 SATELLITE6_HOST_FILTER = r'^.+$'
 SATELLITE6_EXCLUDE_EMPTY_GROUPS = True

From 77a56b3dfd1c766764660172498cdbc7ddd0d7c3 Mon Sep 17 00:00:00 2001
From: James Laska
Date: Fri, 7 Oct 2016 09:50:18 -0400
Subject: [PATCH 27/77] Properly recognize foreman.enabled

The ENABLED_VALUE must always be a string. Thanks to @cchurch for the
suggestion.

Resolves #3632
---
 awx/settings/defaults.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py
index 603a979cda..771155a523 100644
--- a/awx/settings/defaults.py
+++ b/awx/settings/defaults.py
@@ -679,7 +679,7 @@ OPENSTACK_INSTANCE_ID_VAR = 'openstack.id'
 # ----- Foreman -----
 # ---------------------
 SATELLITE6_ENABLED_VAR = 'foreman.enabled'
-SATELLITE6_ENABLED_VALUE = True
+SATELLITE6_ENABLED_VALUE = 'True'
 SATELLITE6_GROUP_FILTER = r'^.+$'
 SATELLITE6_HOST_FILTER = r'^.+$'
 SATELLITE6_EXCLUDE_EMPTY_GROUPS = True
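The two foreman.enabled commits above are easy to conflate; here is the failure mode they chased, assuming (as the pair of fixes implies) that the importer compares the setting against the text form of the host variable:

    # The inventory variable arrives as parsed YAML/JSON, i.e. a real boolean:
    host_enabled = True

    print(str(host_enabled) == 'true')   # -> False: the original 'true' never matched
    print(host_enabled == 'True')        # -> False: PATCH 26's bare boolean setting
                                         #    still failed where a string was expected
    print(str(host_enabled) == 'True')   # -> True: str(True) is 'True', PATCH 27's pin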
null : $scope.variables, + variables: json_data, name: $scope.name, description: $scope.description, inventory: inventoryData.id diff --git a/awx/ui/client/src/inventories/manage/groups/groups-edit.controller.js b/awx/ui/client/src/inventories/manage/groups/groups-edit.controller.js index 941789f39d..2b7deefad4 100644 --- a/awx/ui/client/src/inventories/manage/groups/groups-edit.controller.js +++ b/awx/ui/client/src/inventories/manage/groups/groups-edit.controller.js @@ -7,10 +7,10 @@ export default ['$state', '$stateParams', '$scope', 'GroupForm', 'CredentialList', 'inventoryScriptsListObject', 'ToggleNotification', 'ParseVariableString', 'ParseTypeChange', 'GenerateForm', 'LookUpInit', 'RelatedSearchInit', 'RelatedPaginateInit', 'NotificationsListInit', - 'GroupManageService','GetChoices', 'GetBasePath', 'CreateSelect2', 'GetSourceTypeOptions', 'groupData', 'inventorySourceData', + 'GroupManageService','GetChoices', 'GetBasePath', 'CreateSelect2', 'GetSourceTypeOptions', 'groupData', 'inventorySourceData', 'ToJSON', function($state, $stateParams, $scope, GroupForm, CredentialList, InventoryScriptsList, ToggleNotification, ParseVariableString, ParseTypeChange, GenerateForm, LookUpInit, RelatedSearchInit, RelatedPaginateInit, NotificationsListInit, - GroupManageService, GetChoices, GetBasePath, CreateSelect2, GetSourceTypeOptions, groupData, inventorySourceData){ + GroupManageService, GetChoices, GetBasePath, CreateSelect2, GetSourceTypeOptions, groupData, inventorySourceData, ToJSON){ var generator = GenerateForm, form = GroupForm(); @@ -22,15 +22,18 @@ $state.go('^'); }; $scope.formSave = function(){ - var params, source; + var params, source, + json_data = ToJSON($scope.parseType, $scope.variables, true); + // group fields var group = { - variables: $scope.variables === '---' || $scope.variables === '{}' ? null : $scope.variables, + variables: json_data, name: $scope.name, description: $scope.description, inventory: $scope.inventory, id: groupData.id }; + if ($scope.source){ // inventory_source fields params = { diff --git a/awx/ui/client/src/inventories/manage/hosts/hosts-add.controller.js b/awx/ui/client/src/inventories/manage/hosts/hosts-add.controller.js index d0dd6c3c86..1958a24353 100644 --- a/awx/ui/client/src/inventories/manage/hosts/hosts-add.controller.js +++ b/awx/ui/client/src/inventories/manage/hosts/hosts-add.controller.js @@ -5,8 +5,8 @@ *************************************************/ export default - ['$state', '$stateParams', '$scope', 'HostForm', 'ParseTypeChange', 'GenerateForm', 'HostManageService', - function($state, $stateParams, $scope, HostForm, ParseTypeChange, GenerateForm, HostManageService){ + ['$state', '$stateParams', '$scope', 'HostForm', 'ParseTypeChange', 'GenerateForm', 'HostManageService', 'ToJSON', + function($state, $stateParams, $scope, HostForm, ParseTypeChange, GenerateForm, HostManageService, ToJSON){ var generator = GenerateForm, form = HostForm; $scope.parseType = 'yaml'; @@ -17,8 +17,9 @@ $scope.host.enabled = !$scope.host.enabled; }; $scope.formSave = function(){ - var params = { - variables: $scope.variables === '---' || $scope.variables === '{}' ? null : $scope.variables, + var json_data = ToJSON($scope.parseType, $scope.variables, true), + params = { + variables: json_data,// $scope.variables === '---' || $scope.variables === '{}' ? 
null : $scope.variables, name: $scope.name, description: $scope.description, enabled: $scope.host.enabled, diff --git a/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js b/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js index bc96df0eaf..7480349505 100644 --- a/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js +++ b/awx/ui/client/src/inventories/manage/hosts/hosts-edit.controller.js @@ -5,8 +5,8 @@ *************************************************/ export default - ['$state', '$stateParams', '$scope', 'HostForm', 'ParseTypeChange', 'GenerateForm', 'HostManageService', 'host', 'ParseVariableString', - function($state, $stateParams, $scope, HostForm, ParseTypeChange, GenerateForm, HostManageService, host, ParseVariableString){ + ['$state', '$stateParams', '$scope', 'HostForm', 'ParseTypeChange', 'GenerateForm', 'HostManageService', 'host', 'ParseVariableString', 'ToJSON', + function($state, $stateParams, $scope, HostForm, ParseTypeChange, GenerateForm, HostManageService, host, ParseVariableString, ToJSON){ var generator = GenerateForm, form = HostForm; $scope.parseType = 'yaml'; @@ -17,9 +17,10 @@ $scope.host.enabled = !$scope.host.enabled; }; $scope.formSave = function(){ - var host = { + var json_data = ToJSON($scope.parseType, $scope.variables, true), + host = { id: $scope.host.id, - variables: $scope.variables === '---' || $scope.variables === '{}' ? null : $scope.variables, + variables: json_data, name: $scope.name, description: $scope.description, enabled: $scope.host.enabled From 236d4df4605933df360b6d584d1761c1eff37b11 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 10 Oct 2016 12:31:08 -0400 Subject: [PATCH 29/77] fixed manual project select2 population --- awx/ui/client/src/controllers/Projects.js | 16 +++++++++++----- awx/ui/client/src/helpers/ProjectPath.js | 1 + 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/awx/ui/client/src/controllers/Projects.js b/awx/ui/client/src/controllers/Projects.js index 60692d581e..fce2ba8bde 100644 --- a/awx/ui/client/src/controllers/Projects.js +++ b/awx/ui/client/src/controllers/Projects.js @@ -596,6 +596,16 @@ export function ProjectsEdit($scope, $rootScope, $compile, $location, $log, }); }); + if ($scope.pathsReadyRemove) { + $scope.pathsReadyRemove(); + } + $scope.pathsReadyRemove = $scope.$on('pathsReady', function () { + CreateSelect2({ + element: '#local-path-select', + multiple: false + }); + }); + // After the project is loaded, retrieve each related set if ($scope.projectLoadedRemove) { $scope.projectLoadedRemove(); @@ -623,6 +633,7 @@ export function ProjectsEdit($scope, $rootScope, $compile, $location, $log, $scope.project_local_paths = opts; $scope.local_path = $scope.project_local_paths[0]; $scope.base_dir = 'You do not have access to view this property'; + $scope.$emit('pathsReady'); } LookUpInit({ @@ -718,11 +729,6 @@ export function ProjectsEdit($scope, $rootScope, $compile, $location, $log, multiple: false }); - CreateSelect2({ - element: '#local-path-select', - multiple: false - }); - $scope.scmBranchLabel = ($scope.scm_type.value === 'svn') ? 'Revision #' : 'SCM Branch'; // Initialize related search functions. Doing it here to make sure relatedSets object is populated. 
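The hunk above defers creating the local-path select2 widget until a 'pathsReady' event confirms the options are actually on scope; the ProjectPath helper below emits the same event once its request completes. A minimal sketch of this emit-then-initialize pattern, where fetchOptions(), 'optionsReady', and the element id are illustrative stand-ins rather than AWX names:

    // Register the listener before kicking off the request so a fast
    // response cannot be missed.
    $scope.$on('optionsReady', function () {
        CreateSelect2({
            element: '#example-select', // hypothetical element id
            multiple: false
        });
    });

    // Emit only after the async data is actually on scope.
    fetchOptions().then(function (opts) { // fetchOptions() is a stand-in
        $scope.example_options = opts;
        $scope.$emit('optionsReady');
    });

Because $emit starts dispatching at the emitting scope itself, a listener registered on that same scope fires without needing $broadcast.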
diff --git a/awx/ui/client/src/helpers/ProjectPath.js b/awx/ui/client/src/helpers/ProjectPath.js index 91761fce0e..5fdc619aea 100644 --- a/awx/ui/client/src/helpers/ProjectPath.js +++ b/awx/ui/client/src/helpers/ProjectPath.js @@ -78,6 +78,7 @@ export default // trigger display of alert block when scm_type == manual scope.showMissingPlaybooksAlert = true; } + scope.$emit('pathsReady'); }) .error(function (data, status) { ProcessErrors(scope, data, status, null, { hdr: 'Error!', From 57c599df41384ab387a37b634ea9e9f8ff97fbe9 Mon Sep 17 00:00:00 2001 From: Shane McDonald Date: Wed, 12 Oct 2016 16:07:25 -0400 Subject: [PATCH 30/77] Exclude test directory from setup bundle tarballs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also using the rsync strategy in the tar-build target. This doesn’t leave behind the empty setup/test directory. --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 68f6faffa1..bc628758a1 100644 --- a/Makefile +++ b/Makefile @@ -663,10 +663,10 @@ release_build: # Build setup tarball tar-build/$(SETUP_TAR_FILE): @mkdir -p tar-build - @cp -a setup tar-build/$(SETUP_TAR_NAME) + @rsync -az --exclude /test setup/ tar-build/$(SETUP_TAR_NAME) @rsync -az docs/licenses tar-build/$(SETUP_TAR_NAME)/ @cd tar-build/$(SETUP_TAR_NAME) && sed -e 's#%NAME%#$(NAME)#;s#%VERSION%#$(VERSION)#;s#%RELEASE%#$(RELEASE)#;' group_vars/all.in > group_vars/all - @cd tar-build && tar -czf $(SETUP_TAR_FILE) --exclude "*/all.in" --exclude "**/test/*" $(SETUP_TAR_NAME)/ + @cd tar-build && tar -czf $(SETUP_TAR_FILE) --exclude "*/all.in" $(SETUP_TAR_NAME)/ @ln -sf $(SETUP_TAR_FILE) tar-build/$(SETUP_TAR_LINK) tar-build/$(SETUP_TAR_CHECKSUM): @@ -703,7 +703,7 @@ setup-bundle-build: # TODO - Somehow share implementation with setup_tarball setup-bundle-build/$(OFFLINE_TAR_FILE): - cp -a setup setup-bundle-build/$(OFFLINE_TAR_NAME) + rsync -az --exclude /test setup/ setup-bundle-build/$(OFFLINE_TAR_NAME) rsync -az docs/licenses setup-bundle-build/$(OFFLINE_TAR_NAME)/ cd setup-bundle-build/$(OFFLINE_TAR_NAME) && sed -e 's#%NAME%#$(NAME)#;s#%VERSION%#$(VERSION)#;s#%RELEASE%#$(RELEASE)#;' group_vars/all.in > group_vars/all $(PYTHON) $(DEPS_SCRIPT) -d $(DIST) -r $(DIST_MAJOR) -u $(AW_REPO_URL) -s setup-bundle-build/$(OFFLINE_TAR_NAME) -v -v -v From 280d265d4ede175770f2815c22d831c9366dd910 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Thu, 13 Oct 2016 12:29:02 -0400 Subject: [PATCH 31/77] filter internal User.admin_roles from the /roles API list view --- awx/main/models/rbac.py | 6 +++++- awx/main/tests/functional/test_rbac_api.py | 1 - 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/awx/main/models/rbac.py b/awx/main/models/rbac.py index 5e040b85a1..be724069d7 100644 --- a/awx/main/models/rbac.py +++ b/awx/main/models/rbac.py @@ -389,7 +389,11 @@ class Role(models.Model): ) ''' % sql_params] ) - return qs + + # Do not show roles that are of content_type(User) + # these roles are for internal only user. 
+ user_type = ContentType.objects.get_for_model(User) + return qs.exclude(content_type__pk=user_type.id) @staticmethod @check_singleton diff --git a/awx/main/tests/functional/test_rbac_api.py b/awx/main/tests/functional/test_rbac_api.py index 54dcc8deb5..0076c59c9e 100644 --- a/awx/main/tests/functional/test_rbac_api.py +++ b/awx/main/tests/functional/test_rbac_api.py @@ -51,7 +51,6 @@ def test_get_roles_list_user(organization, inventory, team, get, user): assert Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).id in role_hash assert organization.admin_role.id in role_hash assert organization.member_role.id in role_hash - assert this_user.admin_role.id in role_hash assert custom_role.id in role_hash assert inventory.admin_role.id not in role_hash From d89c07a25b3ebd5e94c47c6b3dcf0aef2ec45237 Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Mon, 17 Oct 2016 15:18:46 -0400 Subject: [PATCH 32/77] Remove support@ansible.com, clean up some old links. Cherry-picked from devel. --- setup.py | 6 +++--- tools/docker-compose/ansible_tower.egg-info/PKG-INFO | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index e6e25d4eec..ff268cd81d 100755 --- a/setup.py +++ b/setup.py @@ -79,13 +79,13 @@ setup( name='ansible-tower', version=__version__.split("-")[0], # FIXME: Should keep full version here? author='Ansible, Inc.', - author_email='support@ansible.com', + author_email='info@ansible.com', description='ansible-tower: API, UI and Task Engine for Ansible', - long_description='AWX provides a web-based user interface, REST API and ' + long_description='Ansible Tower provides a web-based user interface, REST API and ' 'task engine built on top of Ansible', license='Proprietary', keywords='ansible', - url='http://github.com/ansible/ansible-commander', + url='http://github.com/ansible/ansible-tower', packages=['awx'], include_package_data=True, zip_safe=False, diff --git a/tools/docker-compose/ansible_tower.egg-info/PKG-INFO b/tools/docker-compose/ansible_tower.egg-info/PKG-INFO index 61643c7c28..0d78373ace 100644 --- a/tools/docker-compose/ansible_tower.egg-info/PKG-INFO +++ b/tools/docker-compose/ansible_tower.egg-info/PKG-INFO @@ -2,11 +2,11 @@ Metadata-Version: 1.1 Name: ansible-tower Version: 3.0.0-0.devel Summary: ansible-tower: API, UI and Task Engine for Ansible -Home-page: http://github.com/ansible/ansible-commander +Home-page: http://github.com/ansible/ansible-tower Author: Ansible, Inc. 
-Author-email: support@ansible.com +Author-email: info@ansible.com License: Proprietary -Description: AWX provides a web-based user interface, REST API and task engine built on top of Ansible +Description: Ansible Tower provides a web-based user interface, REST API and task engine built on top of Ansible Keywords: ansible Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable From 6d07699b2954db9c6662586934f58c863e07653b Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 18 Oct 2016 18:02:30 -0400 Subject: [PATCH 33/77] bump shade version --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index a6bda0f61c..5d8a8365db 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -115,7 +115,7 @@ redis==2.10.3 requests-oauthlib==0.5.0 requests==2.9.1 requestsexceptions==1.1.1 -shade==1.4.0 +shade==1.12.1 simplejson==3.8.1 six==1.9.0 slackclient==0.16 From b43945b0f27f1b1dca4454977e3bab0df9d3e41a Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 19 Oct 2016 12:33:53 -0400 Subject: [PATCH 34/77] Revert "bump shade version" This reverts commit d0a7313a4ccb1cfca3c7655b34b61c6cb6861909. --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 5d8a8365db..a6bda0f61c 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -115,7 +115,7 @@ redis==2.10.3 requests-oauthlib==0.5.0 requests==2.9.1 requestsexceptions==1.1.1 -shade==1.12.1 +shade==1.4.0 simplejson==3.8.1 six==1.9.0 slackclient==0.16 From ec140fb7daa7ae90a3f99dd6876636e3436fd65d Mon Sep 17 00:00:00 2001 From: Leigh Johnson Date: Wed, 19 Oct 2016 12:45:22 -0500 Subject: [PATCH 35/77] Hack copying of job_template.related.survey_spec into ui job copy flow, resolves #3737 --- .../copy/job-templates-copy.service.js | 89 +++++++++++-------- 1 file changed, 53 insertions(+), 36 deletions(-) diff --git a/awx/ui/client/src/job-templates/copy/job-templates-copy.service.js b/awx/ui/client/src/job-templates/copy/job-templates-copy.service.js index 53e86a78bb..a6f937cd58 100644 --- a/awx/ui/client/src/job-templates/copy/job-templates-copy.service.js +++ b/awx/ui/client/src/job-templates/copy/job-templates-copy.service.js @@ -5,39 +5,56 @@ *************************************************/ export default - ['$rootScope', 'Rest', 'ProcessErrors', 'GetBasePath', 'moment', - function($rootScope, Rest, ProcessErrors, GetBasePath, moment){ - return { - get: function(id){ - var defaultUrl = GetBasePath('job_templates') + '?id=' + id; - Rest.setUrl(defaultUrl); - return Rest.get() - .success(function(res){ - return res; - }) - .error(function(res, status){ - ProcessErrors($rootScope, res, status, null, {hdr: 'Error!', - msg: 'Call to '+ defaultUrl + ' failed. Return status: '+ status}); - }); - }, - set: function(data){ - var defaultUrl = GetBasePath('job_templates'); - Rest.setUrl(defaultUrl); - var name = this.buildName(data.results[0].name); - data.results[0].name = name + ' @ ' + moment().format('h:mm:ss a'); // 2:49:11 pm - return Rest.post(data.results[0]) - .success(function(res){ - return res; - }) - .error(function(res, status){ - ProcessErrors($rootScope, res, status, null, {hdr: 'Error!', - msg: 'Call to '+ defaultUrl + ' failed. 
Return status: '+ status}); - }); - }, - buildName: function(name){ - var result = name.split('@')[0]; - return result; - } - }; - } - ]; + ['$rootScope', 'Rest', 'ProcessErrors', 'GetBasePath', 'moment', + function($rootScope, Rest, ProcessErrors, GetBasePath, moment){ + return { + get: function(id){ + var defaultUrl = GetBasePath('job_templates') + '?id=' + id; + Rest.setUrl(defaultUrl); + return Rest.get() + .success(function(res){ + return res; + }) + .error(function(res, status){ + ProcessErrors($rootScope, res, status, null, {hdr: 'Error!', + msg: 'Call to '+ defaultUrl + ' failed. Return status: '+ status}); + }); + }, + getSurvey: function(endpoint){ + Rest.setUrl(endpoint); + return Rest.get(); + }, + copySurvey: function(source, target){ + return this.getSurvey(source.related.survey_spec).success( (data) => { + Rest.setUrl(target.related.survey_spec); + return Rest.post(data); + }); + }, + set: function(data){ + var defaultUrl = GetBasePath('job_templates'); + var self = this; + Rest.setUrl(defaultUrl); + var name = this.buildName(data.results[0].name); + data.results[0].name = name + ' @ ' + moment().format('h:mm:ss a'); // 2:49:11 pm + return Rest.post(data.results[0]) + .success(function(job_template_res){ + // also copy any associated survey_spec + if (data.results[0].related.survey_spec){ + return self.copySurvey(data.results[0], job_template_res).success( () => job_template_res); + } + else{ + return job_template_res; + } + }) + .error(function(res, status){ + ProcessErrors($rootScope, res, status, null, {hdr: 'Error!', + msg: 'Call to '+ defaultUrl + ' failed. Return status: '+ status}); + }); + }, + buildName: function(name){ + var result = name.split('@')[0]; + return result; + } + }; + } + ]; From 8a4d6b6f35b9fac703e4fc84d5e36c6682dc3371 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 19 Oct 2016 14:07:20 -0400 Subject: [PATCH 36/77] bump shade version --- requirements/requirements_ansible.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements_ansible.txt b/requirements/requirements_ansible.txt index e62e3e5ead..067e57ae21 100644 --- a/requirements/requirements_ansible.txt +++ b/requirements/requirements_ansible.txt @@ -71,7 +71,7 @@ rax-default-network-flags-python-novaclient-ext==0.3.2 rax-scheduled-images-python-novaclient-ext==0.3.1 requests==2.11.0 requestsexceptions==1.1.1 -shade==1.4.0 +shade==1.12.1 simplejson==3.8.1 six==1.9.0 stevedore==1.10.0 From 49c90b8f2d86bc2ecad7328804dc13f3df94d256 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 19 Oct 2016 14:17:04 -0400 Subject: [PATCH 37/77] Revert "bump shade version" This reverts commit 0125d114fdf8be5f3a5fdab856c92d82a2988607. --- requirements/requirements_ansible.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements_ansible.txt b/requirements/requirements_ansible.txt index 067e57ae21..e62e3e5ead 100644 --- a/requirements/requirements_ansible.txt +++ b/requirements/requirements_ansible.txt @@ -71,7 +71,7 @@ rax-default-network-flags-python-novaclient-ext==0.3.2 rax-scheduled-images-python-novaclient-ext==0.3.1 requests==2.11.0 requestsexceptions==1.1.1 -shade==1.12.1 +shade==1.4.0 simplejson==3.8.1 six==1.9.0 stevedore==1.10.0 From 5c69ed2e9f166e2d7b54bfe26813b1df3057b7a6 Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Wed, 19 Oct 2016 16:15:09 -0400 Subject: [PATCH 38/77] More regions! 
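Adds the us-east-2 (Ohio) and ap-south-1 (Mumbai) regions to the EC2 region-name map. The regions boto can enumerate are limited to the endpoint data bundled with the installed release, which is why the next patch bumps boto to 2.43.0. A quick sanity check, assuming a boto 2.x new enough to know the added regions:

    import boto.ec2

    # regions() reflects the endpoint data shipped with the installed
    # boto, so the new names only appear after upgrading.
    names = sorted(r.name for r in boto.ec2.regions())
    assert 'us-east-2' in names and 'ap-south-1' in names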
--- awx/settings/defaults.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 771155a523..6711a5872b 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -532,6 +532,7 @@ INV_ENV_VARIABLE_BLACKLIST = ("HOME", "USER", "_", "TERM") # http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region EC2_REGION_NAMES = { 'us-east-1': 'US East (Northern Virginia)', + 'us-east-2': 'US East (Ohio)', 'us-west-2': 'US West (Oregon)', 'us-west-1': 'US West (Northern California)', 'eu-central-1': 'EU (Frankfurt)', @@ -540,6 +541,7 @@ EC2_REGION_NAMES = { 'ap-southeast-2': 'Asia Pacific (Sydney)', 'ap-northeast-1': 'Asia Pacific (Tokyo)', 'ap-northeast-2': 'Asia Pacific (Seoul)', + 'ap-south-1': 'Asia Pacific (Mumbai)', 'sa-east-1': 'South America (Sao Paulo)', 'us-gov-west-1': 'US West (GovCloud)', 'cn-north-1': 'China (Beijing)', From 94b20d4fc6d86144cfab0ee5d50e2a0ca324a240 Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Wed, 19 Oct 2016 16:16:05 -0400 Subject: [PATCH 39/77] Also bump boto for new regions, per ryansb. --- requirements/requirements.txt | 2 +- requirements/requirements_ansible.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index a6bda0f61c..1cc458550f 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -5,7 +5,7 @@ appdirs==1.4.0 azure==2.0.0rc2 Babel==2.2.0 billiard==3.3.0.16 -boto==2.40.0 +boto==2.43.0 celery==3.1.10 cliff==1.15.0 cmd2==0.6.8 diff --git a/requirements/requirements_ansible.txt b/requirements/requirements_ansible.txt index e62e3e5ead..4d14cd380e 100644 --- a/requirements/requirements_ansible.txt +++ b/requirements/requirements_ansible.txt @@ -3,7 +3,7 @@ apache-libcloud==0.20.1 appdirs==1.4.0 azure==2.0.0rc5 Babel==2.2.0 -boto==2.40.0 +boto==2.43.0 cliff==1.15.0 cmd2==0.6.8 cryptography==1.3.2 From 40fdd3eba57ea4006d59d0e2ec6d91327d862b8d Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Wed, 19 Oct 2016 16:49:49 -0400 Subject: [PATCH 40/77] Sync azure changes to Tower virtual environment --- requirements/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index a6bda0f61c..91cc06ad94 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -2,7 +2,7 @@ git+https://github.com/chrismeyersfsu/ansiconv.git@tower_1.0.0#egg=ansiconv amqp==1.4.5 anyjson==0.3.3 appdirs==1.4.0 -azure==2.0.0rc2 +azure==2.0.0rc5 Babel==2.2.0 billiard==3.3.0.16 boto==2.40.0 @@ -113,7 +113,7 @@ rax-default-network-flags-python-novaclient-ext==0.3.2 rax-scheduled-images-python-novaclient-ext==0.3.1 redis==2.10.3 requests-oauthlib==0.5.0 -requests==2.9.1 +requests==2.11.0 requestsexceptions==1.1.1 shade==1.4.0 simplejson==3.8.1 From af82273948ae36294ca5e0d398592c2a008e2a23 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Thu, 20 Oct 2016 09:37:06 -0400 Subject: [PATCH 41/77] don't double encode channel messages --- awx/main/consumers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/awx/main/consumers.py b/awx/main/consumers.py index fb4bf55fd3..223ff84321 100644 --- a/awx/main/consumers.py +++ b/awx/main/consumers.py @@ -35,5 +35,4 @@ def ws_receive(message): def emit_channel_notification(group, payload): - payload = json.dumps(payload) Group(group).send({"text": json.dumps(payload)}) From bc1561fc672ed3da2a9746d3a5c44b24c48fde2a Mon Sep 17 00:00:00 2001 From: 
jaredevantabor Date: Thu, 20 Oct 2016 09:17:50 -0700 Subject: [PATCH 42/77] Removing front end double parsing of socket messages previously the socket messages were double encoded w/ quotations --- awx/ui/client/src/shared/socket/socket.service.js | 3 --- 1 file changed, 3 deletions(-) diff --git a/awx/ui/client/src/shared/socket/socket.service.js b/awx/ui/client/src/shared/socket/socket.service.js index 056bd09bbf..dfaac460b6 100644 --- a/awx/ui/client/src/shared/socket/socket.service.js +++ b/awx/ui/client/src/shared/socket/socket.service.js @@ -74,9 +74,6 @@ export default // Function called when messages are received on by the UI from // the API over the websocket. This will route each message to // the appropriate controller for the current $state. - e.data = e.data.replace(/\\/g, ''); - e.data = e.data.substr(0, e.data.length-1); - e.data = e.data.substr(1); $log.debug('Received From Server: ' + e.data); var data = JSON.parse(e.data), str = ""; From d84912bb2a36c8cfeb98cab40324c349ceb6a3a9 Mon Sep 17 00:00:00 2001 From: jaredevantabor Date: Thu, 20 Oct 2016 10:02:28 -0700 Subject: [PATCH 43/77] potentially adding token to websocket url --- awx/ui/client/src/shared/socket/socket.service.js | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/awx/ui/client/src/shared/socket/socket.service.js b/awx/ui/client/src/shared/socket/socket.service.js index dfaac460b6..a7d526149a 100644 --- a/awx/ui/client/src/shared/socket/socket.service.js +++ b/awx/ui/client/src/shared/socket/socket.service.js @@ -5,8 +5,8 @@ *************************************************/ import ReconnectingWebSocket from 'reconnectingwebsocket'; export default -['$rootScope', '$location', '$log','$state', '$q', - function ($rootScope, $location, $log, $state, $q) { +['$rootScope', '$location', '$log','$state', '$q', 'Authorization', + function ($rootScope, $location, $log, $state, $q, Authorization) { var needsResubscribing = false, socketPromise = $q.defer(); return { @@ -14,7 +14,8 @@ export default var self = this, host = window.location.host, protocol, - url; + url, + token = Authorization.getToken(); if($location.protocol() === 'http'){ protocol = 'ws'; @@ -26,6 +27,7 @@ export default if (!$rootScope.sessionTimer || ($rootScope.sessionTimer && !$rootScope.sessionTimer.isExpired())) { // We have a valid session token, so attempt socket connection + // url = `${url}?session=${token}`; $log.debug('Socket connecting to: ' + url); self.socket = new ReconnectingWebSocket(url, null, { From 6bd20c643304eef56690f3dc868defefc10279ea Mon Sep 17 00:00:00 2001 From: jaredevantabor Date: Thu, 20 Oct 2016 12:58:45 -0700 Subject: [PATCH 44/77] Adding the token to the socket url --- awx/ui/client/src/shared/socket/socket.service.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/ui/client/src/shared/socket/socket.service.js b/awx/ui/client/src/shared/socket/socket.service.js index a7d526149a..632fa812cb 100644 --- a/awx/ui/client/src/shared/socket/socket.service.js +++ b/awx/ui/client/src/shared/socket/socket.service.js @@ -27,7 +27,7 @@ export default if (!$rootScope.sessionTimer || ($rootScope.sessionTimer && !$rootScope.sessionTimer.isExpired())) { // We have a valid session token, so attempt socket connection - // url = `${url}?session=${token}`; + url = `${url}?token=${token}`; $log.debug('Socket connecting to: ' + url); self.socket = new ReconnectingWebSocket(url, null, { From a3123011ad21f140cef07ea23315fe481aeb634c Mon Sep 17 00:00:00 2001 From: Leigh Johnson Date: Mon, 24 Oct 
2016 13:49:37 -0400 Subject: [PATCH 45/77] Resolves 404 when assigning resources/users to organizations in card view. Sidesteps a bug in the Refresh() utility, where pagination calculations are not made against filtered results. Resolves #750 --- .../linkout/organizations-linkout.route.js | 30 ++++--------------- .../list/organizations-list.controller.js | 27 +++++++---------- .../list/organizations-list.route.js | 5 +--- 3 files changed, 18 insertions(+), 44 deletions(-) diff --git a/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js b/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js index 13d51cc68f..6b028a8702 100644 --- a/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js +++ b/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js @@ -23,10 +23,7 @@ export default [ activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "organizations.edit"; - }, + parent: "organizations.edit", label: "USERS" }, resolve: { @@ -45,10 +42,7 @@ export default [ activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "organizations.edit"; - }, + parent: "organizations.edit", label: "TEAMS" }, resolve: { @@ -67,10 +61,7 @@ export default [ activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "organizations.edit"; - }, + parent: "organizations.edit", label: "INVENTORIES" }, resolve: { @@ -89,10 +80,7 @@ export default [ activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "organizations.edit"; - }, + parent: "organizations.edit", label: "PROJECTS" }, resolve: { @@ -111,10 +99,7 @@ export default [ activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "organizations.edit"; - }, + parent: "organizations.edit", label: "JOB TEMPLATES" }, resolve: { @@ -133,10 +118,7 @@ export default [ activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "organizations.edit"; - }, + parent: "organizations.edit", label: "ADMINS" }, resolve: { diff --git a/awx/ui/client/src/organizations/list/organizations-list.controller.js b/awx/ui/client/src/organizations/list/organizations-list.controller.js index 84510a60dc..878d23544f 100644 --- a/awx/ui/client/src/organizations/list/organizations-list.controller.js +++ b/awx/ui/client/src/organizations/list/organizations-list.controller.js @@ -8,12 +8,12 @@ export default ['$stateParams', '$scope', '$rootScope', '$location', '$log', '$compile', 'Rest', 'PaginateInit', 'SearchInit', 'OrganizationList', 'Alert', 'Prompt', 'ClearScope', 'ProcessErrors', 'GetBasePath', 'Wait', - '$state', 'generateList', 'Refresh', '$filter', + '$state', 'generateList', '$filter', function($stateParams, $scope, $rootScope, $location, $log, $compile, Rest, PaginateInit, SearchInit, OrganizationList, Alert, Prompt, ClearScope, ProcessErrors, GetBasePath, Wait, - $state, generateList, Refresh, $filter) { + $state, generateList, $filter) { ClearScope(); @@ -70,19 +70,14 @@ export default ['$stateParams', '$scope', '$rootScope', '$location', }; $scope.$on("ReloadOrgListView", function() { - var url = GetBasePath('organizations') + 
'?'; - if ($state.$current.self.name === "organizations" || - $state.$current.self.name === "organizations.add") { - $scope.activeCard = null; - } - if ($scope[list.iterator + 'SearchFilters']){ - url = url + _.reduce($scope[list.iterator+'SearchFilters'], (result, filter) => result + '&' + filter.url, ''); - } - Refresh({ - scope: $scope, - set: list.name, - iterator: list.iterator, - url: url + Rest.setUrl($scope.current_url); + Rest.get() + .success((data) => $scope.organizations = data.results) + .error(function(data, status) { + ProcessErrors($scope, data, status, null, { + hdr: 'Error!', + msg: 'Call to ' + defaultUrl + ' failed. DELETE returned status: ' + status + }); }); }); @@ -158,7 +153,7 @@ export default ['$stateParams', '$scope', '$rootScope', '$location', }); // grab the pagination elements, move, destroy list generator elements $('#organization-pagination').appendTo('#OrgCards'); - $('tag-search').appendTo('.OrgCards-search'); + $('#organizations tag-search').appendTo('.OrgCards-search'); $('#organizations-list').remove(); PaginateInit({ diff --git a/awx/ui/client/src/organizations/list/organizations-list.route.js b/awx/ui/client/src/organizations/list/organizations-list.route.js index c99604eecb..c965686317 100644 --- a/awx/ui/client/src/organizations/list/organizations-list.route.js +++ b/awx/ui/client/src/organizations/list/organizations-list.route.js @@ -17,10 +17,7 @@ export default { activityStreamTarget: 'organization' }, ncyBreadcrumb: { - parent: function($scope) { - $scope.$parent.$emit("ReloadOrgListView"); - return "setup"; - }, + parent: "setup", label: "ORGANIZATIONS" } }; From ced3c41df97d588af35c195eefb2326aa003cf84 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Mon, 24 Oct 2016 19:42:53 -0400 Subject: [PATCH 46/77] add auth_token verification to websocket --- awx/main/consumers.py | 44 +++++++++++++++++++++++++++++++++++++++++++ awx/main/routing.py | 1 + 2 files changed, 45 insertions(+) diff --git a/awx/main/consumers.py b/awx/main/consumers.py index 223ff84321..bbd155329a 100644 --- a/awx/main/consumers.py +++ b/awx/main/consumers.py @@ -1,20 +1,64 @@ import json +import urlparse from channels import Group from channels.sessions import channel_session +from django.contrib.auth.models import User +from awx.main.models.organization import AuthToken + def discard_groups(message): if 'groups' in message.channel_session: for group in message.channel_session['groups']: Group(group).discard(message.reply_channel) + +def validate_token(token): + try: + auth_token = AuthToken.objects.get(key=token) + if not auth_token.in_valid_tokens: + return None + except AuthToken.DoesNotExist: + return None + return auth_token + + +def user_from_token(auth_token): + try: + return User.objects.get(pk=auth_token.user_id) + except User.DoesNotExist: + return None + +@channel_session +def ws_connect(message): + token = None + qs = urlparse.parse_qs(message['query_string']) + if 'token' in qs: + if len(qs['token']) > 0: + token = qs['token'].pop() + message.channel_session['token'] = token + + @channel_session def ws_disconnect(message): discard_groups(message) + @channel_session def ws_receive(message): + token = message.channel_session.get('token') + + auth_token = validate_token(token) + if auth_token is None: + message.reply_channel.send({"text": json.dumps({"error": "invalid auth token"})}) + return None + + user = user_from_token(auth_token) + if user is None: + message.reply_channel.send({"text": json.dumps({"error": "no valid user"})}) + return None + raw_data = 
message.content['text'] data = json.loads(raw_data) diff --git a/awx/main/routing.py b/awx/main/routing.py index 67a08ff1bd..0a49f25c6c 100644 --- a/awx/main/routing.py +++ b/awx/main/routing.py @@ -2,6 +2,7 @@ from channels.routing import route channel_routing = [ + route("websocket.connect", "awx.main.consumers.ws_connect", path=r'^/websocket/$'), route("websocket.disconnect", "awx.main.consumers.ws_disconnect", path=r'^/websocket/$'), route("websocket.receive", "awx.main.consumers.ws_receive", path=r'^/websocket/$'), ] From 9f15fc38738436d58d46b4b45a4c9236d91b8bee Mon Sep 17 00:00:00 2001 From: Alan Rominger Date: Tue, 25 Oct 2016 11:53:43 -0400 Subject: [PATCH 47/77] fix spelling of disassociated --- awx/api/templates/api/job_template_label_list.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/api/templates/api/job_template_label_list.md b/awx/api/templates/api/job_template_label_list.md index 76c520eab5..9d503e9c65 100644 --- a/awx/api/templates/api/job_template_label_list.md +++ b/awx/api/templates/api/job_template_label_list.md @@ -2,7 +2,7 @@ Labels not associated with any other resources are deleted. A label can become disassociated with a resource as a result of 3 events. -1. A label is explicitly diassociated with a related job template +1. A label is explicitly disassociated with a related job template 2. A job is deleted with labels 3. A cleanup job deletes a job with labels From b11f5c301584a440fee109fbe17bc0447ded493e Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Tue, 25 Oct 2016 13:40:15 -0400 Subject: [PATCH 48/77] Revert "filter internal User.admin_roles from the /roles API list view" This reverts commit 2a55bfa5741e91c466570e7b4cf9b517d366c610. --- awx/main/models/rbac.py | 6 +----- awx/main/tests/functional/test_rbac_api.py | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/awx/main/models/rbac.py b/awx/main/models/rbac.py index be724069d7..5e040b85a1 100644 --- a/awx/main/models/rbac.py +++ b/awx/main/models/rbac.py @@ -389,11 +389,7 @@ class Role(models.Model): ) ''' % sql_params] ) - - # Do not show roles that are of content_type(User) - # these roles are for internal only user. - user_type = ContentType.objects.get_for_model(User) - return qs.exclude(content_type__pk=user_type.id) + return qs @staticmethod @check_singleton diff --git a/awx/main/tests/functional/test_rbac_api.py b/awx/main/tests/functional/test_rbac_api.py index 0076c59c9e..54dcc8deb5 100644 --- a/awx/main/tests/functional/test_rbac_api.py +++ b/awx/main/tests/functional/test_rbac_api.py @@ -51,6 +51,7 @@ def test_get_roles_list_user(organization, inventory, team, get, user): assert Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).id in role_hash assert organization.admin_role.id in role_hash assert organization.member_role.id in role_hash + assert this_user.admin_role.id in role_hash assert custom_role.id in role_hash assert inventory.admin_role.id not in role_hash From f129c4292968bff13cfb2169aa53ca604cf53af8 Mon Sep 17 00:00:00 2001 From: James Laska Date: Wed, 26 Oct 2016 15:57:46 -0400 Subject: [PATCH 49/77] Update rax.py inventory Resolves https://github.com/ansible/ansible/pull/18204 and continues support for disabling SSL verification. 
Fixes #3709 --- awx/plugins/inventory/rax.py | 50 ++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/awx/plugins/inventory/rax.py b/awx/plugins/inventory/rax.py index f29e0e8ba0..89ff425717 100755 --- a/awx/plugins/inventory/rax.py +++ b/awx/plugins/inventory/rax.py @@ -155,8 +155,6 @@ import ConfigParser from six import iteritems -from ansible.constants import get_config, mk_boolean - try: import json except ImportError: @@ -166,11 +164,12 @@ try: import pyrax from pyrax.utils import slugify except ImportError: - print('pyrax is required for this module') - sys.exit(1) + sys.exit('pyrax is required for this module') from time import time +from ansible.constants import get_config, mk_boolean + NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) @@ -227,12 +226,21 @@ def _list_into_cache(regions): prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') - networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', - 'public', islist=True) try: - ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', - 'RAX_ACCESS_IP_VERSION', 4, - islist=True)) + # Ansible 2.3+ + networks = get_config(p, 'rax', 'access_network', + 'RAX_ACCESS_NETWORK', 'public', value_type='list') + except TypeError: + # Ansible 2.2.x and below + networks = get_config(p, 'rax', 'access_network', + 'RAX_ACCESS_NETWORK', 'public', islist=True) + try: + try: + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, value_type='list')) + except TypeError: + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, islist=True)) except: ip_versions = [4] else: @@ -406,10 +414,9 @@ def setup(): if os.path.isfile(default_creds_file): creds_file = default_creds_file elif not keyring_username: - sys.stderr.write('No value in environment variable %s and/or no ' - 'credentials file at %s\n' - % ('RAX_CREDS_FILE', default_creds_file)) - sys.exit(1) + sys.exit('No value in environment variable %s and/or no ' + 'credentials file at %s' + % ('RAX_CREDS_FILE', default_creds_file)) identity_type = pyrax.get_setting('identity_type') pyrax.set_setting('identity_type', identity_type or 'rackspace') @@ -422,23 +429,28 @@ def setup(): else: pyrax.set_credential_file(creds_file, region=region) except Exception as e: - sys.stderr.write("%s: %s\n" % (e, e.message)) - sys.exit(1) + sys.exit("%s: %s" % (e, e.message)) regions = [] if region: regions.append(region) else: - region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', - islist=True) + try: + # Ansible 2.3+ + region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', + value_type='list') + except TypeError: + # Ansible 2.2.x and below + region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', + islist=True) + for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions break elif region not in pyrax.regions: - sys.stderr.write('Unsupported region %s' % region) - sys.exit(1) + sys.exit('Unsupported region %s' % region) elif region not in regions: regions.append(region) From 7b68dc6d33969e162c2976dd0e118a04705b8335 Mon Sep 17 00:00:00 2001 From: Jake McDermott Date: Fri, 28 Oct 2016 16:42:44 -0400 Subject: [PATCH 50/77] Merge pull request #3808 from jakemcdermott/noissue_fix_tower_welcome_msg interpret backslash escapes when displaying url in welcome message From aa119e0102f54bcd8c141d2f995630ea065fbcaa Mon Sep 17 00:00:00 2001 From: Aaron Tan Date: Mon, 
31 Oct 2016 12:09:14 -0400 Subject: [PATCH 51/77] Enforce wfj to ignore canceled jobs. --- awx/main/scheduler/dag_workflow.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/awx/main/scheduler/dag_workflow.py b/awx/main/scheduler/dag_workflow.py index 6d2b349365..f2f471381d 100644 --- a/awx/main/scheduler/dag_workflow.py +++ b/awx/main/scheduler/dag_workflow.py @@ -60,6 +60,8 @@ class WorkflowDAG(SimpleDAG): # Job is about to run or is running. Hold our horses and wait for # the job to finish. We can't proceed down the graph path until we # have the job result. + elif job.status == 'canceled': + continue elif job.status not in ['failed', 'error', 'successful']: return False elif job.status in ['failed', 'error']: From 0a634d7baf137dca58fdc36954a6c24b1bb3fb55 Mon Sep 17 00:00:00 2001 From: Aaron Tan Date: Mon, 31 Oct 2016 14:44:15 -0400 Subject: [PATCH 52/77] Enforce wfj to ignore errored jobs. --- awx/main/scheduler/dag_workflow.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/awx/main/scheduler/dag_workflow.py b/awx/main/scheduler/dag_workflow.py index f2f471381d..584c1258f3 100644 --- a/awx/main/scheduler/dag_workflow.py +++ b/awx/main/scheduler/dag_workflow.py @@ -33,14 +33,14 @@ class WorkflowDAG(SimpleDAG): # Job is about to run or is running. Hold our horses and wait for # the job to finish. We can't proceed down the graph path until we # have the job result. - elif job.status not in ['failed', 'error', 'successful']: + elif job.status not in ['failed', 'successful']: continue - elif job.status in ['failed', 'error']: + elif job.status == 'failed': children_failed = self.get_dependencies(obj, 'failure_nodes') children_always = self.get_dependencies(obj, 'always_nodes') children_all = children_failed + children_always nodes.extend(children_all) - elif job.status in ['successful']: + elif job.status == 'successful': children_success = self.get_dependencies(obj, 'success_nodes') children_always = self.get_dependencies(obj, 'always_nodes') children_all = children_success + children_always @@ -60,16 +60,16 @@ class WorkflowDAG(SimpleDAG): # Job is about to run or is running. Hold our horses and wait for # the job to finish. We can't proceed down the graph path until we # have the job result. 
- elif job.status == 'canceled': + elif job.status in ['canceled', 'error']: continue - elif job.status not in ['failed', 'error', 'successful']: + elif job.status not in ['failed', 'successful']: return False - elif job.status in ['failed', 'error']: + elif job.status == 'failed': children_failed = self.get_dependencies(obj, 'failure_nodes') children_always = self.get_dependencies(obj, 'always_nodes') children_all = children_failed + children_always nodes.extend(children_all) - elif job.status in ['successful']: + elif job.status == 'successful': children_success = self.get_dependencies(obj, 'success_nodes') children_always = self.get_dependencies(obj, 'always_nodes') children_all = children_success + children_always From 23af9d6b729bdbf762cd0be22d40eab610fc218a Mon Sep 17 00:00:00 2001 From: AlanCoding Date: Mon, 31 Oct 2016 14:54:45 -0400 Subject: [PATCH 53/77] fix bug blocking jobs from running --- awx/main/models/jobs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index a15e291d78..3377968eba 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -1210,7 +1210,7 @@ class JobEvent(CreatedModifiedModel): if isinstance(invocation, dict) and verbosity == 0 and 'module_args' in invocation: event_data['res']['invocation']['module_args'] = '' self.event_data = event_data - update_fields.add('event_data') + updated_fields.add('event_data') if self.event == 'playbook_on_stats': try: failures_dict = event_data.get('failures', {}) From d2ba01251f975a2ca90cab68ca7deccf36cbf954 Mon Sep 17 00:00:00 2001 From: Leigh Johnson Date: Mon, 31 Oct 2016 15:59:48 -0400 Subject: [PATCH 54/77] @NoIssue Fixes incorrect i18n conflict resolutions. --- awx/ui/client/src/controllers/Projects.js | 2 +- awx/ui/client/src/forms/Credentials.js | 1 - awx/ui/client/src/forms/Projects.js | 2 +- awx/ui/client/src/inventories/main.js | 12 +++-- .../src/inventories/manage/hosts/main.js | 48 +------------------ 5 files changed, 10 insertions(+), 55 deletions(-) diff --git a/awx/ui/client/src/controllers/Projects.js b/awx/ui/client/src/controllers/Projects.js index 380f472a97..e26fcd5ecf 100644 --- a/awx/ui/client/src/controllers/Projects.js +++ b/awx/ui/client/src/controllers/Projects.js @@ -267,7 +267,7 @@ ProjectsList.$inject = ['$scope', '$rootScope', '$location', '$log', '$statePara export function ProjectsAdd($scope, $rootScope, $compile, $location, $log, $stateParams, GenerateForm, ProjectsForm, Rest, Alert, ProcessErrors, - GetBasePath, GetProjectPath, GetChoices, Wait, $state, CreateSelect2) { + GetBasePath, GetProjectPath, GetChoices, Wait, $state, CreateSelect2, i18n) { var form = ProjectsForm(), base = $location.path().replace(/^\//, '').split('/')[0], diff --git a/awx/ui/client/src/forms/Credentials.js b/awx/ui/client/src/forms/Credentials.js index 8f82f75d55..40ecf2da91 100644 --- a/awx/ui/client/src/forms/Credentials.js +++ b/awx/ui/client/src/forms/Credentials.js @@ -51,7 +51,6 @@ export default list: 'OrganizationList', sourceModel: 'organization', sourceField: 'name', - ngClick: 'lookUpOrganization()', awPopOver: i18n._("
If no organization is given, the credential can only be used by the user that creates the credential. Organization admins and system administrators can assign an organization so that roles for the credential can be assigned to users and teams in that organization.
"), dataTitle: i18n._('Organization') + ' ', dataPlacement: 'bottom', diff --git a/awx/ui/client/src/forms/Projects.js b/awx/ui/client/src/forms/Projects.js index 73fcea4e25..ca9d24c6e7 100644 --- a/awx/ui/client/src/forms/Projects.js +++ b/awx/ui/client/src/forms/Projects.js @@ -181,7 +181,7 @@ angular.module('ProjectFormDefinition', ['SchedulesListDefinition']) }] }, scm_update_cache_timeout: { - label: i18n._('Cache Timeout (seconds)'), + label: i18n._(`Cache Timeout (seconds)`), id: 'scm-cache-timeout', type: 'number', integer: true, diff --git a/awx/ui/client/src/inventories/main.js b/awx/ui/client/src/inventories/main.js index e8cf115725..b904e74bd9 100644 --- a/awx/ui/client/src/inventories/main.js +++ b/awx/ui/client/src/inventories/main.js @@ -182,11 +182,13 @@ angular.module('inventory', [ edit: 'HostEditController' }, resolve: { - host: ['$stateParams', 'HostManageService', function($stateParams, HostManageService) { - return HostManageService.get({ id: $stateParams.host_id }).then(function(res) { - return res.data.results[0]; - }); - }] + edit: { + host: ['$stateParams', 'HostManageService', function($stateParams, HostManageService) { + return HostManageService.get({ id: $stateParams.host_id }).then(function(res) { + return res.data.results[0]; + }); + }] + } }, ncyBreadcrumb: { label: "{{host.name}}", diff --git a/awx/ui/client/src/inventories/manage/hosts/main.js b/awx/ui/client/src/inventories/manage/hosts/main.js index 8fb523fcc6..6dd1f334a7 100644 --- a/awx/ui/client/src/inventories/manage/hosts/main.js +++ b/awx/ui/client/src/inventories/manage/hosts/main.js @@ -10,50 +10,4 @@ import HostsEditController from './hosts-edit.controller'; export default angular.module('manageHosts', []) .controller('HostsAddController', HostsAddController) - .controller('HostEditController', HostsEditController) - .config(['$stateProvider', 'stateDefinitionsProvider', - function($stateProvider, stateDefinitionsProvider) { - let addHost, editHost, - stateDefinitions = stateDefinitionsProvider.$get(); - addHost = { - name: 'inventoryManage.addHost', - url: '/add-host', - lazyLoad: () => stateDefinitions.generateTree({ - url: '/add-host', - name: 'inventoryManage.addHost', - modes: ['add'], - form: 'HostForm', - controllers: { - add: 'HostsAddController' - } - }) - }; - - editHost = { - name: 'inventoryManage.editHost', - url: '/edit-host/:host_id', - ncyBreadcrumb: { - label: '{{host.name}}', - }, - lazyLoad: () => stateDefinitions.generateTree({ - url: '/edit-host/:host_id', - name: 'inventoryManage.editHost', - modes: ['edit'], - form: 'HostForm', - controllers: { - edit: 'HostEditController' - }, - resolve: { - host: ['$stateParams', 'HostManageService', function($stateParams, HostManageService) { - return HostManageService.get({ id: $stateParams.host_id }).then(function(res) { - return res.data.results[0]; - }); - }] - } - }) - }; - - $stateProvider.state(addHost); - $stateProvider.state(editHost); - } - ]); + .controller('HostEditController', HostsEditController); From b281d475f794b0e4be6bf86c4d2888a135839dc8 Mon Sep 17 00:00:00 2001 From: Leigh Johnson Date: Mon, 31 Oct 2016 18:18:03 -0400 Subject: [PATCH 55/77] Resolves lookup issues blocking job template creation flow @NoIssue --- .../add/job-templates-add.controller.js | 12 +++++------- awx/ui/client/src/shared/Utilities.js | 2 -- awx/ui/client/src/shared/stateDefinitions.factory.js | 3 ++- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/awx/ui/client/src/job-templates/add/job-templates-add.controller.js 
b/awx/ui/client/src/job-templates/add/job-templates-add.controller.js index 5f64274f0f..1e7d79d167 100644 --- a/awx/ui/client/src/job-templates/add/job-templates-add.controller.js +++ b/awx/ui/client/src/job-templates/add/job-templates-add.controller.js @@ -54,7 +54,10 @@ default_val: false }); CallbackHelpInit({ scope: $scope }); - ParseTypeChange({ scope: $scope, field_id: 'job_template_variables', onChange: callback }); + SurveyControllerInit({ + scope: $scope, + parent_scope: $scope + }); } callback = function() { @@ -62,10 +65,6 @@ $scope[form.name + '_form'].$setDirty(); }; - SurveyControllerInit({ - scope: $scope, - parent_scope: $scope - }); var selectCount = 0; @@ -73,6 +72,7 @@ $scope.removeChoicesReady(); } $scope.removeChoicesReady = $scope.$on('choicesReadyVerbosity', function () { + ParseTypeChange({ scope: $scope, field_id: 'job_template_variables', onChange: callback }); selectCount++; if (selectCount === 3) { var verbosity; @@ -120,8 +120,6 @@ element:'#job_template_verbosity', multiple: false }); - - $scope.$emit('lookUpInitialize'); } }); diff --git a/awx/ui/client/src/shared/Utilities.js b/awx/ui/client/src/shared/Utilities.js index f4517f47cf..5a4d0ce4dc 100644 --- a/awx/ui/client/src/shared/Utilities.js +++ b/awx/ui/client/src/shared/Utilities.js @@ -517,7 +517,6 @@ angular.module('Utilities', ['RestServices', 'Utilities', 'sanitizeFilter']) function($rootScope) { return function(directive) { - /* @todo re-enable var docw, doch, spinnyw, spinnyh; if (directive === 'start' && !$rootScope.waiting) { $rootScope.waiting = true; @@ -538,7 +537,6 @@ angular.module('Utilities', ['RestServices', 'Utilities', 'sanitizeFilter']) $rootScope.waiting = false; }); } - */ }; } ]) diff --git a/awx/ui/client/src/shared/stateDefinitions.factory.js b/awx/ui/client/src/shared/stateDefinitions.factory.js index b65ecfebf5..46e976036f 100644 --- a/awx/ui/client/src/shared/stateDefinitions.factory.js +++ b/awx/ui/client/src/shared/stateDefinitions.factory.js @@ -363,7 +363,7 @@ export default ['$injector', '$stateExtender', '$log', function($injector, $stat function buildFieldDefinition(field) { let state = $stateExtender.buildDefinition({ searchPrefix: field.sourceModel, - squashSearchUrl: true, + //squashSearchUrl: true, @issue enable name: `${formStateDefinition.name}.${field.sourceModel}`, url: `/${field.sourceModel}`, // a lookup field's basePath takes precedence over generic list definition's basePath, if supplied @@ -391,6 +391,7 @@ export default ['$injector', '$stateExtender', '$log', function($injector, $stat }, resolve: { ListDefinition: [field.list, function(list) { + list.iterator = field.sourceModel; return list; }], Dataset: ['ListDefinition', 'QuerySet', '$stateParams', 'GetBasePath', '$interpolate', '$rootScope', '$state', From c4560806363eb1ad8125ed89bd694c48cd229299 Mon Sep 17 00:00:00 2001 From: jlmitch5 Date: Tue, 1 Nov 2016 10:42:45 -0400 Subject: [PATCH 56/77] add Authorization service to socket service DI --- awx/ui/client/src/shared/socket/socket.service.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/awx/ui/client/src/shared/socket/socket.service.js b/awx/ui/client/src/shared/socket/socket.service.js index c2eea137a3..1af67cb6ef 100644 --- a/awx/ui/client/src/shared/socket/socket.service.js +++ b/awx/ui/client/src/shared/socket/socket.service.js @@ -5,8 +5,8 @@ *************************************************/ import ReconnectingWebSocket from 'reconnectingwebsocket'; export default -['$rootScope', '$location', '$log','$state', '$q', 
'i18n', - function ($rootScope, $location, $log, $state, $q, i18n) { +['$rootScope', '$location', '$log','$state', '$q', 'i18n', 'Authorization', + function ($rootScope, $location, $log, $state, $q, i18n, Authorization) { var needsResubscribing = false, socketPromise = $q.defer(); return { From a1de3535436a551843a86487c7920e46fde89d60 Mon Sep 17 00:00:00 2001 From: Matthew Jones Date: Tue, 1 Nov 2016 14:39:35 -0400 Subject: [PATCH 57/77] Shifting migrations after 3.0.3 merge --- awx/conf/migrations/0002_v310_copy_tower_settings.py | 4 ++-- ...{0033_v310_add_workflows.py => 0034_v310_add_workflows.py} | 2 +- ..._modify_ha_instance.py => 0035_v310_modify_ha_instance.py} | 2 +- ...{0035_v310_jobevent_uuid.py => 0036_v310_jobevent_uuid.py} | 2 +- ...e_tower_settings.py => 0037_v310_remove_tower_settings.py} | 2 +- ...ow_simultaneous.py => 0038_v310_job_allow_simultaneous.py} | 2 +- ...low_rbac_prompts.py => 0039_v310_workflow_rbac_prompts.py} | 2 +- .../{0039_v310_channelgroup.py => 0040_v310_channelgroup.py} | 2 +- .../{0040_v310_artifacts.py => 0041_v310_artifacts.py} | 2 +- .../{0041_v310_job_timeout.py => 0042_v310_job_timeout.py} | 2 +- ...{0042_v310_executionnode.py => 0043_v310_executionnode.py} | 2 +- .../{0043_v310_scm_revision.py => 0044_v310_scm_revision.py} | 2 +- ..._playbook_files.py => 0045_v310_project_playbook_files.py} | 2 +- ...v310_job_event_stdout.py => 0046_v310_job_event_stdout.py} | 2 +- 14 files changed, 15 insertions(+), 15 deletions(-) rename awx/main/migrations/{0033_v310_add_workflows.py => 0034_v310_add_workflows.py} (98%) rename awx/main/migrations/{0034_v310_modify_ha_instance.py => 0035_v310_modify_ha_instance.py} (91%) rename awx/main/migrations/{0035_v310_jobevent_uuid.py => 0036_v310_jobevent_uuid.py} (88%) rename awx/main/migrations/{0036_v310_remove_tower_settings.py => 0037_v310_remove_tower_settings.py} (90%) rename awx/main/migrations/{0037_v310_job_allow_simultaneous.py => 0038_v310_job_allow_simultaneous.py} (87%) rename awx/main/migrations/{0038_v310_workflow_rbac_prompts.py => 0039_v310_workflow_rbac_prompts.py} (98%) rename awx/main/migrations/{0039_v310_channelgroup.py => 0040_v310_channelgroup.py} (91%) rename awx/main/migrations/{0040_v310_artifacts.py => 0041_v310_artifacts.py} (93%) rename awx/main/migrations/{0041_v310_job_timeout.py => 0042_v310_job_timeout.py} (96%) rename awx/main/migrations/{0042_v310_executionnode.py => 0043_v310_executionnode.py} (90%) rename awx/main/migrations/{0043_v310_scm_revision.py => 0044_v310_scm_revision.py} (95%) rename awx/main/migrations/{0044_v310_project_playbook_files.py => 0045_v310_project_playbook_files.py} (91%) rename awx/main/migrations/{0045_v310_job_event_stdout.py => 0046_v310_job_event_stdout.py} (98%) diff --git a/awx/conf/migrations/0002_v310_copy_tower_settings.py b/awx/conf/migrations/0002_v310_copy_tower_settings.py index 2ab255debb..2fbdd60477 100644 --- a/awx/conf/migrations/0002_v310_copy_tower_settings.py +++ b/awx/conf/migrations/0002_v310_copy_tower_settings.py @@ -69,11 +69,11 @@ class Migration(migrations.Migration): dependencies = [ ('conf', '0001_initial'), - ('main', '0035_v310_jobevent_uuid'), + ('main', '0036_v310_jobevent_uuid'), ] run_before = [ - ('main', '0036_v310_remove_tower_settings'), + ('main', '0037_v310_remove_tower_settings'), ] operations = [ diff --git a/awx/main/migrations/0033_v310_add_workflows.py b/awx/main/migrations/0034_v310_add_workflows.py similarity index 98% rename from awx/main/migrations/0033_v310_add_workflows.py rename to 
awx/main/migrations/0034_v310_add_workflows.py index 1ca0462edf..ceaf091cad 100644 --- a/awx/main/migrations/0033_v310_add_workflows.py +++ b/awx/main/migrations/0034_v310_add_workflows.py @@ -11,7 +11,7 @@ import awx.main.fields class Migration(migrations.Migration): dependencies = [ - ('main', '0032_v302_credential_permissions_update'), + ('main', '0033_v303_v245_host_variable_fix'), ] operations = [ diff --git a/awx/main/migrations/0034_v310_modify_ha_instance.py b/awx/main/migrations/0035_v310_modify_ha_instance.py similarity index 91% rename from awx/main/migrations/0034_v310_modify_ha_instance.py rename to awx/main/migrations/0035_v310_modify_ha_instance.py index ad245ca4b4..fa58ec094c 100644 --- a/awx/main/migrations/0034_v310_modify_ha_instance.py +++ b/awx/main/migrations/0035_v310_modify_ha_instance.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0033_v310_add_workflows'), + ('main', '0034_v310_add_workflows'), ] operations = [ diff --git a/awx/main/migrations/0035_v310_jobevent_uuid.py b/awx/main/migrations/0036_v310_jobevent_uuid.py similarity index 88% rename from awx/main/migrations/0035_v310_jobevent_uuid.py rename to awx/main/migrations/0036_v310_jobevent_uuid.py index bd667792e7..b097c2d1f1 100644 --- a/awx/main/migrations/0035_v310_jobevent_uuid.py +++ b/awx/main/migrations/0036_v310_jobevent_uuid.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0034_v310_modify_ha_instance'), + ('main', '0035_v310_modify_ha_instance'), ] operations = [ diff --git a/awx/main/migrations/0036_v310_remove_tower_settings.py b/awx/main/migrations/0037_v310_remove_tower_settings.py similarity index 90% rename from awx/main/migrations/0036_v310_remove_tower_settings.py rename to awx/main/migrations/0037_v310_remove_tower_settings.py index a5a75b12ca..00ee17f098 100644 --- a/awx/main/migrations/0036_v310_remove_tower_settings.py +++ b/awx/main/migrations/0037_v310_remove_tower_settings.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0035_v310_jobevent_uuid'), + ('main', '0036_v310_jobevent_uuid'), ] # These settings are now in the separate awx.conf app. 
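Every rename in this patch follows the same lockstep rule: when a migration module is renumbered, each dependencies entry naming the old module must be updated in the same commit, or Django cannot resolve the migration graph. For the file just shown, the shifted module ends up shaped like this (operations elided; only the dependency line changes):

    # awx/main/migrations/0037_v310_remove_tower_settings.py
    # (renamed from 0036_v310_remove_tower_settings.py)
    from django.db import migrations


    class Migration(migrations.Migration):

        # Parent must now be referenced by its new number
        # (was 0035_v310_jobevent_uuid).
        dependencies = [
            ('main', '0036_v310_jobevent_uuid'),
        ]

        # ... original operations unchanged ...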
diff --git a/awx/main/migrations/0037_v310_job_allow_simultaneous.py b/awx/main/migrations/0038_v310_job_allow_simultaneous.py similarity index 87% rename from awx/main/migrations/0037_v310_job_allow_simultaneous.py rename to awx/main/migrations/0038_v310_job_allow_simultaneous.py index 8a2e89df94..1ec3412fb4 100644 --- a/awx/main/migrations/0037_v310_job_allow_simultaneous.py +++ b/awx/main/migrations/0038_v310_job_allow_simultaneous.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0036_v310_remove_tower_settings'), + ('main', '0037_v310_remove_tower_settings'), ] operations = [ diff --git a/awx/main/migrations/0038_v310_workflow_rbac_prompts.py b/awx/main/migrations/0039_v310_workflow_rbac_prompts.py similarity index 98% rename from awx/main/migrations/0038_v310_workflow_rbac_prompts.py rename to awx/main/migrations/0039_v310_workflow_rbac_prompts.py index 6fa55f8469..35db9c5575 100644 --- a/awx/main/migrations/0038_v310_workflow_rbac_prompts.py +++ b/awx/main/migrations/0039_v310_workflow_rbac_prompts.py @@ -10,7 +10,7 @@ import awx.main.fields class Migration(migrations.Migration): dependencies = [ - ('main', '0037_v310_job_allow_simultaneous'), + ('main', '0038_v310_job_allow_simultaneous'), ] operations = [ diff --git a/awx/main/migrations/0039_v310_channelgroup.py b/awx/main/migrations/0040_v310_channelgroup.py similarity index 91% rename from awx/main/migrations/0039_v310_channelgroup.py rename to awx/main/migrations/0040_v310_channelgroup.py index a150f4cf29..51f2016926 100644 --- a/awx/main/migrations/0039_v310_channelgroup.py +++ b/awx/main/migrations/0040_v310_channelgroup.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0038_v310_workflow_rbac_prompts'), + ('main', '0039_v310_workflow_rbac_prompts'), ] operations = [ diff --git a/awx/main/migrations/0040_v310_artifacts.py b/awx/main/migrations/0041_v310_artifacts.py similarity index 93% rename from awx/main/migrations/0040_v310_artifacts.py rename to awx/main/migrations/0041_v310_artifacts.py index af1c66f485..f54cff411b 100644 --- a/awx/main/migrations/0040_v310_artifacts.py +++ b/awx/main/migrations/0041_v310_artifacts.py @@ -8,7 +8,7 @@ import jsonfield.fields class Migration(migrations.Migration): dependencies = [ - ('main', '0039_v310_channelgroup'), + ('main', '0040_v310_channelgroup'), ] operations = [ diff --git a/awx/main/migrations/0041_v310_job_timeout.py b/awx/main/migrations/0042_v310_job_timeout.py similarity index 96% rename from awx/main/migrations/0041_v310_job_timeout.py rename to awx/main/migrations/0042_v310_job_timeout.py index 447ed6d38b..4d49e5841a 100644 --- a/awx/main/migrations/0041_v310_job_timeout.py +++ b/awx/main/migrations/0042_v310_job_timeout.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0040_v310_artifacts'), + ('main', '0041_v310_artifacts'), ] operations = [ diff --git a/awx/main/migrations/0042_v310_executionnode.py b/awx/main/migrations/0043_v310_executionnode.py similarity index 90% rename from awx/main/migrations/0042_v310_executionnode.py rename to awx/main/migrations/0043_v310_executionnode.py index f696d4b95d..bab47ad032 100644 --- a/awx/main/migrations/0042_v310_executionnode.py +++ b/awx/main/migrations/0043_v310_executionnode.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - 
('main', '0041_v310_job_timeout'), + ('main', '0042_v310_job_timeout'), ] operations = [ diff --git a/awx/main/migrations/0043_v310_scm_revision.py b/awx/main/migrations/0044_v310_scm_revision.py similarity index 95% rename from awx/main/migrations/0043_v310_scm_revision.py rename to awx/main/migrations/0044_v310_scm_revision.py index 08db6be47e..40ee1f8596 100644 --- a/awx/main/migrations/0043_v310_scm_revision.py +++ b/awx/main/migrations/0044_v310_scm_revision.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0042_v310_executionnode'), + ('main', '0043_v310_executionnode'), ] operations = [ diff --git a/awx/main/migrations/0044_v310_project_playbook_files.py b/awx/main/migrations/0045_v310_project_playbook_files.py similarity index 91% rename from awx/main/migrations/0044_v310_project_playbook_files.py rename to awx/main/migrations/0045_v310_project_playbook_files.py index cdf059faec..77c7bfc8b7 100644 --- a/awx/main/migrations/0044_v310_project_playbook_files.py +++ b/awx/main/migrations/0045_v310_project_playbook_files.py @@ -8,7 +8,7 @@ import jsonfield.fields class Migration(migrations.Migration): dependencies = [ - ('main', '0043_v310_scm_revision'), + ('main', '0044_v310_scm_revision'), ] operations = [ diff --git a/awx/main/migrations/0045_v310_job_event_stdout.py b/awx/main/migrations/0046_v310_job_event_stdout.py similarity index 98% rename from awx/main/migrations/0045_v310_job_event_stdout.py rename to awx/main/migrations/0046_v310_job_event_stdout.py index e3325ddb6b..7ff2ed8ade 100644 --- a/awx/main/migrations/0045_v310_job_event_stdout.py +++ b/awx/main/migrations/0046_v310_job_event_stdout.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0044_v310_project_playbook_files'), + ('main', '0045_v310_project_playbook_files'), ] operations = [ From 96355854a8ad16c0bfd16cb33afc1c7b0606478e Mon Sep 17 00:00:00 2001 From: Matthew Jones Date: Tue, 1 Nov 2016 14:39:55 -0400 Subject: [PATCH 58/77] Bump django version for CVE https://www.djangoproject.com/weblog/2016/nov/01/security-releases/ --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 5e4425c8df..fb885a8842 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -12,7 +12,7 @@ cliff==1.15.0 cmd2==0.6.8 d2to1==0.2.11 # TODO: Still needed? 
defusedxml==0.4.1 -Django==1.8.15 +Django==1.8.16 debtcollector==1.2.0 decorator==4.0.6 django-auth-ldap==1.2.6 From 555f0bb90f5228b026de8ea2245789f987d50dd3 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 13 Oct 2016 09:42:29 -0400 Subject: [PATCH 59/77] project and jobs running correctly --- awx/main/models/projects.py | 2 +- awx/main/scheduler/__init__.py | 516 +++++++++++------- awx/main/scheduler/dependency_graph.py | 108 ++++ awx/main/scheduler/partial.py | 109 ++++ awx/main/scheduler/tasks.py | 39 +- awx/main/tests/functional/test_partial.py | 65 +++ awx/main/tests/unit/scheduler/__init__.py | 0 .../unit/scheduler/test_dependency_graph.py | 121 ++++ .../test_scheduler_project_update.py | 194 +++++++ awx/settings/defaults.py | 8 + 10 files changed, 948 insertions(+), 214 deletions(-) create mode 100644 awx/main/scheduler/dependency_graph.py create mode 100644 awx/main/scheduler/partial.py create mode 100644 awx/main/tests/functional/test_partial.py create mode 100644 awx/main/tests/unit/scheduler/__init__.py create mode 100644 awx/main/tests/unit/scheduler/test_dependency_graph.py create mode 100644 awx/main/tests/unit/scheduler/test_scheduler_project_update.py diff --git a/awx/main/models/projects.py b/awx/main/models/projects.py index 1c693a6398..4c20e01e08 100644 --- a/awx/main/models/projects.py +++ b/awx/main/models/projects.py @@ -275,7 +275,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin): def _get_unified_job_field_names(cls): return ['name', 'description', 'local_path', 'scm_type', 'scm_url', 'scm_branch', 'scm_clean', 'scm_delete_on_update', - 'credential', 'schedule', 'timeout'] + 'credential', 'schedule', 'timeout', 'launch_type',] def save(self, *args, **kwargs): new_instance = not bool(self.pk) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 6ecdc09b37..0711528c56 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -2,77 +2,107 @@ # All Rights Reserved # Python -import datetime +from datetime import timedelta import logging +from sets import Set # Django from django.conf import settings from django.db import transaction +from django.db.utils import DatabaseError # AWX from awx.main.models import * # noqa -from awx.main.utils import get_system_task_capacity -from awx.main.scheduler.dag_simple import SimpleDAG +#from awx.main.scheduler.dag_simple import SimpleDAG from awx.main.scheduler.dag_workflow import WorkflowDAG +from awx.main.scheduler.dependency_graph import DependencyGraph +from awx.main.scheduler.partial import ( + JobDict, + ProjectUpdateDict, + InventoryUpdateDict, + ProjectUpdateLatestDict, +) + # Celery from celery.task.control import inspect logger = logging.getLogger('awx.main.scheduler') -def get_tasks(): - """Fetch all Tower tasks that are relevant to the task management - system. - """ - RELEVANT_JOBS = ('pending', 'waiting', 'running') - # TODO: Replace this when we can grab all objects in a sane way. 
- graph_jobs = [j for j in Job.objects.filter(status__in=RELEVANT_JOBS)] - graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(status__in=RELEVANT_JOBS)] - graph_inventory_updates = [iu for iu in - InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS)] - graph_project_updates = [pu for pu in - ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)] - graph_system_jobs = [sj for sj in - SystemJob.objects.filter(status__in=RELEVANT_JOBS)] - graph_workflow_jobs = [wf for wf in - WorkflowJob.objects.filter(status__in=RELEVANT_JOBS)] - all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + - graph_project_updates + graph_system_jobs + - graph_workflow_jobs, - key=lambda task: task.created) - return all_actions +class Scheduler(): + def __init__(self): + self.graph = DependencyGraph() + self.capacity_total = 200 + self.capacity_used = 0 -def get_running_workflow_jobs(): - graph_workflow_jobs = [wf for wf in - WorkflowJob.objects.filter(status='running')] - return graph_workflow_jobs + def _get_tasks_with_status(self, status_list): -def spawn_workflow_graph_jobs(workflow_jobs): - # TODO: Consider using transaction.atomic - for workflow_job in workflow_jobs: - dag = WorkflowDAG(workflow_job) - spawn_nodes = dag.bfs_nodes_to_run() - for spawn_node in spawn_nodes: - kv = spawn_node.get_job_kwargs() - job = spawn_node.unified_job_template.create_unified_job(**kv) - spawn_node.job = job - spawn_node.save() - can_start = job.signal_start(**kv) - if not can_start: - job.status = 'failed' - job.job_explanation = "Workflow job could not start because it was not in the right state or required manual credentials" - job.save(update_fields=['status', 'job_explanation']) - job.websocket_emit_status("failed") + graph_jobs = JobDict.filter_partial(status=status_list) + ''' + graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(**kv)] + graph_inventory_updates = [iu for iu in + InventoryUpdate.objects.filter(**kv)] + ''' + graph_inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) + graph_project_updates = ProjectUpdateDict.filter_partial(status=status_list) + ''' + graph_system_jobs = [sj for sj in + SystemJob.objects.filter(**kv)] + graph_workflow_jobs = [wf for wf in + WorkflowJob.objects.filter(**kv)] + all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + + graph_project_updates + graph_system_jobs + + graph_workflow_jobs, + key=lambda task: task.created) + ''' + all_actions = sorted(graph_jobs + graph_project_updates + graph_inventory_updates, + key=lambda task: task['created']) + return all_actions - # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? 
- #emit_websocket_notification('/socket.io/jobs', '', dict(id=)) + def get_tasks(self): + RELEVANT_JOBS = ('pending', 'waiting', 'running') + return self._get_tasks_with_status(RELEVANT_JOBS) -# See comment in tasks.py::RunWorkflowJob::run() -def process_finished_workflow_jobs(workflow_jobs): - for workflow_job in workflow_jobs: - dag = WorkflowDAG(workflow_job) - if dag.is_workflow_done(): - with transaction.atomic(): + # TODO: Consider a database query for this logic + def get_latest_project_update_tasks(self, all_sorted_tasks): + project_ids = Set() + for task in all_sorted_tasks: + if type(task) == JobDict: + project_ids.add(task['project_id']) + + return ProjectUpdateLatestDict.filter_partial(list(project_ids)) + + def get_running_workflow_jobs(self): + graph_workflow_jobs = [wf for wf in + WorkflowJob.objects.filter(status='running')] + return graph_workflow_jobs + + def spawn_workflow_graph_jobs(self, workflow_jobs): + # TODO: Consider using transaction.atomic + for workflow_job in workflow_jobs: + dag = WorkflowDAG(workflow_job) + spawn_nodes = dag.bfs_nodes_to_run() + for spawn_node in spawn_nodes: + kv = spawn_node.get_job_kwargs() + job = spawn_node.unified_job_template.create_unified_job(**kv) + spawn_node.job = job + spawn_node.save() + can_start = job.signal_start(**kv) + if not can_start: + job.status = 'failed' + job.job_explanation = "Workflow job could not start because it was not in the right state or required manual credentials" + job.save(update_fields=['status', 'job_explanation']) + job.websocket_emit_status("failed") + + # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? + #emit_websocket_notification('/socket.io/jobs', '', dict(id=)) + + # See comment in tasks.py::RunWorkflowJob::run() + def process_finished_workflow_jobs(self, workflow_jobs): + for workflow_job in workflow_jobs: + dag = WorkflowDAG(workflow_job) + if dag.is_workflow_done(): + # TODO: detect if wfj failed if workflow_job._has_failed(): workflow_job.status = 'failed' else: @@ -80,178 +110,248 @@ def process_finished_workflow_jobs(workflow_jobs): workflow_job.save() workflow_job.websocket_emit_status(workflow_job.status) -def rebuild_graph(): - """Regenerate the task graph by refreshing known tasks from Tower, purging - orphaned running tasks, and creating dependencies for new tasks before - generating directed edge relationships between those tasks. - """ - ''' - # Sanity check: Only do this on the primary node. - if Instance.objects.my_role() == 'secondary': - return None - ''' + def get_activate_tasks(self): + inspector = inspect() + if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): + active_task_queues = inspector.active() + else: + logger.warn("Ignoring celery task inspector") + active_task_queues = None - inspector = inspect() - if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): - active_task_queues = inspector.active() - else: - logger.warn("Ignoring celery task inspector") - active_task_queues = None + active_tasks = [] + if active_task_queues is not None: + for queue in active_task_queues: + active_tasks += [at['id'] for at in active_task_queues[queue]] + else: + logger.error("Could not communicate with celery!") + # TODO: Something needs to be done here to signal to the system + # as a whole that celery appears to be down. 
+ if not hasattr(settings, 'CELERY_UNIT_TEST'): + return None - all_sorted_tasks = get_tasks() - if not len(all_sorted_tasks): - return None + return active_tasks - active_tasks = [] - if active_task_queues is not None: - for queue in active_task_queues: - active_tasks += [at['id'] for at in active_task_queues[queue]] - else: - logger.error("Could not communicate with celery!") - # TODO: Something needs to be done here to signal to the system - # as a whole that celery appears to be down. - if not hasattr(settings, 'CELERY_UNIT_TEST'): - return None + def start_task(self, task, dependent_tasks=[]): + from awx.main.tasks import handle_work_error, handle_work_success - running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks) - running_celery_tasks = filter(lambda t: type(t) != WorkflowJob, running_tasks) - waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks) - new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks) + #print("start_task() <%s, %s> with deps %s" % (task.get_job_type_str(), task['id'], dependent_tasks)) + + # TODO: spawn inventory and project updates + task_actual = { + 'type':task.get_job_type_str(), + 'id': task['id'], + } + dependencies = [{'type': t.get_job_type_str(), 'id': t['id']} for t in dependent_tasks] + + error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies) + success_handler = handle_work_success.s(task_actual=task_actual) + + job_obj = task.get_full() + job_obj.status = 'waiting' + job_obj.save() - # Check running tasks and make sure they are active in celery - logger.debug("Active celery tasks: " + str(active_tasks)) - for task in list(running_celery_tasks): - if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): - # NOTE: Pull status again and make sure it didn't finish in - # the meantime? - task.status = 'failed' - task.job_explanation += ' '.join(( - 'Task was marked as running in Tower but was not present in', - 'Celery, so it has been marked as failed.', - )) - task.save() - task.websocket_emit_status("failed") - running_tasks.pop(running_tasks.index(task)) - logger.error("Task %s appears orphaned... marking as failed" % task) + #print("For real, starting job <%s, %s>" % (type(job_obj), job_obj.id)) + start_status = job_obj.start(error_callback=error_handler, success_callback=success_handler) + if not start_status: + job_obj.status = 'failed' + if job_obj.job_explanation: + job_obj.job_explanation += ' ' + job_obj.job_explanation += 'Task failed pre-start check.' + job_obj.save() + # TODO: run error handler to fail sub-tasks and send notifications + return - # Create and process dependencies for new tasks - for task in new_tasks: - logger.debug("Checking dependencies for: %s" % str(task)) - try: - task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks) - except Exception, e: - logger.error("Failed processing dependencies for {}: {}".format(task, e)) - task.status = 'failed' - task.job_explanation += 'Task failed to generate dependencies: {}'.format(e) - task.save() - task.websocket_emit_status("failed") - continue - logger.debug("New dependencies: %s" % str(task_dependencies)) - for dep in task_dependencies: - # We recalculate the created time for the moment to ensure the - # dependencies are always sorted in the right order relative to - # the dependent task. 
- time_delt = len(task_dependencies) - task_dependencies.index(dep) - dep.created = task.created - datetime.timedelta(seconds=1 + time_delt) - dep.status = 'waiting' - dep.save() - waiting_tasks.insert(waiting_tasks.index(task), dep) - if not hasattr(settings, 'UNIT_TEST_IGNORE_TASK_WAIT'): - task.status = 'waiting' - task.save() + self.consume_capacity(task) - # Rebuild graph - graph = SimpleDAG() - for task in running_tasks: - graph.add_node(task) - for wait_task in waiting_tasks[:50]: - node_dependencies = [] - for node in graph: - if wait_task.is_blocked_by(node['node_object']): - node_dependencies.append(node['node_object']) - graph.add_node(wait_task) - for dependency in node_dependencies: - graph.add_edge(wait_task, dependency) - if settings.DEBUG: - graph.generate_graphviz_plot() - return graph + def process_runnable_tasks(self, runnable_tasks): + for i, task in enumerate(runnable_tasks): + # TODO: maybe batch process new tasks. + # Processing a new task individually seems to be expensive + self.graph.add_job(task) -def process_graph(graph, task_capacity): - """Given a task dependency graph, start and manage tasks given their - priority and weight. - """ - from awx.main.tasks import handle_work_error, handle_work_success + def create_project_update(self, task): + dep = Project.objects.get(id=task['project_id']).create_project_update(launch_type='dependency') - leaf_nodes = graph.get_leaf_nodes() - running_nodes = filter(lambda x: x['node_object'].status == 'running', leaf_nodes) - running_impact = sum([t['node_object'].task_impact for t in running_nodes]) - ready_nodes = filter(lambda x: x['node_object'].status != 'running', leaf_nodes) - remaining_volume = task_capacity - running_impact - logger.info('Running Nodes: %s; Capacity: %s; Running Impact: %s; ' - 'Remaining Capacity: %s' % - (str(running_nodes), str(task_capacity), - str(running_impact), str(remaining_volume))) - logger.info("Ready Nodes: %s" % str(ready_nodes)) - for task_node in ready_nodes: - node_obj = task_node['node_object'] - # NOTE: This could be used to pass metadata through the task system - # node_args = task_node['metadata'] - impact = node_obj.task_impact - if impact <= remaining_volume or running_impact == 0: - node_dependencies = graph.get_dependents(node_obj) - # Allow other tasks to continue if a job fails, even if they are - # other jobs. + # TODO: Consider using milliseconds or microseconds + # Project created 1 seconds behind + dep.created = task['created'] - timedelta(seconds=1) + dep.status = 'waiting' + dep.save() - node_type = graph.get_node_type(node_obj) - if node_type == 'job': - # clear dependencies because a job can block (not necessarily - # depend) on other jobs that share the same job template + project_task = ProjectUpdateDict.get_partial(dep.id) + #waiting_tasks.insert(waiting_tasks.index(task), dep) + + return project_task + + def generate_dependencies(self, task): + dependencies = [] + # TODO: What if the project is null ? 
+ if type(task) is JobDict: + if task['project__scm_update_on_launch'] is True and \ + self.graph.should_update_related_project(task): + project_task = self.create_project_update(task) + dependencies.append(project_task) + # Inventory created 2 seconds behind + return dependencies + + def process_latest_project_updates(self, latest_project_updates): + for task in latest_project_updates: + self.graph.add_latest_project_update(task) + + def process_dependencies(self, dependent_task, dependency_tasks): + for task in dependency_tasks: + # ProjectUpdate or InventoryUpdate may be blocked by another of + # the same type. + if not self.graph.is_job_blocked(task): + self.graph.add_job(task) + if not self.would_exceed_capacity(task): + #print("process_dependencies() going to run project update <%s, %s>" % (task['id'], task['project_id'])) + self.start_task(task, [dependent_task]) + else: + self.graph.add_job(task) + + def process_pending_tasks(self, pending_tasks): + for task in pending_tasks: + + if not self.graph.is_job_blocked(task): + #print("process_pending_tasks() generating deps for job <%s, %s, %s>" % (task['id'], task['project_id'], task.model)) + dependencies = self.generate_dependencies(task) + self.process_dependencies(task, dependencies) + + # Spawning deps might have blocked us + if not self.graph.is_job_blocked(task): + self.graph.add_job(task) + if not self.would_exceed_capacity(task): + #print("Starting the original task <%s, %s>" % (task.get_job_type_str(), task['id'])) + self.start_task(task) + else: + self.graph.add_job(task) + + # Stop processing tasks if we know we are out of capacity + if self.get_remaining_capacity() <= 0: + return + + def fail_inconsistent_running_jobs(self, active_tasks, all_sorted_tasks): + for i, task in enumerate(all_sorted_tasks): + if task['status'] != 'running': + continue + + if (task['celery_task_id'] not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): + # NOTE: Pull status again and make sure it didn't finish in + # the meantime? + # TODO: try catch the getting of the job. The job COULD have been deleted + task_obj = task.get_full() + task_obj.status = 'failed' + task_obj.job_explanation += ' '.join(( + 'Task was marked as running in Tower but was not present in', + 'Celery, so it has been marked as failed.', + )) + task_obj.save() + task_obj.websocket_emit_status("failed") + + all_sorted_tasks.pop(i) + logger.error("Task %s appears orphaned... 
marking as failed" % task) + + def process_celery_tasks(self, active_tasks, all_sorted_tasks): + + ''' + Rectify tower db <-> celery inconsistent view of jobs state + ''' + # Check running tasks and make sure they are active in celery + logger.debug("Active celery tasks: " + str(active_tasks)) + all_sorted_tasks = self.fail_inconsistent_running_jobs(active_tasks, + all_sorted_tasks) + + def calculate_capacity_used(self, tasks): + self.capacity_used = 0 + for t in tasks: + self.capacity_used += t.task_impact() + + def would_exceed_capacity(self, task): + return (task.task_impact() + self.capacity_used > self.capacity_total) + + def consume_capacity(self, task): + self.capacity_used += task.task_impact() + #print("Capacity used %s vs total %s" % (self.capacity_used, self.capacity_total)) + + def get_remaining_capacity(self): + return (self.capacity_total - self.capacity_used) + + def process_tasks(self, all_sorted_tasks): + + # TODO: Process new tasks + running_tasks = filter(lambda t: t['status'] == 'running', all_sorted_tasks) + runnable_tasks = filter(lambda t: t['status'] in ['waiting', 'running'], all_sorted_tasks) + + self.calculate_capacity_used(running_tasks) + + self.process_runnable_tasks(runnable_tasks) + + pending_tasks = filter(lambda t: t['status'] == 'pending', all_sorted_tasks) + self.process_pending_tasks(pending_tasks) + + + ''' + def do_graph_things(): + # Rebuild graph + graph = SimpleDAG() + for task in running_tasks: + graph.add_node(task) + #for wait_task in waiting_tasks[:50]: + for wait_task in waiting_tasks: node_dependencies = [] + for node in graph: + if wait_task.is_blocked_by(node['node_object']): + node_dependencies.append(node['node_object']) + graph.add_node(wait_task) + for dependency in node_dependencies: + graph.add_edge(wait_task, dependency) + if settings.DEBUG: + graph.generate_graphviz_plot() + return graph + ''' + #return do_graph_things() - # Make the workflow_job look like it's started by setting status to - # running, but don't make a celery Task for it. - # Introduce jobs from the workflow so they are candidates to run. - # Call process_graph() again to allow choosing for run, the - # created candidate jobs. - elif node_type == 'workflow_job': - node_obj.start() - spawn_workflow_graph_jobs([node_obj]) - return process_graph(graph, task_capacity) + def _schedule(self): + all_sorted_tasks = self.get_tasks() + if len(all_sorted_tasks) > 0: + #self.process_celery_tasks(active_tasks, all_sorted_tasks) - dependent_nodes = [{'type': graph.get_node_type(node_obj), 'id': node_obj.id}] + \ - [{'type': graph.get_node_type(n['node_object']), - 'id': n['node_object'].id} for n in node_dependencies] - error_handler = handle_work_error.s(subtasks=dependent_nodes) - success_handler = handle_work_success.s(task_actual={'type': graph.get_node_type(node_obj), - 'id': node_obj.id}) - with transaction.atomic(): - start_status = node_obj.start(error_callback=error_handler, success_callback=success_handler) - if not start_status: - node_obj.status = 'failed' - if node_obj.job_explanation: - node_obj.job_explanation += ' ' - node_obj.job_explanation += 'Task failed pre-start check.' 
- node_obj.save() - continue - remaining_volume -= impact - running_impact += impact - logger.info('Started Node: %s (capacity hit: %s) ' - 'Remaining Capacity: %s' % - (str(node_obj), str(impact), str(remaining_volume))) + latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks) + self.process_latest_project_updates(latest_project_updates) -def schedule(): - with transaction.atomic(): - # Lock - Instance.objects.select_for_update().all()[0] + self.process_tasks(all_sorted_tasks) - task_capacity = get_system_task_capacity() + #print("Finished schedule()") - workflow_jobs = get_running_workflow_jobs() - process_finished_workflow_jobs(workflow_jobs) - spawn_workflow_graph_jobs(workflow_jobs) + def schedule(self): + with transaction.atomic(): + #t1 = datetime.now() + # Lock + try: + Instance.objects.select_for_update(nowait=True).all()[0] + except DatabaseError: + return - graph = rebuild_graph() - if graph: - process_graph(graph, task_capacity) + #workflow_jobs = get_running_workflow_jobs() + #process_finished_workflow_jobs(workflow_jobs) + #spawn_workflow_graph_jobs(workflow_jobs) + + ''' + Get tasks known by celery + ''' + ''' + active_tasks = self.get_activate_tasks() + # Communication with celery failed :(, return + if active_tasks is None: + return None + ''' + self._schedule() # Unlock, due to transaction ending + #t2 = datetime.now() + #t_diff = t2 - t1 + #print("schedule() time %s" % (t_diff.total_seconds())) + + + diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py new file mode 100644 index 0000000000..5ecea91385 --- /dev/null +++ b/awx/main/scheduler/dependency_graph.py @@ -0,0 +1,108 @@ +from datetime import timedelta +from django.utils.timezone import now as tz_now + +from awx.main.scheduler.partial import JobDict, ProjectUpdateDict, InventoryUpdateDict +class DependencyGraph(object): + PROJECT_UPDATES = 'project_updates' + INVENTORY_UPDATES = 'inventory_updates' + JOB_TEMPLATE_JOBS = 'job_template_jobs' + LATEST_PROJECT_UPDATES = 'latest_project_updates' + + def __init__(self, *args, **kwargs): + self.data = {} + # project_id -> True / False + self.data[self.PROJECT_UPDATES] = {} + # inventory_id -> True / False + self.data[self.INVENTORY_UPDATES] = {} + # job_template_id -> True / False + self.data[self.JOB_TEMPLATE_JOBS] = {} + + # project_id -> latest ProjectUpdateDict + self.data[self.LATEST_PROJECT_UPDATES] = {} + + def add_latest_project_update(self, job): + self.data[self.LATEST_PROJECT_UPDATES][job['project_id']] = job + + def get_now(self): + return tz_now() + + ''' + JobDict + + Presume that job is related to a project that is update on launch + ''' + def should_update_related_project(self, job): + now = self.get_now() + latest_project_update = self.data[self.LATEST_PROJECT_UPDATES].get(job['project_id'], None) + if not latest_project_update: + return True + + # TODO: Other finished, failed cases? i.e. error ? + if latest_project_update['status'] == 'failed': + return True + + ''' + This is a bit of fuzzy logic. + If the latest project update has a created time == job_created_time-1 + then consider the project update found. This is so we don't enter an infinite loop + of updating the project when cache timeout is 0. 
+ ''' + if latest_project_update['project__scm_update_cache_timeout'] == 0 and \ + latest_project_update['launch_type'] == 'dependency' and \ + latest_project_update['created'] == job['created'] - timedelta(seconds=1): + return False + + ''' + Normal, expected, cache timeout logic + ''' + timeout_seconds = timedelta(seconds=latest_project_update['project__scm_update_cache_timeout']) + if (latest_project_update['finished'] + timeout_seconds) < now: + return True + + return False + + def add_project_update(self, job): + self.data[self.PROJECT_UPDATES][job['project_id']] = False + + def add_inventory_update(self, job): + self.data[self.INVENTORY_UPDATES][job['inventory_id']] = False + + def add_job_template_job(self, job): + self.data[self.JOB_TEMPLATE_JOBS][job['job_template_id']] = False + + + def can_project_update_run(self, job): + return self.data[self.PROJECT_UPDATES].get(job['project_id'], True) + + def can_inventory_update_run(self, job): + return self.data[self.INVENTORY_UPDATES].get(job['inventory_id'], True) + + def can_job_run(self, job): + if self.can_project_update_run(job) is True and \ + self.can_inventory_update_run(job) is True: + if job['allow_simultaneous'] is False: + return self.data[self.JOB_TEMPLATE_JOBS].get(job['job_template_id'], True) + else: + return True + return False + + def is_job_blocked(self, job): + if type(job) is ProjectUpdateDict: + return not self.can_project_update_run(job) + elif type(job) is InventoryUpdateDict: + return not self.can_inventory_update_run(job) + elif type(job) is JobDict: + return not self.can_job_run(job) + + def add_job(self, job): + if type(job) is ProjectUpdateDict: + self.add_project_update(job) + elif type(job) is InventoryUpdateDict: + self.add_inventory_update(job) + elif type(job) is JobDict: + self.add_job_template_job(job) + + def add_jobs(self, jobs): + for j in jobs: + self.add_job(j) + diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py new file mode 100644 index 0000000000..16c6597f99 --- /dev/null +++ b/awx/main/scheduler/partial.py @@ -0,0 +1,109 @@ + +# AWX +from awx.main.models import ( + Job, + ProjectUpdate, + InventoryUpdate, +) + +class PartialModelDict(object): + FIELDS = () + model = None + data = None + + def __init__(self, data): + if type(data) is not dict: + raise RuntimeError("Expected data to be of type dict not %s" % type(data)) + self.data = data + + def __getitem__(self, index): + return self.data[index] + + def __setitem__(self, key, value): + self.data[key] = value + + def get(self, key, **kwargs): + return self.data.get(key, **kwargs) + + def get_full(self): + return self.model.objects.get(id=self.data['id']) + + def refresh_partial(self): + return self.__class__(self.model.objects.filter(id=self.data['id']).values(*self.__class__.get_db_values())[0]) + + @classmethod + def get_partial(cls, id): + return cls(cls.model.objects.filter(id=id).values(*cls.get_db_values())[0]) + + @classmethod + def get_db_values(cls): + return cls.FIELDS + + @classmethod + def filter_partial(cls, status=[]): + kv = { + 'status__in': status + } + return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + + def get_job_type_str(self): + raise RuntimeError("Inherit and implement me") + + def task_impact(self): + raise RuntimeError("Inherit and implement me") + +class JobDict(PartialModelDict): + FIELDS = ( + 'id', 'status', 'job_template_id', 'inventory_id', 'project_id', + 'launch_type', 'limit', 'allow_simultaneous', 'created', + 'job_type', 'celery_task_id', 
'project__scm_update_on_launch',
+        'forks',
+    )
+    model = Job
+
+    def get_job_type_str(self):
+        return 'job'
+
+    def task_impact(self):
+        return (5 if self.data['forks'] == 0 else self.data['forks']) * 10
+
+class ProjectUpdateDict(PartialModelDict):
+    FIELDS = (
+        'id', 'status', 'project_id', 'created', 'celery_task_id', 'launch_type', 'project__scm_update_cache_timeout', 'project__scm_update_on_launch',
+    )
+    model = ProjectUpdate
+
+    def get_job_type_str(self):
+        return 'project_update'
+
+    def task_impact(self):
+        return 10
+
+class ProjectUpdateLatestDict(ProjectUpdateDict):
+    FIELDS = (
+        'id', 'status', 'project_id', 'created', 'finished', 'project__scm_update_cache_timeout', 'launch_type', 'project__scm_update_on_launch',
+    )
+    model = ProjectUpdate
+
+    @classmethod
+    def filter_partial(cls, project_ids):
+        # TODO: This can surely be made more efficient
+        results = []
+        for project_id in project_ids:
+            qs = cls.model.objects.filter(project_id=project_id, status__in=['waiting', 'successful', 'failed']).order_by('-finished')
+            if qs.count() > 0:
+                results.append(cls(cls.model.objects.filter(id=qs[0].id).values(*cls.get_db_values())[0]))
+        return results
+
+class InventoryUpdateDict(PartialModelDict):
+    FIELDS = (
+        'id', 'status', 'created', 'celery_task_id',
+    )
+    model = InventoryUpdate
+
+    def get_job_type_str(self):
+        return 'inventory_update'
+
+    def task_impact(self):
+        return 20
+
diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py
index 343bdd1546..ef0334e316 100644
--- a/awx/main/scheduler/tasks.py
+++ b/awx/main/scheduler/tasks.py
@@ -1,14 +1,17 @@
 # Python
 import logging
-import time
+
+# Django
+from django.db import transaction
+from django.db.utils import DatabaseError
 
 # Celery
 from celery import task
 
 # AWX
-from awx.main.models import UnifiedJob
-from awx.main.scheduler import schedule
+from awx.main.models import Instance
+from awx.main.scheduler import Scheduler
 
 logger = logging.getLogger('awx.main.scheduler')
 
@@ -18,6 +21,7 @@ logger = logging.getLogger('awx.main.scheduler')
 
 @task
 def run_job_launch(job_id):
+    '''
     # Wait for job to exist.
     # The job is created in a transaction then the message is created, but
     # the transaction may not have completed.
@@ -45,11 +49,13 @@ def run_job_launch(job_id):
     # TODO: while not loop should call get wrapped in a try except
     #job = UnifiedJob.objects.get(id=job_id)
+    '''
 
-    schedule()
+    Scheduler().schedule()
 
 @task
 def run_job_complete(job_id):
+    '''
     # TODO: use list of finished status from jobs.py or unified_jobs.py
     finished_status = ['successful', 'error', 'failed', 'completed']
     q = UnifiedJob.objects.filter(id=job_id)
@@ -74,6 +80,29 @@ def run_job_complete(job_id):
         logger.error("Expected job status '%s' to be one of '%s' while processing 'job_complete' message."
% (job.status, finished_status)) return retry += 1 + ''' - schedule() + Scheduler().schedule() + +@task +def run_scheduler(): + Scheduler().schedule() + +@task +def run_fail_inconsistent_running_jobs(): + return + print("run_fail_inconsistent_running_jobs() running") + with transaction.atomic(): + # Lock + try: + Instance.objects.select_for_update(nowait=True).all()[0] + scheduler = Scheduler() + active_tasks = scheduler.get_activate_tasks() + if active_tasks is None: + return None + + all_sorted_tasks = scheduler.get_tasks() + scheduler.process_celery_tasks(active_tasks, all_sorted_tasks) + except DatabaseError: + return diff --git a/awx/main/tests/functional/test_partial.py b/awx/main/tests/functional/test_partial.py new file mode 100644 index 0000000000..69ad71c4df --- /dev/null +++ b/awx/main/tests/functional/test_partial.py @@ -0,0 +1,65 @@ + +# Python +import pytest +from django.utils.timezone import now as tz_now +from datetime import timedelta + +# AWX +from awx.main.models import ( + Project, + ProjectUpdate, +) +from awx.main.scheduler.partial import ( + ProjectUpdateLatestDict, +) + + +@pytest.fixture +def failed_project_update(): + p = Project.objects.create(name="proj1") + pu = ProjectUpdate.objects.create(project=p, status='failed', finished=tz_now() - timedelta(seconds=20)) + + return (p, pu) + +@pytest.fixture +def successful_project_update(): + p = Project.objects.create(name="proj1") + pu = ProjectUpdate.objects.create(project=p, status='successful', finished=tz_now() - timedelta(seconds=20)) + + return (p, pu) + +# Failed project updates newer than successful ones +@pytest.fixture +def multiple_project_updates(): + p = Project.objects.create(name="proj1") + + epoch = tz_now() + + successful_pus = [ProjectUpdate.objects.create(project=p, + status='successful', + finished=epoch - timedelta(seconds=100 + i)) for i in xrange(0, 5)] + failed_pus = [ProjectUpdate.objects.create(project=p, + status='failed', + finished=epoch - timedelta(seconds=100 - len(successful_pus) + i)) for i in xrange(0, 5)] + return (p, failed_pus, successful_pus) + +class TestProjectUpdateLatestDictDict(): + @pytest.mark.django_db + class TestFilterPartial(): + def test_project_update_successful(self, successful_project_update): + (project, project_update) = successful_project_update + + tasks = ProjectUpdateLatestDict.filter_partial(project_ids=[project.id]) + + assert 1 == len(tasks) + assert project_update.id == tasks[0]['id'] + + def test_correct_project_update(self, multiple_project_updates): + (project, failed_pus, successful_pus) = multiple_project_updates + + tasks = ProjectUpdateLatestDict.filter_partial(project_ids=[project.id]) + + assert 1 == len(tasks) + assert failed_pus[0].id == tasks[0]['id'] + + diff --git a/awx/main/tests/unit/scheduler/__init__.py b/awx/main/tests/unit/scheduler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/unit/scheduler/test_dependency_graph.py b/awx/main/tests/unit/scheduler/test_dependency_graph.py new file mode 100644 index 0000000000..081f175027 --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_dependency_graph.py @@ -0,0 +1,121 @@ + +# Python +import pytest +from datetime import timedelta + +# Django +from django.utils.timezone import now as tz_now + +# AWX +from awx.main.scheduler.dependency_graph import DependencyGraph +from awx.main.scheduler.partial import ProjectUpdateDict + +@pytest.fixture +def graph(): + return DependencyGraph() + +@pytest.fixture +def job(): + return dict(project_id=1) + 
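The fixtures that follow exercise should_update_related_project() from dependency_graph.py above. Stripped of the class plumbing, the decision being tested reduces to roughly this sketch (field names as in ProjectUpdateLatestDict; latest is the newest project update for the job's project, or None):

    from datetime import timedelta

    def needs_project_update(job, latest, now):
        if latest is None or latest['status'] == 'failed':
            return True
        # An update backdated exactly 1s before the job is the dependency the
        # job itself spawned; treating it as current keeps a cache timeout of
        # zero from triggering an endless update loop.
        if (latest['project__scm_update_cache_timeout'] == 0 and
                latest['launch_type'] == 'dependency' and
                latest['created'] == job['created'] - timedelta(seconds=1)):
            return False
        timeout = timedelta(seconds=latest['project__scm_update_cache_timeout'])
        return latest['finished'] + timeout < now
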
+@pytest.fixture
+def unsuccessful_last_project(graph, job):
+    pu = ProjectUpdateDict(dict(id=1,
+                                project__scm_update_cache_timeout=999999,
+                                project_id=1,
+                                status='failed',
+                                created='3',
+                                finished='3',))
+
+    graph.add_latest_project_update(pu)
+
+    return graph
+
+@pytest.fixture
+def last_dependent_project(graph):
+    now = tz_now()
+
+    job = {
+        'project_id': 1,
+        'created': now,
+    }
+    pu = ProjectUpdateDict(dict(id=1, project_id=1, status='waiting',
+                                project__scm_update_cache_timeout=0,
+                                launch_type='dependency',
+                                created=now - timedelta(seconds=1),))
+
+    graph.add_latest_project_update(pu)
+
+    return (graph, job)
+
+@pytest.fixture
+def timedout_project_update(graph, job):
+    now = tz_now()
+
+    job = {
+        'project_id': 1,
+        'created': now,
+    }
+    pu = ProjectUpdateDict(dict(id=1, project_id=1, status='successful',
+                                project__scm_update_cache_timeout=10,
+                                launch_type='dependency',
+                                created=now - timedelta(seconds=100),
+                                finished=now - timedelta(seconds=11),))
+
+    graph.add_latest_project_update(pu)
+
+    return (graph, job)
+
+@pytest.fixture
+def not_timedout_project_update(graph, job):
+    now = tz_now()
+
+    job = {
+        'project_id': 1,
+        'created': now,
+    }
+    pu = ProjectUpdateDict(dict(id=1, project_id=1, status='successful',
+                                project__scm_update_cache_timeout=3600,
+                                launch_type='dependency',
+                                created=now - timedelta(seconds=100),
+                                finished=now - timedelta(seconds=11),))
+
+    graph.add_latest_project_update(pu)
+
+    return (graph, job)
+
+
+class TestShouldUpdateRelatedProject():
+
+    def test_no_project_updates(self, graph, job):
+        actual = graph.should_update_related_project(job)
+
+        assert True is actual
+
+    def test_timedout_project_update(self, timedout_project_update):
+        (graph, job) = timedout_project_update
+
+        actual = graph.should_update_related_project(job)
+
+        assert True is actual
+
+    def test_not_timedout_project_update(self, not_timedout_project_update):
+        (graph, job) = not_timedout_project_update
+
+        actual = graph.should_update_related_project(job)
+
+        assert False is actual
+
+    def test_unsuccessful_last_project(self, unsuccessful_last_project, job):
+        graph = unsuccessful_last_project
+
+        actual = graph.should_update_related_project(job)
+
+        assert True is actual
+
+    def test_last_dependent_project(self, last_dependent_project):
+        (graph, job) = last_dependent_project
+
+        actual = graph.should_update_related_project(job)
+        assert False is actual
+
diff --git a/awx/main/tests/unit/scheduler/test_scheduler_project_update.py b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
new file mode 100644
index 0000000000..54add63d51
--- /dev/null
+++ b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
@@ -0,0 +1,194 @@
+
+# Python
+import pytest
+from datetime import timedelta
+
+# Django
+from django.utils.timezone import now as tz_now
+
+# awx
+from awx.main.scheduler.partial import (
+    JobDict,
+    ProjectUpdateDict,
+)
+from awx.main.scheduler import Scheduler
+
+# TODO: wherever get_latest_project_update_task() is stubbed and returns a
+# ProjectUpdateDict. We should instead return a ProjectUpdateLatestDict()
+# For now, this is ok since the fields don't deviate that much.
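For context on the TODO above: ProjectUpdateDict and ProjectUpdateLatestDict are both thin PartialModelDict wrappers that fetch only the columns listed in FIELDS via .values(), rather than hydrating full ORM rows, which is what keeps the scheduler's per-cycle queries cheap. The stubbed fixtures below stand in for dicts shaped roughly like this sketch produces (the field tuple mirrors ProjectUpdateDict.FIELDS):

    from awx.main.models import ProjectUpdate

    fields = ('id', 'status', 'project_id', 'created', 'celery_task_id',
              'launch_type', 'project__scm_update_cache_timeout',
              'project__scm_update_on_launch')
    # what ProjectUpdateDict.filter_partial() wraps: plain dicts, not model instances
    partials = [dict(row) for row in
                ProjectUpdate.objects.filter(status__in=['pending']).values(*fields)]
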
+ +@pytest.fixture +def epoch(): + return tz_now() + + +@pytest.fixture +def scheduler_factory(mocker, epoch): + def fn(tasks=[], latest_project_updates=[], create_project_update=None): + sched = Scheduler() + sched.capacity_total = 999999999 + + sched.graph.get_now = lambda: epoch + + mocker.patch.object(sched, 'get_tasks', return_value=tasks) + mocker.patch.object(sched, 'get_latest_project_update_tasks', return_value=latest_project_updates) + mocker.patch.object(sched, 'create_project_update', return_value=create_project_update) + mocker.patch.object(sched, 'start_task') + return sched + return fn + +@pytest.fixture +def project_update_factory(epoch): + def fn(): + return ProjectUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=100), + 'project_id': 1, + 'project__scm_update_cache_timeout': 0, + 'celery_task_id': '', + 'launch_type': 'dependency', + 'project__scm_update_on_launch': True, + }) + return fn + +@pytest.fixture +def pending_project_update(project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'pending' + return project_update + +@pytest.fixture +def waiting_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'waiting' + return project_update + +@pytest.fixture +def pending_job(epoch): + return JobDict({ + 'id': 1, + 'status': 'pending', + 'job_template_id': 1, + 'project_id': 1, + 'inventory_id': 1, + 'launch_type': 'manual', + 'allow_simultaneous': False, + 'created': epoch - timedelta(seconds=99), + 'celery_task_id': '', + 'project__scm_update_on_launch': True, + 'forks': 5 + }) + +@pytest.fixture +def running_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'running' + return project_update + +@pytest.fixture +def successful_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['finished'] = epoch - timedelta(seconds=90) + project_update['status'] = 'successful' + return project_update + +@pytest.fixture +def successful_project_update_cache_expired(epoch, project_update_factory): + project_update = project_update_factory() + + project_update['status'] = 'successful' + project_update['created'] = epoch - timedelta(seconds=120) + project_update['finished'] = epoch - timedelta(seconds=110) + project_update['project__scm_update_cache_timeout'] = 1 + return project_update + +@pytest.fixture +def failed_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['finished'] = epoch - timedelta(seconds=90) + project_update['status'] = 'failed' + return project_update + +class TestStartProjectUpdate(): + def test(self, scheduler_factory, pending_project_update): + scheduler = scheduler_factory(tasks=[pending_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_project_update) + assert scheduler.create_project_update.call_count == 0 + + ''' + Explicit project update should always run. They should not use cache logic. 
+ ''' + def test_cache_oblivious(self, scheduler_factory, successful_project_update, pending_project_update): + scheduler = scheduler_factory(tasks=[pending_project_update], + latest_project_updates=[successful_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_project_update) + assert scheduler.create_project_update.call_count == 0 + + +class TestCreateDependentProjectUpdate(): + + def test(self, scheduler_factory, pending_job, waiting_project_update): + scheduler = scheduler_factory(tasks=[pending_job], + create_project_update=waiting_project_update) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) + + def test_cache_hit(self, scheduler_factory, pending_job, successful_project_update): + scheduler = scheduler_factory(tasks=[successful_project_update, pending_job], + latest_project_updates=[successful_project_update]) + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_job) + + def test_cache_miss(self, scheduler_factory, pending_job, successful_project_update_cache_expired, waiting_project_update): + scheduler = scheduler_factory(tasks=[successful_project_update_cache_expired, pending_job], + latest_project_updates=[successful_project_update_cache_expired], + create_project_update=waiting_project_update) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) + + def test_last_update_failed(self, scheduler_factory, pending_job, failed_project_update, waiting_project_update): + scheduler = scheduler_factory(tasks=[failed_project_update, pending_job], + latest_project_updates=[failed_project_update], + create_project_update=waiting_project_update) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) + + +class TestJobBlockedOnProjectUpdate(): + def test(self, scheduler_factory, pending_job, waiting_project_update): + scheduler = scheduler_factory(tasks=[waiting_project_update, pending_job], + latest_project_updates=[waiting_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() + assert scheduler.create_project_update.call_count == 0 + + def test_project_running(self, scheduler_factory, pending_job, running_project_update): + scheduler = scheduler_factory(tasks=[running_project_update, pending_job]) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() + assert scheduler.create_project_update.call_count == 0 + +class TestProjectUpdateBlocked(): + def test(self, scheduler_factory, running_project_update, pending_project_update): + scheduler = scheduler_factory(tasks=[running_project_update, pending_project_update], + latest_project_updates=[running_project_update]) + scheduler._schedule() + + scheduler.start_task.assert_not_called() + assert scheduler.create_project_update.call_count == 0 + diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 9c6ed0950b..a5c7975920 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -392,6 +392,14 @@ CELERYBEAT_SCHEDULE = { 'task': 'awx.main.tasks.cluster_node_heartbeat', 'schedule': timedelta(seconds=60) }, + 'task_scheduler': { + 'task': 'awx.main.scheduler.tasks.run_scheduler', + 'schedule': timedelta(seconds=10) + }, + 'task_fail_inconsistent_running_jobs': { + 'task': 'awx.main.scheduler.tasks.run_fail_inconsistent_running_jobs', + 'schedule': timedelta(seconds=30) + }, } # Django Caching Configuration From 
306562cd670c38b9fb1ae07e941c417ac471c046 Mon Sep 17 00:00:00 2001
From: Chris Meyers
Date: Thu, 20 Oct 2016 15:05:02 -0400
Subject: [PATCH 60/77] inventory updates running correctly

---
 awx/main/models/inventory.py | 2 +-
 awx/main/models/unified_jobs.py | 10 +-
 awx/main/scheduler/__init__.py | 77 ++++--
 awx/main/scheduler/dependency_graph.py | 82 +++++-
 awx/main/scheduler/partial.py | 66 ++++-
 awx/main/scheduler/tasks.py | 57 -----
 awx/main/tests/functional/test_partial.py | 111 +++++---
 awx/main/tests/unit/scheduler/conftest.py | 238 ++++++++++++++++++
 .../test_scheduler_inventory_update.py | 85 +++++++
 .../unit/scheduler/test_scheduler_job.py | 66 +++++
 .../test_scheduler_project_update.py | 123 +--------
 11 files changed, 681 insertions(+), 236 deletions(-)
 create mode 100644 awx/main/tests/unit/scheduler/conftest.py
 create mode 100644 awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py
 create mode 100644 awx/main/tests/unit/scheduler/test_scheduler_job.py

diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py
index 6fb3e2f992..c77868759e 100644
--- a/awx/main/models/inventory.py
+++ b/awx/main/models/inventory.py
@@ -1089,7 +1089,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
     def _get_unified_job_field_names(cls):
         return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars',
                 'schedule', 'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
-                'timeout']
+                'timeout', 'launch_type',]
 
     def save(self, *args, **kwargs):
         # If update_fields has been specified, add our field names to it,
diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py
index 674bedbffe..19bc265c18 100644
--- a/awx/main/models/unified_jobs.py
+++ b/awx/main/models/unified_jobs.py
@@ -13,7 +13,7 @@ from StringIO import StringIO
 
 # Django
 from django.conf import settings
-from django.db import models
+from django.db import models, connection
 from django.core.exceptions import NON_FIELD_ERRORS
 from django.utils.translation import ugettext_lazy as _
 from django.utils.timezone import now
@@ -835,6 +835,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
         return (True, opts)
 
+    def start_celery_task(self, opts, error_callback, success_callback):
+        task_class = self._get_task_class()
+        task_class().apply_async((self.pk,), opts, link_error=error_callback, link=success_callback)
+
     def start(self, error_callback, success_callback, **kwargs):
         '''
         Start the task running via Celery.
@@ -842,7 +846,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
         task_class = self._get_task_class()
         (res, opts) = self.pre_start(**kwargs)
         if res:
-            task_class().apply_async((self.pk,), opts, link_error=error_callback, link=success_callback)
+            self.start_celery_task(opts, error_callback, success_callback)
         return res
 
     def signal_start(self, **kwargs):
@@ -871,7 +875,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
         self.websocket_emit_status("pending")
 
         from awx.main.scheduler.tasks import run_job_launch
-        run_job_launch.delay(self.id)
+        connection.on_commit(lambda: run_job_launch.delay(self.id))
 
         # Each type of unified job has a different Task class; get the
         # appropriate one.
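The connection.on_commit() change just above is the subtle part of this hunk: signal_start() runs inside a database transaction, and dispatching run_job_launch.delay() immediately could hand a celery worker a job id whose row was not yet committed, and therefore not visible from the worker's own connection. Deferring the dispatch until commit removes that race. A sketch of the pattern, with build_job and dispatch standing in for the real pieces:

    from django.db import connection, transaction

    def launch(build_job, dispatch):
        with transaction.atomic():
            job = build_job()
            # runs only after (and if) the transaction commits;
            # dropped silently on rollback
            connection.on_commit(lambda: dispatch(job.id))
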
diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 0711528c56..e704b3ef8a 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -8,7 +8,7 @@ from sets import Set # Django from django.conf import settings -from django.db import transaction +from django.db import transaction, connection from django.db.utils import DatabaseError # AWX @@ -20,8 +20,10 @@ from awx.main.scheduler.dependency_graph import DependencyGraph from awx.main.scheduler.partial import ( JobDict, ProjectUpdateDict, - InventoryUpdateDict, ProjectUpdateLatestDict, + InventoryUpdateDict, + InventoryUpdateLatestDict, + InventorySourceDict, ) # Celery @@ -72,11 +74,34 @@ class Scheduler(): return ProjectUpdateLatestDict.filter_partial(list(project_ids)) + # TODO: Consider a database query for this logic + def get_latest_inventory_update_tasks(self, all_sorted_tasks): + inventory_ids = Set() + for task in all_sorted_tasks: + if type(task) == JobDict: + inventory_ids.add(task['inventory_id']) + + return InventoryUpdateLatestDict.filter_partial(list(inventory_ids)) + + def get_running_workflow_jobs(self): graph_workflow_jobs = [wf for wf in WorkflowJob.objects.filter(status='running')] return graph_workflow_jobs + # TODO: Consider a database query for this logic + def get_inventory_source_tasks(self, all_sorted_tasks): + inventory_ids = Set() + results = [] + for task in all_sorted_tasks: + if type(task) is JobDict: + inventory_ids.add(task['inventory_id']) + + for inventory_id in inventory_ids: + results.append((inventory_id, InventorySourceDict.filter_partial(inventory_id))) + + return results + def spawn_workflow_graph_jobs(self, workflow_jobs): # TODO: Consider using transaction.atomic for workflow_job in workflow_jobs: @@ -134,8 +159,6 @@ class Scheduler(): def start_task(self, task, dependent_tasks=[]): from awx.main.tasks import handle_work_error, handle_work_success - #print("start_task() <%s, %s> with deps %s" % (task.get_job_type_str(), task['id'], dependent_tasks)) - # TODO: spawn inventory and project updates task_actual = { 'type':task.get_job_type_str(), @@ -148,10 +171,8 @@ class Scheduler(): job_obj = task.get_full() job_obj.status = 'waiting' - job_obj.save() - #print("For real, starting job <%s, %s>" % (type(job_obj), job_obj.id)) - start_status = job_obj.start(error_callback=error_handler, success_callback=success_handler) + (start_status, opts) = job_obj.pre_start() if not start_status: job_obj.status = 'failed' if job_obj.job_explanation: @@ -163,6 +184,8 @@ class Scheduler(): self.consume_capacity(task) + connection.on_commit(lambda: job_obj.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler)) + def process_runnable_tasks(self, runnable_tasks): for i, task in enumerate(runnable_tasks): # TODO: maybe batch process new tasks. @@ -179,10 +202,20 @@ class Scheduler(): dep.save() project_task = ProjectUpdateDict.get_partial(dep.id) - #waiting_tasks.insert(waiting_tasks.index(task), dep) return project_task + def create_inventory_update(self, task, inventory_source_task): + dep = InventorySource.objects.get(id=inventory_source_task['id']).create_inventory_update(launch_type='dependency') + + dep.created = task['created'] - timedelta(seconds=2) + dep.status = 'waiting' + dep.save() + + inventory_task = InventoryUpdateDict.get_partial(dep.id) + + return inventory_task + def generate_dependencies(self, task): dependencies = [] # TODO: What if the project is null ? 
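Worth noting in the hunk above: create_inventory_update() backdates its update to two seconds before the triggering job, mirroring the one-second offset create_project_update() uses. The offsets keep dependencies ordered ahead of the job that spawned them in the created-time sort, and they are also what the dependency graph's created == job_created - offset check keys on. A small sketch of the convention (dependency_created is a hypothetical helper):

    from datetime import timedelta

    def dependency_created(job_created, kind):
        # project updates are backdated 1s before the job, inventory updates 2s
        offset = {'project_update': 1, 'inventory_update': 2}[kind]
        return job_created - timedelta(seconds=offset)
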
@@ -191,12 +224,24 @@ class Scheduler(): self.graph.should_update_related_project(task): project_task = self.create_project_update(task) dependencies.append(project_task) - # Inventory created 2 seconds behind + # Inventory created 2 seconds behind job + + for inventory_source_task in self.graph.get_inventory_sources(task['inventory_id']): + if self.graph.should_update_related_inventory_source(task, inventory_source_task['id']): + inventory_task = self.create_inventory_update(task, inventory_source_task) + dependencies.append(inventory_task) return dependencies def process_latest_project_updates(self, latest_project_updates): - for task in latest_project_updates: - self.graph.add_latest_project_update(task) + map(lambda task: self.graph.add_latest_project_update(task), latest_project_updates) + + def process_latest_inventory_updates(self, latest_inventory_updates): + map(lambda task: self.graph.add_latest_inventory_update(task), latest_inventory_updates) + + def process_inventory_sources(self, inventory_id_sources): + #map(lambda inventory_id, inventory_sources: self.graph.add_inventory_sources(inventory_id, inventory_sources), inventory_id_sources) + for inventory_id, inventory_sources in inventory_id_sources: + self.graph.add_inventory_sources(inventory_id, inventory_sources) def process_dependencies(self, dependent_task, dependency_tasks): for task in dependency_tasks: @@ -205,7 +250,6 @@ class Scheduler(): if not self.graph.is_job_blocked(task): self.graph.add_job(task) if not self.would_exceed_capacity(task): - #print("process_dependencies() going to run project update <%s, %s>" % (task['id'], task['project_id'])) self.start_task(task, [dependent_task]) else: self.graph.add_job(task) @@ -214,7 +258,6 @@ class Scheduler(): for task in pending_tasks: if not self.graph.is_job_blocked(task): - #print("process_pending_tasks() generating deps for job <%s, %s, %s>" % (task['id'], task['project_id'], task.model)) dependencies = self.generate_dependencies(task) self.process_dependencies(task, dependencies) @@ -222,7 +265,6 @@ class Scheduler(): if not self.graph.is_job_blocked(task): self.graph.add_job(task) if not self.would_exceed_capacity(task): - #print("Starting the original task <%s, %s>" % (task.get_job_type_str(), task['id'])) self.start_task(task) else: self.graph.add_job(task) @@ -272,7 +314,6 @@ class Scheduler(): def consume_capacity(self, task): self.capacity_used += task.task_impact() - #print("Capacity used %s vs total %s" % (self.capacity_used, self.capacity_total)) def get_remaining_capacity(self): return (self.capacity_total - self.capacity_used) @@ -320,6 +361,12 @@ class Scheduler(): latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks) self.process_latest_project_updates(latest_project_updates) + latest_inventory_updates = self.get_latest_inventory_update_tasks(all_sorted_tasks) + self.process_latest_inventory_updates(latest_inventory_updates) + + inventory_id_sources = self.get_inventory_source_tasks(all_sorted_tasks) + self.process_inventory_sources(inventory_id_sources) + self.process_tasks(all_sorted_tasks) #print("Finished schedule()") diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py index 5ecea91385..14a77ab697 100644 --- a/awx/main/scheduler/dependency_graph.py +++ b/awx/main/scheduler/dependency_graph.py @@ -6,7 +6,12 @@ class DependencyGraph(object): PROJECT_UPDATES = 'project_updates' INVENTORY_UPDATES = 'inventory_updates' JOB_TEMPLATE_JOBS = 'job_template_jobs' + INVENTORY_SOURCE_UPDATES = 
'inventory_source_updates'
+    LATEST_PROJECT_UPDATES = 'latest_project_updates'
+    LATEST_INVENTORY_UPDATES = 'latest_inventory_updates'
+
+    INVENTORY_SOURCES = 'inventory_source_ids'
 
     def __init__(self, *args, **kwargs):
         self.data = {}
@@ -16,13 +21,29 @@ class DependencyGraph(object):
         self.data[self.INVENTORY_UPDATES] = {}
         # job_template_id -> True / False
         self.data[self.JOB_TEMPLATE_JOBS] = {}
+        # inventory_source_id -> True / False
+        self.data[self.INVENTORY_SOURCE_UPDATES] = {}
 
-        # project_id -> latest ProjectUpdateDict
+        # project_id -> latest ProjectUpdateLatestDict
         self.data[self.LATEST_PROJECT_UPDATES] = {}
+        # inventory_source_id -> latest InventoryUpdateLatestDict
+        self.data[self.LATEST_INVENTORY_UPDATES] = {}
+
+        # inventory_id -> [inventory_source_ids]
+        self.data[self.INVENTORY_SOURCES] = {}
 
     def add_latest_project_update(self, job):
         self.data[self.LATEST_PROJECT_UPDATES][job['project_id']] = job
 
+    def add_latest_inventory_update(self, job):
+        self.data[self.LATEST_INVENTORY_UPDATES][job['inventory_source_id']] = job
+
+    def add_inventory_sources(self, inventory_id, inventory_sources):
+        self.data[self.INVENTORY_SOURCES][inventory_id] = inventory_sources
+
+    def get_inventory_sources(self, inventory_id):
+        return self.data[self.INVENTORY_SOURCES].get(inventory_id, [])
+
     def get_now(self):
         return tz_now()
 
@@ -61,25 +82,59 @@ class DependencyGraph(object):
 
         return False
 
-    def add_project_update(self, job):
+    def should_update_related_inventory_source(self, job, inventory_source_id):
+        now = self.get_now()
+        latest_inventory_update = self.data[self.LATEST_INVENTORY_UPDATES].get(inventory_source_id, None)
+        if not latest_inventory_update:
+            return True
+
+        # TODO: Other finished, failed cases? i.e. error ?
+        if latest_inventory_update['status'] == 'failed':
+            return True
+
+        '''
+        This is a bit of fuzzy logic.
+        If the latest inventory update has a created time == job_created_time-2
+        then consider the inventory update found. This is so we don't enter an infinite loop
+        of updating the inventory source when cache timeout is 0.
+ ''' + if latest_inventory_update['inventory_source__update_cache_timeout'] == 0 and \ + latest_inventory_update['launch_type'] == 'dependency' and \ + latest_inventory_update['created'] == job['created'] - timedelta(seconds=2): + return False + + ''' + Normal, expected, cache timeout logic + ''' + timeout_seconds = timedelta(seconds=latest_inventory_update['inventory_source__update_cache_timeout']) + if (latest_inventory_update['finished'] + timeout_seconds) < now: + return True + + return False + + def mark_project_update(self, job): self.data[self.PROJECT_UPDATES][job['project_id']] = False - def add_inventory_update(self, job): + def mark_inventory_update(self, inventory_id): + self.data[self.INVENTORY_UPDATES][inventory_id] = False + + def mark_inventory_source_update(self, inventory_source_id): + self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False + + def mark_job_template_job(self, job): self.data[self.INVENTORY_UPDATES][job['inventory_id']] = False - - def add_job_template_job(self, job): + self.data[self.PROJECT_UPDATES][job['project_id']] = False self.data[self.JOB_TEMPLATE_JOBS][job['job_template_id']] = False - def can_project_update_run(self, job): return self.data[self.PROJECT_UPDATES].get(job['project_id'], True) - def can_inventory_update_run(self, job): - return self.data[self.INVENTORY_UPDATES].get(job['inventory_id'], True) + def can_inventory_update_run(self, inventory_source_id): + return self.data[self.INVENTORY_SOURCE_UPDATES].get(inventory_source_id, True) def can_job_run(self, job): if self.can_project_update_run(job) is True and \ - self.can_inventory_update_run(job) is True: + self.data[self.INVENTORY_UPDATES].get(job['inventory_id'], True) is True: if job['allow_simultaneous'] is False: return self.data[self.JOB_TEMPLATE_JOBS].get(job['job_template_id'], True) else: @@ -90,17 +145,18 @@ class DependencyGraph(object): if type(job) is ProjectUpdateDict: return not self.can_project_update_run(job) elif type(job) is InventoryUpdateDict: - return not self.can_inventory_update_run(job) + return not self.can_inventory_update_run(job['inventory_source_id']) elif type(job) is JobDict: return not self.can_job_run(job) def add_job(self, job): if type(job) is ProjectUpdateDict: - self.add_project_update(job) + self.mark_project_update(job) elif type(job) is InventoryUpdateDict: - self.add_inventory_update(job) + self.mark_inventory_update(job['inventory_source__inventory_id']) + self.mark_inventory_source_update(job['inventory_source_id']) elif type(job) is JobDict: - self.add_job_template_job(job) + self.mark_job_template_job(job) def add_jobs(self, jobs): for j in jobs: diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py index 16c6597f99..e3677dab2c 100644 --- a/awx/main/scheduler/partial.py +++ b/awx/main/scheduler/partial.py @@ -4,6 +4,7 @@ from awx.main.models import ( Job, ProjectUpdate, InventoryUpdate, + InventorySource, ) class PartialModelDict(object): @@ -57,7 +58,7 @@ class JobDict(PartialModelDict): 'id', 'status', 'job_template_id', 'inventory_id', 'project_id', 'launch_type', 'limit', 'allow_simultaneous', 'created', 'job_type', 'celery_task_id', 'project__scm_update_on_launch', - 'forks', + 'forks', 'inventory__inventory_sources', ) model = Job @@ -69,7 +70,9 @@ class JobDict(PartialModelDict): class ProjectUpdateDict(PartialModelDict): FIELDS = ( - 'id', 'status', 'project_id', 'created', 'celery_task_id', 'launch_type', 'project__scm_update_cache_timeout', 'project__scm_update_on_launch', + 'id', 'status', 
'project_id', 'created', 'celery_task_id',
+        'launch_type', 'project__scm_update_cache_timeout',
+        'project__scm_update_on_launch',
     )
     model = ProjectUpdate
 
@@ -81,23 +84,29 @@ class ProjectUpdateDict(PartialModelDict):
 
 class ProjectUpdateLatestDict(ProjectUpdateDict):
     FIELDS = (
-        'id', 'status', 'project_id', 'created', 'finished', 'project__scm_update_cache_timeout', 'launch_type', 'project__scm_update_on_launch',
+        'id', 'status', 'project_id', 'created', 'finished',
+        'project__scm_update_cache_timeout',
+        'launch_type', 'project__scm_update_on_launch',
     )
     model = ProjectUpdate
 
     @classmethod
     def filter_partial(cls, project_ids):
         # TODO: This can surely be made more efficient
+        # * shouldn't have to do a query per inventory_id
+        # * shouldn't have to call .values() on all the results, only to get the first result
         results = []
         for project_id in project_ids:
-            qs = cls.model.objects.filter(project_id=project_id, status__in=['waiting', 'successful', 'failed']).order_by('-finished')
+            qs = cls.model.objects.filter(project_id=project_id, status__in=['waiting', 'successful', 'failed']).order_by('-finished', '-started', '-created',)
             if qs.count() > 0:
                 results.append(cls(cls.model.objects.filter(id=qs[0].id).values(*cls.get_db_values())[0]))
         return results
 
 class InventoryUpdateDict(PartialModelDict):
+    #'inventory_source__update_on_launch',
+    #'inventory_source__update_cache_timeout',
     FIELDS = (
-        'id', 'status', 'created', 'celery_task_id',
+        'id', 'status', 'created', 'celery_task_id', 'inventory_source_id', 'inventory_source__inventory_id',
     )
     model = InventoryUpdate
 
@@ -107,3 +116,50 @@ class InventoryUpdateDict(PartialModelDict):
 
     def task_impact(self):
         return 20
 
+class InventoryUpdateLatestDict(InventoryUpdateDict):
+    #'inventory_source__update_on_launch',
+    #'inventory_source__update_cache_timeout',
+    FIELDS = (
+        'id', 'status', 'created', 'celery_task_id', 'inventory_source_id',
+        'finished', 'inventory_source__update_cache_timeout', 'launch_type',
+    )
+    model = InventoryUpdate
+
+    @classmethod
+    def filter_partial(cls, inventory_ids):
+        # TODO: This can surely be made more efficient
+        # * shouldn't have to do a query per inventory_id nor per inventory_source_id
+        # * shouldn't have to call .values() on all the results, only to get the first result
+        results = []
+        for inventory_id in inventory_ids:
+            inventory_source_ids = InventorySource.objects.filter(inventory_id=inventory_id,
+                                                                  update_on_launch=True).values_list('id', flat=True)
+            # Find the most recent inventory update for each inventory source
+            for inventory_source_id in inventory_source_ids:
+                qs = cls.model.objects.filter(inventory_source_id=inventory_source_id,
+                                              status__in=['waiting', 'successful', 'failed'],
+                                              inventory_source__update_on_launch=True).order_by('-finished', '-started', '-created')
+                if qs.count() > 0:
+                    results.append(cls(cls.model.objects.filter(id=qs[0].id).values(*cls.get_db_values())[0]))
+        return results
+
+class InventorySourceDict(PartialModelDict):
+    FIELDS = (
+        'id',
+    )
+    model = InventorySource
+
+    def get_job_type_str(self):
+        return 'inventory_source'
+
+    def task_impact(self):
+        return 20
+
+    @classmethod
+    # TODO: Optimize this to run the query once
+    def filter_partial(cls, inventory_id):
+        kv = {
+            'inventory_id': inventory_id,
+            'update_on_launch': True,
+        }
+        return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())]
diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py
index ef0334e316..ba1ddaeecc 100644
--- a/awx/main/scheduler/tasks.py
+++ 
b/awx/main/scheduler/tasks.py @@ -21,67 +21,10 @@ logger = logging.getLogger('awx.main.scheduler') @task def run_job_launch(job_id): - ''' - # Wait for job to exist. - # The job is created in a transaction then the message is created, but - # the transaction may not have completed. - - # FIXME: We could generate the message in a Django signal handler. - # OR, we could call an explicit commit in the view and then send the - # message. - - retries = 10 - retry = 0 - while not UnifiedJob.objects.filter(id=job_id).exists(): - time.sleep(0.3) - - if retry >= retries: - logger.error("Failed to process 'job_launch' message for job %d" % job_id) - # ack the message so we don't build up the queue. - # - # The job can still be chosen to run during tower startup or - # when another job is started or completes - return - retry += 1 - - # "Safe" to get the job now since it exists. - # Really, there is a race condition from exists to get - - # TODO: while not loop should call get wrapped in a try except - #job = UnifiedJob.objects.get(id=job_id) - ''' - Scheduler().schedule() @task def run_job_complete(job_id): - ''' - # TODO: use list of finished status from jobs.py or unified_jobs.py - finished_status = ['successful', 'error', 'failed', 'completed'] - q = UnifiedJob.objects.filter(id=job_id) - - # Ensure that the job is updated in the database before we call to - # schedule the next job. - retries = 10 - retry = 0 - while True: - # Job not found, most likely deleted. That's fine - if not q.exists(): - logger.warn("Failed to find job '%d' while processing 'job_complete' message. Presume that it was deleted." % job_id) - break - - job = q[0] - if job.status in finished_status: - break - - time.sleep(0.3) - - if retry >= retries: - logger.error("Expected job status '%s' to be one of '%s' while processing 'job_complete' message." 
% (job.status, finished_status)) - return - retry += 1 - ''' - Scheduler().schedule() @task diff --git a/awx/main/tests/functional/test_partial.py b/awx/main/tests/functional/test_partial.py index 69ad71c4df..0ab84dc901 100644 --- a/awx/main/tests/functional/test_partial.py +++ b/awx/main/tests/functional/test_partial.py @@ -6,44 +6,48 @@ from datetime import timedelta # AWX from awx.main.models import ( + Organization, + Inventory, + Group, Project, ProjectUpdate, + InventoryUpdate, + InventorySource, ) from awx.main.scheduler.partial import ( ProjectUpdateLatestDict, + InventoryUpdateDict, + InventoryUpdateLatestDict, ) - @pytest.fixture -def failed_project_update(): - p = Project.objects.create(name="proj1") - pu = ProjectUpdate.objects.create(project=p, status='failed', finished=tz_now() - timedelta(seconds=20)) - - return (p, pu) - -@pytest.fixture -def successful_project_update(): - p = Project.objects.create(name="proj1") - pu = ProjectUpdate.objects.create(project=p, status='successful', finished=tz_now() - timedelta(seconds=20)) - - return (p, pu) - -# Failed project updates newer than successful ones -@pytest.fixture -def multiple_project_updates(): - p = Project.objects.create(name="proj1") - - epoch = tz_now() - - successful_pus = [ProjectUpdate.objects.create(project=p, - status='successful', - finished=epoch - timedelta(seconds=100 + i)) for i in xrange(0, 5)] - failed_pus = [ProjectUpdate.objects.create(project=p, - status='failed', - finished=epoch - timedelta(seconds=100 - len(successful_pus) + i)) for i in xrange(0, 5)] - return (p, failed_pus, successful_pus) +def org(): + return Organization.objects.create(name="org1") class TestProjectUpdateLatestDictDict(): + @pytest.fixture + def successful_project_update(self): + p = Project.objects.create(name="proj1") + pu = ProjectUpdate.objects.create(project=p, status='successful', finished=tz_now() - timedelta(seconds=20)) + + return (p, pu) + + # Failed project updates newer than successful ones + @pytest.fixture + def multiple_project_updates(self): + p = Project.objects.create(name="proj1") + + epoch = tz_now() + + successful_pus = [ProjectUpdate.objects.create(project=p, + status='successful', + finished=epoch - timedelta(seconds=100 + i)) for i in xrange(0, 5)] + failed_pus = [ProjectUpdate.objects.create(project=p, + status='failed', + finished=epoch - timedelta(seconds=100 - len(successful_pus) + i)) for i in xrange(0, 5)] + return (p, failed_pus, successful_pus) + + @pytest.mark.django_db class TestFilterPartial(): def test_project_update_successful(self, successful_project_update): @@ -63,3 +67,54 @@ class TestProjectUpdateLatestDictDict(): assert failed_pus[0].id == tasks[0]['id'] +class TestInventoryUpdateDict(): + @pytest.fixture + def waiting_inventory_update(self, org): + i = Inventory.objects.create(name='inv1', organization=org) + g = Group.objects.create(name='group1', inventory=i) + #Inventory.groups.add(g) + inv_src = InventorySource.objects.create(group=g) + iu = InventoryUpdate.objects.create(inventory_source=inv_src, status='waiting') + return iu + + @pytest.mark.django_db + class TestFilterPartial(): + def test_simple(self, waiting_inventory_update): + tasks = InventoryUpdateDict.filter_partial(status=['waiting']) + + assert 1 == len(tasks) + assert waiting_inventory_update.id == tasks[0]['id'] + +class TestInventoryUpdateLatestDict(): + @pytest.fixture + def inventory(self, org): + i = Inventory.objects.create(name='inv1', organization=org) + return i + + @pytest.fixture + def inventory_updates(self, 
inventory): + g1 = Group.objects.create(name='group1', inventory=inventory) + g2 = Group.objects.create(name='group2', inventory=inventory) + g3 = Group.objects.create(name='group3', inventory=inventory) + + inv_src1 = InventorySource.objects.create(group=g1, update_on_launch=True, inventory=inventory) + inv_src2 = InventorySource.objects.create(group=g2, update_on_launch=False, inventory=inventory) + inv_src3 = InventorySource.objects.create(group=g3, update_on_launch=True, inventory=inventory) + + iu1 = InventoryUpdate.objects.create(inventory_source=inv_src1, status='successful') + iu2 = InventoryUpdate.objects.create(inventory_source=inv_src2, status='waiting') + iu3 = InventoryUpdate.objects.create(inventory_source=inv_src3, status='waiting') + return [iu1, iu2, iu3] + + @pytest.mark.django_db + def test_filter_partial(self, inventory, inventory_updates): + + tasks = InventoryUpdateLatestDict.filter_partial([inventory.id]) + + inventory_updates_expected = [inventory_updates[0], inventory_updates[2]] + + assert 2 == len(tasks) + for i, inventory_update in enumerate(inventory_updates_expected): + assert inventory_update.id == tasks[i]['id'] + + diff --git a/awx/main/tests/unit/scheduler/conftest.py b/awx/main/tests/unit/scheduler/conftest.py new file mode 100644 index 0000000000..d8a71d456e --- /dev/null +++ b/awx/main/tests/unit/scheduler/conftest.py @@ -0,0 +1,238 @@ + +# Python +import pytest +from datetime import timedelta + +# Django +from django.utils.timezone import now as tz_now + +# awx +from awx.main.scheduler.partial import ( + JobDict, + ProjectUpdateDict, + InventoryUpdateDict, + InventorySourceDict, +) +from awx.main.scheduler import Scheduler + + +@pytest.fixture +def epoch(): + return tz_now() + +@pytest.fixture +def scheduler_factory(mocker, epoch): + def fn(tasks=[], inventory_sources=[], latest_project_updates=[], latest_inventory_updates=[], create_project_update=None, create_inventory_update=None): + sched = Scheduler() + sched.capacity_total = 999999999 + + sched.graph.get_now = lambda: epoch + + def no_create_inventory_update(task, ignore): + raise RuntimeError("create_inventory_update should not be called") + def no_create_project_update(task): + raise RuntimeError("create_project_update should not be called") + + mocker.patch.object(sched, 'get_tasks', return_value=tasks) + mocker.patch.object(sched, 'get_inventory_source_tasks', return_value=inventory_sources) + mocker.patch.object(sched, 'get_latest_project_update_tasks', return_value=latest_project_updates) + mocker.patch.object(sched, 'get_latest_inventory_update_tasks', return_value=latest_inventory_updates) + create_project_update_mock = mocker.patch.object(sched, 'create_project_update', return_value=create_project_update) + create_inventory_update_mock = mocker.patch.object(sched, 'create_inventory_update', return_value=create_inventory_update) + mocker.patch.object(sched, 'start_task') + + if not create_project_update: + create_project_update_mock.side_effect = no_create_project_update + if not create_inventory_update: + create_inventory_update_mock.side_effect = no_create_inventory_update + return sched + return fn + +@pytest.fixture +def project_update_factory(epoch): + def fn(): + return ProjectUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=100), + 'project_id': 1, + 'project__scm_update_cache_timeout': 0, + 'celery_task_id': '', + 'launch_type': 'dependency', + 'project__scm_update_on_launch': True, + }) + return fn + +@pytest.fixture +def 
pending_project_update(project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'pending' + return project_update + +@pytest.fixture +def waiting_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'waiting' + return project_update + +@pytest.fixture +def running_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'running' + return project_update + +@pytest.fixture +def successful_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['finished'] = epoch - timedelta(seconds=90) + project_update['status'] = 'successful' + return project_update + +@pytest.fixture +def successful_project_update_cache_expired(epoch, project_update_factory): + project_update = project_update_factory() + + project_update['status'] = 'successful' + project_update['created'] = epoch - timedelta(seconds=120) + project_update['finished'] = epoch - timedelta(seconds=110) + project_update['project__scm_update_cache_timeout'] = 1 + return project_update + +@pytest.fixture +def failed_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['finished'] = epoch - timedelta(seconds=90) + project_update['status'] = 'failed' + return project_update + +@pytest.fixture +def inventory_update_factory(epoch): + def fn(): + return InventoryUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=101), + 'inventory_id': 1, + 'celery_task_id': '', + 'status': 'pending', + 'launch_type': 'dependency', + 'inventory_source_id': 1, + 'inventory_source__inventory_id': 1, + }) + return fn + +@pytest.fixture +def inventory_update_latest_factory(epoch): + def fn(): + return InventoryUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=101), + 'inventory_id': 1, + 'celery_task_id': '', + 'status': 'pending', + 'launch_type': 'dependency', + 'inventory_source_id': 1, + 'finished': None, + }) + return fn + +@pytest.fixture +def inventory_update_latest(inventory_update_latest_factory): + return inventory_update_latest_factory() + +@pytest.fixture +def successful_inventory_update_latest(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['status'] = 'successful' + iu['finished'] = iu['created'] + timedelta(seconds=10) + return iu + +@pytest.fixture +def failed_inventory_update_latest(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['status'] = 'failed' + return iu + +@pytest.fixture +def pending_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'pending' + return inventory_update + +@pytest.fixture +def waiting_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'waiting' + return inventory_update + +@pytest.fixture +def failed_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'failed' + return inventory_update + +@pytest.fixture +def running_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'running' + return inventory_update + +@pytest.fixture +def successful_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + 
inventory_update['finished'] = epoch - timedelta(seconds=90) + inventory_update['status'] = 'successful' + return inventory_update + +''' +Job +''' +@pytest.fixture +def job_factory(epoch): + def fn(project__scm_update_on_launch=True, inventory__inventory_sources=[]): + return JobDict({ + 'id': 1, + 'status': 'pending', + 'job_template_id': 1, + 'project_id': 1, + 'inventory_id': 1, + 'launch_type': 'manual', + 'allow_simultaneous': False, + 'created': epoch - timedelta(seconds=99), + 'celery_task_id': '', + 'project__scm_update_on_launch': project__scm_update_on_launch, + 'inventory__inventory_sources': inventory__inventory_sources, + 'forks': 5 + }) + return fn + +@pytest.fixture +def pending_job(job_factory): + job = job_factory() + job['status'] = 'pending' + return job + +@pytest.fixture +def running_job(job_factory): + job = job_factory() + job['status'] = 'running' + return job + +''' +Inventory id -> [InventorySourceDict, ...] +''' +@pytest.fixture +def inventory_source_factory(): + def fn(id=1): + return InventorySourceDict({ + 'id': id, + }) + return fn + +@pytest.fixture +def inventory_id_sources(inventory_source_factory): + return [ + (1, [ + inventory_source_factory(id=1), + inventory_source_factory(id=2), + ]), + ] + diff --git a/awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py b/awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py new file mode 100644 index 0000000000..09125df527 --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py @@ -0,0 +1,85 @@ + +# Python +import pytest +from datetime import timedelta + +@pytest.fixture +def pending_job(job_factory): + return job_factory(project__scm_update_on_launch=False, inventory__inventory_sources=['1']) + +@pytest.fixture +def successful_inventory_update_latest(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['inventory_source__update_cache_timeout'] = 100 + iu['status'] = 'successful' + iu['finished'] = iu['created'] + timedelta(seconds=10) + return iu + +@pytest.fixture +def successful_inventory_update_latest_cache_expired(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['inventory_source__update_cache_timeout'] = 1 + iu['finished'] = iu['created'] + timedelta(seconds=2) + return iu + +class TestStartInventoryUpdate(): + def test_pending(self, scheduler_factory, pending_inventory_update): + scheduler = scheduler_factory(tasks=[pending_inventory_update]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_inventory_update) + +class TestInventoryUpdateBlocked(): + def test_running_inventory_update(self, epoch, scheduler_factory, running_inventory_update, pending_inventory_update): + running_inventory_update['created'] = epoch - timedelta(seconds=100) + pending_inventory_update['created'] = epoch - timedelta(seconds=90) + + scheduler = scheduler_factory(tasks=[running_inventory_update, pending_inventory_update]) + + scheduler._schedule() + + def test_waiting_inventory_update(self, epoch, scheduler_factory, waiting_inventory_update, pending_inventory_update): + waiting_inventory_update['created'] = epoch - timedelta(seconds=100) + pending_inventory_update['created'] = epoch - timedelta(seconds=90) + + scheduler = scheduler_factory(tasks=[waiting_inventory_update, pending_inventory_update]) + + scheduler._schedule() + +class TestCreateDependentInventoryUpdate(): + + def test(self, scheduler_factory, pending_job, waiting_inventory_update, inventory_id_sources): + scheduler = 
scheduler_factory(tasks=[pending_job],
+                                      create_inventory_update=waiting_inventory_update,
+                                      inventory_sources=inventory_id_sources)
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job])
+
+    def test_cache_hit(self, scheduler_factory, pending_job, successful_inventory_update, successful_inventory_update_latest):
+        scheduler = scheduler_factory(tasks=[successful_inventory_update, pending_job],
+                                      latest_inventory_updates=[successful_inventory_update_latest])
+        scheduler._schedule()
+
+        scheduler.start_task.assert_called_with(pending_job)
+
+    def test_cache_miss(self, scheduler_factory, pending_job, successful_inventory_update, successful_inventory_update_latest_cache_expired, waiting_inventory_update, inventory_id_sources):
+        scheduler = scheduler_factory(tasks=[successful_inventory_update, pending_job],
+                                      latest_inventory_updates=[successful_inventory_update_latest_cache_expired],
+                                      create_inventory_update=waiting_inventory_update,
+                                      inventory_sources=inventory_id_sources)
+        scheduler._schedule()
+
+        scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job])
+
+    def test_last_update_failed(self, scheduler_factory, pending_job, failed_inventory_update, failed_inventory_update_latest, waiting_inventory_update, inventory_id_sources):
+        scheduler = scheduler_factory(tasks=[failed_inventory_update, pending_job],
+                                      latest_inventory_updates=[failed_inventory_update_latest],
+                                      create_inventory_update=waiting_inventory_update,
+                                      inventory_sources=inventory_id_sources)
+        scheduler._schedule()
+
+        scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job])
+
diff --git a/awx/main/tests/unit/scheduler/test_scheduler_job.py b/awx/main/tests/unit/scheduler/test_scheduler_job.py
new file mode 100644
index 0000000000..37af2ead05
--- /dev/null
+++ b/awx/main/tests/unit/scheduler/test_scheduler_job.py
@@ -0,0 +1,66 @@
+
+# Python
+import pytest
+from datetime import timedelta
+
+# awx
+from awx.main.scheduler.partial import (
+    JobDict,
+    ProjectUpdateDict,
+)
+
+# TODO: wherever get_latest_project_update_task() is stubbed and returns a
+# ProjectUpdateDict. We should instead return a ProjectUpdateLatestDict()
+# For now, this is ok since the fields don't deviate that much.
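A note on the test style introduced here: scheduler_factory (from conftest.py above) stubs out every Scheduler accessor that would otherwise hit the database, so _schedule() can be exercised as a pure function of the partial dicts fed in. Boiled down, the tests that follow all take roughly this shape (a sketch; the committed fixture also stubs the remaining accessors and guards the create_* paths):

    from awx.main.scheduler import Scheduler

    def test_pending_job_is_started(mocker, pending_job):
        sched = Scheduler()
        # Canned partial dicts replace the database queries.
        mocker.patch.object(sched, 'get_tasks', return_value=[pending_job])
        mocker.patch.object(sched, 'get_latest_project_update_tasks', return_value=[])
        mocker.patch.object(sched, 'get_latest_inventory_update_tasks', return_value=[])
        mocker.patch.object(sched, 'get_inventory_source_tasks', return_value=[])
        mocker.patch.object(sched, 'start_task')

        sched._schedule()

        # Assert on the scheduling decision, not on side effects.
        sched.start_task.assert_called_with(pending_job)
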
+
+class TestJobBlocked():
+    def test_inventory_update_waiting(self, scheduler_factory, waiting_inventory_update, pending_job):
+        scheduler = scheduler_factory(tasks=[waiting_inventory_update, pending_job])
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_not_called()
+
+    def test_inventory_update_running(self, scheduler_factory, running_inventory_update, pending_job, inventory_source_factory, inventory_id_sources):
+        scheduler = scheduler_factory(tasks=[running_inventory_update, pending_job],
+                                      inventory_sources=inventory_id_sources)
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_not_called()
+
+    def test_project_update_running(self, scheduler_factory, pending_job, running_project_update):
+        scheduler = scheduler_factory(tasks=[running_project_update, pending_job])
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_not_called()
+        assert scheduler.create_project_update.call_count == 0
+
+    def test_project_update_waiting(self, scheduler_factory, pending_job, waiting_project_update):
+        scheduler = scheduler_factory(tasks=[waiting_project_update, pending_job],
+                                      latest_project_updates=[waiting_project_update])
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_not_called()
+        assert scheduler.create_project_update.call_count == 0
+
+class TestJob():
+    @pytest.fixture
+    def successful_project_update(self, project_update_factory):
+        project_update = project_update_factory()
+        project_update['status'] = 'successful'
+        project_update['finished'] = project_update['created'] + timedelta(seconds=10)
+        project_update['project__scm_update_cache_timeout'] = 3600
+        return project_update
+
+    def test_existing_dependencies_finished(self, scheduler_factory, successful_project_update, successful_inventory_update_latest, pending_job):
+        scheduler = scheduler_factory(tasks=[successful_project_update, pending_job],
+                                      latest_project_updates=[successful_project_update],
+                                      latest_inventory_updates=[successful_inventory_update_latest])
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_called_with(pending_job)
+
diff --git a/awx/main/tests/unit/scheduler/test_scheduler_project_update.py b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
index 54add63d51..e0fcbc3b1e 100644
--- a/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
+++ b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
@@ -17,98 +17,6 @@ from awx.main.scheduler import Scheduler
 # ProjectUpdateDict. We should instead return a ProjectUpdateLatestDict()
 # For now, this is ok since the fields don't deviate that much.
-@pytest.fixture -def epoch(): - return tz_now() - - -@pytest.fixture -def scheduler_factory(mocker, epoch): - def fn(tasks=[], latest_project_updates=[], create_project_update=None): - sched = Scheduler() - sched.capacity_total = 999999999 - - sched.graph.get_now = lambda: epoch - - mocker.patch.object(sched, 'get_tasks', return_value=tasks) - mocker.patch.object(sched, 'get_latest_project_update_tasks', return_value=latest_project_updates) - mocker.patch.object(sched, 'create_project_update', return_value=create_project_update) - mocker.patch.object(sched, 'start_task') - return sched - return fn - -@pytest.fixture -def project_update_factory(epoch): - def fn(): - return ProjectUpdateDict({ - 'id': 1, - 'created': epoch - timedelta(seconds=100), - 'project_id': 1, - 'project__scm_update_cache_timeout': 0, - 'celery_task_id': '', - 'launch_type': 'dependency', - 'project__scm_update_on_launch': True, - }) - return fn - -@pytest.fixture -def pending_project_update(project_update_factory): - project_update = project_update_factory() - project_update['status'] = 'pending' - return project_update - -@pytest.fixture -def waiting_project_update(epoch, project_update_factory): - project_update = project_update_factory() - project_update['status'] = 'waiting' - return project_update - -@pytest.fixture -def pending_job(epoch): - return JobDict({ - 'id': 1, - 'status': 'pending', - 'job_template_id': 1, - 'project_id': 1, - 'inventory_id': 1, - 'launch_type': 'manual', - 'allow_simultaneous': False, - 'created': epoch - timedelta(seconds=99), - 'celery_task_id': '', - 'project__scm_update_on_launch': True, - 'forks': 5 - }) - -@pytest.fixture -def running_project_update(epoch, project_update_factory): - project_update = project_update_factory() - project_update['status'] = 'running' - return project_update - -@pytest.fixture -def successful_project_update(epoch, project_update_factory): - project_update = project_update_factory() - project_update['finished'] = epoch - timedelta(seconds=90) - project_update['status'] = 'successful' - return project_update - -@pytest.fixture -def successful_project_update_cache_expired(epoch, project_update_factory): - project_update = project_update_factory() - - project_update['status'] = 'successful' - project_update['created'] = epoch - timedelta(seconds=120) - project_update['finished'] = epoch - timedelta(seconds=110) - project_update['project__scm_update_cache_timeout'] = 1 - return project_update - -@pytest.fixture -def failed_project_update(epoch, project_update_factory): - project_update = project_update_factory() - project_update['finished'] = epoch - timedelta(seconds=90) - project_update['status'] = 'failed' - return project_update - class TestStartProjectUpdate(): def test(self, scheduler_factory, pending_project_update): scheduler = scheduler_factory(tasks=[pending_project_update]) @@ -164,31 +72,18 @@ class TestCreateDependentProjectUpdate(): scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) - -class TestJobBlockedOnProjectUpdate(): - def test(self, scheduler_factory, pending_job, waiting_project_update): - scheduler = scheduler_factory(tasks=[waiting_project_update, pending_job], - latest_project_updates=[waiting_project_update]) - - scheduler._schedule() - - scheduler.start_task.assert_not_called() - assert scheduler.create_project_update.call_count == 0 - - def test_project_running(self, scheduler_factory, pending_job, running_project_update): - scheduler = scheduler_factory(tasks=[running_project_update, pending_job]) 
-
-        scheduler._schedule()
-
-        scheduler.start_task.assert_not_called()
-        assert scheduler.create_project_update.call_count == 0
-
 class TestProjectUpdateBlocked():
-    def test(self, scheduler_factory, running_project_update, pending_project_update):
-        scheduler = scheduler_factory(tasks=[running_project_update, pending_project_update],
-                                      latest_project_updates=[running_project_update])
+    def test_project_update_running(self, scheduler_factory, running_project_update, pending_project_update):
+        scheduler = scheduler_factory(tasks=[running_project_update, pending_project_update])
 
         scheduler._schedule()
 
         scheduler.start_task.assert_not_called()
         assert scheduler.create_project_update.call_count == 0
+
+    def test_job_running(self, scheduler_factory, running_job, pending_project_update):
+        scheduler = scheduler_factory(tasks=[running_job, pending_project_update])
+
+        scheduler._schedule()
+
+        scheduler.start_task.assert_not_called()
+

From 5fa5d4b34b3b4a531207e1af88b663f84e933e95 Mon Sep 17 00:00:00 2001
From: Chris Meyers
Date: Mon, 24 Oct 2016 14:53:34 -0400
Subject: [PATCH 61/77] support distributed project updates

---
 awx/main/scheduler/dependency_graph.py | 4 ++--
 awx/main/scheduler/partial.py          | 8 ++++++++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py
index 14a77ab697..26b7518cf1 100644
--- a/awx/main/scheduler/dependency_graph.py
+++ b/awx/main/scheduler/dependency_graph.py
@@ -59,7 +59,7 @@
             return True
 
         # TODO: Other finished, failed cases? i.e. error ?
-        if latest_project_update['status'] == 'failed':
+        if latest_project_update['status'] in ['failed', 'canceled']:
             return True
 
         '''
@@ -89,7 +89,7 @@
             return True
 
         # TODO: Other finished, failed cases? i.e. error ?
- if latest_inventory_update['status'] == 'failed': + if latest_inventory_update['status'] in ['failed', 'canceled']: return True ''' diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py index e3677dab2c..a1870ccf4f 100644 --- a/awx/main/scheduler/partial.py +++ b/awx/main/scheduler/partial.py @@ -82,6 +82,14 @@ class ProjectUpdateDict(PartialModelDict): def task_impact(self): return 10 + @classmethod + def filter_partial(cls, status=[]): + kv = { + 'status__in': status, + 'job_type': 'check', + } + return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + class ProjectUpdateLatestDict(ProjectUpdateDict): FIELDS = ( 'id', 'status', 'project_id', 'created', 'finished', From 46faeffbb3888c9fcbca9c7a677fe4bb7391a8ea Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 24 Oct 2016 15:32:43 -0400 Subject: [PATCH 62/77] added task manager system job support --- awx/main/scheduler/__init__.py | 23 +++++++++-------------- awx/main/scheduler/dependency_graph.py | 23 ++++++++++++++++++++--- awx/main/scheduler/partial.py | 21 +++++++++++++++++++++ 3 files changed, 50 insertions(+), 17 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index e704b3ef8a..67c0c81a6a 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -24,6 +24,7 @@ from awx.main.scheduler.partial import ( InventoryUpdateDict, InventoryUpdateLatestDict, InventorySourceDict, + SystemJobDict, ) # Celery @@ -37,16 +38,16 @@ class Scheduler(): self.capacity_total = 200 self.capacity_used = 0 - def _get_tasks_with_status(self, status_list): + def get_tasks(self): + status_list = ('pending', 'waiting', 'running') - graph_jobs = JobDict.filter_partial(status=status_list) + jobs = JobDict.filter_partial(status=status_list) ''' graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(**kv)] - graph_inventory_updates = [iu for iu in - InventoryUpdate.objects.filter(**kv)] ''' - graph_inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) - graph_project_updates = ProjectUpdateDict.filter_partial(status=status_list) + inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) + project_updates = ProjectUpdateDict.filter_partial(status=status_list) + system_jobs = SystemJobDict.filter_partial(status=status_list) ''' graph_system_jobs = [sj for sj in SystemJob.objects.filter(**kv)] @@ -57,14 +58,10 @@ class Scheduler(): graph_workflow_jobs, key=lambda task: task.created) ''' - all_actions = sorted(graph_jobs + graph_project_updates + graph_inventory_updates, + all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs, key=lambda task: task['created']) return all_actions - def get_tasks(self): - RELEVANT_JOBS = ('pending', 'waiting', 'running') - return self._get_tasks_with_status(RELEVANT_JOBS) - # TODO: Consider a database query for this logic def get_latest_project_update_tasks(self, all_sorted_tasks): project_ids = Set() @@ -239,9 +236,7 @@ class Scheduler(): map(lambda task: self.graph.add_latest_inventory_update(task), latest_inventory_updates) def process_inventory_sources(self, inventory_id_sources): - #map(lambda inventory_id, inventory_sources: self.graph.add_inventory_sources(inventory_id, inventory_sources), inventory_id_sources) - for inventory_id, inventory_sources in inventory_id_sources: - self.graph.add_inventory_sources(inventory_id, inventory_sources) + map(lambda inventory_id, inventory_sources: self.graph.add_inventory_sources(inventory_id, 
inventory_sources), inventory_id_sources) def process_dependencies(self, dependent_task, dependency_tasks): for task in dependency_tasks: diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py index 26b7518cf1..e71cd6f0a7 100644 --- a/awx/main/scheduler/dependency_graph.py +++ b/awx/main/scheduler/dependency_graph.py @@ -1,11 +1,17 @@ from datetime import timedelta from django.utils.timezone import now as tz_now -from awx.main.scheduler.partial import JobDict, ProjectUpdateDict, InventoryUpdateDict +from awx.main.scheduler.partial import ( + JobDict, + ProjectUpdateDict, + InventoryUpdateDict, + SystemJobDict, +) class DependencyGraph(object): PROJECT_UPDATES = 'project_updates' INVENTORY_UPDATES = 'inventory_updates' JOB_TEMPLATE_JOBS = 'job_template_jobs' + SYSTEM_JOB = 'system_job' INVENTORY_SOURCE_UPDATES = 'inventory_source_updates' LATEST_PROJECT_UPDATES = 'latest_project_updates' @@ -23,6 +29,8 @@ class DependencyGraph(object): self.data[self.JOB_TEMPLATE_JOBS] = {} # inventory_source_id -> True / False self.data[self.INVENTORY_SOURCE_UPDATES] = {} + # True / False + self.data[self.SYSTEM_JOB] = True # project_id -> latest ProjectUpdateLatestDict self.data[self.LATEST_PROJECT_UPDATES] = {} @@ -112,6 +120,9 @@ class DependencyGraph(object): return False + def mark_system_job(self): + self.data[self.SYSTEM_JOB] = False + def mark_project_update(self, job): self.data[self.PROJECT_UPDATES][job['project_id']] = False @@ -141,6 +152,9 @@ class DependencyGraph(object): return True return False + def can_system_job_run(self): + return self.data[self.SYSTEM_JOB] + def is_job_blocked(self, job): if type(job) is ProjectUpdateDict: return not self.can_project_update_run(job) @@ -148,6 +162,8 @@ class DependencyGraph(object): return not self.can_inventory_update_run(job['inventory_source_id']) elif type(job) is JobDict: return not self.can_job_run(job) + elif type(job) is SystemJobDict: + return not self.can_system_job_run() def add_job(self, job): if type(job) is ProjectUpdateDict: @@ -157,8 +173,9 @@ class DependencyGraph(object): self.mark_inventory_source_update(job['inventory_source_id']) elif type(job) is JobDict: self.mark_job_template_job(job) + elif type(job) is SystemJobDict: + self.mark_system_job() def add_jobs(self, jobs): - for j in jobs: - self.add_job(j) + map(lambda j: self.add_job(j), jobs) diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py index a1870ccf4f..dddbf763e7 100644 --- a/awx/main/scheduler/partial.py +++ b/awx/main/scheduler/partial.py @@ -5,6 +5,7 @@ from awx.main.models import ( ProjectUpdate, InventoryUpdate, InventorySource, + SystemJob, ) class PartialModelDict(object): @@ -171,3 +172,23 @@ class InventorySourceDict(PartialModelDict): 'update_on_launch': True, } return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + +class SystemJobDict(PartialModelDict): + FIELDS = ( + 'id', 'created', 'status', + ) + model = SystemJob + + def get_job_type_str(self): + return 'system_job' + + def task_impact(self): + return 20 + + @classmethod + def filter_partial(cls, status=[]): + kv = { + 'status__in': status + } + return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + From 9802b1f379f8b3b303370467bd908988f4389e61 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 25 Oct 2016 10:11:11 -0400 Subject: [PATCH 63/77] AdHocCommand support added to task manager --- awx/main/scheduler/__init__.py | 11 ++++------- awx/main/scheduler/dependency_graph.py | 
14 +++++++++++--- awx/main/scheduler/partial.py | 13 +++++++++++++ awx/main/tasks.py | 2 +- 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 67c0c81a6a..ca79fb8aca 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -25,6 +25,7 @@ from awx.main.scheduler.partial import ( InventoryUpdateLatestDict, InventorySourceDict, SystemJobDict, + AdHocCommandDict, ) # Celery @@ -42,15 +43,11 @@ class Scheduler(): status_list = ('pending', 'waiting', 'running') jobs = JobDict.filter_partial(status=status_list) - ''' - graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(**kv)] - ''' inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) project_updates = ProjectUpdateDict.filter_partial(status=status_list) system_jobs = SystemJobDict.filter_partial(status=status_list) + ad_hoc_commands = AdHocCommandDict.filter_partial(status=status_list) ''' - graph_system_jobs = [sj for sj in - SystemJob.objects.filter(**kv)] graph_workflow_jobs = [wf for wf in WorkflowJob.objects.filter(**kv)] all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + @@ -58,7 +55,7 @@ class Scheduler(): graph_workflow_jobs, key=lambda task: task.created) ''' - all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs, + all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands, key=lambda task: task['created']) return all_actions @@ -236,7 +233,7 @@ class Scheduler(): map(lambda task: self.graph.add_latest_inventory_update(task), latest_inventory_updates) def process_inventory_sources(self, inventory_id_sources): - map(lambda inventory_id, inventory_sources: self.graph.add_inventory_sources(inventory_id, inventory_sources), inventory_id_sources) + map(lambda (inventory_id, inventory_sources): self.graph.add_inventory_sources(inventory_id, inventory_sources), inventory_id_sources) def process_dependencies(self, dependent_task, dependency_tasks): for task in dependency_tasks: diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py index e71cd6f0a7..3142699077 100644 --- a/awx/main/scheduler/dependency_graph.py +++ b/awx/main/scheduler/dependency_graph.py @@ -6,6 +6,7 @@ from awx.main.scheduler.partial import ( ProjectUpdateDict, InventoryUpdateDict, SystemJobDict, + AdHocCommandDict, ) class DependencyGraph(object): PROJECT_UPDATES = 'project_updates' @@ -140,8 +141,8 @@ class DependencyGraph(object): def can_project_update_run(self, job): return self.data[self.PROJECT_UPDATES].get(job['project_id'], True) - def can_inventory_update_run(self, inventory_source_id): - return self.data[self.INVENTORY_SOURCE_UPDATES].get(inventory_source_id, True) + def can_inventory_update_run(self, job): + return self.data[self.INVENTORY_SOURCE_UPDATES].get(job['inventory_source_id'], True) def can_job_run(self, job): if self.can_project_update_run(job) is True and \ @@ -155,15 +156,20 @@ class DependencyGraph(object): def can_system_job_run(self): return self.data[self.SYSTEM_JOB] + def can_ad_hoc_command_run(self, job): + return self.data[self.INVENTORY_UPDATES].get(job['inventory_id'], True) + def is_job_blocked(self, job): if type(job) is ProjectUpdateDict: return not self.can_project_update_run(job) elif type(job) is InventoryUpdateDict: - return not self.can_inventory_update_run(job['inventory_source_id']) + return not self.can_inventory_update_run(job) elif type(job) is JobDict: 
return not self.can_job_run(job) elif type(job) is SystemJobDict: return not self.can_system_job_run() + elif type(job) is AdHocCommandDict: + return not self.can_ad_hoc_command_run(job) def add_job(self, job): if type(job) is ProjectUpdateDict: @@ -175,6 +181,8 @@ class DependencyGraph(object): self.mark_job_template_job(job) elif type(job) is SystemJobDict: self.mark_system_job() + elif type(job) is AdHocCommandDict: + self.mark_inventory_update(job['inventory_id']) def add_jobs(self, jobs): map(lambda j: self.add_job(j), jobs) diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py index dddbf763e7..a92c5c7bd6 100644 --- a/awx/main/scheduler/partial.py +++ b/awx/main/scheduler/partial.py @@ -6,6 +6,7 @@ from awx.main.models import ( InventoryUpdate, InventorySource, SystemJob, + AdHocCommand, ) class PartialModelDict(object): @@ -192,3 +193,15 @@ class SystemJobDict(PartialModelDict): } return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] +class AdHocCommandDict(PartialModelDict): + FIELDS = ( + 'id', 'created', 'status', 'inventory_id', + ) + model = AdHocCommand + + def get_job_type_str(self): + return 'ad_hoc_command' + + def task_impact(self): + return 20 + diff --git a/awx/main/tasks.py b/awx/main/tasks.py index b76bcb48a6..fac9dca68f 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -1756,7 +1756,7 @@ class RunAdHocCommand(BaseTask): ''' Hook for actions to run after ad hoc command has completed. ''' - super(RunAdHocCommand, self).post_run_hook(ad_hoc_command, **kwargs) + super(RunAdHocCommand, self).post_run_hook(ad_hoc_command, status, **kwargs) class RunSystemJob(BaseTask): From fd8c641fa558a122c4ece500ae45cb88052a03e1 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 25 Oct 2016 13:37:31 -0400 Subject: [PATCH 64/77] flake8 fixes --- awx/main/models/unified_jobs.py | 1 - awx/main/tests/unit/scheduler/conftest.py | 1 + .../tests/unit/scheduler/test_scheduler_job.py | 10 ---------- .../scheduler/test_scheduler_project_update.py | 14 -------------- 4 files changed, 1 insertion(+), 25 deletions(-) diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index 19bc265c18..b8657431ab 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -843,7 +843,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique ''' Start the task running via Celery. 
'''
-        task_class = self._get_task_class()
         (res, opts) = self.pre_start(**kwargs)
         if res:
             self.start_celery_task(opts, error_callback, success_callback)
diff --git a/awx/main/tests/unit/scheduler/conftest.py b/awx/main/tests/unit/scheduler/conftest.py
index d8a71d456e..2fd84474f7 100644
--- a/awx/main/tests/unit/scheduler/conftest.py
+++ b/awx/main/tests/unit/scheduler/conftest.py
@@ -30,6 +30,7 @@ def scheduler_factory(mocker, epoch):
 
         def no_create_inventory_update(task, ignore):
             raise RuntimeError("create_inventory_update should not be called")
+
         def no_create_project_update(task):
             raise RuntimeError("create_project_update should not be called")
 
diff --git a/awx/main/tests/unit/scheduler/test_scheduler_job.py b/awx/main/tests/unit/scheduler/test_scheduler_job.py
index 37af2ead05..735ce04d95 100644
--- a/awx/main/tests/unit/scheduler/test_scheduler_job.py
+++ b/awx/main/tests/unit/scheduler/test_scheduler_job.py
@@ -3,16 +3,6 @@
 import pytest
 from datetime import timedelta
 
-# awx
-from awx.main.scheduler.partial import (
-    JobDict,
-    ProjectUpdateDict,
-)
-
-# TODO: wherever get_latest_project_update_task() is stubbed and returns a
-# ProjectUpdateDict. We should instead return a ProjectUpdateLatestDict()
-# For now, this is ok since the fields don't deviate that much.
-
 class TestJobBlocked():
     def test_inventory_update_waiting(self, scheduler_factory, waiting_inventory_update, pending_job):
         scheduler = scheduler_factory(tasks=[waiting_inventory_update, pending_job])
diff --git a/awx/main/tests/unit/scheduler/test_scheduler_project_update.py b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
index e0fcbc3b1e..8122d93c09 100644
--- a/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
+++ b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py
@@ -1,18 +1,4 @@
 
-# Python
-import pytest
-from datetime import timedelta
-
-# Django
-from django.utils.timezone import now as tz_now
-
-# awx
-from awx.main.scheduler.partial import (
-    JobDict,
-    ProjectUpdateDict,
-)
-from awx.main.scheduler import Scheduler
-
 # TODO: wherever get_latest_project_update_task() is stubbed and returns a
 # ProjectUpdateDict. We should instead return a ProjectUpdateLatestDict()
 # For now, this is ok since the fields don't deviate that much.
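The next patch turns WorkflowJob into a purely virtual job: it moves through the same scheduling and status bookkeeping as every other unified job, but its start_celery_task() override returns None, so nothing is ever queued to Celery for it; spawn_workflow_graph_jobs() advances its nodes instead. The dispatch reduces to roughly this (a condensed sketch of the two overrides, not the full classes):

    class UnifiedJobSketch(object):
        # Real unified jobs hand off to Celery once pre_start() passes.
        def start_celery_task(self, opts, error_callback, success_callback):
            task_class = self._get_task_class()
            task_class().apply_async((self.pk,), opts,
                                     link_error=error_callback,
                                     link=success_callback)

    class WorkflowJobSketch(UnifiedJobSketch):
        # A workflow job is virtual: the task manager marks it 'running',
        # but no celery task exists; its graph nodes are spawned separately.
        def start_celery_task(self, opts, error_callback, success_callback):
            return None
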
From 4ef4b4709b25a43809b57073aac3caf9c11485e3 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 26 Oct 2016 14:34:13 -0400 Subject: [PATCH 65/77] workflow execution added --- awx/main/models/workflow.py | 17 ++--- awx/main/scheduler/__init__.py | 89 +++++++++-------------- awx/main/scheduler/dependency_graph.py | 14 ++++ awx/main/scheduler/partial.py | 13 ++++ awx/main/tests/unit/scheduler/conftest.py | 1 + requirements/requirements.txt | 1 + 6 files changed, 68 insertions(+), 67 deletions(-) diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py index a4f02deef2..318d32ff48 100644 --- a/awx/main/models/workflow.py +++ b/awx/main/models/workflow.py @@ -393,11 +393,6 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, JobNotificationMixin, Workflow def _get_parent_field_name(cls): return 'workflow_job_template' - @classmethod - def _get_task_class(cls): - from awx.main.tasks import RunWorkflowJob - return RunWorkflowJob - def _has_failed(self): return False @@ -426,11 +421,9 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, JobNotificationMixin, Workflow def get_notification_friendly_name(self): return "Workflow Job" - def start(self, *args, **kwargs): - (res, opts) = self.pre_start(**kwargs) - if res: - self.status = 'running' - self.save() - self.websocket_emit_status("running") - return res + ''' + A WorkflowJob is a virtual job. It doesn't result in a celery task. + ''' + def start_celery_task(self, opts, error_callback, success_callback): + return None diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index ca79fb8aca..cf5fbecddc 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -26,6 +26,7 @@ from awx.main.scheduler.partial import ( InventorySourceDict, SystemJobDict, AdHocCommandDict, + WorkflowJobDict, ) # Celery @@ -47,15 +48,9 @@ class Scheduler(): project_updates = ProjectUpdateDict.filter_partial(status=status_list) system_jobs = SystemJobDict.filter_partial(status=status_list) ad_hoc_commands = AdHocCommandDict.filter_partial(status=status_list) - ''' - graph_workflow_jobs = [wf for wf in - WorkflowJob.objects.filter(**kv)] - all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + - graph_project_updates + graph_system_jobs + - graph_workflow_jobs, - key=lambda task: task.created) - ''' - all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands, + workflow_jobs = WorkflowJobDict.filter_partial(status=status_list) + + all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands + workflow_jobs, key=lambda task: task['created']) return all_actions @@ -111,7 +106,7 @@ class Scheduler(): job.status = 'failed' job.job_explanation = "Workflow job could not start because it was not in the right state or required manual credentials" job.save(update_fields=['status', 'job_explanation']) - job.websocket_emit_status("failed") + connection.on_commit(lambda: job.websocket_emit_status('failed')) # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? 
#emit_websocket_notification('/socket.io/jobs', '', dict(id=)) @@ -122,12 +117,9 @@ class Scheduler(): dag = WorkflowDAG(workflow_job) if dag.is_workflow_done(): # TODO: detect if wfj failed - if workflow_job._has_failed(): - workflow_job.status = 'failed' - else: - workflow_job.status = 'successful' + workflow_job.status = 'completed' workflow_job.save() - workflow_job.websocket_emit_status(workflow_job.status) + connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status)) def get_activate_tasks(self): inspector = inspect() @@ -153,6 +145,8 @@ class Scheduler(): def start_task(self, task, dependent_tasks=[]): from awx.main.tasks import handle_work_error, handle_work_success + status_changed = False + # TODO: spawn inventory and project updates task_actual = { 'type':task.get_job_type_str(), @@ -164,21 +158,36 @@ class Scheduler(): success_handler = handle_work_success.s(task_actual=task_actual) job_obj = task.get_full() - job_obj.status = 'waiting' + if job_obj.status == 'pending': + status_changed = True + job_obj.status = 'waiting' (start_status, opts) = job_obj.pre_start() if not start_status: + status_changed = True job_obj.status = 'failed' if job_obj.job_explanation: job_obj.job_explanation += ' ' job_obj.job_explanation += 'Task failed pre-start check.' job_obj.save() # TODO: run error handler to fail sub-tasks and send notifications - return + else: + if type(job_obj) is WorkflowJob: + job_obj.status = 'running' + status_changed = True - self.consume_capacity(task) + if status_changed is True: + job_obj.save() - connection.on_commit(lambda: job_obj.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler)) + self.consume_capacity(task) + + def post_commit(): + if status_changed: + job_obj.websocket_emit_status(job_obj.status) + if job_obj.status != 'failed': + job_obj.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler) + + connection.on_commit(post_commit) def process_runnable_tasks(self, runnable_tasks): for i, task in enumerate(runnable_tasks): @@ -281,7 +290,7 @@ class Scheduler(): 'Celery, so it has been marked as failed.', )) task_obj.save() - task_obj.websocket_emit_status("failed") + connection.on_commit(lambda: task_obj.websocket_emit_status('failed')) all_sorted_tasks.pop(i) logger.error("Task %s appears orphaned... 
marking as failed" % task) @@ -323,28 +332,6 @@ class Scheduler(): pending_tasks = filter(lambda t: t['status'] == 'pending', all_sorted_tasks) self.process_pending_tasks(pending_tasks) - - ''' - def do_graph_things(): - # Rebuild graph - graph = SimpleDAG() - for task in running_tasks: - graph.add_node(task) - #for wait_task in waiting_tasks[:50]: - for wait_task in waiting_tasks: - node_dependencies = [] - for node in graph: - if wait_task.is_blocked_by(node['node_object']): - node_dependencies.append(node['node_object']) - graph.add_node(wait_task) - for dependency in node_dependencies: - graph.add_edge(wait_task, dependency) - if settings.DEBUG: - graph.generate_graphviz_plot() - return graph - ''' - #return do_graph_things() - def _schedule(self): all_sorted_tasks = self.get_tasks() if len(all_sorted_tasks) > 0: @@ -359,23 +346,21 @@ class Scheduler(): inventory_id_sources = self.get_inventory_source_tasks(all_sorted_tasks) self.process_inventory_sources(inventory_id_sources) - self.process_tasks(all_sorted_tasks) + running_workflow_tasks = self.get_running_workflow_jobs() + self.process_finished_workflow_jobs(running_workflow_tasks) - #print("Finished schedule()") + self.spawn_workflow_graph_jobs(running_workflow_tasks) + + self.process_tasks(all_sorted_tasks) def schedule(self): with transaction.atomic(): - #t1 = datetime.now() # Lock try: Instance.objects.select_for_update(nowait=True).all()[0] except DatabaseError: return - #workflow_jobs = get_running_workflow_jobs() - #process_finished_workflow_jobs(workflow_jobs) - #spawn_workflow_graph_jobs(workflow_jobs) - ''' Get tasks known by celery ''' @@ -387,10 +372,4 @@ class Scheduler(): ''' self._schedule() - # Unlock, due to transaction ending - #t2 = datetime.now() - #t_diff = t2 - t1 - #print("schedule() time %s" % (t_diff.total_seconds())) - - diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py index 3142699077..edd49c98a9 100644 --- a/awx/main/scheduler/dependency_graph.py +++ b/awx/main/scheduler/dependency_graph.py @@ -7,6 +7,7 @@ from awx.main.scheduler.partial import ( InventoryUpdateDict, SystemJobDict, AdHocCommandDict, + WorkflowJobDict, ) class DependencyGraph(object): PROJECT_UPDATES = 'project_updates' @@ -14,6 +15,7 @@ class DependencyGraph(object): JOB_TEMPLATE_JOBS = 'job_template_jobs' SYSTEM_JOB = 'system_job' INVENTORY_SOURCE_UPDATES = 'inventory_source_updates' + WORKFLOW_JOB_TEMPLATES_JOBS = 'workflow_job_template_jobs' LATEST_PROJECT_UPDATES = 'latest_project_updates' LATEST_INVENTORY_UPDATES = 'latest_inventory_updates' @@ -32,6 +34,8 @@ class DependencyGraph(object): self.data[self.INVENTORY_SOURCE_UPDATES] = {} # True / False self.data[self.SYSTEM_JOB] = True + # workflow_job_template_id -> True / False + self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {} # project_id -> latest ProjectUpdateLatestDict self.data[self.LATEST_PROJECT_UPDATES] = {} @@ -138,6 +142,9 @@ class DependencyGraph(object): self.data[self.PROJECT_UPDATES][job['project_id']] = False self.data[self.JOB_TEMPLATE_JOBS][job['job_template_id']] = False + def mark_workflow_job(self, job): + self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS][job['workflow_job_template_id']] = False + def can_project_update_run(self, job): return self.data[self.PROJECT_UPDATES].get(job['project_id'], True) @@ -153,6 +160,9 @@ class DependencyGraph(object): return True return False + def can_workflow_job_run(self, job): + return self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS].get(job['workflow_job_template_id'], True) + def 
can_system_job_run(self): return self.data[self.SYSTEM_JOB] @@ -170,6 +180,8 @@ class DependencyGraph(object): return not self.can_system_job_run() elif type(job) is AdHocCommandDict: return not self.can_ad_hoc_command_run(job) + elif type(job) is WorkflowJobDict: + return not self.can_workflow_job_run(job) def add_job(self, job): if type(job) is ProjectUpdateDict: @@ -179,6 +191,8 @@ class DependencyGraph(object): self.mark_inventory_source_update(job['inventory_source_id']) elif type(job) is JobDict: self.mark_job_template_job(job) + elif type(job) is WorkflowJobDict: + self.mark_workflow_job(job) elif type(job) is SystemJobDict: self.mark_system_job() elif type(job) is AdHocCommandDict: diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py index a92c5c7bd6..576b66a9c3 100644 --- a/awx/main/scheduler/partial.py +++ b/awx/main/scheduler/partial.py @@ -7,6 +7,7 @@ from awx.main.models import ( InventorySource, SystemJob, AdHocCommand, + WorkflowJob, ) class PartialModelDict(object): @@ -205,3 +206,15 @@ class AdHocCommandDict(PartialModelDict): def task_impact(self): return 20 +class WorkflowJobDict(PartialModelDict): + FIELDS = ( + 'id', 'created', 'status', 'workflow_job_template_id', + ) + model = WorkflowJob + + def get_job_type_str(self): + return 'workflow_job' + + def task_impact(self): + return 10 + diff --git a/awx/main/tests/unit/scheduler/conftest.py b/awx/main/tests/unit/scheduler/conftest.py index 2fd84474f7..cec68b1ef7 100644 --- a/awx/main/tests/unit/scheduler/conftest.py +++ b/awx/main/tests/unit/scheduler/conftest.py @@ -35,6 +35,7 @@ def scheduler_factory(mocker, epoch): raise RuntimeError("create_project_update should not be called") mocker.patch.object(sched, 'get_tasks', return_value=tasks) + mocker.patch.object(sched, 'get_running_workflow_jobs', return_value=[]) mocker.patch.object(sched, 'get_inventory_source_tasks', return_value=inventory_sources) mocker.patch.object(sched, 'get_latest_project_update_tasks', return_value=latest_project_updates) mocker.patch.object(sched, 'get_latest_inventory_update_tasks', return_value=latest_inventory_updates) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index fb885a8842..5f2448d9e6 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -25,6 +25,7 @@ django-radius==1.0.0 djangorestframework==3.3.2 djangorestframework-yaml==1.0.2 django-split-settings==0.1.1 +django-transaction-hooks==0.2 django-taggit==0.17.6 git+https://github.com/matburt/dm.xmlsec.binding.git@master#egg=dm.xmlsec.binding dogpile.core==0.4.1 From 454b3edb7c96995942716935e43005c25a20abca Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 27 Oct 2016 16:31:47 -0400 Subject: [PATCH 66/77] rectify celery<->db inconsistent running job --- awx/main/scheduler/__init__.py | 51 +++++++++++++++++----------------- awx/main/scheduler/tasks.py | 10 +++---- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index cf5fbecddc..21fe954546 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -54,6 +54,22 @@ class Scheduler(): key=lambda task: task['created']) return all_actions + ''' + Tasks that are running and SHOULD have a celery task. 
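+    Used by process_celery_tasks() to reconcile the database's view of
+    running jobs with the task ids Celery reports as active; running jobs
+    with no matching Celery task are treated as orphans and marked failed.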
+ ''' + def get_running_tasks(self): + status_list = ('running',) + + jobs = JobDict.filter_partial(status=status_list) + inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) + project_updates = ProjectUpdateDict.filter_partial(status=status_list) + system_jobs = SystemJobDict.filter_partial(status=status_list) + ad_hoc_commands = AdHocCommandDict.filter_partial(status=status_list) + + all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands, + key=lambda task: task['created']) + return all_actions + # TODO: Consider a database query for this logic def get_latest_project_update_tasks(self, all_sorted_tasks): project_ids = Set() @@ -121,7 +137,7 @@ class Scheduler(): workflow_job.save() connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status)) - def get_activate_tasks(self): + def get_active_tasks(self): inspector = inspect() if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): active_task_queues = inspector.active() @@ -129,10 +145,10 @@ class Scheduler(): logger.warn("Ignoring celery task inspector") active_task_queues = None - active_tasks = [] + active_tasks = set() if active_task_queues is not None: for queue in active_task_queues: - active_tasks += [at['id'] for at in active_task_queues[queue]] + map(lambda at: active_tasks.add(at['id']), active_task_queues[queue]) else: logger.error("Could not communicate with celery!") # TODO: Something needs to be done here to signal to the system @@ -274,10 +290,11 @@ class Scheduler(): if self.get_remaining_capacity() <= 0: return - def fail_inconsistent_running_jobs(self, active_tasks, all_sorted_tasks): - for i, task in enumerate(all_sorted_tasks): - if task['status'] != 'running': - continue + def process_celery_tasks(self, active_tasks, all_running_sorted_tasks): + ''' + Rectify tower db <-> celery inconsistent view of jobs state + ''' + for task in all_running_sorted_tasks: if (task['celery_task_id'] not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): # NOTE: Pull status again and make sure it didn't finish in @@ -290,20 +307,11 @@ class Scheduler(): 'Celery, so it has been marked as failed.', )) task_obj.save() + print("Going to fail %s" % task_obj.id) connection.on_commit(lambda: task_obj.websocket_emit_status('failed')) - all_sorted_tasks.pop(i) logger.error("Task %s appears orphaned... 
marking as failed" % task) - def process_celery_tasks(self, active_tasks, all_sorted_tasks): - - ''' - Rectify tower db <-> celery inconsistent view of jobs state - ''' - # Check running tasks and make sure they are active in celery - logger.debug("Active celery tasks: " + str(active_tasks)) - all_sorted_tasks = self.fail_inconsistent_running_jobs(active_tasks, - all_sorted_tasks) def calculate_capacity_used(self, tasks): self.capacity_used = 0 @@ -361,15 +369,6 @@ class Scheduler(): except DatabaseError: return - ''' - Get tasks known by celery - ''' - ''' - active_tasks = self.get_activate_tasks() - # Communication with celery failed :(, return - if active_tasks is None: - return None - ''' self._schedule() diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py index ba1ddaeecc..2b35b5ab64 100644 --- a/awx/main/scheduler/tasks.py +++ b/awx/main/scheduler/tasks.py @@ -33,19 +33,19 @@ def run_scheduler(): @task def run_fail_inconsistent_running_jobs(): - return - print("run_fail_inconsistent_running_jobs() running") with transaction.atomic(): # Lock try: Instance.objects.select_for_update(nowait=True).all()[0] scheduler = Scheduler() - active_tasks = scheduler.get_activate_tasks() + active_tasks = scheduler.get_active_tasks() + if active_tasks is None: + # TODO: Failed to contact celery. We should surface this. return None - all_sorted_tasks = scheduler.get_tasks() - scheduler.process_celery_tasks(active_tasks, all_sorted_tasks) + all_running_sorted_tasks = scheduler.get_running_tasks() + scheduler.process_celery_tasks(active_tasks, all_running_sorted_tasks) except DatabaseError: return From 03a484a6a6c52233cfe58b53ba3990d23842c1f2 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 31 Oct 2016 08:01:02 -0500 Subject: [PATCH 67/77] remove old task manager code --- awx/main/models/ad_hoc_commands.py | 38 ---------------- awx/main/models/inventory.py | 10 ----- awx/main/models/jobs.py | 61 +------------------------- awx/main/models/projects.py | 10 ----- awx/main/models/unified_jobs.py | 9 ---- awx/main/models/workflow.py | 3 -- awx/main/tests/functional/test_jobs.py | 34 -------------- 7 files changed, 1 insertion(+), 164 deletions(-) diff --git a/awx/main/models/ad_hoc_commands.py b/awx/main/models/ad_hoc_commands.py index 65f40427b0..aadd34c190 100644 --- a/awx/main/models/ad_hoc_commands.py +++ b/awx/main/models/ad_hoc_commands.py @@ -4,7 +4,6 @@ # Python import datetime import hmac -import json import logging from urlparse import urljoin @@ -24,7 +23,6 @@ from jsonfield import JSONField # AWX from awx.main.models.base import * # noqa from awx.main.models.unified_jobs import * # noqa -from awx.main.utils import decrypt_field from awx.main.models.notifications import JobNotificationMixin logger = logging.getLogger('awx.main.models.ad_hoc_commands') @@ -181,13 +179,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin): def get_passwords_needed_to_start(self): return self.passwords_needed_to_start - def is_blocked_by(self, obj): - from awx.main.models import InventoryUpdate - if type(obj) == InventoryUpdate: - if self.inventory == obj.inventory_source.inventory: - return True - return False - @property def task_impact(self): # NOTE: We sorta have to assume the host count matches and that forks default to 5 @@ -195,35 +186,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin): count_hosts = Host.objects.filter( enabled=True, inventory__ad_hoc_commands__pk=self.pk).count() return min(count_hosts, 5 if self.forks == 0 else self.forks) * 10 - def 
generate_dependencies(self, active_tasks): - from awx.main.models import InventoryUpdate - if not self.inventory: - return [] - inventory_sources = self.inventory.inventory_sources.filter( update_on_launch=True) - inventory_sources_found = [] - dependencies = [] - for obj in active_tasks: - if type(obj) == InventoryUpdate: - if obj.inventory_source in inventory_sources: - inventory_sources_found.append(obj.inventory_source) - # Skip updating any inventory sources that were already updated before - # running this job (via callback inventory refresh). - try: - start_args = json.loads(decrypt_field(self, 'start_args')) - except Exception: - start_args = None - start_args = start_args or {} - inventory_sources_already_updated = start_args.get('inventory_sources_already_updated', []) - if inventory_sources_already_updated: - for source in inventory_sources.filter(pk__in=inventory_sources_already_updated): - if source not in inventory_sources_found: - inventory_sources_found.append(source) - if inventory_sources.count(): # and not has_setup_failures? Probably handled as an error scenario in the task runner - for source in inventory_sources: - if source not in inventory_sources_found and source.needs_update_on_launch: - dependencies.append(source.create_inventory_update(launch_type='dependency')) - return dependencies - def copy(self): data = {} for field in ('job_type', 'inventory_id', 'limit', 'credential_id', diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index c77868759e..662b1702f9 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -22,7 +22,6 @@ from awx.main.constants import CLOUD_PROVIDERS from awx.main.fields import AutoOneToOneField, ImplicitRoleField from awx.main.managers import HostManager from awx.main.models.base import * # noqa -from awx.main.models.jobs import Job from awx.main.models.unified_jobs import * # noqa from awx.main.models.mixins import ResourceMixin from awx.main.models.notifications import ( @@ -1250,15 +1249,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin): def get_ui_url(self): return urljoin(settings.TOWER_URL_BASE, "/#/inventory_sync/{}".format(self.pk)) - def is_blocked_by(self, obj): - if type(obj) == InventoryUpdate: - if self.inventory_source.inventory == obj.inventory_source.inventory: - return True - if type(obj) == Job: - if self.inventory_source.inventory == obj.inventory: - return True - return False - @property def task_impact(self): return 50 diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index 3377968eba..8ed723626b 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -33,7 +33,7 @@ from awx.main.models.notifications import ( NotificationTemplate, JobNotificationMixin, ) -from awx.main.utils import decrypt_field, ignore_inventory_computed_fields +from awx.main.utils import ignore_inventory_computed_fields from awx.main.redact import PlainTextCleaner from awx.main.fields import ImplicitRoleField from awx.main.models.mixins import ResourceMixin @@ -646,29 +646,6 @@ class Job(UnifiedJob, JobOptions, JobNotificationMixin): kwargs['job_host_summaries__job__pk'] = self.pk return Host.objects.filter(**kwargs) - def is_blocked_by(self, obj): - from awx.main.models import InventoryUpdate, ProjectUpdate - if type(obj) == Job: - if obj.job_template is not None and obj.inventory is not None: - if obj.job_template == self.job_template and \ - obj.inventory == self.inventory: - if self.allow_simultaneous: - return False - if obj.launch_type == 
'callback' and self.launch_type == 'callback' and \ - obj.limit != self.limit: - return False - return True - return False - if type(obj) == InventoryUpdate: - if self.inventory == obj.inventory_source.inventory: - return True - return False - if type(obj) == ProjectUpdate: - if obj.project == self.project: - return True - return False - return False - @property def task_impact(self): # NOTE: We sorta have to assume the host count matches and that forks default to 5 @@ -707,39 +684,6 @@ class Job(UnifiedJob, JobOptions, JobNotificationMixin): def processed_hosts(self): return self._get_hosts(job_host_summaries__processed__gt=0) - def generate_dependencies(self, active_tasks): - from awx.main.models import InventoryUpdate, ProjectUpdate - inventory_sources = self.inventory.inventory_sources.filter(update_on_launch=True) - project_found = False - inventory_sources_found = [] - dependencies = [] - for obj in active_tasks: - if type(obj) == ProjectUpdate and self.project is not None: - if obj.project == self.project: - project_found = True - if type(obj) == InventoryUpdate: - if obj.inventory_source in inventory_sources: - inventory_sources_found.append(obj.inventory_source) - # Skip updating any inventory sources that were already updated before - # running this job (via callback inventory refresh). - try: - start_args = json.loads(decrypt_field(self, 'start_args')) - except Exception: - start_args = None - start_args = start_args or {} - inventory_sources_already_updated = start_args.get('inventory_sources_already_updated', []) - if inventory_sources_already_updated: - for source in inventory_sources.filter(pk__in=inventory_sources_already_updated): - if source not in inventory_sources_found: - inventory_sources_found.append(source) - if not project_found and self.project is not None and self.project.needs_update_on_launch: - dependencies.append(self.project.create_project_update(launch_type='dependency')) - if inventory_sources.count(): # and not has_setup_failures? 
Probably handled as an error scenario in the task runner - for source in inventory_sources: - if source not in inventory_sources_found and source.needs_update_on_launch: - dependencies.append(source.create_inventory_update(launch_type='dependency')) - return dependencies - def notification_data(self, block=5): data = super(Job, self).notification_data() all_hosts = {} @@ -1526,9 +1470,6 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin): def get_ui_url(self): return urljoin(settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk)) - def is_blocked_by(self, obj): - return True - def handle_extra_data(self, extra_data): extra_vars = {} if isinstance(extra_data, dict): diff --git a/awx/main/models/projects.py b/awx/main/models/projects.py index 4c20e01e08..c3763ff34f 100644 --- a/awx/main/models/projects.py +++ b/awx/main/models/projects.py @@ -22,7 +22,6 @@ from django.utils.timezone import now, make_aware, get_default_timezone # AWX from awx.main.models.base import * # noqa -from awx.main.models.jobs import Job from awx.main.models.notifications import ( NotificationTemplate, JobNotificationMixin, @@ -424,15 +423,6 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin): from awx.main.tasks import RunProjectUpdate return RunProjectUpdate - def is_blocked_by(self, obj): - if type(obj) == ProjectUpdate: - if self.project == obj.project: - return True - if type(obj) == Job: - if self.project == obj.project: - return True - return False - def websocket_emit_data(self): return dict(project_id=self.project.id) diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index b8657431ab..fa989ad60c 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -778,10 +778,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique def task_impact(self): raise NotImplementedError # Implement in subclass. - def is_blocked_by(self, task_object): - ''' Given another task object determine if this task would be blocked by it ''' - raise NotImplementedError # Implement in subclass. 
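        # NOTE: the is_blocked_by() contract removed here now lives in the
        # scheduler package; DependencyGraph.is_job_blocked() decides blocking,
        # and the scheduler's generate_dependencies() spawns the project and
        # inventory updates that the models used to generate for themselves.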
- def websocket_emit_data(self): ''' Return extra data that should be included when submitting data to the browser over the websocket connection ''' return {} @@ -792,11 +788,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique status_data['group_name'] = 'jobs' emit_channel_notification('jobs-status_changed', status_data) - def generate_dependencies(self, active_tasks): - ''' Generate any tasks that the current task might be dependent on given a list of active - tasks that might preclude creating one''' - return [] - def notification_data(self): return dict(id=self.id, name=self.name, diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py index 318d32ff48..b267343ea3 100644 --- a/awx/main/models/workflow.py +++ b/awx/main/models/workflow.py @@ -406,9 +406,6 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, JobNotificationMixin, Workflow #def get_ui_url(self): # return urlparse.urljoin(tower_settings.TOWER_URL_BASE, "/#/workflow_jobs/{}".format(self.pk)) - def is_blocked_by(self, obj): - return True - @property def task_impact(self): return 0 diff --git a/awx/main/tests/functional/test_jobs.py b/awx/main/tests/functional/test_jobs.py index 83302e7400..55b5d428c9 100644 --- a/awx/main/tests/functional/test_jobs.py +++ b/awx/main/tests/functional/test_jobs.py @@ -2,40 +2,6 @@ from awx.main.models import Job import pytest -@pytest.mark.django_db -def test_job_blocking(get, post, job_template, inventory, inventory_factory): - j1 = Job.objects.create(job_template=job_template, - inventory=inventory) - j2 = Job.objects.create(job_template=job_template, - inventory=inventory) - assert j1.is_blocked_by(j2) - j2.inventory = inventory_factory(name='test-different-inventory') - assert not j1.is_blocked_by(j2) - j_callback_1 = Job.objects.create(job_template=job_template, - inventory=inventory, - launch_type='callback', - limit='a') - j_callback_2 = Job.objects.create(job_template=job_template, - inventory=inventory, - launch_type='callback', - limit='a') - assert j_callback_1.is_blocked_by(j_callback_2) - j_callback_2.limit = 'b' - assert not j_callback_1.is_blocked_by(j_callback_2) - -@pytest.mark.django_db -def test_job_blocking_allow_simul(get, post, job_template, inventory): - job_template.allow_simultaneous = True - j1 = Job.objects.create(job_template=job_template, - inventory=inventory) - j2 = Job.objects.create(job_template=job_template, - inventory=inventory) - assert not j1.is_blocked_by(j2) - assert not j2.is_blocked_by(j1) - job_template.allow_simultaneous = False - assert j1.is_blocked_by(j2) - assert j2.is_blocked_by(j1) - @pytest.mark.django_db def test_orphan_unified_job_creation(instance, inventory): job = Job.objects.create(job_template=None, inventory=inventory, name='hi world') From 0f98e1edec2805682de25a8133e2a6159489f14c Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 31 Oct 2016 09:26:27 -0500 Subject: [PATCH 68/77] remove todo's --- awx/main/scheduler/__init__.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 21fe954546..99bc87917c 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -108,7 +108,6 @@ class Scheduler(): return results def spawn_workflow_graph_jobs(self, workflow_jobs): - # TODO: Consider using transaction.atomic for workflow_job in workflow_jobs: dag = WorkflowDAG(workflow_job) spawn_nodes = dag.bfs_nodes_to_run() @@ -150,9 +149,6 @@ class Scheduler(): for queue in active_task_queues: map(lambda 
at: active_tasks.add(at['id']), active_task_queues[queue]) else: - logger.error("Could not communicate with celery!") - # TODO: Something needs to be done here to signal to the system - # as a whole that celery appears to be down. if not hasattr(settings, 'CELERY_UNIT_TEST'): return None @@ -163,7 +159,6 @@ class Scheduler(): status_changed = False - # TODO: spawn inventory and project updates task_actual = { 'type':task.get_job_type_str(), 'id': task['id'], @@ -214,7 +209,6 @@ class Scheduler(): def create_project_update(self, task): dep = Project.objects.get(id=task['project_id']).create_project_update(launch_type='dependency') - # TODO: Consider using milliseconds or microseconds # Project created 1 seconds behind dep.created = task['created'] - timedelta(seconds=1) dep.status = 'waiting' @@ -329,7 +323,6 @@ class Scheduler(): def process_tasks(self, all_sorted_tasks): - # TODO: Process new tasks running_tasks = filter(lambda t: t['status'] == 'running', all_sorted_tasks) runnable_tasks = filter(lambda t: t['status'] in ['waiting', 'running'], all_sorted_tasks) From ed37e68c53f1ad7a037da94e622f6f0dee7850fe Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 31 Oct 2016 14:16:59 -0500 Subject: [PATCH 69/77] run dependencies when capacity is available --- awx/main/scheduler/__init__.py | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 99bc87917c..0f551997bf 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -157,8 +157,6 @@ class Scheduler(): def start_task(self, task, dependent_tasks=[]): from awx.main.tasks import handle_work_error, handle_work_success - status_changed = False - task_actual = { 'type':task.get_job_type_str(), 'id': task['id'], @@ -169,13 +167,10 @@ class Scheduler(): success_handler = handle_work_success.s(task_actual=task_actual) job_obj = task.get_full() - if job_obj.status == 'pending': - status_changed = True - job_obj.status = 'waiting' + job_obj.status = 'waiting' (start_status, opts) = job_obj.pre_start() if not start_status: - status_changed = True job_obj.status = 'failed' if job_obj.job_explanation: job_obj.job_explanation += ' ' @@ -185,33 +180,27 @@ class Scheduler(): else: if type(job_obj) is WorkflowJob: job_obj.status = 'running' - status_changed = True - if status_changed is True: - job_obj.save() + job_obj.save() self.consume_capacity(task) def post_commit(): - if status_changed: - job_obj.websocket_emit_status(job_obj.status) + job_obj.websocket_emit_status(job_obj.status) if job_obj.status != 'failed': job_obj.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler) connection.on_commit(post_commit) def process_runnable_tasks(self, runnable_tasks): - for i, task in enumerate(runnable_tasks): - # TODO: maybe batch process new tasks. 
- # Processing a new task individually seems to be expensive - self.graph.add_job(task) + map(lambda task: self.graph.add_job(task), runnable_tasks) def create_project_update(self, task): dep = Project.objects.get(id=task['project_id']).create_project_update(launch_type='dependency') # Project created 1 seconds behind dep.created = task['created'] - timedelta(seconds=1) - dep.status = 'waiting' + dep.status = 'pending' dep.save() project_task = ProjectUpdateDict.get_partial(dep.id) @@ -222,7 +211,7 @@ class Scheduler(): dep = InventorySource.objects.get(id=inventory_source_task['id']).create_inventory_update(launch_type='dependency') dep.created = task['created'] - timedelta(seconds=2) - dep.status = 'waiting' + dep.status = 'pending' dep.save() inventory_task = InventoryUpdateDict.get_partial(dep.id) @@ -267,6 +256,9 @@ class Scheduler(): def process_pending_tasks(self, pending_tasks): for task in pending_tasks: + # Stop processing tasks if we know we are out of capacity + if self.get_remaining_capacity() <= 0: + return if not self.graph.is_job_blocked(task): dependencies = self.generate_dependencies(task) @@ -280,10 +272,6 @@ class Scheduler(): else: self.graph.add_job(task) - # Stop processing tasks if we know we are out of capacity - if self.get_remaining_capacity() <= 0: - return - def process_celery_tasks(self, active_tasks, all_running_sorted_tasks): ''' Rectify tower db <-> celery inconsistent view of jobs state @@ -329,8 +317,8 @@ class Scheduler(): self.calculate_capacity_used(running_tasks) self.process_runnable_tasks(runnable_tasks) - - pending_tasks = filter(lambda t: t['status'] == 'pending', all_sorted_tasks) + + pending_tasks = filter(lambda t: t['status'] in 'pending', all_sorted_tasks) self.process_pending_tasks(pending_tasks) def _schedule(self): From c4a3b604f8fe3c1f53d912ade04adf983a215622 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 31 Oct 2016 15:07:14 -0500 Subject: [PATCH 70/77] add back in alan's workflow fail detection --- awx/main/scheduler/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 0f551997bf..8c9679eaec 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -131,8 +131,10 @@ class Scheduler(): for workflow_job in workflow_jobs: dag = WorkflowDAG(workflow_job) if dag.is_workflow_done(): - # TODO: detect if wfj failed - workflow_job.status = 'completed' + if workflow_job._has_failed(): + workflow_job.status = 'failed' + else: + workflow_job.status = 'successful' workflow_job.save() connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status)) From 6efa468f2973eefd56048bba7a748ef8caca799a Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 1 Nov 2016 07:41:18 -0500 Subject: [PATCH 71/77] init task manager architecture docs --- docs/task_manager_system.md | 57 +++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 docs/task_manager_system.md diff --git a/docs/task_manager_system.md b/docs/task_manager_system.md new file mode 100644 index 0000000000..92b8e35880 --- /dev/null +++ b/docs/task_manager_system.md @@ -0,0 +1,57 @@ +# Task Manager Overview + +The task manager is responsible for deciding when jobs should be introduced to celery for running. When choosing a task to run the considerations are: (1) creation time, (2) job dependency, (3) capacity. + +Independent jobs are ran in order of creation time, earliest first. 
Jobs with dependencies are also run in creation time order within their group of dependencies. Capacity is the final consideration when deciding to release a job to be run by celery.
+
+## Task Manager Architecture
+
+The task manager has a single entry point, `Scheduler().schedule()`. The method may be called in parallel, at any time, as many times as the user wants. The `schedule()` function first tries to acquire a single, global lock using the first record of the Instance table in the database. If the lock cannot be acquired, the method returns; the failure to acquire the lock indicates that another instance is currently running `schedule()`.
+
+### Hybrid Scheduler: Periodic + Event
+The `schedule()` function is run (a) periodically by a celery task and (b) on job creation or completion. The task manager system would behave correctly if run exclusively via (a) or (b). We chose to trigger `schedule()` via both mechanisms because of the following nice properties: (b) reduces the time from launch to running, resulting in a better user experience, while (a) is a fail-safe in case we miss code paths, present or future, that change the 3 scheduling considerations and thus should call `schedule()` (e.g. adding new nodes to tower changes the capacity, or obscure job error handling fails a job). Empirically, the periodic task manager has served us well in the past, and we will continue to rely on it alongside the added event-triggered `schedule()`.
+
+### Scheduler Algorithm
+* Get all non-completed jobs, `all_tasks`
+* Generate the hash tables from `all_tasks`:
+  * `` indicates a job is running
+  * `` indicates a project update is running
+  * `` indicates a job template or inventory update is running
+  * `` indicates an inventory update is running
+  * `` indicates a workflow job is running
+  * `` used to determine cache timeout
+  * `` used to determine cache timeout and dependencies to spawn
+  * `` used to determine cache timeout
+* Detect finished workflow jobs
+* Spawn the next workflow jobs, if needed
+* For each pending job, oldest created first, stopping once remaining capacity reaches 0:
+  * If the job is not blocked (determined using the generated hash tables) and there is capacity, mark the job as `waiting` and submit it to celery.
+
+### Job Lifecycle
+| Job Status | State |
+|:----------:|:------------------------------------------------------------------------------------------------------------------:|
+| pending | Job launched. <br> 1. Hasn't yet been seen by the scheduler <br> 2. Is blocked by another task <br> 
3. Not enough capacity | +| waiting | Job submitted to celery. | +| running | Job running in celery. | +| successful | Job finished with ansible-playbook return code 0. | +| failed | Job finished with ansible-playbook return code other than 0. | +| error | System failure. | + +## todo + +## Code Composition +* partials +* + +## Acceptance Tests +* assemelate with .md and trim the fat https://docs.google.com/a/redhat.com/document/d/1AOvKiTMSV0A2RHykHW66BZKBuaJ_l0SJ-VbMwvu-5Gk/edit?usp=sharing + + + + + + + + + From 87dd91e849161d57c6c190b6f61d69f74916c936 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 1 Nov 2016 09:52:54 -0500 Subject: [PATCH 72/77] rename Scheduler to TaskManager --- awx/main/scheduler/__init__.py | 2 +- awx/main/scheduler/tasks.py | 10 +++++----- awx/main/tests/unit/scheduler/conftest.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 8c9679eaec..c0a680c7d4 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -34,7 +34,7 @@ from celery.task.control import inspect logger = logging.getLogger('awx.main.scheduler') -class Scheduler(): +class TaskManager(): def __init__(self): self.graph = DependencyGraph() self.capacity_total = 200 diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py index 2b35b5ab64..eb9c7691a7 100644 --- a/awx/main/scheduler/tasks.py +++ b/awx/main/scheduler/tasks.py @@ -11,7 +11,7 @@ from celery import task # AWX from awx.main.models import Instance -from awx.main.scheduler import Scheduler +from awx.main.scheduler import TaskManager logger = logging.getLogger('awx.main.scheduler') @@ -21,15 +21,15 @@ logger = logging.getLogger('awx.main.scheduler') @task def run_job_launch(job_id): - Scheduler().schedule() + TaskManager().schedule() @task def run_job_complete(job_id): - Scheduler().schedule() + TaskManager().schedule() @task def run_scheduler(): - Scheduler().schedule() + TaskManager().schedule() @task def run_fail_inconsistent_running_jobs(): @@ -37,7 +37,7 @@ def run_fail_inconsistent_running_jobs(): # Lock try: Instance.objects.select_for_update(nowait=True).all()[0] - scheduler = Scheduler() + scheduler = TaskManager() active_tasks = scheduler.get_active_tasks() if active_tasks is None: diff --git a/awx/main/tests/unit/scheduler/conftest.py b/awx/main/tests/unit/scheduler/conftest.py index cec68b1ef7..6b07649fd0 100644 --- a/awx/main/tests/unit/scheduler/conftest.py +++ b/awx/main/tests/unit/scheduler/conftest.py @@ -13,7 +13,7 @@ from awx.main.scheduler.partial import ( InventoryUpdateDict, InventorySourceDict, ) -from awx.main.scheduler import Scheduler +from awx.main.scheduler import TaskManager @pytest.fixture @@ -23,7 +23,7 @@ def epoch(): @pytest.fixture def scheduler_factory(mocker, epoch): def fn(tasks=[], inventory_sources=[], latest_project_updates=[], latest_inventory_updates=[], create_project_update=None, create_inventory_update=None): - sched = Scheduler() + sched = TaskManager() sched.capacity_total = 999999999 sched.graph.get_now = lambda: epoch From 13c89ab78c081066eecfa145fb51f4e283125bd4 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 1 Nov 2016 10:53:14 -0500 Subject: [PATCH 73/77] HAify job schedules and more task_manager renaming --- awx/main/migrations/0046_v310_tower_state.py | 24 +++++++++++++++++ awx/main/models/ha.py | 6 ++++- awx/main/scheduler/tasks.py | 2 +- awx/main/tasks.py | 28 ++++---------------- awx/settings/defaults.py | 7 ++--- requirements/requirements.txt | 
1 + 6 files changed, 40 insertions(+), 28 deletions(-) create mode 100644 awx/main/migrations/0046_v310_tower_state.py diff --git a/awx/main/migrations/0046_v310_tower_state.py b/awx/main/migrations/0046_v310_tower_state.py new file mode 100644 index 0000000000..e9f785e0a6 --- /dev/null +++ b/awx/main/migrations/0046_v310_tower_state.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0045_v310_job_event_stdout'), + ] + + operations = [ + migrations.CreateModel( + name='TowerState', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('schedule_last_run', models.DateTimeField(auto_now_add=True)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/awx/main/models/ha.py b/awx/main/models/ha.py index 3f92aebc12..818233672b 100644 --- a/awx/main/models/ha.py +++ b/awx/main/models/ha.py @@ -5,13 +5,15 @@ from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver +from solo.models import SingletonModel + from awx.main.managers import InstanceManager from awx.main.models.inventory import InventoryUpdate from awx.main.models.jobs import Job from awx.main.models.projects import ProjectUpdate from awx.main.models.unified_jobs import UnifiedJob -__all__ = ('Instance', 'JobOrigin') +__all__ = ('Instance', 'JobOrigin', 'TowerState',) class Instance(models.Model): @@ -33,6 +35,8 @@ class Instance(models.Model): # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing return "tower" +class TowerState(SingletonModel): + schedule_last_run = models.DateTimeField(auto_now_add=True) class JobOrigin(models.Model): """A model representing the relationship between a unified job and diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py index eb9c7691a7..622876a44e 100644 --- a/awx/main/scheduler/tasks.py +++ b/awx/main/scheduler/tasks.py @@ -28,7 +28,7 @@ def run_job_complete(job_id): TaskManager().schedule() @task -def run_scheduler(): +def run_task_manager(): TaskManager().schedule() @task diff --git a/awx/main/tasks.py b/awx/main/tasks.py index fac9dca68f..b6448e3761 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -21,7 +21,6 @@ import traceback import urlparse import uuid from distutils.version import LooseVersion as Version -import dateutil.parser import yaml try: import psutil @@ -137,30 +136,12 @@ def cluster_node_heartbeat(self): @task(bind=True, queue='default') def tower_periodic_scheduler(self): - def get_last_run(): - if not os.path.exists(settings.SCHEDULE_METADATA_LOCATION): - return None - fd = open(settings.SCHEDULE_METADATA_LOCATION) - try: - last_run = dateutil.parser.parse(fd.read()) - return last_run - except Exception as exc: - logger.error("get_last_run failed: {}".format(exc)) - return None - - def write_last_run(last_run): - fd = open(settings.SCHEDULE_METADATA_LOCATION, 'w') - fd.write(last_run.isoformat()) - fd.close() - run_now = now() - last_run = get_last_run() - if not last_run: - logger.debug("First run time") - write_last_run(run_now) - return + state = TowerState.get_solo() + last_run = state.schedule_last_run logger.debug("Last run was: %s", last_run) - write_last_run(run_now) + state.schedule_last_run = run_now + state.save() old_schedules = Schedule.objects.enabled().before(last_run) for schedule in old_schedules: @@ -180,6 +161,7 @@ def 
tower_periodic_scheduler(self): new_unified_job.save(update_fields=['status', 'job_explanation']) new_unified_job.websocket_emit_status("failed") emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules")) + state.save() def _send_notification_templates(instance, status_str): if status_str not in ['succeeded', 'failed']: diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index a5c7975920..02cbf3fc31 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -201,6 +201,7 @@ INSTALLED_APPS = ( 'awx.ui', 'awx.fact', 'awx.sso', + 'solo', ) INTERNAL_IPS = ('127.0.0.1',) @@ -392,9 +393,9 @@ CELERYBEAT_SCHEDULE = { 'task': 'awx.main.tasks.cluster_node_heartbeat', 'schedule': timedelta(seconds=60) }, - 'task_scheduler': { - 'task': 'awx.main.scheduler.tasks.run_scheduler', - 'schedule': timedelta(seconds=10) + 'task_manager': { + 'task': 'awx.main.scheduler.tasks.run_task_manager', + 'schedule': timedelta(seconds=20) }, 'task_fail_inconsistent_running_jobs': { 'task': 'awx.main.scheduler.tasks.run_fail_inconsistent_running_jobs', diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 5f2448d9e6..44e4f58bee 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -24,6 +24,7 @@ django-polymorphic==0.7.2 django-radius==1.0.0 djangorestframework==3.3.2 djangorestframework-yaml==1.0.2 +django-solo==1.1.2 django-split-settings==0.1.1 django-transaction-hooks==0.2 django-taggit==0.17.6 From e1a84f4c85606bd19e5edd9141947866011cce2d Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 1 Nov 2016 13:55:37 -0500 Subject: [PATCH 74/77] bump migrations --- .../{0046_v310_tower_state.py => 0047_v310_tower_state.py} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename awx/main/migrations/{0046_v310_tower_state.py => 0047_v310_tower_state.py} (92%) diff --git a/awx/main/migrations/0046_v310_tower_state.py b/awx/main/migrations/0047_v310_tower_state.py similarity index 92% rename from awx/main/migrations/0046_v310_tower_state.py rename to awx/main/migrations/0047_v310_tower_state.py index e9f785e0a6..f1227830a4 100644 --- a/awx/main/migrations/0046_v310_tower_state.py +++ b/awx/main/migrations/0047_v310_tower_state.py @@ -7,7 +7,7 @@ from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ - ('main', '0045_v310_job_event_stdout'), + ('main', '0046_v310_job_event_stdout'), ] operations = [ From 25b85c4a0bafb4d7d376bb523d80cfc9db461701 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 1 Nov 2016 14:07:00 -0500 Subject: [PATCH 75/77] rename scheduler config singleton --- awx/main/migrations/0047_v310_tower_state.py | 2 +- awx/main/models/ha.py | 4 ++-- awx/main/tasks.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/awx/main/migrations/0047_v310_tower_state.py b/awx/main/migrations/0047_v310_tower_state.py index f1227830a4..941dfd0ba2 100644 --- a/awx/main/migrations/0047_v310_tower_state.py +++ b/awx/main/migrations/0047_v310_tower_state.py @@ -12,7 +12,7 @@ class Migration(migrations.Migration): operations = [ migrations.CreateModel( - name='TowerState', + name='TowerScheduleState', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('schedule_last_run', models.DateTimeField(auto_now_add=True)), diff --git a/awx/main/models/ha.py b/awx/main/models/ha.py index 818233672b..691faf6305 100644 --- a/awx/main/models/ha.py +++ b/awx/main/models/ha.py @@ -13,7 +13,7 @@ from 
awx.main.models.jobs import Job from awx.main.models.projects import ProjectUpdate from awx.main.models.unified_jobs import UnifiedJob -__all__ = ('Instance', 'JobOrigin', 'TowerState',) +__all__ = ('Instance', 'JobOrigin', 'TowerScheduleState',) class Instance(models.Model): @@ -35,7 +35,7 @@ class Instance(models.Model): # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing return "tower" -class TowerState(SingletonModel): +class TowerScheduleState(SingletonModel): schedule_last_run = models.DateTimeField(auto_now_add=True) class JobOrigin(models.Model): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index b6448e3761..7f187c7bce 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -137,7 +137,7 @@ def cluster_node_heartbeat(self): @task(bind=True, queue='default') def tower_periodic_scheduler(self): run_now = now() - state = TowerState.get_solo() + state = TowerScheduleState.get_solo() last_run = state.schedule_last_run logger.debug("Last run was: %s", last_run) state.schedule_last_run = run_now From a0021f974a0506480ce3d62239bd5891494d809f Mon Sep 17 00:00:00 2001 From: Michael Abashian Date: Tue, 1 Nov 2016 16:27:11 -0400 Subject: [PATCH 76/77] Fixed merge typo --- .../src/organizations/linkout/organizations-linkout.route.js | 1 + 1 file changed, 1 insertion(+) diff --git a/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js b/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js index 7d984ca836..111fa883b0 100644 --- a/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js +++ b/awx/ui/client/src/organizations/linkout/organizations-linkout.route.js @@ -44,6 +44,7 @@ export default [{ value: { order_by: 'username' } + }, add_user_search: { value: { order_by: 'username', From 6d57d8f40b8832b13a8fdd0693a61cfc31579f1b Mon Sep 17 00:00:00 2001 From: Leigh Johnson Date: Tue, 1 Nov 2016 22:27:26 -0400 Subject: [PATCH 77/77] Fixes error thrown when saving various forms: `Cannot set property 'name_api_error' of null` Resolves #3822 & #3832 --- awx/ui/client/src/controllers/Projects.js | 4 ++-- awx/ui/client/src/controllers/Teams.js | 6 ++---- .../src/inventories/manage/adhoc/adhoc.controller.js | 2 +- .../src/inventory-scripts/edit/edit.controller.js | 2 +- .../job-templates/add/job-templates-add.controller.js | 5 +++-- .../edit/job-templates-edit.controller.js | 2 +- awx/ui/client/src/notifications/add/add.controller.js | 2 +- .../client/src/notifications/edit/edit.controller.js | 2 +- awx/ui/client/src/shared/form-generator.js | 10 +++++----- 9 files changed, 17 insertions(+), 18 deletions(-) diff --git a/awx/ui/client/src/controllers/Projects.js b/awx/ui/client/src/controllers/Projects.js index e020319223..7cb876d179 100644 --- a/awx/ui/client/src/controllers/Projects.js +++ b/awx/ui/client/src/controllers/Projects.js @@ -414,7 +414,7 @@ ProjectsAdd.$inject = ['$scope', '$rootScope', '$compile', '$location', '$log', export function ProjectsEdit($scope, $rootScope, $compile, $location, $log, - $stateParams, ProjectsForm, Rest, Alert, ProcessErrors, + $stateParams, ProjectsForm, Rest, Alert, ProcessErrors, GenerateForm, Prompt, ClearScope, GetBasePath, GetProjectPath, Authorization, GetChoices, Empty, DebugForm, Wait, ProjectUpdate, $state, CreateSelect2, ToggleNotification, i18n) { @@ -587,7 +587,7 @@ export function ProjectsEdit($scope, $rootScope, $compile, $location, $log, // Save changes to the parent $scope.formSave = function() { var fld, i, params; - //generator.clearApiErrors(); + 
GenerateForm.clearApiErrors($scope); Wait('start'); $rootScope.flashMessage = null; params = {}; diff --git a/awx/ui/client/src/controllers/Teams.js b/awx/ui/client/src/controllers/Teams.js index b4ae59fb39..5eb75c4ee3 100644 --- a/awx/ui/client/src/controllers/Teams.js +++ b/awx/ui/client/src/controllers/Teams.js @@ -105,8 +105,7 @@ export function TeamsAdd($scope, $rootScope, $stateParams, TeamForm, GenerateFor // Inject dynamic view var defaultUrl = GetBasePath('teams'), - form = TeamForm, - generator = GenerateForm; + form = TeamForm; init(); @@ -120,7 +119,7 @@ export function TeamsAdd($scope, $rootScope, $stateParams, TeamForm, GenerateFor // Save $scope.formSave = function() { var fld, data; - generator.clearApiErrors(); + GenerateForm.clearApiErrors($scope); Wait('start'); Rest.setUrl(defaultUrl); data = {}; @@ -216,7 +215,6 @@ export function TeamsEdit($scope, $rootScope, $stateParams, $scope.formSave = function() { $rootScope.flashMessage = null; if ($scope[form.name + '_form'].$valid) { - Rest.setUrl(defaultUrl + id + '/'); var data = processNewData(form.fields); Rest.put(data).success(function() { $state.go($state.current, null, { reload: true }); diff --git a/awx/ui/client/src/inventories/manage/adhoc/adhoc.controller.js b/awx/ui/client/src/inventories/manage/adhoc/adhoc.controller.js index e749a6d44c..dec6b6397a 100644 --- a/awx/ui/client/src/inventories/manage/adhoc/adhoc.controller.js +++ b/awx/ui/client/src/inventories/manage/adhoc/adhoc.controller.js @@ -202,7 +202,7 @@ function adhocController($q, $scope, $location, $stateParams, "privilege_escalation": "" }; - GenerateForm.clearApiErrors(); + GenerateForm.clearApiErrors($scope); // populate data with the relevant form values for (fld in adhocForm.fields) { diff --git a/awx/ui/client/src/inventory-scripts/edit/edit.controller.js b/awx/ui/client/src/inventory-scripts/edit/edit.controller.js index bb4147c083..2dce989b26 100644 --- a/awx/ui/client/src/inventory-scripts/edit/edit.controller.js +++ b/awx/ui/client/src/inventory-scripts/edit/edit.controller.js @@ -50,7 +50,7 @@ export default ['Rest', 'Wait', } $scope.formSave = function() { - generator.clearApiErrors(); + generator.clearApiErrors($scope); Wait('start'); Rest.setUrl(url + id + '/'); Rest.put({ diff --git a/awx/ui/client/src/job-templates/add/job-templates-add.controller.js b/awx/ui/client/src/job-templates/add/job-templates-add.controller.js index 1e7d79d167..f509f9b2fe 100644 --- a/awx/ui/client/src/job-templates/add/job-templates-add.controller.js +++ b/awx/ui/client/src/job-templates/add/job-templates-add.controller.js @@ -280,7 +280,7 @@ function saveCompleted(id) { - $state.go('jobTemplates.edit', {id: id}, {reload: true}); + $state.go('jobTemplates.edit', {job_template_id: id}, {reload: true}); } if ($scope.removeTemplateSaveSuccess) { @@ -419,7 +419,7 @@ $scope.survey_enabled = false; } - generator.clearApiErrors(); + generator.clearApiErrors($scope); Wait('start'); @@ -501,6 +501,7 @@ } catch (err) { Wait('stop'); + console.log(err) Alert("Error", "Error parsing extra variables. 
" + "Parser returned: " + err); } diff --git a/awx/ui/client/src/job-templates/edit/job-templates-edit.controller.js b/awx/ui/client/src/job-templates/edit/job-templates-edit.controller.js index be6ea855fc..95d7a8272a 100644 --- a/awx/ui/client/src/job-templates/edit/job-templates-edit.controller.js +++ b/awx/ui/client/src/job-templates/edit/job-templates-edit.controller.js @@ -542,7 +542,7 @@ export default $scope.survey_enabled = false; } - generator.clearApiErrors(); + generator.clearApiErrors($scope); Wait('start'); diff --git a/awx/ui/client/src/notifications/add/add.controller.js b/awx/ui/client/src/notifications/add/add.controller.js index 80e3d4ed69..1f007c3e2f 100644 --- a/awx/ui/client/src/notifications/add/add.controller.js +++ b/awx/ui/client/src/notifications/add/add.controller.js @@ -120,7 +120,7 @@ export default ['$rootScope', 'Rest', 'Wait', 'NotificationsFormObject', var params, v = $scope.notification_type.value; - generator.clearApiErrors(); + generator.clearApiErrors($scope); params = { "name": $scope.name, "description": $scope.description, diff --git a/awx/ui/client/src/notifications/edit/edit.controller.js b/awx/ui/client/src/notifications/edit/edit.controller.js index c96d930204..57f4a25055 100644 --- a/awx/ui/client/src/notifications/edit/edit.controller.js +++ b/awx/ui/client/src/notifications/edit/edit.controller.js @@ -197,7 +197,7 @@ export default ['Rest', 'Wait', var params, v = $scope.notification_type.value; - generator.clearApiErrors(); + generator.clearApiErrors($scope); params = { "name": $scope.name, "description": $scope.description, diff --git a/awx/ui/client/src/shared/form-generator.js b/awx/ui/client/src/shared/form-generator.js index 3ed0c224a9..af110494b4 100644 --- a/awx/ui/client/src/shared/form-generator.js +++ b/awx/ui/client/src/shared/form-generator.js @@ -303,21 +303,21 @@ angular.module('FormGenerator', [GeneratorHelpers.name, 'Utilities', listGenerat return html; }, - clearApiErrors: function () { + clearApiErrors: function (scope) { for (var fld in this.form.fields) { if (this.form.fields[fld].sourceModel) { - this.scope[this.form.fields[fld].sourceModel + '_' + this.form.fields[fld].sourceField + '_api_error'] = ''; + scope[this.form.fields[fld].sourceModel + '_' + this.form.fields[fld].sourceField + '_api_error'] = ''; $('[name="' + this.form.fields[fld].sourceModel + '_' + this.form.fields[fld].sourceField + '"]').removeClass('ng-invalid'); } else if (this.form.fields[fld].realName) { this.scope[this.form.fields[fld].realName + '_api_error'] = ''; $('[name="' + this.form.fields[fld].realName + '"]').removeClass('ng-invalid'); } else { - this.scope[fld + '_api_error'] = ''; + scope[fld + '_api_error'] = ''; $('[name="' + fld + '"]').removeClass('ng-invalid'); } } - if (!this.scope.$$phase) { - this.scope.$digest(); + if (!scope.$$phase) { + scope.$digest(); } },