Mirror of https://github.com/ansible/awx.git, synced 2026-02-06 03:54:44 -03:30
Compare commits
130 Commits
| SHA1 |
|---|
| b9b2affe44 |
| f61b6f9615 |
| 3b259de200 |
| 63e3e733e0 |
| 844b0f86b8 |
| d4c3c089df |
| 1328fb80a0 |
| 1fbcd1b10b |
| 11b26c199b |
| 463c4c1f7e |
| 76a16b329e |
| 123f646cea |
| d99c9c8dce |
| 4f3a8ef766 |
| c114243082 |
| 229e997e7e |
| dc7ec9dfe0 |
| 07aae8cefc |
| 902fb83493 |
| 1ef2d4cdad |
| a6b362e455 |
| 2c3549331c |
| 016fc7f6bf |
| e8eda28ce5 |
| 83c232eb20 |
| c30639c4e6 |
| 5e84782b9c |
| d134291097 |
| 4b669fb16d |
| b53621e74c |
| 925c6543c4 |
| bb5312f4fc |
| 7333e55748 |
| 5e20dcb6ca |
| cab6b8b333 |
| 46020379aa |
| e23fb31a4a |
| 17c95f200a |
| 7676ccdbac |
| 4626aa0144 |
| fb7596929f |
| d63518d789 |
| 6f1cbac324 |
| 2b80f0f7b6 |
| 10945faba1 |
| d4ccb00338 |
| d10d5f1539 |
| 3e5f328b52 |
| d558ffd699 |
| b64d401e74 |
| 0a3f131adc |
| 6f9cf6a649 |
| 5db43b8283 |
| aa9e60c508 |
| 2162e8e0cc |
| 1eeffe4ae2 |
| 2927803a82 |
| 1b50b26901 |
| 44819987f7 |
| 9bf0d052ab |
| 5c98d04e09 |
| 6560ab0fab |
| efb7a729c7 |
| 6e1deed79e |
| ad3721bdb2 |
| ca64630740 |
| 8686575311 |
| 0ecd6542bf |
| 5e3d47683d |
| 73f617d811 |
| ea7e15bfc4 |
| eca530c788 |
| c1b48e2c9c |
| 9d501327fc |
| 31b3bad658 |
| 155c214df0 |
| 5931c13b04 |
| 5d7b7d5888 |
| 53ad819d65 |
| 3ce3786303 |
| 46bc146e26 |
| 88eaf1154a |
| 5421c243d7 |
| 5cdab1b57a |
| 2a86c5b944 |
| daeeaf413a |
| 2d119f7b02 |
| 68950d56ca |
| 477c5df022 |
| 4c8f4f4cc5 |
| 6726e203b9 |
| 9a10811366 |
| 62bffaa7e6 |
| 14423c4f3f |
| 8037cddfe5 |
| be4b3c75b4 |
| 818b261bea |
| 4707dc2a05 |
| ebc2b821be |
| 85a875bbfe |
| a9663c2900 |
| 05c24df9e3 |
| 1becd4c39d |
| 9817ab14d0 |
| 51b51a9bf7 |
| 55c5dd06cf |
| 0e5e23372d |
| 1e44d5c833 |
| f7bc8fb662 |
| 416dcc83c9 |
| 13ed656506 |
| f683f87ce3 |
| c15cbe0f6e |
| a8728670e1 |
| cf9dffbaf8 |
| 3d1b32c72f |
| e95da84e5a |
| fcd759fa1f |
| 6772c81927 |
| 774ec40989 |
| b7ba280da3 |
| 058e2c0d81 |
| 072919040b |
| 91ae343e3b |
| 5afabc7a19 |
| 9b2ca04118 |
| 589531163a |
| c785c38748 |
| f1e3be5ec8 |
| bf5657a06a |
Makefile (4 changed lines)

@@ -483,7 +483,7 @@ $(UI_RELEASE_FLAG_FILE): $(I18N_FLAG_FILE) $(UI_RELEASE_DEPS_FLAG_FILE)
 	touch $(UI_RELEASE_FLAG_FILE)
 
 $(UI_RELEASE_DEPS_FLAG_FILE):
-	PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 $(NPM_BIN) --unsafe-perm --prefix awx/ui install --no-save awx/ui
+	PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 $(NPM_BIN) --unsafe-perm --prefix awx/ui ci --no-save awx/ui
 	touch $(UI_RELEASE_DEPS_FLAG_FILE)
 
 # END UI RELEASE TASKS

@@ -498,7 +498,7 @@ $(UI_DEPS_FLAG_FILE):
 	rm -rf awx/ui/node_modules; \
 	rm -f ${UI_RELEASE_DEPS_FLAG_FILE}; \
 	fi; \
-	$(NPM_BIN) --unsafe-perm --prefix awx/ui install --no-save awx/ui
+	$(NPM_BIN) --unsafe-perm --prefix awx/ui ci --no-save awx/ui
 	touch $(UI_DEPS_FLAG_FILE)
 
 ui-docker-machine: $(UI_DEPS_FLAG_FILE)
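Both Makefile hunks replace `npm install` with `npm ci`, which installs exactly what the lockfile specifies into a clean `node_modules` tree and fails fast if `package.json` and the lockfile disagree; that trades incremental installs for reproducible UI dependency and release builds.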
@@ -91,16 +91,6 @@ def prepare_env():
     # Monkeypatch Django find_commands to also work with .pyc files.
     import django.core.management
     django.core.management.find_commands = find_commands
-    # Fixup sys.modules reference to django.utils.six to allow jsonfield to
-    # work when using Django 1.4.
-    import django.utils
-    try:
-        import django.utils.six
-    except ImportError:  # pragma: no cover
-        import six
-        sys.modules['django.utils.six'] = sys.modules['six']
-        django.utils.six = sys.modules['django.utils.six']
-    from django.utils import six  # noqa
     # Use the AWX_TEST_DATABASE_* environment variables to specify the test
     # database settings to use when management command is run as an external
     # program via unit tests.
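Most of the Python hunks that follow apply one mechanical substitution: the `six` compatibility aliases are replaced by the Python 3 builtins they resolve to. A minimal sketch of that mapping, assuming the `six` package is available (it is being removed by this changeset, not documented by it):

```python
# On Python 3 the six aliases collapse to builtins, which is what makes
# the mechanical substitutions in the hunks below safe.
import six

assert six.string_types == (str,)    # isinstance(x, six.string_types) -> isinstance(x, str)
assert six.text_type is str          # six.text_type(x)                -> str(x)
assert six.integer_types == (int,)   # six.string_types + six.integer_types + (float,) -> (str, int, float)
assert six.u('Iñtërnâtiônàlizætiøn') == 'Iñtërnâtiônàlizætiøn'  # six.u(s) -> s
```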
@@ -5,7 +5,6 @@
 import inspect
 import logging
 import time
-import six
 import urllib.parse
 
 # Django

@@ -32,9 +31,6 @@ from rest_framework.permissions import AllowAny
 from rest_framework.renderers import StaticHTMLRenderer, JSONRenderer
 from rest_framework.negotiation import DefaultContentNegotiation
 
-# cryptography
-from cryptography.fernet import InvalidToken
-
 # AWX
 from awx.api.filters import FieldLookupBackend
 from awx.main.models import *  # noqa

@@ -854,15 +850,18 @@ class CopyAPIView(GenericAPIView):
             return field_val
         if isinstance(field_val, dict):
             for sub_field in field_val:
-                if isinstance(sub_field, six.string_types) \
-                        and isinstance(field_val[sub_field], six.string_types):
+                if isinstance(sub_field, str) \
+                        and isinstance(field_val[sub_field], str):
                     try:
                         field_val[sub_field] = decrypt_field(obj, field_name, sub_field)
-                    except InvalidToken:
+                    except AttributeError:
                         # Catching the corner case with v1 credential fields
                         field_val[sub_field] = decrypt_field(obj, sub_field)
-        elif isinstance(field_val, six.string_types):
-            field_val = decrypt_field(obj, field_name)
+        elif isinstance(field_val, str):
+            try:
+                field_val = decrypt_field(obj, field_name)
+            except AttributeError:
+                return field_val
         return field_val
 
     def _build_create_dict(self, obj):

@@ -916,7 +915,7 @@ class CopyAPIView(GenericAPIView):
                 obj, field.name, field_val
             )
         new_obj = model.objects.create(**create_kwargs)
-        logger.debug(six.text_type('Deep copy: Created new object {}({})').format(
+        logger.debug('Deep copy: Created new object {}({})'.format(
             new_obj, model
         ))
         # Need to save separatedly because Djang-crum get_current_user would
@@ -234,17 +234,17 @@ class RoleMetadata(Metadata):
 
 # TODO: Tower 3.3 remove class and all uses in views.py when API v1 is removed
 class JobTypeMetadata(Metadata):
-  def get_field_info(self, field):
-    res = super(JobTypeMetadata, self).get_field_info(field)
+    def get_field_info(self, field):
+        res = super(JobTypeMetadata, self).get_field_info(field)
 
-    if field.field_name == 'job_type':
-      index = 0
-      for choice in res['choices']:
-        if choice[0] == 'scan':
-          res['choices'].pop(index)
-          break
-        index += 1
-    return res
+        if field.field_name == 'job_type':
+            index = 0
+            for choice in res['choices']:
+                if choice[0] == 'scan':
+                    res['choices'].pop(index)
+                    break
+                index += 1
+        return res
 
 
 class SublistAttachDetatchMetadata(Metadata):
@@ -4,7 +4,6 @@ import json
 
 # Django
 from django.conf import settings
-from django.utils import six
 from django.utils.encoding import smart_str
 from django.utils.translation import ugettext_lazy as _
 

@@ -34,4 +33,4 @@ class JSONParser(parsers.JSONParser):
                 raise ParseError(_('JSON parse error - not a JSON object'))
             return obj
         except ValueError as exc:
-            raise ParseError(_('JSON parse error - %s\nPossible cause: trailing comma.' % six.text_type(exc)))
+            raise ParseError(_('JSON parse error - %s\nPossible cause: trailing comma.' % str(exc)))
@@ -1,12 +1,12 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.
 
+from django.utils.safestring import SafeText
+
 # Django REST Framework
 from rest_framework import renderers
 from rest_framework.request import override_method
 
-import six
-
 
 class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
     '''

@@ -20,6 +20,19 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
             return renderers.JSONRenderer()
         return renderer
 
+    def get_content(self, renderer, data, accepted_media_type, renderer_context):
+        if isinstance(data, SafeText):
+            # Older versions of Django (pre-2.0) have a py3 bug which causes
+            # bytestrings marked as "safe" to not actually get _treated_ as
+            # safe; this causes certain embedded strings (like the stdout HTML
+            # view) to be improperly escaped
+            # see: https://github.com/ansible/awx/issues/3108
+            # https://code.djangoproject.com/ticket/28121
+            return data
+        return super(BrowsableAPIRenderer, self).get_content(renderer, data,
+                                                             accepted_media_type,
+                                                             renderer_context)
+
     def get_context(self, data, accepted_media_type, renderer_context):
         # Store the associated response status to know how to populate the raw
         # data form.

@@ -71,8 +84,8 @@ class PlainTextRenderer(renderers.BaseRenderer):
     format = 'txt'
 
     def render(self, data, media_type=None, renderer_context=None):
-        if not isinstance(data, six.string_types):
-            data = six.text_type(data)
+        if not isinstance(data, str):
+            data = str(data)
         return data.encode(self.charset)
 
 
@@ -5,9 +5,7 @@
 import copy
 import json
 import logging
-import operator
 import re
-import six
 import urllib.parse
 from collections import OrderedDict
 from datetime import timedelta

@@ -46,11 +44,10 @@ from awx.main.constants import (
     ANSI_SGR_PATTERN,
     ACTIVE_STATES,
     CENSOR_VALUE,
-    CHOICES_PRIVILEGE_ESCALATION_METHODS,
 )
 from awx.main.models import *  # noqa
 from awx.main.models.base import NEW_JOB_TYPE_CHOICES
-from awx.main.fields import ImplicitRoleField
+from awx.main.fields import ImplicitRoleField, JSONBField
 from awx.main.utils import (
     get_type_for_model, get_model_for_type, timestamp_apiformat,
     camelcase_to_underscore, getattrd, parse_yaml_or_json,

@@ -1046,7 +1043,7 @@ class BaseOAuth2TokenSerializer(BaseSerializer):
         return ret
 
     def _is_valid_scope(self, value):
-        if not value or (not isinstance(value, six.string_types)):
+        if not value or (not isinstance(value, str)):
             return False
         words = value.split()
         for word in words:

@@ -1545,6 +1542,18 @@ class InventorySerializer(BaseSerializerWithVariables):
     def validate_host_filter(self, host_filter):
         if host_filter:
             try:
+                for match in JSONBField.get_lookups().keys():
+                    if match == 'exact':
+                        # __exact is allowed
+                        continue
+                    match = '__{}'.format(match)
+                    if re.match(
+                        'ansible_facts[^=]+{}='.format(match),
+                        host_filter
+                    ):
+                        raise models.base.ValidationError({
+                            'host_filter': 'ansible_facts does not support searching with {}'.format(match)
+                        })
                 SmartFilter().query_from_string(host_filter)
             except RuntimeError as e:
                 raise models.base.ValidationError(e)
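The new `validate_host_filter` guard rejects `ansible_facts` smart filters that use any JSONB lookup other than `__exact`. A self-contained sketch of the matching logic, with a small hypothetical lookup list standing in for `JSONBField.get_lookups().keys()`:

```python
import re

LOOKUPS = ['icontains', 'has_key', 'exact']  # hypothetical stand-in for JSONBField.get_lookups()

def rejected(host_filter):
    # Mirrors the loop added above: every lookup except __exact is banned
    # when applied to an ansible_facts key.
    for match in LOOKUPS:
        if match == 'exact':
            continue
        match = '__{}'.format(match)
        if re.match(r'ansible_facts[^=]+{}='.format(match), host_filter):
            return True
    return False

assert rejected('ansible_facts__ansible_distribution__icontains=ubu')
assert not rejected('ansible_facts__ansible_distribution__exact="Ubuntu"')
```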
@@ -2175,10 +2184,12 @@ class InventorySourceUpdateSerializer(InventorySourceSerializer):
 
 class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
 
+    custom_virtualenv = serializers.ReadOnlyField()
+
     class Meta:
         model = InventoryUpdate
         fields = ('*', 'inventory', 'inventory_source', 'license_error', 'source_project_update',
-                  '-controller_node',)
+                  'custom_virtualenv', '-controller_node',)
 
     def get_related(self, obj):
         res = super(InventoryUpdateSerializer, self).get_related(obj)

@@ -2207,6 +2218,44 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
         return res
 
 
+class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
+
+    source_project = serializers.SerializerMethodField(
+        help_text=_('The project used for this job.'),
+        method_name='get_source_project_id'
+    )
+
+    class Meta:
+        model = InventoryUpdate
+        fields = ('*', 'source_project',)
+
+    def get_source_project(self, obj):
+        return getattrd(obj, 'source_project_update.unified_job_template', None)
+
+    def get_source_project_id(self, obj):
+        return getattrd(obj, 'source_project_update.unified_job_template.id', None)
+
+    def get_related(self, obj):
+        res = super(InventoryUpdateDetailSerializer, self).get_related(obj)
+        source_project_id = self.get_source_project_id(obj)
+
+        if source_project_id:
+            res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})
+        return res
+
+    def get_summary_fields(self, obj):
+        summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)
+        summary_obj = self.get_source_project(obj)
+
+        if summary_obj:
+            summary_fields['source_project'] = {}
+            for field in SUMMARIZABLE_FK_FIELDS['project']:
+                value = getattr(summary_obj, field, None)
+                if value is not None:
+                    summary_fields['source_project'][field] = value
+        return summary_fields
+
+
 class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
 
     class Meta:
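Both `get_source_project` and `get_source_project_id` in the new detail serializer lean on `getattrd` to walk a dotted attribute path with a default. Roughly, as a sketch rather than the actual `awx.main.utils` implementation:

```python
def getattrd(obj, name, default=None):
    """Resolve a dotted path like 'source_project_update.unified_job_template.id'."""
    try:
        for attr in name.split('.'):
            obj = getattr(obj, attr)
        return obj
    except AttributeError:
        return default
```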
@@ -2459,9 +2508,6 @@ class CredentialTypeSerializer(BaseSerializer):
                 field['label'] = _(field['label'])
                 if 'help_text' in field:
                     field['help_text'] = _(field['help_text'])
-                if field['type'] == 'become_method':
-                    field.pop('type')
-                    field['choices'] = list(map(operator.itemgetter(0), CHOICES_PRIVILEGE_ESCALATION_METHODS))
         return value
 
     def filter_field_metadata(self, fields, method):

@@ -2476,8 +2522,7 @@ class CredentialTypeSerializer(BaseSerializer):
 
 
 # TODO: remove when API v1 is removed
-@six.add_metaclass(BaseSerializerMetaclass)
-class V1CredentialFields(BaseSerializer):
+class V1CredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
 
     class Meta:
         model = Credential

@@ -2495,8 +2540,7 @@ class V1CredentialFields(BaseSerializer):
         return super(V1CredentialFields, self).build_field(field_name, info, model_class, nested_depth)
 
 
-@six.add_metaclass(BaseSerializerMetaclass)
-class V2CredentialFields(BaseSerializer):
+class V2CredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
 
     class Meta:
         model = Credential

@@ -2784,8 +2828,7 @@ class LabelsListMixin(object):
 
 
 # TODO: remove when API v1 is removed
-@six.add_metaclass(BaseSerializerMetaclass)
-class V1JobOptionsSerializer(BaseSerializer):
+class V1JobOptionsSerializer(BaseSerializer, metaclass=BaseSerializerMetaclass):
 
     class Meta:
         model = Credential

@@ -2799,8 +2842,7 @@ class V1JobOptionsSerializer(BaseSerializer):
         return super(V1JobOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
 
 
-@six.add_metaclass(BaseSerializerMetaclass)
-class LegacyCredentialFields(BaseSerializer):
+class LegacyCredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
 
     class Meta:
         model = Credential
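The four serializer hunks above collapse the two-line `@six.add_metaclass(...)` decorator into Python 3's native `metaclass=` keyword. The two spellings produce the same class, as this minimal sketch shows (assumes `six` is installed):

```python
import six

class Recording(type):
    # Toy metaclass that marks every class built through it.
    def __new__(mcs, name, bases, ns):
        ns['via_metaclass'] = True
        return super(Recording, mcs).__new__(mcs, name, bases, ns)

@six.add_metaclass(Recording)        # Python 2/3 compatible spelling (before)
class Old(object):
    pass

class New(object, metaclass=Recording):  # native Python 3 spelling (after)
    pass

assert Old.via_metaclass and New.via_metaclass
assert type(Old) is type(New) is Recording
```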
@@ -3293,10 +3335,11 @@ class JobDetailSerializer(JobSerializer):
     playbook_counts = serializers.SerializerMethodField(
         help_text=_('A count of all plays and tasks for the job run.'),
     )
+    custom_virtualenv = serializers.ReadOnlyField()
 
     class Meta:
         model = Job
-        fields = ('*', 'host_status_counts', 'playbook_counts',)
+        fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
 
     def get_playbook_counts(self, obj):
         task_count = obj.job_events.filter(event='playbook_on_task_start').count()

@@ -4384,7 +4427,7 @@ class JobLaunchSerializer(BaseSerializer):
             errors.setdefault('credentials', []).append(_(
                 'Removing {} credential at launch time without replacement is not supported. '
                 'Provided list lacked credential(s): {}.'
-            ).format(cred.unique_hash(display=True), ', '.join([six.text_type(c) for c in removed_creds])))
+            ).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
 
         # verify that credentials (either provided or existing) don't
         # require launch-time passwords that have not been provided

@@ -4722,8 +4765,8 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
             raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
         elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
             raise serializers.ValidationError(_(
-                six.text_type('Inventory sources with `update_on_project_update` cannot be scheduled. '
-                              'Schedule its source project `{}` instead.').format(value.source_project.name)))
+                'Inventory sources with `update_on_project_update` cannot be scheduled. '
+                'Schedule its source project `{}` instead.'.format(value.source_project.name)))
         return value
 
 

@@ -5061,6 +5104,6 @@ class FactSerializer(BaseFactSerializer):
         ret = super(FactSerializer, self).to_representation(obj)
         if obj is None:
             return ret
-        if 'facts' in ret and isinstance(ret['facts'], six.string_types):
+        if 'facts' in ret and isinstance(ret['facts'], str):
             ret['facts'] = json.loads(ret['facts'])
         return ret
@@ -12,7 +12,6 @@ import requests
 import functools
 from base64 import b64encode
 from collections import OrderedDict, Iterable
-import six
 
 
 # Django

@@ -524,7 +523,7 @@ class AuthView(APIView):
                     not feature_enabled('ldap')) or \
                     (not feature_enabled('enterprise_auth') and
                      name in ['saml', 'radius']):
-                    continue
+                continue
 
             login_url = reverse('social:begin', args=(name,))
             complete_url = request.build_absolute_uri(reverse('social:complete', args=(name,)))

@@ -1435,7 +1434,7 @@ class HostList(HostRelatedSearchMixin, ListCreateAPIView):
         try:
             return super(HostList, self).list(*args, **kwargs)
         except Exception as e:
-            return Response(dict(error=_(six.text_type(e))), status=status.HTTP_400_BAD_REQUEST)
+            return Response(dict(error=_(str(e))), status=status.HTTP_400_BAD_REQUEST)
 
 
 class HostDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):

@@ -1878,7 +1877,7 @@ class InventoryScriptView(RetrieveAPIView):
         show_all = bool(request.query_params.get('all', ''))
         subset = request.query_params.get('subset', '')
         if subset:
-            if not isinstance(subset, six.string_types):
+            if not isinstance(subset, str):
                 raise ParseError(_('Inventory subset argument must be a string.'))
             if subset.startswith('slice'):
                 slice_number, slice_count = Inventory.parse_slice_params(subset)

@@ -1972,7 +1971,7 @@ class InventoryInventorySourcesUpdate(RetrieveAPIView):
             details['status'] = None
             if inventory_source.can_update:
                 update = inventory_source.update()
-                details.update(InventoryUpdateSerializer(update, context=self.get_serializer_context()).to_representation(update))
+                details.update(InventoryUpdateDetailSerializer(update, context=self.get_serializer_context()).to_representation(update))
                 details['status'] = 'started'
                 details['inventory_update'] = update.id
                 successes += 1

@@ -2135,7 +2134,7 @@ class InventorySourceUpdateView(RetrieveAPIView):
                 headers = {'Location': update.get_absolute_url(request=request)}
                 data = OrderedDict()
                 data['inventory_update'] = update.id
-                data.update(InventoryUpdateSerializer(update, context=self.get_serializer_context()).to_representation(update))
+                data.update(InventoryUpdateDetailSerializer(update, context=self.get_serializer_context()).to_representation(update))
                 return Response(data, status=status.HTTP_202_ACCEPTED, headers=headers)
             else:
                 return self.http_method_not_allowed(request, *args, **kwargs)

@@ -2150,7 +2149,7 @@ class InventoryUpdateList(ListAPIView):
 class InventoryUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
 
     model = InventoryUpdate
-    serializer_class = InventoryUpdateSerializer
+    serializer_class = InventoryUpdateDetailSerializer
 
 
 class InventoryUpdateCredentialsList(SubListAPIView):
@@ -2416,11 +2415,11 @@ class JobTemplateSurveySpec(GenericAPIView):
     serializer_class = EmptySerializer
 
     ALLOWED_TYPES = {
-        'text': six.string_types,
-        'textarea': six.string_types,
-        'password': six.string_types,
-        'multiplechoice': six.string_types,
-        'multiselect': six.string_types,
+        'text': str,
+        'textarea': str,
+        'password': str,
+        'multiplechoice': str,
+        'multiselect': str,
         'integer': int,
         'float': float
     }
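With the table now holding plain builtins, checking a survey answer reduces to an ordinary `isinstance` test (illustrative sketch, not the view's full validation path):

```python
ALLOWED_TYPES = {'text': str, 'textarea': str, 'password': str,
                 'multiplechoice': str, 'multiselect': str,
                 'integer': int, 'float': float}

def type_ok(qtype, answer):
    return isinstance(answer, ALLOWED_TYPES[qtype])

assert type_ok('integer', 42)
assert type_ok('text', 'blue')
assert not type_ok('text', 42)
```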
@@ -2455,8 +2454,8 @@
     def _validate_spec_data(new_spec, old_spec):
         schema_errors = {}
         for field, expect_type, type_label in [
-                ('name', six.string_types, 'string'),
-                ('description', six.string_types, 'string'),
+                ('name', str, 'string'),
+                ('description', str, 'string'),
                 ('spec', list, 'list of items')]:
             if field not in new_spec:
                 schema_errors['error'] = _("Field '{}' is missing from survey spec.").format(field)

@@ -2474,7 +2473,7 @@
         old_spec_dict = JobTemplate.pivot_spec(old_spec)
         for idx, survey_item in enumerate(new_spec["spec"]):
             context = dict(
-                idx=six.text_type(idx),
+                idx=str(idx),
                 survey_item=survey_item
             )
             # General element validation

@@ -2486,7 +2485,7 @@
                     field_name=field_name, **context
                 )), status=status.HTTP_400_BAD_REQUEST)
             val = survey_item[field_name]
-            allow_types = six.string_types
+            allow_types = str
             type_label = 'string'
             if field_name == 'required':
                 allow_types = bool

@@ -2534,7 +2533,7 @@
                 )))
 
             # Process encryption substitution
-            if ("default" in survey_item and isinstance(survey_item['default'], six.string_types) and
+            if ("default" in survey_item and isinstance(survey_item['default'], str) and
                     survey_item['default'].startswith('$encrypted$')):
                 # Submission expects the existence of encrypted DB value to replace given default
                 if qtype != "password":

@@ -2546,7 +2545,7 @@
                 encryptedish_default_exists = False
                 if 'default' in old_element:
                     old_default = old_element['default']
-                    if isinstance(old_default, six.string_types):
+                    if isinstance(old_default, str):
                         if old_default.startswith('$encrypted$'):
                             encryptedish_default_exists = True
                         elif old_default == "":  # unencrypted blank string is allowed as DB value as special case

@@ -3075,8 +3074,8 @@ class WorkflowJobTemplateCopy(WorkflowsEnforcementMixin, CopyAPIView):
             elif field_name in ['credentials']:
                 for cred in item.all():
                     if not user.can_access(cred.__class__, 'use', cred):
-                        logger.debug(six.text_type(
-                            'Deep copy: removing {} from relationship due to permissions').format(cred))
+                        logger.debug(
+                            'Deep copy: removing {} from relationship due to permissions'.format(cred))
                         item.remove(cred.pk)
         obj.save()
 

@@ -3619,11 +3618,6 @@ class JobRelaunch(RetrieveAPIView):
                     'Cannot relaunch because previous job had 0 {status_value} hosts.'
                 ).format(status_value=retry_hosts)}, status=status.HTTP_400_BAD_REQUEST)
             copy_kwargs['limit'] = ','.join(retry_host_list)
-            limit_length = len(copy_kwargs['limit'])
-            if limit_length > 1024:
-                return Response({'limit': _(
-                    'Cannot relaunch because the limit length {limit_length} exceeds the max of {limit_max}.'
-                ).format(limit_length=limit_length, limit_max=1024)}, status=status.HTTP_400_BAD_REQUEST)
 
         new_job = obj.copy_unified_job(**copy_kwargs)
         result = new_job.signal_start(**serializer.validated_data['credential_passwords'])
@@ -27,6 +27,7 @@ from awx.main.utils import (
 )
 from awx.api.versioning import reverse, get_request_version, drf_reverse
 from awx.conf.license import get_license, feature_enabled
+from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
 from awx.main.models import (
     Project,
     Organization,

@@ -203,7 +204,8 @@ class ApiV1ConfigView(APIView):
             version=get_awx_version(),
             ansible_version=get_ansible_version(),
             eula=render_to_string("eula.md") if license_data.get('license_type', 'UNLICENSED') != 'open' else '',
-            analytics_status=pendo_state
+            analytics_status=pendo_state,
+            become_methods=PRIVILEGE_ESCALATION_METHODS,
         )
 
         # If LDAP is enabled, user_ldap_fields will return a list of field
@@ -10,8 +10,6 @@ from django.utils.translation import ugettext_lazy as _
 # Django REST Framework
 from rest_framework.fields import *  # noqa
 
-import six
-
 logger = logging.getLogger('awx.conf.fields')
 
 # Use DRF fields to convert/validate settings:

@@ -139,7 +137,7 @@ class KeyValueField(DictField):
     def to_internal_value(self, data):
         ret = super(KeyValueField, self).to_internal_value(data)
         for value in data.values():
-            if not isinstance(value, six.string_types + six.integer_types + (float,)):
+            if not isinstance(value, (str, int, float)):
                 if isinstance(value, OrderedDict):
                     value = dict(value)
                 self.fail('invalid_child', input=value)
@@ -1,480 +0,0 @@
-# Copyright (c) 2016 Ansible, Inc.
-# All Rights Reserved.
-
-# Python
-import base64
-import collections
-import difflib
-import json
-import os
-import shutil
-
-# Django
-from django.conf import settings
-from django.core.management.base import BaseCommand, CommandError
-from django.db import transaction
-from django.utils.text import slugify
-from django.utils.timezone import now
-from django.utils.translation import ugettext_lazy as _
-
-# Tower
-from awx import MODE
-from awx.conf import settings_registry
-from awx.conf.fields import empty, SkipField
-from awx.conf.models import Setting
-from awx.conf.utils import comment_assignments
-
-
-class Command(BaseCommand):
-
-    def add_arguments(self, parser):
-        parser.add_argument(
-            'category',
-            nargs='*',
-            type=str,
-        )
-        parser.add_argument(
-            '--dry-run',
-            action='store_true',
-            dest='dry_run',
-            default=False,
-            help=_('Only show which settings would be commented/migrated.'),
-        )
-        parser.add_argument(
-            '--skip-errors',
-            action='store_true',
-            dest='skip_errors',
-            default=False,
-            help=_('Skip over settings that would raise an error when commenting/migrating.'),
-        )
-        parser.add_argument(
-            '--no-comment',
-            action='store_true',
-            dest='no_comment',
-            default=False,
-            help=_('Skip commenting out settings in files.'),
-        )
-        parser.add_argument(
-            '--comment-only',
-            action='store_true',
-            dest='comment_only',
-            default=False,
-            help=_('Skip migrating and only comment out settings in files.'),
-        )
-        parser.add_argument(
-            '--backup-suffix',
-            dest='backup_suffix',
-            default=now().strftime('.%Y%m%d%H%M%S'),
-            help=_('Backup existing settings files with this suffix.'),
-        )
-
-    @transaction.atomic
-    def handle(self, *args, **options):
-        self.verbosity = int(options.get('verbosity', 1))
-        self.dry_run = bool(options.get('dry_run', False))
-        self.skip_errors = bool(options.get('skip_errors', False))
-        self.no_comment = bool(options.get('no_comment', False))
-        self.comment_only = bool(options.get('comment_only', False))
-        self.backup_suffix = options.get('backup_suffix', '')
-        self.categories = options.get('category', None) or ['all']
-        self.style.HEADING = self.style.MIGRATE_HEADING
-        self.style.LABEL = self.style.MIGRATE_LABEL
-        self.style.OK = self.style.SQL_FIELD
-        self.style.SKIP = self.style.WARNING
-        self.style.VALUE = self.style.SQL_KEYWORD
-
-        # Determine if any categories provided are invalid.
-        category_slugs = []
-        invalid_categories = []
-        for category in self.categories:
-            category_slug = slugify(category)
-            if category_slug in settings_registry.get_registered_categories():
-                if category_slug not in category_slugs:
-                    category_slugs.append(category_slug)
-            else:
-                if category not in invalid_categories:
-                    invalid_categories.append(category)
-        if len(invalid_categories) == 1:
-            raise CommandError('Invalid setting category: {}'.format(invalid_categories[0]))
-        elif len(invalid_categories) > 1:
-            raise CommandError('Invalid setting categories: {}'.format(', '.join(invalid_categories)))
-
-        # Build a list of all settings to be migrated.
-        registered_settings = []
-        for category_slug in category_slugs:
-            for registered_setting in settings_registry.get_registered_settings(category_slug=category_slug, read_only=False):
-                if registered_setting not in registered_settings:
-                    registered_settings.append(registered_setting)
-
-        self._migrate_settings(registered_settings)
-
-    def _get_settings_file_patterns(self):
-        if MODE == 'development':
-            return [
-                '/etc/tower/settings.py',
-                '/etc/tower/conf.d/*.py',
-                os.path.join(os.path.dirname(__file__), '..', '..', '..', 'settings', 'local_*.py')
-            ]
-        else:
-            return [
-                os.environ.get('AWX_SETTINGS_FILE', '/etc/tower/settings.py'),
-                os.path.join(os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/'), '*.py'),
-            ]
-
-    def _get_license_file(self):
-        return os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license')
-
-    def _comment_license_file(self, dry_run=True):
-        license_file = self._get_license_file()
-        diff_lines = []
-        if os.path.exists(license_file):
-            try:
-                raw_license_data = open(license_file).read()
-                json.loads(raw_license_data)
-            except Exception as e:
-                raise CommandError('Error reading license from {0}: {1!r}'.format(license_file, e))
-            if self.backup_suffix:
-                backup_license_file = '{}{}'.format(license_file, self.backup_suffix)
-            else:
-                backup_license_file = '{}.old'.format(license_file)
-            diff_lines = list(difflib.unified_diff(
-                raw_license_data.splitlines(),
-                [],
-                fromfile=backup_license_file,
-                tofile=license_file,
-                lineterm='',
-            ))
-            if not dry_run:
-                if self.backup_suffix:
-                    shutil.copy2(license_file, backup_license_file)
-                os.remove(license_file)
-        return diff_lines
-
-    def _get_local_settings_file(self):
-        if MODE == 'development':
-            static_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'ui', 'static')
-        else:
-            static_root = settings.STATIC_ROOT
-        return os.path.join(static_root, 'local_settings.json')
-
-    def _comment_local_settings_file(self, dry_run=True):
-        local_settings_file = self._get_local_settings_file()
-        diff_lines = []
-        if os.path.exists(local_settings_file):
-            try:
-                raw_local_settings_data = open(local_settings_file).read()
-                json.loads(raw_local_settings_data)
-            except Exception as e:
-                if not self.skip_errors:
-                    raise CommandError('Error reading local settings from {0}: {1!r}'.format(local_settings_file, e))
-                return diff_lines
-            if self.backup_suffix:
-                backup_local_settings_file = '{}{}'.format(local_settings_file, self.backup_suffix)
-            else:
-                backup_local_settings_file = '{}.old'.format(local_settings_file)
-            diff_lines = list(difflib.unified_diff(
-                raw_local_settings_data.splitlines(),
-                [],
-                fromfile=backup_local_settings_file,
-                tofile=local_settings_file,
-                lineterm='',
-            ))
-            if not dry_run:
-                if self.backup_suffix:
-                    shutil.copy2(local_settings_file, backup_local_settings_file)
-                os.remove(local_settings_file)
-        return diff_lines
-
-    def _get_custom_logo_file(self):
-        if MODE == 'development':
-            static_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'ui', 'static')
-        else:
-            static_root = settings.STATIC_ROOT
-        return os.path.join(static_root, 'assets', 'custom_console_logo.png')
-
-    def _comment_custom_logo_file(self, dry_run=True):
-        custom_logo_file = self._get_custom_logo_file()
-        diff_lines = []
-        if os.path.exists(custom_logo_file):
-            try:
-                raw_custom_logo_data = open(custom_logo_file).read()
-            except Exception as e:
-                if not self.skip_errors:
-                    raise CommandError('Error reading custom logo from {0}: {1!r}'.format(custom_logo_file, e))
-                return diff_lines
-            if self.backup_suffix:
-                backup_custom_logo_file = '{}{}'.format(custom_logo_file, self.backup_suffix)
-            else:
-                backup_custom_logo_file = '{}.old'.format(custom_logo_file)
-            diff_lines = list(difflib.unified_diff(
-                ['<PNG Image ({} bytes)>'.format(len(raw_custom_logo_data))],
-                [],
-                fromfile=backup_custom_logo_file,
-                tofile=custom_logo_file,
-                lineterm='',
-            ))
-            if not dry_run:
-                if self.backup_suffix:
-                    shutil.copy2(custom_logo_file, backup_custom_logo_file)
-                os.remove(custom_logo_file)
-        return diff_lines
-
-    def _check_if_needs_comment(self, patterns, setting):
-        files_to_comment = []
-        # If any diffs are returned, this setting needs to be commented.
-        diffs = comment_assignments(patterns, setting, dry_run=True)
-        if setting == 'LICENSE':
-            diffs.extend(self._comment_license_file(dry_run=True))
-        elif setting == 'CUSTOM_LOGIN_INFO':
-            diffs.extend(self._comment_local_settings_file(dry_run=True))
-        elif setting == 'CUSTOM_LOGO':
-            diffs.extend(self._comment_custom_logo_file(dry_run=True))
-        for diff in diffs:
-            for line in diff.splitlines():
-                if line.startswith('+++ '):
-                    files_to_comment.append(line[4:])
-        return files_to_comment
-
-    def _check_if_needs_migration(self, setting):
-        # Check whether the current value differs from the default.
-        default_value = settings.DEFAULTS_SNAPSHOT.get(setting, empty)
-        if default_value is empty and setting != 'LICENSE':
-            field = settings_registry.get_setting_field(setting, read_only=True)
-            try:
-                default_value = field.get_default()
-            except SkipField:
-                pass
-        current_value = getattr(settings, setting, empty)
-        if setting == 'CUSTOM_LOGIN_INFO' and current_value in {empty, ''}:
-            local_settings_file = self._get_local_settings_file()
-            try:
-                if os.path.exists(local_settings_file):
-                    local_settings = json.load(open(local_settings_file))
-                    current_value = local_settings.get('custom_login_info', '')
-            except Exception as e:
-                if not self.skip_errors:
-                    raise CommandError('Error reading custom login info from {0}: {1!r}'.format(local_settings_file, e))
-        if setting == 'CUSTOM_LOGO' and current_value in {empty, ''}:
-            custom_logo_file = self._get_custom_logo_file()
-            try:
-                if os.path.exists(custom_logo_file):
-                    custom_logo_data = open(custom_logo_file).read()
-                    if custom_logo_data:
-                        current_value = 'data:image/png;base64,{}'.format(base64.b64encode(custom_logo_data))
-                    else:
-                        current_value = ''
-            except Exception as e:
-                if not self.skip_errors:
-                    raise CommandError('Error reading custom logo from {0}: {1!r}'.format(custom_logo_file, e))
-        if current_value != default_value:
-            if current_value is empty:
-                current_value = None
-            return current_value
-        return empty
-
-    def _display_tbd(self, setting, files_to_comment, migrate_value, comment_error=None, migrate_error=None):
-        if self.verbosity >= 1:
-            if files_to_comment:
-                if migrate_value is not empty:
-                    action = 'Migrate + Comment'
-                else:
-                    action = 'Comment'
-                if comment_error or migrate_error:
-                    action = self.style.ERROR('{} (skipped)'.format(action))
-                else:
-                    action = self.style.OK(action)
-                self.stdout.write('  {}: {}'.format(
-                    self.style.LABEL(setting),
-                    action,
-                ))
-                if self.verbosity >= 2:
-                    if migrate_error:
-                        self.stdout.write('    - Migrate value: {}'.format(
-                            self.style.ERROR(migrate_error),
-                        ))
-                    elif migrate_value is not empty:
-                        self.stdout.write('    - Migrate value: {}'.format(
-                            self.style.VALUE(repr(migrate_value)),
-                        ))
-                    if comment_error:
-                        self.stdout.write('    - Comment: {}'.format(
-                            self.style.ERROR(comment_error),
-                        ))
-                    elif files_to_comment:
-                        for file_to_comment in files_to_comment:
-                            self.stdout.write('    - Comment in: {}'.format(
-                                self.style.VALUE(file_to_comment),
-                            ))
-            else:
-                if self.verbosity >= 2:
-                    self.stdout.write('  {}: {}'.format(
-                        self.style.LABEL(setting),
-                        self.style.SKIP('No Migration'),
-                    ))
-
-    def _display_migrate(self, setting, action, display_value):
-        if self.verbosity >= 1:
-            if action == 'No Change':
-                action = self.style.SKIP(action)
-            else:
-                action = self.style.OK(action)
-            self.stdout.write('  {}: {}'.format(
-                self.style.LABEL(setting),
-                action,
-            ))
-            if self.verbosity >= 2:
-                for line in display_value.splitlines():
-                    self.stdout.write('    {}'.format(
-                        self.style.VALUE(line),
-                    ))
-
-    def _display_diff_summary(self, filename, added, removed):
-        self.stdout.write('  {} {}{} {}{}'.format(
-            self.style.LABEL(filename),
-            self.style.ERROR('-'),
-            self.style.ERROR(int(removed)),
-            self.style.OK('+'),
-            self.style.OK(str(added)),
-        ))
-
-    def _display_comment(self, diffs):
-        for diff in diffs:
-            if self.verbosity >= 2:
-                for line in diff.splitlines():
-                    display_line = line
-                    if line.startswith('--- ') or line.startswith('+++ '):
-                        display_line = self.style.LABEL(line)
-                    elif line.startswith('-'):
-                        display_line = self.style.ERROR(line)
-                    elif line.startswith('+'):
-                        display_line = self.style.OK(line)
-                    elif line.startswith('@@'):
-                        display_line = self.style.VALUE(line)
-                    if line.startswith('--- ') or line.startswith('+++ '):
-                        self.stdout.write('  ' + display_line)
-                    else:
-                        self.stdout.write('  ' + display_line)
-            elif self.verbosity >= 1:
-                filename, lines_added, lines_removed = None, 0, 0
-                for line in diff.splitlines():
-                    if line.startswith('+++ '):
-                        if filename:
-                            self._display_diff_summary(filename, lines_added, lines_removed)
-                        filename, lines_added, lines_removed = line[4:], 0, 0
-                    elif line.startswith('+'):
-                        lines_added += 1
-                    elif line.startswith('-'):
-                        lines_removed += 1
-                if filename:
-                    self._display_diff_summary(filename, lines_added, lines_removed)
-
-    def _discover_settings(self, registered_settings):
-        if self.verbosity >= 1:
-            self.stdout.write(self.style.HEADING('Discovering settings to be migrated and commented:'))
-
-        # Determine which settings need to be commented/migrated.
-        to_migrate = collections.OrderedDict()
-        to_comment = collections.OrderedDict()
-        patterns = self._get_settings_file_patterns()
-
-        for name in registered_settings:
-            comment_error, migrate_error = None, None
-            files_to_comment = []
-            try:
-                files_to_comment = self._check_if_needs_comment(patterns, name)
-            except Exception as e:
-                comment_error = 'Error commenting {0}: {1!r}'.format(name, e)
-                if not self.skip_errors:
-                    raise CommandError(comment_error)
-            if files_to_comment:
-                to_comment[name] = files_to_comment
-            migrate_value = empty
-            if files_to_comment:
-                migrate_value = self._check_if_needs_migration(name)
-                if migrate_value is not empty:
-                    field = settings_registry.get_setting_field(name)
-                    assert not field.read_only
-                    try:
-                        data = field.to_representation(migrate_value)
-                        setting_value = field.run_validation(data)
-                        db_value = field.to_representation(setting_value)
-                        to_migrate[name] = db_value
-                    except Exception as e:
-                        to_comment.pop(name)
-                        migrate_error = 'Unable to assign value {0!r} to setting "{1}: {2!s}".'.format(migrate_value, name, e)
-                        if not self.skip_errors:
-                            raise CommandError(migrate_error)
-            self._display_tbd(name, files_to_comment, migrate_value, comment_error, migrate_error)
-        if self.verbosity == 1 and not to_migrate and not to_comment:
-            self.stdout.write('  No settings found to migrate or comment!')
-        return (to_migrate, to_comment)
-
-    def _migrate(self, to_migrate):
-        if self.verbosity >= 1:
-            if self.dry_run:
-                self.stdout.write(self.style.HEADING('Migrating settings to database (dry-run):'))
-            else:
-                self.stdout.write(self.style.HEADING('Migrating settings to database:'))
-            if not to_migrate:
-                self.stdout.write('  No settings to migrate!')
-
-        # Now migrate those settings to the database.
-        for name, db_value in to_migrate.items():
-            display_value = json.dumps(db_value, indent=4)
-            setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
-            action = 'No Change'
-            if not setting:
-                action = 'Migrated'
-                if not self.dry_run:
-                    Setting.objects.create(key=name, user=None, value=db_value)
-            elif setting.value != db_value or type(setting.value) != type(db_value):
-                action = 'Updated'
-                if not self.dry_run:
-                    setting.value = db_value
-                    setting.save(update_fields=['value'])
-            self._display_migrate(name, action, display_value)
-
-    def _comment(self, to_comment):
-        if self.verbosity >= 1:
-            if bool(self.dry_run or self.no_comment):
-                self.stdout.write(self.style.HEADING('Commenting settings in files (dry-run):'))
-            else:
-                self.stdout.write(self.style.HEADING('Commenting settings in files:'))
-            if not to_comment:
-                self.stdout.write('  No settings to comment!')
-
-        # Now comment settings in settings files.
-        if to_comment:
-            to_comment_patterns = []
-            license_file_to_comment = None
-            local_settings_file_to_comment = None
-            custom_logo_file_to_comment = None
-            for files_to_comment in to_comment.values():
-                for file_to_comment in files_to_comment:
-                    if file_to_comment == self._get_license_file():
-                        license_file_to_comment = file_to_comment
-                    elif file_to_comment == self._get_local_settings_file():
-                        local_settings_file_to_comment = file_to_comment
-                    elif file_to_comment == self._get_custom_logo_file():
-                        custom_logo_file_to_comment = file_to_comment
-                    elif file_to_comment not in to_comment_patterns:
-                        to_comment_patterns.append(file_to_comment)
-            # Run once in dry-run mode to catch any errors from updating the files.
-            diffs = comment_assignments(to_comment_patterns, list(to_comment.keys()), dry_run=True, backup_suffix=self.backup_suffix)
-            # Then, if really updating, run again.
-            if not self.dry_run and not self.no_comment:
-                diffs = comment_assignments(to_comment_patterns, list(to_comment.keys()), dry_run=False, backup_suffix=self.backup_suffix)
-                if license_file_to_comment:
-                    diffs.extend(self._comment_license_file(dry_run=False))
-                if local_settings_file_to_comment:
-                    diffs.extend(self._comment_local_settings_file(dry_run=False))
-                if custom_logo_file_to_comment:
-                    diffs.extend(self._comment_custom_logo_file(dry_run=False))
-            self._display_comment(diffs)
-
-    def _migrate_settings(self, registered_settings):
-        to_migrate, to_comment = self._discover_settings(registered_settings)
-
-        if not bool(self.comment_only):
-            self._migrate(to_migrate)
-            self._comment(to_comment)
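The 480 deleted lines above are the body of the `migrate_to_database_settings` management command; dropping it is evidently what permits the removal of the two `'migrate_to_database_settings' not in sys.argv` guards in the settings wrapper and settings views hunks further down.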
@@ -1,7 +1,6 @@
 import base64
 import hashlib
 
-import six
 from django.utils.encoding import smart_str
 
 from cryptography.hazmat.backends import default_backend

@@ -91,7 +90,7 @@ def encrypt_field(instance, field_name, ask=False, subfield=None, skip_utf8=False):
     if skip_utf8:
         utf8 = False
     else:
-        utf8 = type(value) == six.text_type
+        utf8 = type(value) == str
     value = smart_str(value)
     key = get_encryption_key(field_name, getattr(instance, 'pk', None))
     encryptor = Cipher(AES(key), ECB(), default_backend()).encryptor()
@@ -1,8 +1,6 @@
 # Django REST Framework
 from rest_framework import serializers
 
-import six
-
 # Tower
 from awx.api.fields import VerbatimField
 from awx.api.serializers import BaseSerializer

@@ -47,12 +45,12 @@ class SettingFieldMixin(object):
     """Mixin to use a registered setting field class for API display/validation."""
 
     def to_representation(self, obj):
-        if getattr(self, 'encrypted', False) and isinstance(obj, six.string_types) and obj:
+        if getattr(self, 'encrypted', False) and isinstance(obj, str) and obj:
             return '$encrypted$'
         return obj
 
     def to_internal_value(self, value):
-        if getattr(self, 'encrypted', False) and isinstance(value, six.string_types) and value.startswith('$encrypted$'):
+        if getattr(self, 'encrypted', False) and isinstance(value, str) and value.startswith('$encrypted$'):
             raise serializers.SkipField()
         obj = super(SettingFieldMixin, self).to_internal_value(value)
         return super(SettingFieldMixin, self).to_representation(obj)
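The mixin's contract is unchanged by the type swap: non-empty encrypted strings render as the `$encrypted$` sentinel, and the sentinel is refused on write so a round-tripped payload cannot overwrite the stored secret. A behavioral sketch of the read side:

```python
SENTINEL = '$encrypted$'

def represent(encrypted, obj):
    # Mirrors to_representation above: mask non-empty encrypted strings.
    if encrypted and isinstance(obj, str) and obj:
        return SENTINEL
    return obj

assert represent(True, 'hunter2') == SENTINEL
assert represent(True, '') == ''
assert represent(False, 'hunter2') == 'hunter2'
```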
@@ -299,7 +299,7 @@ class SettingsWrapper(UserSettingsHolder):
         self.__dict__['_awx_conf_preload_expires'] = time.time() + SETTING_CACHE_TIMEOUT
         # Check for any settings that have been defined in Python files and
         # make those read-only to avoid overriding in the database.
-        if not self._awx_conf_init_readonly and 'migrate_to_database_settings' not in sys.argv:
+        if not self._awx_conf_init_readonly:
             defaults_snapshot = self._get_default('DEFAULTS_SNAPSHOT')
             for key in get_writeable_settings(self.registry):
                 init_default = defaults_snapshot.get(key, None)
@@ -1,7 +1,8 @@
+import urllib.parse
+
 import pytest
 
 from django.core.urlresolvers import resolve
-from django.utils.six.moves.urllib.parse import urlparse
 from django.contrib.auth.models import User
 
 from rest_framework.test import (

@@ -33,7 +34,7 @@ def admin():
 @pytest.fixture
 def api_request(admin):
     def rf(verb, url, data=None, user=admin):
-        view, view_args, view_kwargs = resolve(urlparse(url)[2])
+        view, view_args, view_kwargs = resolve(urllib.parse.urlparse(url)[2])
         request = getattr(APIRequestFactory(), verb)(url, data=data, format='json')
         if user:
             force_authenticate(request, user=user)
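`django.utils.six.moves.urllib.parse` is a straight re-export of the Python 3 standard library, so the fixture only needs the import spelled differently. For example (URL is hypothetical):

```python
import urllib.parse

parts = urllib.parse.urlparse('https://awx.example.com/api/v2/hosts/?page=2')
assert parts[2] == '/api/v2/hosts/'  # index 2 is the path component the fixture resolves
```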
@@ -13,7 +13,6 @@ from django.core.cache.backends.locmem import LocMemCache
 from django.core.exceptions import ImproperlyConfigured
 from django.utils.translation import ugettext_lazy as _
 import pytest
-import six
 
 from awx.conf import models, fields
 from awx.conf.settings import SettingsWrapper, EncryptedCacheProxy, SETTING_CACHE_NOTSET

@@ -70,7 +69,7 @@ def test_cached_settings_unicode_is_auto_decoded(settings):
 
     value = 'Iñtërnâtiônàlizætiøn'  # this simulates what python-memcached does on cache.set()
     settings.cache.set('DEBUG', value)
-    assert settings.cache.get('DEBUG') == six.u('Iñtërnâtiônàlizætiøn')
+    assert settings.cache.get('DEBUG') == 'Iñtërnâtiônàlizætiøn'
 
 
 def test_read_only_setting(settings):
@@ -6,8 +6,6 @@ import glob
 import os
 import shutil
 
-import six
-
 # AWX
 from awx.conf.registry import settings_registry
 

@@ -15,7 +13,7 @@ __all__ = ['comment_assignments', 'conf_to_dict']
 
 
 def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
-    if isinstance(patterns, six.string_types):
+    if isinstance(patterns, str):
         patterns = [patterns]
     diffs = []
     for pattern in patterns:

@@ -34,7 +32,7 @@ def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
 def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
     from redbaron import RedBaron, indent
 
-    if isinstance(assignment_names, six.string_types):
+    if isinstance(assignment_names, str):
         assignment_names = [assignment_names]
     else:
         assignment_names = assignment_names[:]
@@ -135,7 +135,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
             setting.value = value
             setting.save(update_fields=['value'])
             settings_change_list.append(key)
-        if settings_change_list and 'migrate_to_database_settings' not in sys.argv:
+        if settings_change_list:
             handle_setting_changes.delay(settings_change_list)
 
     def destroy(self, request, *args, **kwargs):

@@ -150,7 +150,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
                 continue
             setting.delete()
             settings_change_list.append(setting.key)
-        if settings_change_list and 'migrate_to_database_settings' not in sys.argv:
+        if settings_change_list:
             handle_setting_changes.delay(settings_change_list)
 
         # When TOWER_URL_BASE is deleted from the API, reset it to the hostname
@@ -2020,7 +2020,7 @@ msgstr ""
 
 #: awx/main/conf.py:286
 msgid ""
-"Allows roles to be dynamically downlaoded from a requirements.yml file for "
+"Allows roles to be dynamically downloaded from a requirements.yml file for "
 "SCM projects."
 msgstr ""
 

@@ -2020,7 +2020,7 @@ msgstr ""
 
 #: awx/main/conf.py:286
 msgid ""
-"Allows roles to be dynamically downlaoded from a requirements.yml file for "
+"Allows roles to be dynamically downloaded from a requirements.yml file for "
 "SCM projects."
 msgstr ""
 
@@ -5,7 +5,6 @@
 import os
 import sys
 import logging
-import six
 from functools import reduce
 
 # Django

@@ -2590,7 +2589,7 @@ class RoleAccess(BaseAccess):
         if (isinstance(obj.content_object, Organization) and
                 obj.role_field in (Organization.member_role.field.parent_role + ['member_role'])):
             if not isinstance(sub_obj, User):
-                logger.error(six.text_type('Unexpected attempt to associate {} with organization role.').format(sub_obj))
+                logger.error('Unexpected attempt to associate {} with organization role.'.format(sub_obj))
                 return False
             if not UserAccess(self.user).can_admin(sub_obj, None, allow_orphans=True):
                 return False
@@ -295,7 +295,7 @@ register(
     field_class=fields.BooleanField,
     default=True,
     label=_('Enable Role Download'),
-    help_text=_('Allows roles to be dynamically downlaoded from a requirements.yml file for SCM projects.'),
+    help_text=_('Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects.'),
     category=_('Jobs'),
     category_slug='jobs',
 )
@@ -1,6 +1,5 @@
 import logging
 import os
-import sys
 import random
 import traceback
 from uuid import uuid4

@@ -325,6 +324,11 @@ class AutoscalePool(WorkerPool):
         2. Clean up unnecessary, idle workers.
         3. Check to see if the database says this node is running any tasks
            that aren't actually running.  If so, reap them.
+
+        IMPORTANT: this function is one of the few places in the dispatcher
+        (aside from setting lookups) where we talk to the database.  As such,
+        if there's an outage, this method _can_ throw various
+        django.db.utils.Error exceptions.  Act accordingly.
         """
         orphaned = []
         for w in self.workers[::]:

@@ -366,17 +370,7 @@ class AutoscalePool(WorkerPool):
         for worker in self.workers:
             worker.calculate_managed_tasks()
             running_uuids.extend(list(worker.managed_tasks.keys()))
-        try:
-            reaper.reap(excluded_uuids=running_uuids)
-        except Exception:
-            # we _probably_ failed here due to DB connectivity issues, so
-            # don't use our logger (it accesses the database for configuration)
-            _, _, tb = sys.exc_info()
-            traceback.print_tb(tb)
-            for conn in connections.all():
-                # If the database connection has a hiccup, re-establish a new
-                # connection
-                conn.close_if_unusable_or_obsolete()
+        reaper.reap(excluded_uuids=running_uuids)
 
     def up(self):
         if self.full:
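With the in-method `try`/`except` gone, `cleanup()` can now surface `django.db.utils.Error` to its caller, exactly as the docstring added in the previous hunk warns. A hypothetical caller-side pattern consistent with that contract:

```python
from django.db import connections
from django.db.utils import Error as DatabaseError

def run_cleanup(pool):
    try:
        pool.cleanup()  # may raise django.db.utils.Error during a DB outage
    except DatabaseError:
        # Drop broken connections so the next attempt can reconnect cleanly.
        for conn in connections.all():
            conn.close_if_unusable_or_obsolete()
```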
@@ -81,7 +81,11 @@ class AWXConsumer(ConsumerMixin):
 
     def process_task(self, body, message):
         if 'control' in body:
-            return self.control(body, message)
+            try:
+                return self.control(body, message)
+            except Exception:
+                logger.exception("Exception handling control message:")
+                return
         if len(self.pool):
             if "uuid" in body and body['uuid']:
                 try:
@@ -4,7 +4,6 @@ import importlib
 import sys
 import traceback
 
-import six
 
 from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown
 

@@ -90,7 +89,7 @@ class TaskWorker(BaseWorker):
         try:
             if getattr(exc, 'is_awx_task_error', False):
                 # Error caused by user / tracked in job output
-                logger.warning(six.text_type("{}").format(exc))
+                logger.warning("{}".format(exc))
             else:
                 task = body['task']
                 args = body.get('args', [])
@@ -1,13 +1,12 @@
 # Copyright (c) 2018 Ansible by Red Hat
 # All Rights Reserved.

-import six


 class _AwxTaskError():
     def build_exception(self, task, message=None):
         if message is None:
-            message = six.text_type("Execution error running {}").format(task.log_format)
+            message = "Execution error running {}".format(task.log_format)
         e = Exception(message)
         e.task = task
         e.is_awx_task_error = True

@@ -15,7 +14,7 @@ class _AwxTaskError():

     def TaskCancel(self, task, rc):
         """Canceled flag caused run_pexpect to kill the job run"""
-        message=six.text_type("{} was canceled (rc={})").format(task.log_format, rc)
+        message="{} was canceled (rc={})".format(task.log_format, rc)
         e = self.build_exception(task, message)
         e.rc = rc
         e.awx_task_error_type = "TaskCancel"

@@ -23,7 +22,7 @@ class _AwxTaskError():

     def TaskError(self, task, rc):
         """Userspace error (non-zero exit code) in run_pexpect subprocess"""
-        message = six.text_type("{} encountered an error (rc={}), please see task stdout for details.").format(task.log_format, rc)
+        message = "{} encountered an error (rc={}), please see task stdout for details.".format(task.log_format, rc)
         e = self.build_exception(task, message)
         e.rc = rc
         e.awx_task_error_type = "TaskError"

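These six.text_type removals, repeated throughout the changeset, are mechanical: on Python 3 six.text_type is simply an alias for str, so the formatting behavior is unchanged. A quick check (six imported here only to demonstrate the equivalence):

    import six
    assert six.text_type is str  # holds on Python 3
    assert six.text_type("{} was canceled (rc={})").format('job 1', 99) == \
        "{} was canceled (rc={})".format('job 1', 99)
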
@@ -4,9 +4,7 @@
 # Python
 import copy
 import json
-import operator
 import re
-import six
 import urllib.parse

 from jinja2 import Environment, StrictUndefined

@@ -46,7 +44,7 @@ from awx.main.utils.filters import SmartFilter
 from awx.main.utils.encryption import encrypt_value, decrypt_value, get_encryption_key
 from awx.main.validators import validate_ssh_private_key
 from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
-from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS, ENV_BLACKLIST
+from awx.main.constants import ENV_BLACKLIST
 from awx.main import utils

@@ -80,7 +78,7 @@ class JSONField(upstream_JSONField):

 class JSONBField(upstream_JSONBField):
     def get_prep_lookup(self, lookup_type, value):
-        if isinstance(value, six.string_types) and value == "null":
+        if isinstance(value, str) and value == "null":
             return 'null'
         return super(JSONBField, self).get_prep_lookup(lookup_type, value)

@@ -95,7 +93,7 @@ class JSONBField(upstream_JSONBField):
     def from_db_value(self, value, expression, connection, context):
         # Work around a bug in django-jsonfield
         # https://bitbucket.org/schinckel/django-jsonfield/issues/57/cannot-use-in-the-same-project-as-djangos
-        if isinstance(value, six.string_types):
+        if isinstance(value, str):
             return json.loads(value)
         return value

@@ -411,7 +409,7 @@ class JSONSchemaField(JSONBField):
             format_checker=self.format_checker
         ).iter_errors(value):
             if error.validator == 'pattern' and 'error' in error.schema:
-                error.message = six.text_type(error.schema['error']).format(instance=error.instance)
+                error.message = error.schema['error'].format(instance=error.instance)
             elif error.validator == 'type':
                 expected_type = error.validator_value
                 if expected_type == 'object':

@@ -450,7 +448,7 @@ class JSONSchemaField(JSONBField):
     def from_db_value(self, value, expression, connection, context):
         # Work around a bug in django-jsonfield
         # https://bitbucket.org/schinckel/django-jsonfield/issues/57/cannot-use-in-the-same-project-as-djangos
-        if isinstance(value, six.string_types):
+        if isinstance(value, str):
             return json.loads(value)
         return value

@@ -512,9 +510,6 @@ class CredentialInputField(JSONSchemaField):
         properties = {}
         for field in model_instance.credential_type.inputs.get('fields', []):
             field = field.copy()
-            if field['type'] == 'become_method':
-                field.pop('type')
-                field['choices'] = list(map(operator.itemgetter(0), CHOICES_PRIVILEGE_ESCALATION_METHODS))
             properties[field['id']] = field
             if field.get('choices', []):
                 field['enum'] = list(field['choices'])[:]

@@ -547,7 +542,7 @@ class CredentialInputField(JSONSchemaField):
             v != '$encrypted$',
             model_instance.pk
         ]):
-            if not isinstance(getattr(model_instance, k), six.string_types):
+            if not isinstance(getattr(model_instance, k), str):
                 raise django_exceptions.ValidationError(
                     _('secret values must be of type string, not {}').format(type(v).__name__),
                     code='invalid',

@@ -564,7 +559,7 @@ class CredentialInputField(JSONSchemaField):
             format_checker=self.format_checker
         ).iter_errors(decrypted_values):
             if error.validator == 'pattern' and 'error' in error.schema:
-                error.message = six.text_type(error.schema['error']).format(instance=error.instance)
+                error.message = error.schema['error'].format(instance=error.instance)
             if error.validator == 'dependencies':
                 # replace the default error messaging w/ a better i18n string
                 # I wish there was a better way to determine the parameters of

@@ -658,7 +653,7 @@ class CredentialTypeInputField(JSONSchemaField):
             'items': {
                 'type': 'object',
                 'properties': {
-                    'type': {'enum': ['string', 'boolean', 'become_method']},
+                    'type': {'enum': ['string', 'boolean']},
                    'format': {'enum': ['ssh_private_key']},
                     'choices': {
                         'type': 'array',

@@ -719,17 +714,6 @@ class CredentialTypeInputField(JSONSchemaField):
                 # If no type is specified, default to string
                 field['type'] = 'string'

-            if field['type'] == 'become_method':
-                if not model_instance.managed_by_tower:
-                    raise django_exceptions.ValidationError(
-                        _('become_method is a reserved type name'),
-                        code='invalid',
-                        params={'value': value},
-                    )
-                else:
-                    field.pop('type')
-                    field['choices'] = CHOICES_PRIVILEGE_ESCALATION_METHODS
-
             for key in ('choices', 'multiline', 'format', 'secret',):
                 if key in field and field['type'] != 'string':
                     raise django_exceptions.ValidationError(

@@ -5,7 +5,6 @@
 import datetime
 import logging

-import six

 # Django
 from django.core.management.base import BaseCommand

@@ -43,7 +42,7 @@ class Command(BaseCommand):
         n_deleted_items = 0
         pks_to_delete = set()
         for asobj in ActivityStream.objects.iterator():
-            asobj_disp = '"%s" id: %s' % (six.text_type(asobj), asobj.id)
+            asobj_disp = '"%s" id: %s' % (str(asobj), asobj.id)
             if asobj.timestamp >= self.cutoff:
                 if self.dry_run:
                     self.logger.info("would skip %s" % asobj_disp)

@@ -5,7 +5,6 @@
 import datetime
 import logging

-import six

 # Django
 from django.core.management.base import BaseCommand, CommandError

@@ -68,7 +67,7 @@ class Command(BaseCommand):
         jobs = Job.objects.filter(created__lt=self.cutoff)
         for job in jobs.iterator():
             job_display = '"%s" (%d host summaries, %d events)' % \
-                (six.text_type(job),
+                (str(job),
                  job.job_host_summaries.count(), job.job_events.count())
             if job.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'

@@ -89,7 +88,7 @@ class Command(BaseCommand):
         ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff)
         for ad_hoc_command in ad_hoc_commands.iterator():
             ad_hoc_command_display = '"%s" (%d events)' % \
-                (six.text_type(ad_hoc_command),
+                (str(ad_hoc_command),
                  ad_hoc_command.ad_hoc_command_events.count())
             if ad_hoc_command.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'

@@ -109,7 +108,7 @@ class Command(BaseCommand):
         skipped, deleted = 0, 0
         project_updates = ProjectUpdate.objects.filter(created__lt=self.cutoff)
         for pu in project_updates.iterator():
-            pu_display = '"%s" (type %s)' % (six.text_type(pu), six.text_type(pu.launch_type))
+            pu_display = '"%s" (type %s)' % (str(pu), str(pu.launch_type))
             if pu.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'
                 self.logger.debug('%s %s project update %s', action_text, pu.status, pu_display)

@@ -132,7 +131,7 @@ class Command(BaseCommand):
         skipped, deleted = 0, 0
         inventory_updates = InventoryUpdate.objects.filter(created__lt=self.cutoff)
         for iu in inventory_updates.iterator():
-            iu_display = '"%s" (source %s)' % (six.text_type(iu), six.text_type(iu.source))
+            iu_display = '"%s" (source %s)' % (str(iu), str(iu.source))
             if iu.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'
                 self.logger.debug('%s %s inventory update %s', action_text, iu.status, iu_display)

@@ -155,7 +154,7 @@ class Command(BaseCommand):
         skipped, deleted = 0, 0
         system_jobs = SystemJob.objects.filter(created__lt=self.cutoff)
         for sj in system_jobs.iterator():
-            sj_display = '"%s" (type %s)' % (six.text_type(sj), six.text_type(sj.job_type))
+            sj_display = '"%s" (type %s)' % (str(sj), str(sj.job_type))
             if sj.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'
                 self.logger.debug('%s %s system_job %s', action_text, sj.status, sj_display)

@@ -185,7 +184,7 @@ class Command(BaseCommand):
         workflow_jobs = WorkflowJob.objects.filter(created__lt=self.cutoff)
         for workflow_job in workflow_jobs.iterator():
             workflow_job_display = '"{}" ({} nodes)'.format(
-                six.text_type(workflow_job),
+                str(workflow_job),
                 workflow_job.workflow_nodes.count())
             if workflow_job.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'

@@ -206,7 +205,7 @@ class Command(BaseCommand):
         notifications = Notification.objects.filter(created__lt=self.cutoff)
         for notification in notifications.iterator():
             notification_display = '"{}" (started {}, {} type, {} sent)'.format(
-                six.text_type(notification), six.text_type(notification.created),
+                str(notification), str(notification.created),
                 notification.notification_type, notification.notifications_sent)
             if notification.status in ('pending',):
                 action_text = 'would skip' if self.dry_run else 'skipping'

@@ -105,10 +105,26 @@ class AnsibleInventoryLoader(object):
         logger.info('Using PYTHONPATH: {}'.format(env.get('PYTHONPATH', None)))
         return env

+    def get_path_to_ansible_inventory(self):
+        venv_exe = os.path.join(self.venv_path, 'bin', 'ansible-inventory')
+        if os.path.exists(venv_exe):
+            return venv_exe
+        elif os.path.exists(
+            os.path.join(self.venv_path, 'bin', 'ansible')
+        ):
+            # if bin/ansible exists but bin/ansible-inventory doesn't, it's
+            # probably a really old version of ansible that doesn't support
+            # ansible-inventory
+            raise RuntimeError(
+                "{} does not exist (please upgrade to ansible >= 2.4)".format(
+                    venv_exe
+                )
+            )
+        return shutil.which('ansible-inventory')
+
     def get_base_args(self):
         # get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
-        abs_ansible_inventory = shutil.which('ansible-inventory')
-        bargs= [abs_ansible_inventory, '-i', self.source]
+        bargs= [self.get_path_to_ansible_inventory(), '-i', self.source]
         logger.debug('Using base command: {}'.format(' '.join(bargs)))
         return bargs

@@ -136,6 +152,9 @@ class AnsibleInventoryLoader(object):
             kwargs['proot_show_paths'] = [functioning_dir(self.source)]
         logger.debug("Running from `{}` working directory.".format(cwd))

+        if self.venv_path != settings.ANSIBLE_VENV_PATH:
+            kwargs['proot_custom_virtualenv'] = self.venv_path
+
         return wrap_args_with_proot(cmd, cwd, **kwargs)

     def command_to_json(self, cmd):

@@ -407,7 +426,7 @@ class Command(BaseCommand):
         # Build list of all host pks, remove all that should not be deleted.
         del_host_pks = set(hosts_qs.values_list('pk', flat=True))
         if self.instance_id_var:
-            all_instance_ids = self.mem_instance_id_map.keys()
+            all_instance_ids = list(self.mem_instance_id_map.keys())
             instance_ids = []
             for offset in range(0, len(all_instance_ids), self._batch_size):
                 instance_ids = all_instance_ids[offset:(offset + self._batch_size)]

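The new helper prefers a binary shipped inside the job's virtualenv and only falls back to the system PATH. A condensed sketch of that lookup order (find_tool is a hypothetical name, not AWX code):

    import os
    import shutil

    def find_tool(venv_path, name='ansible-inventory'):
        candidate = os.path.join(venv_path, 'bin', name)
        if os.path.exists(candidate):
            return candidate       # prefer the virtualenv's copy
        return shutil.which(name)  # otherwise fall back to PATH
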
@@ -3,7 +3,6 @@

 from awx.main.models import Instance, InstanceGroup
 from django.core.management.base import BaseCommand
-import six


 class Ungrouped(object):

@@ -42,7 +41,7 @@ class Command(BaseCommand):
             fmt += ' policy>={0.policy_instance_minimum}'
         if instance_group.controller:
             fmt += ' controller={0.controller.name}'
-        print(six.text_type(fmt + ']').format(instance_group))
+        print((fmt + ']').format(instance_group))
         for x in instance_group.instances.all():
             color = '\033[92m'
             if x.capacity == 0 or x.enabled is False:

@@ -52,5 +51,5 @@ class Command(BaseCommand):
                 fmt += ' last_isolated_check="{0.last_isolated_check:%Y-%m-%d %H:%M:%S}"'
             if x.capacity:
                 fmt += ' heartbeat="{0.modified:%Y-%m-%d %H:%M:%S}"'
-            print(six.text_type(fmt + '\033[0m').format(x, x.version or '?'))
+            print((fmt + '\033[0m').format(x, x.version or '?'))
         print('')

@@ -1,7 +1,6 @@
 # Copyright (c) 2017 Ansible Tower by Red Hat
 # All Rights Reserved.
 import sys
-import six

 from awx.main.utils.pglock import advisory_lock
 from awx.main.models import Instance, InstanceGroup

@@ -73,7 +72,7 @@ class Command(BaseCommand):
             if instance.exists():
                 instances.append(instance[0])
             else:
-                raise InstanceNotFound(six.text_type("Instance does not exist: {}").format(inst_name), changed)
+                raise InstanceNotFound("Instance does not exist: {}".format(inst_name), changed)

         ig.instances.add(*instances)

@@ -99,24 +98,24 @@ class Command(BaseCommand):
         if options.get('hostnames'):
             hostname_list = options.get('hostnames').split(",")

-        with advisory_lock(six.text_type('instance_group_registration_{}').format(queuename)):
+        with advisory_lock('instance_group_registration_{}'.format(queuename)):
             changed2 = False
             changed3 = False
             (ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
             if created:
-                print(six.text_type("Creating instance group {}".format(ig.name)))
+                print("Creating instance group {}".format(ig.name))
             elif not created:
-                print(six.text_type("Instance Group already registered {}").format(ig.name))
+                print("Instance Group already registered {}".format(ig.name))

             if ctrl:
                 (ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
                 if changed2:
-                    print(six.text_type("Set controller group {} on {}.").format(ctrl, queuename))
+                    print("Set controller group {} on {}.".format(ctrl, queuename))

             try:
                 (instances, changed3) = self.add_instances_to_group(ig, hostname_list)
                 for i in instances:
-                    print(six.text_type("Added instance {} to {}").format(i.hostname, ig.name))
+                    print("Added instance {} to {}".format(i.hostname, ig.name))
             except InstanceNotFound as e:
                 instance_not_found_err = e

@@ -126,4 +125,3 @@ class Command(BaseCommand):
         if instance_not_found_err:
             print(instance_not_found_err.message)
             sys.exit(1)

@@ -38,20 +38,20 @@ class HostManager(models.Manager):
                 hasattr(self.instance, 'host_filter') and
                 hasattr(self.instance, 'kind')):
             if self.instance.kind == 'smart' and self.instance.host_filter is not None:
-                q = SmartFilter.query_from_string(self.instance.host_filter)
-                if self.instance.organization_id:
-                    q = q.filter(inventory__organization=self.instance.organization_id)
-                # If we are using host_filters, disable the core_filters, this allows
-                # us to access all of the available Host entries, not just the ones associated
-                # with a specific FK/relation.
-                #
-                # If we don't disable this, a filter of {'inventory': self.instance} gets automatically
-                # injected by the related object mapper.
-                self.core_filters = {}
+                q = SmartFilter.query_from_string(self.instance.host_filter)
+                if self.instance.organization_id:
+                    q = q.filter(inventory__organization=self.instance.organization_id)
+                # If we are using host_filters, disable the core_filters, this allows
+                # us to access all of the available Host entries, not just the ones associated
+                # with a specific FK/relation.
+                #
+                # If we don't disable this, a filter of {'inventory': self.instance} gets automatically
+                # injected by the related object mapper.
+                self.core_filters = {}

-                qs = qs & q
-                unique_by_name = qs.order_by('name', 'pk').distinct('name')
-                return qs.filter(pk__in=unique_by_name)
+                qs = qs & q
+                unique_by_name = qs.order_by('name', 'pk').distinct('name')
+                return qs.filter(pk__in=unique_by_name)
         return qs

@@ -4,11 +4,11 @@
 import uuid
 import logging
 import threading
-import six
 import time
 import cProfile
 import pstats
 import os
+import urllib.parse

 from django.conf import settings
 from django.contrib.auth.models import User

@@ -195,7 +195,7 @@ class URLModificationMiddleware(object):

     def process_request(self, request):
         if hasattr(request, 'environ') and 'REQUEST_URI' in request.environ:
-            old_path = six.moves.urllib.parse.urlsplit(request.environ['REQUEST_URI']).path
+            old_path = urllib.parse.urlsplit(request.environ['REQUEST_URI']).path
             old_path = old_path[request.path.find(request.path_info):]
         else:
             old_path = request.path_info

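The middleware change relies on six.moves.urllib.parse being a re-export of the stdlib module on Python 3, so this is a drop-in replacement:

    import urllib.parse
    # same result the six.moves spelling produced
    urllib.parse.urlsplit('http://host/api/v2/jobs/?page=2').path  # '/api/v2/jobs/'
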
awx/main/migrations/0056_v350_custom_venv_history.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-22 22:20
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0055_v340_add_grafana_notification'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventoryupdate',
            name='custom_virtualenv',
            field=models.CharField(blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='job',
            name='custom_virtualenv',
            field=models.CharField(blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True),
        ),
    ]

awx/main/migrations/0057_v350_remove_become_method_type.py (new file, 19 lines)
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-29 19:56
from __future__ import unicode_literals

from django.db import migrations

# AWX
from awx.main.migrations import _credentialtypes as credentialtypes


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0056_v350_custom_venv_history'),
    ]

    operations = [
        migrations.RunPython(credentialtypes.remove_become_methods),
    ]

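The data-migration helper remove_become_methods lives in awx/main/migrations/_credentialtypes.py and is not shown in this diff; a plausible sketch of what such a RunPython callable looks like, under that assumption:

    def remove_become_methods(apps, schema_editor):
        # historical model, fetched through the migration state registry
        CredentialType = apps.get_model('main', 'CredentialType')
        for ct in CredentialType.objects.all():
            for field in ct.inputs.get('fields', []):
                if field.get('type') == 'become_method':
                    field['type'] = 'string'  # assumed effect, mirroring the fields.py change
            ct.save()
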
awx/main/migrations/0058_v350_remove_limit_limit.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-05 18:29
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0057_v350_remove_become_method_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='limit',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='limit',
            field=models.TextField(blank=True, default=''),
        ),
    ]

@@ -1,7 +1,6 @@
 import logging

 from django.db.models import Q
-import six

 logger = logging.getLogger('awx.main.migrations')

@@ -39,8 +38,8 @@ def rename_inventory_sources(apps, schema_editor):
             Q(deprecated_group__inventory__organization=org)).distinct().all()):

         inventory = invsrc.deprecated_group.inventory if invsrc.deprecated_group else invsrc.inventory
-        name = six.text_type('{0} - {1} - {2}').format(invsrc.name, inventory.name, i)
-        logger.debug(six.text_type("Renaming InventorySource({0}) {1} -> {2}").format(
+        name = '{0} - {1} - {2}'.format(invsrc.name, inventory.name, i)
+        logger.debug("Renaming InventorySource({0}) {1} -> {2}".format(
             invsrc.pk, invsrc.name, name
         ))
         invsrc.name = name

@@ -1,7 +1,6 @@
 import logging
 import json
 from django.utils.translation import ugettext_lazy as _
-import six

 from awx.conf.migrations._reencrypt import (
     decrypt_field,

@@ -3,8 +3,6 @@ import logging
 from django.utils.timezone import now
 from django.utils.text import slugify

-import six
-
 from awx.main.models.base import PERM_INVENTORY_SCAN, PERM_INVENTORY_DEPLOY
 from awx.main import utils

@@ -26,7 +24,7 @@ def _create_fact_scan_project(ContentType, Project, org):
                    polymorphic_ctype=ct)
     proj.save()

-    slug_name = slugify(six.text_type(name)).replace(u'-', u'_')
+    slug_name = slugify(str(name)).replace(u'-', u'_')
     proj.local_path = u'_%d__%s' % (int(proj.pk), slug_name)

     proj.save()

@@ -7,7 +7,6 @@ import os
 import re
 import stat
 import tempfile
-import six

 # Jinja2
 from jinja2 import Template

@@ -33,7 +32,6 @@ from awx.main.models.rbac import (
     ROLE_SINGLETON_SYSTEM_AUDITOR,
 )
 from awx.main.utils import encrypt_field
-from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS
 from . import injectors as builtin_injectors

 __all__ = ['Credential', 'CredentialType', 'V1Credential', 'build_safe_env']

@@ -164,7 +162,6 @@ class V1Credential(object):
             max_length=32,
             blank=True,
             default='',
-            choices=CHOICES_PRIVILEGE_ESCALATION_METHODS,
             help_text=_('Privilege escalation method.')
         ),
         'become_username': models.CharField(

@@ -325,10 +322,11 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):

     @property
     def has_encrypted_ssh_key_data(self):
-        if self.pk:
+        try:
             ssh_key_data = decrypt_field(self, 'ssh_key_data')
-        else:
-            ssh_key_data = self.ssh_key_data
+        except AttributeError:
+            return False

         try:
             pem_objects = validate_ssh_private_key(ssh_key_data)
             for pem_object in pem_objects:

@@ -383,9 +381,8 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
         super(Credential, self).save(*args, **kwargs)

     def encrypt_field(self, field, ask):
-        if not hasattr(self, field):
+        if field not in self.inputs:
             return None

         encrypted = encrypt_field(self, field, ask=ask)
         if encrypted:
             self.inputs[field] = encrypted

@@ -418,11 +415,11 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
         type_alias = self.credential_type_id
         if self.kind == 'vault' and self.has_input('vault_id'):
             if display:
-                fmt_str = six.text_type('{} (id={})')
+                fmt_str = '{} (id={})'
             else:
-                fmt_str = six.text_type('{}_{}')
+                fmt_str = '{}_{}'
             return fmt_str.format(type_alias, self.get_input('vault_id'))
-        return six.text_type(type_alias)
+        return str(type_alias)

     @staticmethod
     def unique_dict(cred_qs):

@@ -444,7 +441,12 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
         :param default(optional[str]): A default return value to use.
         """
         if field_name in self.credential_type.secret_fields:
-            return decrypt_field(self, field_name)
+            try:
+                return decrypt_field(self, field_name)
+            except AttributeError:
+                if 'default' in kwargs:
+                    return kwargs['default']
+                raise AttributeError
         if field_name in self.inputs:
             return self.inputs[field_name]
         if 'default' in kwargs:

@@ -535,7 +537,7 @@ class CredentialType(CommonModelNameNotUnique):
         if field['id'] == field_id:
             if 'choices' in field:
                 return field['choices'][0]
-            return {'string': '', 'boolean': False, 'become_method': ''}[field['type']]
+            return {'string': '', 'boolean': False}[field['type']]

     @classmethod
     def default(cls, f):

@@ -674,9 +676,7 @@ class CredentialType(CommonModelNameNotUnique):
             try:
                 injector_field.validate_env_var_allowed(env_var)
             except ValidationError as e:
-                logger.error(six.text_type(
-                    'Ignoring prohibited env var {}, reason: {}'
-                ).format(env_var, e))
+                logger.error('Ignoring prohibited env var {}, reason: {}'.format(env_var, e))
                 continue
             env[env_var] = Template(tmpl).render(**namespace)
             safe_env[env_var] = Template(tmpl).render(**safe_namespace)

@@ -734,7 +734,7 @@ def ssh(cls):
     }, {
         'id': 'become_method',
         'label': ugettext_noop('Privilege Escalation Method'),
-        'type': 'become_method',
+        'type': 'string',
         'help_text': ugettext_noop('Specify a method for "become" operations. This is '
                                    'equivalent to specifying the --become-method '
                                    'Ansible parameter.')

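With the new try/except in get_input, a missing or undecryptable secret no longer raises when a default is supplied. Usage sketch (cred is a hypothetical Credential instance; the default keyword is the one documented in the docstring above):

    value = cred.get_input('ssh_key_unlock', default='')
    # decryptable secret -> decrypted value
    # missing secret     -> '' instead of an AttributeError
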
@@ -9,7 +9,6 @@ from django.utils.text import Truncator
 from django.utils.timezone import utc
 from django.utils.translation import ugettext_lazy as _
 from django.utils.encoding import force_text
-import six

 from awx.api.versioning import reverse
 from awx.main.fields import JSONField

@@ -35,7 +34,7 @@ def sanitize_event_keys(kwargs, valid_keys):
     for key in [
         'play', 'role', 'task', 'playbook'
     ]:
-        if isinstance(kwargs.get('event_data', {}).get(key), six.string_types):
+        if isinstance(kwargs.get('event_data', {}).get(key), str):
             if len(kwargs['event_data'][key]) > 1024:
                 kwargs['event_data'][key] = Truncator(kwargs['event_data'][key]).chars(1024)

@@ -353,9 +352,16 @@ class BasePlaybookEvent(CreatedModifiedModel):
         if hasattr(self, 'job') and not from_parent_update:
             if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
                 self._update_hosts()
-            if self.event == 'playbook_on_stats':
-                self._update_parents_failed_and_changed()
+            if self.parent_uuid:
+                kwargs = {}
+                if self.changed is True:
+                    kwargs['changed'] = True
+                if self.failed is True:
+                    kwargs['failed'] = True
+                if kwargs:
+                    JobEvent.objects.filter(job_id=self.job_id, uuid=self.parent_uuid).update(**kwargs)

             if self.event == 'playbook_on_stats':
                 hostnames = self._hostnames()
                 self._update_host_summary_from_stats(hostnames)
                 try:

@@ -436,15 +442,6 @@ class JobEvent(BasePlaybookEvent):
         updated_fields.add('host_name')
         return updated_fields

-    def _update_parents_failed_and_changed(self):
-        # Update parent events to reflect failed, changed
-        runner_events = JobEvent.objects.filter(job=self.job,
-                                                event__startswith='runner_on')
-        changed_events = runner_events.filter(changed=True)
-        failed_events = runner_events.filter(failed=True)
-        JobEvent.objects.filter(uuid__in=changed_events.values_list('parent_uuid', flat=True)).update(changed=True)
-        JobEvent.objects.filter(uuid__in=failed_events.values_list('parent_uuid', flat=True)).update(failed=True)
-
     def _update_hosts(self, extra_host_pks=None):
         # Update job event hosts m2m from host_name, propagate to parent events.
         extra_host_pks = set(extra_host_pks or [])

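The removed _update_parents_failed_and_changed did bulk UPDATEs over all runner events at playbook_on_stats time; the replacement flags the parent as each event is saved. The core of the incremental form, restated outside the diff context (event stands in for the JobEvent being saved):

    kwargs = {}
    if event.changed is True:
        kwargs['changed'] = True
    if event.failed is True:
        kwargs['failed'] = True
    if kwargs and event.parent_uuid:
        JobEvent.objects.filter(job_id=event.job_id, uuid=event.parent_uuid).update(**kwargs)
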
@@ -152,10 +152,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         self.save(update_fields=['capacity', 'version', 'modified', 'cpu',
                                  'memory', 'cpu_capacity', 'mem_capacity'])

-    def clean_hostname(self):
-        return self.hostname
-

 class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
     """A model representing a Queue/Group of AWX Instances."""

@@ -222,8 +218,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
     class Meta:
         app_label = 'main'

-    def clean_name(self):
-        return self.name

     def fit_task_to_most_remaining_capacity_instance(self, task):
         instance_most_capacity = None

@@ -9,7 +9,6 @@ import logging
 import re
 import copy
 import os.path
-import six
 from urllib.parse import urljoin

 # Django

@@ -41,6 +40,7 @@ from awx.main.models.mixins import (
     ResourceMixin,
     TaskManagerInventoryUpdateMixin,
     RelatedJobsMixin,
+    CustomVirtualEnvMixin,
 )
 from awx.main.models.notifications import (
     NotificationTemplate,

@@ -1355,7 +1355,7 @@ class InventorySourceOptions(BaseModel):
     source_vars_dict = VarsDictProperty('source_vars')

     def clean_instance_filters(self):
-        instance_filters = six.text_type(self.instance_filters or '')
+        instance_filters = str(self.instance_filters or '')
         if self.source == 'ec2':
             invalid_filters = []
             instance_filter_re = re.compile(r'^((tag:.+)|([a-z][a-z\.-]*[a-z]))=.*$')

@@ -1381,7 +1381,7 @@ class InventorySourceOptions(BaseModel):
         return ''

     def clean_group_by(self):
-        group_by = six.text_type(self.group_by or '')
+        group_by = str(self.group_by or '')
         if self.source == 'ec2':
             get_choices = getattr(self, 'get_%s_group_by_choices' % self.source)
             valid_choices = [x[0] for x in get_choices()]

@@ -1538,7 +1538,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMix
         if '_eager_fields' not in kwargs:
             kwargs['_eager_fields'] = {}
         if 'name' not in kwargs['_eager_fields']:
-            name = six.text_type('{} - {}').format(self.inventory.name, self.name)
+            name = '{} - {}'.format(self.inventory.name, self.name)
             name_field = self._meta.get_field('name')
             if len(name) > name_field.max_length:
                 name = name[:name_field.max_length]

@@ -1622,7 +1622,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMix
         return InventoryUpdate.objects.filter(inventory_source=self)


-class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin, TaskManagerInventoryUpdateMixin):
+class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin, TaskManagerInventoryUpdateMixin, CustomVirtualEnvMixin):
     '''
     Internal job for tracking inventory updates from external sources.
     '''

@@ -1743,6 +1743,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,

     @property
     def ansible_virtualenv_path(self):
+        if self.inventory_source and self.inventory_source.source_project:
+            project = self.inventory_source.source_project
+            if project and project.custom_virtualenv:
+                return project.custom_virtualenv
         if self.inventory_source and self.inventory_source.inventory:
             organization = self.inventory_source.inventory.organization
             if organization and organization.custom_virtualenv:

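The ansible_virtualenv_path property above establishes a precedence: the source project's custom virtualenv wins, then the inventory's organization's. A sketch of that cascade, assuming the final fallback is settings.ANSIBLE_VENV_PATH (the tail of the property is not shown in this hunk):

    def pick_virtualenv(update):
        src = update.inventory_source
        if src and src.source_project and src.source_project.custom_virtualenv:
            return src.source_project.custom_virtualenv
        if src and src.inventory and src.inventory.organization and \
                src.inventory.organization.custom_virtualenv:
            return src.inventory.organization.custom_virtualenv
        return settings.ANSIBLE_VENV_PATH  # assumed default
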
@@ -10,7 +10,6 @@ import time
 import json
 from urllib.parse import urljoin

-import six

 # Django
 from django.conf import settings

@@ -94,8 +93,7 @@ class JobOptions(BaseModel):
         blank=True,
         default=0,
     )
-    limit = models.CharField(
-        max_length=1024,
+    limit = models.TextField(
         blank=True,
         default='',
     )

@@ -452,7 +450,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour

     @property
     def cache_timeout_blocked(self):
-        if Job.objects.filter(job_template=self, status__in=['pending', 'waiting', 'running']).count() > getattr(settings, 'SCHEDULE_MAX_JOBS', 10):
+        if Job.objects.filter(job_template=self, status__in=['pending', 'waiting', 'running']).count() >= getattr(settings, 'SCHEDULE_MAX_JOBS', 10):
             logger.error("Job template %s could not be started because there are more than %s other jobs from that template waiting to run" %
                          (self.name, getattr(settings, 'SCHEDULE_MAX_JOBS', 10)))
             return True

@@ -490,7 +488,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         return UnifiedJob.objects.filter(unified_job_template=self)


-class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskManagerJobMixin):
+class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskManagerJobMixin, CustomVirtualEnvMixin):
     '''
     A job applies a project (with playbook) to an inventory source with a given
     credential.  It represents a single invocation of ansible-playbook with the

@@ -823,7 +821,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
         timeout = now() - datetime.timedelta(seconds=timeout)
         hosts = hosts.filter(ansible_facts_modified__gte=timeout)
         for host in hosts:
-            filepath = os.sep.join(map(six.text_type, [destination, host.name]))
+            filepath = os.sep.join(map(str, [destination, host.name]))
             if not os.path.realpath(filepath).startswith(destination):
                 system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                 continue

@@ -840,7 +838,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def finish_job_fact_cache(self, destination, modification_times):
         destination = os.path.join(destination, 'facts')
         for host in self._get_inventory_hosts():
-            filepath = os.sep.join(map(six.text_type, [destination, host.name]))
+            filepath = os.sep.join(map(str, [destination, host.name]))
             if not os.path.realpath(filepath).startswith(destination):
                 system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                 continue

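The comparison change in cache_timeout_blocked fixes an off-by-one: with SCHEDULE_MAX_JOBS = 10, `count() > 10` still admitted an eleventh job into the queue, while `count() >= 10` blocks as soon as ten are already pending/waiting/running. Worked out:

    SCHEDULE_MAX_JOBS = 10
    for queued in (9, 10, 11):
        print(queued, queued > SCHEDULE_MAX_JOBS, queued >= SCHEDULE_MAX_JOBS)
    # 9  False False
    # 10 False True   <- the case the old check let through
    # 11 True  True
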
@@ -3,7 +3,6 @@ import os
 import json
 from copy import copy, deepcopy

-import six

 # Django
 from django.apps import apps

@@ -167,7 +166,7 @@ class SurveyJobTemplateMixin(models.Model):
             decrypted_default = default
             if (
                 survey_element['type'] == "password" and
-                isinstance(decrypted_default, six.string_types) and
+                isinstance(decrypted_default, str) and
                 decrypted_default.startswith('$encrypted$')
             ):
                 decrypted_default = decrypt_value(get_encryption_key('value', pk=None), decrypted_default)

@@ -190,7 +189,7 @@ class SurveyJobTemplateMixin(models.Model):
         if (survey_element['type'] == "password"):
             password_value = data.get(survey_element['variable'])
             if (
-                isinstance(password_value, six.string_types) and
+                isinstance(password_value, str) and
                 password_value == '$encrypted$'
             ):
                 if survey_element.get('default') is None and survey_element['required']:

@@ -203,7 +202,7 @@ class SurveyJobTemplateMixin(models.Model):
                 errors.append("'%s' value missing" % survey_element['variable'])
         elif survey_element['type'] in ["textarea", "text", "password"]:
             if survey_element['variable'] in data:
-                if not isinstance(data[survey_element['variable']], six.string_types):
+                if not isinstance(data[survey_element['variable']], str):
                     errors.append("Value %s for '%s' expected to be a string." % (data[survey_element['variable']],
                                                                                   survey_element['variable']))
         return errors

@@ -247,7 +246,7 @@ class SurveyJobTemplateMixin(models.Model):
                 errors.append("'%s' value is expected to be a list." % survey_element['variable'])
             else:
                 choice_list = copy(survey_element['choices'])
-                if isinstance(choice_list, six.string_types):
+                if isinstance(choice_list, str):
                     choice_list = choice_list.split('\n')
                 for val in data[survey_element['variable']]:
                     if val not in choice_list:

@@ -255,7 +254,7 @@ class SurveyJobTemplateMixin(models.Model):
                                                                                 choice_list))
         elif survey_element['type'] == 'multiplechoice':
             choice_list = copy(survey_element['choices'])
-            if isinstance(choice_list, six.string_types):
+            if isinstance(choice_list, str):
                 choice_list = choice_list.split('\n')
             if survey_element['variable'] in data:
                 if data[survey_element['variable']] not in choice_list:

@@ -315,7 +314,7 @@ class SurveyJobTemplateMixin(models.Model):
             if 'prompts' not in _exclude_errors:
                 errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch. Check the Prompt on Launch setting '+
                                           'on the {model_name} to include Extra Variables.').format(
-                                              list_of_keys=six.text_type(', ').join([six.text_type(key) for key in extra_vars.keys()]),
+                                              list_of_keys=', '.join([str(key) for key in extra_vars.keys()]),
                                               model_name=self._meta.verbose_name.title())]

         return (accepted, rejected, errors)

@@ -386,7 +385,7 @@ class SurveyJobMixin(models.Model):
             extra_vars = json.loads(self.extra_vars)
             for key in self.survey_passwords:
                 value = extra_vars.get(key)
-                if value and isinstance(value, six.string_types) and value.startswith('$encrypted$'):
+                if value and isinstance(value, str) and value.startswith('$encrypted$'):
                     extra_vars[key] = decrypt_value(get_encryption_key('value', pk=None), value)
             return json.dumps(extra_vars)
         else:

@@ -15,7 +15,6 @@ from django.utils.text import slugify
 from django.core.exceptions import ValidationError
 from django.utils.timezone import now, make_aware, get_default_timezone

-import six

 # AWX
 from awx.api.versioning import reverse

@@ -134,7 +133,7 @@ class ProjectOptions(models.Model):
     def clean_scm_url(self):
         if self.scm_type == 'insights':
             self.scm_url = settings.INSIGHTS_URL_BASE
-        scm_url = six.text_type(self.scm_url or '')
+        scm_url = str(self.scm_url or '')
         if not self.scm_type:
             return ''
         try:

@@ -145,7 +144,7 @@ class ProjectOptions(models.Model):
         scm_url_parts = urlparse.urlsplit(scm_url)
         if self.scm_type and not any(scm_url_parts):
             raise ValidationError(_('SCM URL is required.'))
-        return six.text_type(self.scm_url or '')
+        return str(self.scm_url or '')

     def clean_credential(self):
         if not self.scm_type:

@@ -329,7 +328,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
         skip_update = bool(kwargs.pop('skip_update', False))
         # Create auto-generated local path if project uses SCM.
         if self.pk and self.scm_type and not self.local_path.startswith('_'):
-            slug_name = slugify(six.text_type(self.name)).replace(u'-', u'_')
+            slug_name = slugify(str(self.name)).replace(u'-', u'_')
             self.local_path = u'_%d__%s' % (int(self.pk), slug_name)
             if 'local_path' not in update_fields:
                 update_fields.append('local_path')

@@ -544,8 +543,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
         res = super(ProjectUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
         if res and self.launch_type != 'sync':
             for inv_src in self.scm_inventory_updates.filter(status='running'):
-                inv_src.cancel(job_explanation=six.text_type(
-                    'Source project update `{}` was canceled.').format(self.name))
+                inv_src.cancel(job_explanation='Source project update `{}` was canceled.'.format(self.name))
         return res

     '''

@@ -204,7 +204,7 @@ class Role(models.Model):
         value = description.get('default')

         if '%s' in value and content_type:
-            value = value % model_name
+            value = value % model_name

         return value

@@ -12,7 +12,6 @@ import socket
 import subprocess
 import tempfile
 from collections import OrderedDict
-import six

 # Django
 from django.conf import settings

@@ -351,8 +350,8 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
         validated_kwargs = kwargs.copy()
         if unallowed_fields:
             if parent_field_name is None:
-                logger.warn(six.text_type('Fields {} are not allowed as overrides to spawn from {}.').format(
-                    six.text_type(', ').join(unallowed_fields), self
+                logger.warn('Fields {} are not allowed as overrides to spawn from {}.'.format(
+                    ', '.join(unallowed_fields), self
                 ))
                 for f in unallowed_fields:
                     validated_kwargs.pop(f)

@@ -1305,9 +1304,9 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
                 'dispatcher', self.execution_node
             ).running(timeout=timeout)
         except socket.timeout:
-            logger.error(six.text_type(
-                'could not reach dispatcher on {} within {}s'
-            ).format(self.execution_node, timeout))
+            logger.error('could not reach dispatcher on {} within {}s'.format(
+                self.execution_node, timeout
+            ))
             running = False
         return running

@@ -1374,14 +1373,13 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique

         created_by = getattr_dne(self, 'created_by')

-        if not created_by:
-            wj = self.get_workflow_job()
-            if wj:
-                for name in ('awx', 'tower'):
-                    r['{}_workflow_job_id'.format(name)] = wj.pk
-                    r['{}_workflow_job_name'.format(name)] = wj.name
-                created_by = getattr_dne(wj, 'created_by')
+        wj = self.get_workflow_job()
+        if wj:
+            for name in ('awx', 'tower'):
+                r['{}_workflow_job_id'.format(name)] = wj.pk
+                r['{}_workflow_job_name'.format(name)] = wj.name

         if not created_by:
             schedule = getattr_dne(self, 'schedule')
             if schedule:
                 for name in ('awx', 'tower'):

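Note the behavioral nuance in that last hunk: the workflow-job id/name fields are now recorded unconditionally, and the created_by fallback through the workflow job is dropped, consistent with the workflow change below that stamps created_by on spawned jobs at creation time. Restated outside the diff (r is the log-record dict being built):

    wj = job.get_workflow_job()
    if wj:
        r['awx_workflow_job_id'] = wj.pk    # now set even when created_by is known
        r['awx_workflow_job_name'] = wj.name
    # only the schedule-based attribution lookup stays guarded by `if not created_by`
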
@@ -276,6 +276,8 @@ class WorkflowJobNode(WorkflowNodeBase):
         data['extra_vars'] = extra_vars
         # ensure that unified jobs created by WorkflowJobs are marked
         data['_eager_fields'] = {'launch_type': 'workflow'}
+        if self.workflow_job and self.workflow_job.created_by:
+            data['_eager_fields']['created_by'] = self.workflow_job.created_by
         # Extra processing in the case that this is a slice job
         if 'job_slice' in self.ancestor_artifacts and is_root_node:
             data['_eager_fields']['allow_simultaneous'] = True

@@ -405,7 +407,11 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl

     @property
     def cache_timeout_blocked(self):
+        # TODO: don't allow running of job template if same workflow template running
+        if WorkflowJob.objects.filter(workflow_job_template=self,
+                                      status__in=['pending', 'waiting', 'running']).count() >= getattr(settings, 'SCHEDULE_MAX_JOBS', 10):
+            logger.error("Workflow Job template %s could not be started because there are more than %s other jobs from that template waiting to run" %
+                         (self.name, getattr(settings, 'SCHEDULE_MAX_JOBS', 10)))
+            return True
         return False

     @property

@@ -6,7 +6,6 @@ from datetime import timedelta
 import logging
 import uuid
 import json
-import six
 import random

 # Django

@@ -131,7 +130,7 @@ class TaskManager():
                     job.job_explanation = _(
                         "Workflow Job spawned from workflow could not start because it "
                         "would result in recursion (spawn order, most recent first: {})"
-                    ).format(six.text_type(', ').join([six.text_type('<{}>').format(tmp) for tmp in display_list]))
+                    ).format(', '.join(['<{}>'.format(tmp) for tmp in display_list]))
                 else:
                     logger.debug('Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
                         job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))

@@ -182,7 +181,7 @@ class TaskManager():
                 logger.info('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
                 result.append(workflow_job.id)
                 new_status = 'failed' if has_failed else 'successful'
-                logger.debug(six.text_type("Transitioning {} to {} status.").format(workflow_job.log_format, new_status))
+                logger.debug("Transitioning {} to {} status.".format(workflow_job.log_format, new_status))
                 update_fields = ['status', 'start_args']
                 workflow_job.status = new_status
                 if reason:

@@ -217,7 +216,7 @@ class TaskManager():
             try:
                 controller_node = rampart_group.choose_online_controller_node()
             except IndexError:
                logger.debug("No controllers available in group {} to run {}".format(
-                logger.debug(six.text_type("No controllers available in group {} to run {}").format(
                     rampart_group.name, task.log_format))
                 return

@@ -240,19 +239,19 @@ class TaskManager():
                 # non-Ansible jobs on isolated instances run on controller
                 task.instance_group = rampart_group.controller
                 task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
-                logger.info(six.text_type('Submitting isolated {} to queue {}.').format(
+                logger.info('Submitting isolated {} to queue {}.'.format(
                     task.log_format, task.instance_group.name, task.execution_node))
             elif controller_node:
                 task.instance_group = rampart_group
                 task.execution_node = instance.hostname
                 task.controller_node = controller_node
-                logger.info(six.text_type('Submitting isolated {} to queue {} controlled by {}.').format(
+                logger.info('Submitting isolated {} to queue {} controlled by {}.'.format(
                     task.log_format, task.execution_node, controller_node))
             else:
                 task.instance_group = rampart_group
                 if instance is not None:
                     task.execution_node = instance.hostname
-                logger.info(six.text_type('Submitting {} to <instance group, instance> <{},{}>.').format(
+                logger.info('Submitting {} to <instance group, instance> <{},{}>.'.format(
                     task.log_format, task.instance_group_id, task.execution_node))
             with disable_activity_stream():
                 task.celery_task_id = str(uuid.uuid4())

@@ -358,10 +357,10 @@ class TaskManager():
         return False

     def get_latest_project_update(self, job):
-        latest_project_update = ProjectUpdate.objects.filter(project=job.project, job_type='check').order_by("-created")
-        if not latest_project_update.exists():
-            return None
-        return latest_project_update.first()
+        latest_project_update = ProjectUpdate.objects.filter(project=job.project, job_type='check').order_by("-created")
+        if not latest_project_update.exists():
+            return None
+        return latest_project_update.first()

     def should_update_related_project(self, job, latest_project_update):
         now = tz_now()

@@ -436,7 +435,7 @@ class TaskManager():
     def process_dependencies(self, dependent_task, dependency_tasks):
         for task in dependency_tasks:
             if self.is_job_blocked(task):
-                logger.debug(six.text_type("Dependent {} is blocked from running").format(task.log_format))
+                logger.debug("Dependent {} is blocked from running".format(task.log_format))
                 continue
             preferred_instance_groups = task.preferred_instance_groups
             found_acceptable_queue = False

@@ -445,16 +444,16 @@ class TaskManager():
                 if idle_instance_that_fits is None:
                     idle_instance_that_fits = rampart_group.find_largest_idle_instance()
                 if self.get_remaining_capacity(rampart_group.name) <= 0:
-                    logger.debug(six.text_type("Skipping group {} capacity <= 0").format(rampart_group.name))
+                    logger.debug("Skipping group {} capacity <= 0".format(rampart_group.name))
                     continue

                 execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
                 if execution_instance:
-                    logger.debug(six.text_type("Starting dependent {} in group {} instance {}").format(
+                    logger.debug("Starting dependent {} in group {} instance {}".format(
                         task.log_format, rampart_group.name, execution_instance.hostname))
                 elif not execution_instance and idle_instance_that_fits:
                     execution_instance = idle_instance_that_fits
-                    logger.debug(six.text_type("Starting dependent {} in group {} on idle instance {}").format(
+                    logger.debug("Starting dependent {} in group {} on idle instance {}".format(
                         task.log_format, rampart_group.name, execution_instance.hostname))
                 if execution_instance:
                     self.graph[rampart_group.name]['graph'].add_job(task)

@@ -464,17 +463,17 @@ class TaskManager():
                     found_acceptable_queue = True
                     break
                 else:
-                    logger.debug(six.text_type("No instance available in group {} to run job {} w/ capacity requirement {}").format(
+                    logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
                         rampart_group.name, task.log_format, task.task_impact))
             if not found_acceptable_queue:
-                logger.debug(six.text_type("Dependent {} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
+                logger.debug("Dependent {} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))

     def process_pending_tasks(self, pending_tasks):
         running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
         for task in pending_tasks:
             self.process_dependencies(task, self.generate_dependencies(task))
             if self.is_job_blocked(task):
-                logger.debug(six.text_type("{} is blocked from running").format(task.log_format))
+                logger.debug("{} is blocked from running".format(task.log_format))
                 continue
             preferred_instance_groups = task.preferred_instance_groups
             found_acceptable_queue = False

@@ -482,7 +481,7 @@ class TaskManager():
             if isinstance(task, WorkflowJob):
                 if task.unified_job_template_id in running_workflow_templates:
                     if not task.allow_simultaneous:
-                        logger.debug(six.text_type("{} is blocked from running, workflow already running").format(task.log_format))
+                        logger.debug("{} is blocked from running, workflow already running".format(task.log_format))
                         continue
                 else:
                     running_workflow_templates.add(task.unified_job_template_id)

@@ -493,17 +492,17 @@ class TaskManager():
                     idle_instance_that_fits = rampart_group.find_largest_idle_instance()
                 remaining_capacity = self.get_remaining_capacity(rampart_group.name)
                 if remaining_capacity <= 0:
-                    logger.debug(six.text_type("Skipping group {}, remaining_capacity {} <= 0").format(
+                    logger.debug("Skipping group {}, remaining_capacity {} <= 0".format(
                         rampart_group.name, remaining_capacity))
                     continue

                 execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
                 if execution_instance:
-                    logger.debug(six.text_type("Starting {} in group {} instance {} (remaining_capacity={})").format(
+                    logger.debug("Starting {} in group {} instance {} (remaining_capacity={})".format(
                         task.log_format, rampart_group.name, execution_instance.hostname, remaining_capacity))
                 elif not execution_instance and idle_instance_that_fits:
                     execution_instance = idle_instance_that_fits
-                    logger.debug(six.text_type("Starting {} in group {} instance {} (remaining_capacity={})").format(
+                    logger.debug("Starting {} in group {} instance {} (remaining_capacity={})".format(
                         task.log_format, rampart_group.name, execution_instance.hostname, remaining_capacity))
                 if execution_instance:
                     self.graph[rampart_group.name]['graph'].add_job(task)

@@ -511,10 +510,10 @@ class TaskManager():
                     found_acceptable_queue = True
                     break
                 else:
-                    logger.debug(six.text_type("No instance available in group {} to run job {} w/ capacity requirement {}").format(
+                    logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
                         rampart_group.name, task.log_format, task.task_impact))
             if not found_acceptable_queue:
-                logger.debug(six.text_type("{} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
+                logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))

     def calculate_capacity_consumed(self, tasks):
         self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)

@@ -527,7 +526,7 @@ class TaskManager():
         return (task.task_impact + current_capacity > capacity_total)

     def consume_capacity(self, task, instance_group):
-        logger.debug(six.text_type('{} consumed {} capacity units from {} with prior total of {}').format(
+        logger.debug('{} consumed {} capacity units from {} with prior total of {}'.format(
            task.log_format, task.task_impact, instance_group,
            self.graph[instance_group]['consumed_capacity']))
         self.graph[instance_group]['consumed_capacity'] += task.task_impact

@@ -28,7 +28,6 @@ from django.utils import timezone
 from crum import get_current_request, get_current_user
 from crum.signals import current_user_getter

-import six

 # AWX
 from awx.main.models import * # noqa

@@ -117,7 +116,7 @@ def emit_update_inventory_computed_fields(sender, **kwargs):
     elif sender == Group.inventory_sources.through:
         sender_name = 'group.inventory_sources'
     else:
-        sender_name = six.text_type(sender._meta.verbose_name)
+        sender_name = str(sender._meta.verbose_name)
     if kwargs['signal'] == post_save:
         if sender == Job:
             return

@@ -147,7 +146,7 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
         pass
     else:
         return
-    sender_name = six.text_type(sender._meta.verbose_name)
+    sender_name = str(sender._meta.verbose_name)
     logger.debug("%s created or deleted, updating inventory computed fields: %r %r",
                  sender_name, sender, kwargs)
     try:

@@ -437,7 +436,7 @@ def activity_stream_create(sender, instance, created, **kwargs):
         # Special case where Job survey password variables need to be hidden
         if type(instance) == Job:
             changes['credentials'] = [
-                six.text_type('{} ({})').format(c.name, c.id)
+                '{} ({})'.format(c.name, c.id)
                 for c in instance.credentials.iterator()
             ]
             changes['labels'] = [l.name for l in instance.labels.iterator()]

@@ -13,7 +13,6 @@ import logging
import os
import re
import shutil
-import six
import stat
import tempfile
import time
@@ -93,7 +92,7 @@ def dispatch_startup():
            with disable_activity_stream():
                sch.save()
        except Exception:
-           logger.exception(six.text_type("Failed to rebuild schedule {}.").format(sch))
+           logger.exception("Failed to rebuild schedule {}.".format(sch))

#
# When the dispatcher starts, if the instance cannot be found in the database,
@@ -125,8 +124,8 @@ def inform_cluster_of_shutdown():
            reaper.reap(this_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
-       logger.warning(six.text_type('Normal shutdown signal for instance {}, '
-                                    'removed self from capacity pool.').format(this_inst.hostname))
+       logger.warning('Normal shutdown signal for instance {}, '
+                      'removed self from capacity pool.'.format(this_inst.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')

@@ -164,14 +163,14 @@ def apply_cluster_membership_policies():
        ])
        for hostname in ig.policy_instance_list:
            if hostname not in instance_hostnames_map:
-               logger.info(six.text_type("Unknown instance {} in {} policy list").format(hostname, ig.name))
+               logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
                continue
            inst = instance_hostnames_map[hostname]
            group_actual.instances.append(inst.id)
            # NOTE: arguable behavior: policy-list-group is not added to
            # instance's group count for consideration in minimum-policy rules
        if group_actual.instances:
-           logger.info(six.text_type("Policy List, adding Instances {} to Group {}").format(group_actual.instances, ig.name))
+           logger.info("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))

        if ig.controller_id is None:
            actual_groups.append(group_actual)
@@ -199,7 +198,7 @@ def apply_cluster_membership_policies():
            i.groups.append(g.obj.id)
            policy_min_added.append(i.obj.id)
        if policy_min_added:
-           logger.info(six.text_type("Policy minimum, adding Instances {} to Group {}").format(policy_min_added, g.obj.name))
+           logger.info("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))

    # Finally, process instance policy percentages
    for g in sorted(actual_groups, key=lambda x: len(x.instances)):
@@ -215,7 +214,7 @@ def apply_cluster_membership_policies():
            i.groups.append(g.obj.id)
            policy_per_added.append(i.obj.id)
        if policy_per_added:
-           logger.info(six.text_type("Policy percentage, adding Instances {} to Group {}").format(policy_per_added, g.obj.name))
+           logger.info("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))

    # Determine if any changes need to be made
    needs_change = False
@@ -259,15 +258,15 @@ def delete_project_files(project_path):
    if os.path.exists(project_path):
        try:
            shutil.rmtree(project_path)
-           logger.info(six.text_type('Success removing project files {}').format(project_path))
+           logger.info('Success removing project files {}'.format(project_path))
        except Exception:
-           logger.exception(six.text_type('Could not remove project directory {}').format(project_path))
+           logger.exception('Could not remove project directory {}'.format(project_path))
    if os.path.exists(lock_file):
        try:
            os.remove(lock_file)
-           logger.debug(six.text_type('Success removing {}').format(lock_file))
+           logger.debug('Success removing {}'.format(lock_file))
        except Exception:
-           logger.exception(six.text_type('Could not remove lock file {}').format(lock_file))
+           logger.exception('Could not remove lock file {}'.format(lock_file))


@task()
@@ -288,7 +287,7 @@ def send_notifications(notification_list, job_id=None):
            notification.status = "successful"
            notification.notifications_sent = sent
        except Exception as e:
-           logger.error(six.text_type("Send Notification Failed {}").format(e))
+           logger.error("Send Notification Failed {}".format(e))
            notification.status = "failed"
            notification.error = smart_str(e)
            update_fields.append('error')
@@ -296,7 +295,7 @@ def send_notifications(notification_list, job_id=None):
        try:
            notification.save(update_fields=update_fields)
        except Exception:
-           logger.exception(six.text_type('Error saving notification {} result.').format(notification.id))
+           logger.exception('Error saving notification {} result.'.format(notification.id))


@task()
@@ -327,7 +326,7 @@ def purge_old_stdout_files():
    for f in os.listdir(settings.JOBOUTPUT_ROOT):
        if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT,f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
            os.unlink(os.path.join(settings.JOBOUTPUT_ROOT,f))
-           logger.info(six.text_type("Removing {}").format(os.path.join(settings.JOBOUTPUT_ROOT,f)))
+           logger.info("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT,f)))


@task(queue=get_local_queuename)
@@ -340,7 +339,7 @@ def cluster_node_heartbeat():

    (changed, instance) = Instance.objects.get_or_register()
    if changed:
-       logger.info(six.text_type("Registered tower node '{}'").format(instance.hostname))
+       logger.info("Registered tower node '{}'".format(instance.hostname))

    for inst in list(instance_list):
        if inst.hostname == settings.CLUSTER_HOST_ID:
@@ -352,7 +351,7 @@ def cluster_node_heartbeat():
    if this_inst:
        startup_event = this_inst.is_lost(ref_time=nowtime)
        if this_inst.capacity == 0 and this_inst.enabled:
-           logger.warning(six.text_type('Rejoining the cluster as instance {}.').format(this_inst.hostname))
+           logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
        if this_inst.enabled:
            this_inst.refresh_capacity()
        elif this_inst.capacity != 0 and not this_inst.enabled:
@@ -367,11 +366,12 @@ def cluster_node_heartbeat():
        if other_inst.version == "":
            continue
        if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
-           logger.error(six.text_type("Host {} reports version {}, but this node {} is at {}, shutting down")
-                        .format(other_inst.hostname,
-                                other_inst.version,
-                                this_inst.hostname,
-                                this_inst.version))
+           logger.error("Host {} reports version {}, but this node {} is at {}, shutting down".format(
+               other_inst.hostname,
+               other_inst.version,
+               this_inst.hostname,
+               this_inst.version
+           ))
            # Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
            # The heartbeat task will reset the capacity to the system capacity after upgrade.
            stop_local_services(communicate=False)
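The shutdown guard above compares only the release part of each version string, dropping any `-beta`/build suffix with `split('-', 1)[0]`. A sketch of that comparison (AWX binds some version class to the name `Version`; `packaging.version.Version` is used here as a stand-in assumption):

    from packaging.version import Version  # stand-in for the Version used above

    def newer_peer_exists(peer_version, my_version):
        # '2.1.0-beta1' -> '2.1.0': compare releases, ignore suffixes.
        return Version(peer_version.split('-', 1)[0]) > Version(my_version.split('-', 1)[0])

    assert newer_peer_exists('2.1.0-beta1', '2.0.1')
    assert not newer_peer_exists('2.0.1', '2.0.1-xyz')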
@@ -392,17 +392,17 @@ def cluster_node_heartbeat():
            if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                other_inst.capacity = 0
                other_inst.save(update_fields=['capacity'])
-               logger.error(six.text_type("Host {} last checked in at {}, marked as lost.").format(
+               logger.error("Host {} last checked in at {}, marked as lost.".format(
                    other_inst.hostname, other_inst.modified))
            elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
                deprovision_hostname = other_inst.hostname
                other_inst.delete()
-               logger.info(six.text_type("Host {} Automatically Deprovisioned.").format(deprovision_hostname))
+               logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
        except DatabaseError as e:
            if 'did not affect any rows' in str(e):
-               logger.debug(six.text_type('Another instance has marked {} as lost').format(other_inst.hostname))
+               logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
            else:
-               logger.exception(six.text_type('Error marking {} as lost').format(other_inst.hostname))
+               logger.exception('Error marking {} as lost'.format(other_inst.hostname))


@task(queue=get_local_queuename)
@@ -429,59 +429,65 @@ def awx_isolated_heartbeat():
        isolated_instance.save(update_fields=['last_isolated_check'])
    # Slow pass looping over isolated IGs and their isolated instances
    if len(isolated_instance_qs) > 0:
-       logger.debug(six.text_type("Managing isolated instances {}.").format(','.join([inst.hostname for inst in isolated_instance_qs])))
+       logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs])))
        isolated_manager.IsolatedManager.health_check(isolated_instance_qs, awx_application_version)


@task()
def awx_periodic_scheduler():
-   run_now = now()
-   state = TowerScheduleState.get_solo()
-   last_run = state.schedule_last_run
-   logger.debug("Last scheduler run was: %s", last_run)
-   state.schedule_last_run = run_now
-   state.save()
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
            logger.debug("Not running periodic scheduler, another task holds lock")
            return
        logger.debug("Starting periodic scheduler")

-       old_schedules = Schedule.objects.enabled().before(last_run)
-       for schedule in old_schedules:
-           schedule.save()
-       schedules = Schedule.objects.enabled().between(last_run, run_now)
+       run_now = now()
+       state = TowerScheduleState.get_solo()
+       last_run = state.schedule_last_run
+       logger.debug("Last scheduler run was: %s", last_run)
+       state.schedule_last_run = run_now
+       state.save()
+
+       invalid_license = False
+       try:
+           access_registry[Job](None).check_license()
+       except PermissionDenied as e:
+           invalid_license = e
+       old_schedules = Schedule.objects.enabled().before(last_run)
+       for schedule in old_schedules:
+           schedule.save()
+       schedules = Schedule.objects.enabled().between(last_run, run_now)

        for schedule in schedules:
            template = schedule.unified_job_template
            schedule.save() # To update next_run timestamp.
            if template.cache_timeout_blocked:
                logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
                continue
-           invalid_license = False
            try:
                job_kwargs = schedule.get_job_kwargs()
                new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
-               logger.info(six.text_type('Spawned {} from schedule {}-{}.').format(
-                   new_unified_job.log_format, schedule.name, schedule.pk))
-               access_registry[Job](None).check_license()
-           except PermissionDenied as e:
-               invalid_license = e
+               logger.info('Spawned {} from schedule {}-{}.'.format(
+                   new_unified_job.log_format, schedule.name, schedule.pk))

                if invalid_license:
                    new_unified_job.status = 'failed'
                    new_unified_job.job_explanation = str(invalid_license)
                    new_unified_job.save(update_fields=['status', 'job_explanation'])
                    new_unified_job.websocket_emit_status("failed")
                    raise invalid_license
                can_start = new_unified_job.signal_start()
            except Exception:
                logger.exception('Error spawning scheduled job.')
                continue
            if not can_start:
                new_unified_job.status = 'failed'
                new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
                new_unified_job.save(update_fields=['status', 'job_explanation'])
                new_unified_job.websocket_emit_status("failed")
            emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
        state.save()
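The scheduler body now runs entirely under a non-blocking PostgreSQL advisory lock, so overlapping heartbeats skip the cycle instead of double-dispatching schedules, and the license check is evaluated once per run rather than once per schedule. A minimal sketch of the locking pattern, assuming the helper keeps the semantics shown in the hunk (`advisory_lock` comes from the django-pglocks package that AWX imports; `run_schedules` is a placeholder):

    from django_pglocks import advisory_lock

    def periodic_beat():
        # wait=False: if another node holds the lock, acquired is False and
        # we return instead of blocking -- at most one scheduler run at a time.
        with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
            if acquired is False:
                return          # another dispatcher is already scheduling
            run_schedules()     # placeholder for the loop shown above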
@task()
@@ -575,7 +581,7 @@ def update_host_smart_inventory_memberships():
            changed_inventories.add(smart_inventory)
        SmartInventoryMembership.objects.bulk_create(memberships)
    except IntegrityError as e:
-       logger.error(six.text_type("Update Host Smart Inventory Memberships failed due to an exception: {}").format(e))
+       logger.error("Update Host Smart Inventory Memberships failed due to an exception: {}".format(e))
        return
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in changed_inventories:
@@ -602,7 +608,7 @@ def delete_inventory(inventory_id, user_id, retries=5):
            'inventories-status_changed',
            {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'}
        )
-       logger.debug(six.text_type('Deleted inventory {} as user {}.').format(inventory_id, user_id))
+       logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
    except Inventory.DoesNotExist:
        logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
        return
@@ -626,7 +632,7 @@ def with_path_cleanup(f):
                elif os.path.exists(p):
                    os.remove(p)
            except OSError:
-               logger.exception(six.text_type("Failed to remove tmp file: {}").format(p))
+               logger.exception("Failed to remove tmp file: {}".format(p))
        self.cleanup_paths = []
    return _wrapped

@@ -948,6 +954,11 @@ class BaseTask(object):

        if not os.path.exists(settings.AWX_PROOT_BASE_PATH):
            raise RuntimeError('AWX_PROOT_BASE_PATH=%s does not exist' % settings.AWX_PROOT_BASE_PATH)

+       # store a record of the venv used at runtime
+       if hasattr(instance, 'custom_virtualenv'):
+           self.update_model(pk, custom_virtualenv=getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
+
        # Fetch ansible version once here to support version-dependent features.
        kwargs['ansible_version'] = get_ansible_version()
        kwargs['private_data_dir'] = self.build_private_data_dir(instance, **kwargs)
@@ -1059,13 +1070,13 @@ class BaseTask(object):
        try:
            self.post_run_hook(instance, status, **kwargs)
        except Exception:
-           logger.exception(six.text_type('{} Post run hook errored.').format(instance.log_format))
+           logger.exception('{} Post run hook errored.'.format(instance.log_format))
        instance = self.update_model(pk)
        if instance.cancel_flag:
            status = 'canceled'
            cancel_wait = (now() - instance.modified).seconds if instance.modified else 0
            if cancel_wait > 5:
-               logger.warn(six.text_type('Request to cancel {} took {} seconds to complete.').format(instance.log_format, cancel_wait))
+               logger.warn('Request to cancel {} took {} seconds to complete.'.format(instance.log_format, cancel_wait))

        instance = self.update_model(pk, status=status, result_traceback=tb,
                                     output_replacements=output_replacements,
@@ -1074,7 +1085,7 @@ class BaseTask(object):
        try:
            self.final_run_hook(instance, status, **kwargs)
        except Exception:
-           logger.exception(six.text_type('{} Final run hook errored.').format(instance.log_format))
+           logger.exception('{} Final run hook errored.'.format(instance.log_format))
        instance.websocket_emit_status(status)
        if status != 'successful':
            if status == 'canceled':
@@ -1253,7 +1264,7 @@ class RunJob(BaseTask):
                env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile

                authorize = network_cred.get_input('authorize', default=False)
-               env['ANSIBLE_NET_AUTHORIZE'] = six.text_type(int(authorize))
+               env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
                if authorize:
                    env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
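Process environments only carry strings, so the boolean `authorize` flag is flattened to '0'/'1' before export; `str(int(...))` is the Python 3 spelling of what `six.text_type(int(...))` did before. A quick check in plain Python, no AWX imports:

    env = {}
    for authorize in (True, False):
        env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
        # bools are ints in Python, so int(True) == 1 and int(False) == 0
        assert env['ANSIBLE_NET_AUTHORIZE'] in ('0', '1')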
@@ -1679,15 +1690,15 @@ class RunProjectUpdate(BaseTask):
            if not inv_src.update_on_project_update:
                continue
            if inv_src.scm_last_revision == scm_revision:
-               logger.debug(six.text_type('Skipping SCM inventory update for `{}` because '
-                                          'project has not changed.').format(inv_src.name))
+               logger.debug('Skipping SCM inventory update for `{}` because '
+                            'project has not changed.'.format(inv_src.name))
                continue
-           logger.debug(six.text_type('Local dependent inventory update for `{}`.').format(inv_src.name))
+           logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
            with transaction.atomic():
                if InventoryUpdate.objects.filter(inventory_source=inv_src,
                                                  status__in=ACTIVE_STATES).exists():
-                   logger.info(six.text_type('Skipping SCM inventory update for `{}` because '
-                                             'another update is already active.').format(inv_src.name))
+                   logger.info('Skipping SCM inventory update for `{}` because '
+                               'another update is already active.'.format(inv_src.name))
                    continue
                local_inv_update = inv_src.create_inventory_update(
                    _eager_fields=dict(
@@ -1700,8 +1711,9 @@ class RunProjectUpdate(BaseTask):
            try:
                inv_update_class().run(local_inv_update.id)
            except Exception:
-               logger.exception(six.text_type('{} Unhandled exception updating dependent SCM inventory sources.')
-                                .format(project_update.log_format))
+               logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(
+                   project_update.log_format
+               ))

            try:
                project_update.refresh_from_db()
@@ -1714,10 +1726,10 @@ class RunProjectUpdate(BaseTask):
                logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
                continue
            if project_update.cancel_flag:
-               logger.info(six.text_type('Project update {} was canceled while updating dependent inventories.').format(project_update.log_format))
+               logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
                break
            if local_inv_update.cancel_flag:
-               logger.info(six.text_type('Continuing to process project dependencies after {} was canceled').format(local_inv_update.log_format))
+               logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
            if local_inv_update.status == 'successful':
                inv_src.scm_last_revision = scm_revision
                inv_src.save(update_fields=['scm_last_revision'])
@@ -1726,7 +1738,7 @@ class RunProjectUpdate(BaseTask):
        try:
            fcntl.flock(self.lock_fd, fcntl.LOCK_UN)
        except IOError as e:
-           logger.error(six.text_type("I/O error({0}) while trying to open lock file [{1}]: {2}").format(e.errno, instance.get_lock_file(), e.strerror))
+           logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
            os.close(self.lock_fd)
            raise

@@ -1744,7 +1756,7 @@ class RunProjectUpdate(BaseTask):
        try:
            self.lock_fd = os.open(lock_path, os.O_RDONLY | os.O_CREAT)
        except OSError as e:
-           logger.error(six.text_type("I/O error({0}) while trying to open lock file [{1}]: {2}").format(e.errno, lock_path, e.strerror))
+           logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
            raise

        start_time = time.time()
@@ -1752,23 +1764,23 @@ class RunProjectUpdate(BaseTask):
            try:
                instance.refresh_from_db(fields=['cancel_flag'])
                if instance.cancel_flag:
-                   logger.info(six.text_type("ProjectUpdate({0}) was cancelled".format(instance.pk)))
+                   logger.info("ProjectUpdate({0}) was cancelled".format(instance.pk))
                    return
                fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except IOError as e:
                if e.errno not in (errno.EAGAIN, errno.EACCES):
                    os.close(self.lock_fd)
-                   logger.error(six.text_type("I/O error({0}) while trying to aquire lock on file [{1}]: {2}").format(e.errno, lock_path, e.strerror))
+                   logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                    raise
                else:
                    time.sleep(1.0)
        waiting_time = time.time() - start_time

        if waiting_time > 1.0:
-           logger.info(six.text_type(
-               '{} spent {} waiting to acquire lock for local source tree '
-               'for path {}.').format(instance.log_format, waiting_time, lock_path))
+           logger.info(
+               '{} spent {} waiting to acquire lock for local source tree '
+               'for path {}.'.format(instance.log_format, waiting_time, lock_path))
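The retry loop above polls `flock(LOCK_EX | LOCK_NB)` once a second so a queued project update can still notice its own cancel flag while waiting for the per-project lock. A self-contained sketch of the same pattern (POSIX-only; the `cancelled` callback is a placeholder for the cancel-flag check):

    import errno
    import fcntl
    import os
    import time

    def acquire_lock(lock_path, cancelled=lambda: False):
        fd = os.open(lock_path, os.O_RDONLY | os.O_CREAT)
        while True:
            if cancelled():
                os.close(fd)
                return None              # give up quietly, as the task does
            try:
                # LOCK_NB: raise instead of blocking if another holder exists
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return fd                # caller later runs flock(fd, LOCK_UN)
            except IOError as e:
                if e.errno not in (errno.EAGAIN, errno.EACCES):
                    os.close(fd)
                    raise                # real failure, not just contention
                time.sleep(1.0)          # lock busy; poll again shortly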
    def pre_run_hook(self, instance, **kwargs):
        # re-create root project folder if a natural disaster has destroyed it
@@ -1785,7 +1797,7 @@ class RunProjectUpdate(BaseTask):
            if lines:
                p.scm_revision = lines[0].strip()
            else:
-               logger.info(six.text_type("{} Could not find scm revision in check").format(instance.log_format))
+               logger.info("{} Could not find scm revision in check".format(instance.log_format))
            p.playbook_files = p.playbooks
            p.inventory_files = p.inventories
            p.save()
@@ -1907,7 +1919,7 @@ class RunInventoryUpdate(BaseTask):
            ec2_opts['cache_path'] = cache_path
            ec2_opts.setdefault('cache_max_age', '300')
            for k, v in ec2_opts.items():
-               cp.set(section, k, six.text_type(v))
+               cp.set(section, k, str(v))
        # Allow custom options to vmware inventory script.
        elif inventory_update.source == 'vmware':

@@ -1926,7 +1938,7 @@ class RunInventoryUpdate(BaseTask):
            vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)

            for k, v in vmware_opts.items():
-               cp.set(section, k, six.text_type(v))
+               cp.set(section, k, str(v))

        elif inventory_update.source == 'satellite6':
            section = 'foreman'
@@ -1945,7 +1957,7 @@ class RunInventoryUpdate(BaseTask):
                elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
                    want_hostcollections = v
                else:
-                   cp.set(section, k, six.text_type(v))
+                   cp.set(section, k, str(v))

            if credential:
                cp.set(section, 'url', credential.get_input('host', default=''))
@@ -2004,7 +2016,7 @@ class RunInventoryUpdate(BaseTask):

            azure_rm_opts = dict(inventory_update.source_vars_dict.items())
            for k, v in azure_rm_opts.items():
-               cp.set(section, k, six.text_type(v))
+               cp.set(section, k, str(v))

        # Return INI content.
        if cp.sections():
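All four `cp.set(...)` call sites coerce values the same way because Python 3's configparser rejects non-string option values outright; `str(v)` is both the coercion and the replacement for `six.text_type(v)`. A quick demonstration:

    import configparser

    cp = configparser.ConfigParser()
    cp.add_section('ec2')
    cp.set('ec2', 'cache_max_age', str(300))   # fine: value is a str
    try:
        cp.set('ec2', 'cache_max_age', 300)    # rejected on Python 3
    except TypeError:
        pass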
@@ -2089,7 +2101,7 @@ class RunInventoryUpdate(BaseTask):
        elif inventory_update.source in ['scm', 'custom']:
            for env_k in inventory_update.source_vars_dict:
                if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST:
-                   env[str(env_k)] = six.text_type(inventory_update.source_vars_dict[env_k])
+                   env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
        elif inventory_update.source == 'tower':
            env['TOWER_INVENTORY'] = inventory_update.instance_filters
            env['TOWER_LICENSE_TYPE'] = get_licenser().validate()['license_type']
@@ -2405,7 +2417,7 @@ class RunSystemJob(BaseTask):
                               '--management-jobs', '--ad-hoc-commands', '--workflow-jobs',
                               '--notifications'])
        except Exception:
-           logger.exception(six.text_type("{} Failed to parse system job").format(system_job.log_format))
+           logger.exception("{} Failed to parse system job".format(system_job.log_format))
        return args

    def build_env(self, instance, **kwargs):
@@ -2431,7 +2443,7 @@ def _reconstruct_relationships(copy_mapping):
            setattr(new_obj, field_name, related_obj)
        elif field.many_to_many:
            for related_obj in getattr(old_obj, field_name).all():
-               logger.debug(six.text_type('Deep copy: Adding {} to {}({}).{} relationship').format(
+               logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(
                    related_obj, new_obj, model, field_name
                ))
                getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
@@ -2443,7 +2455,7 @@ def deep_copy_model_obj(
        model_module, model_name, obj_pk, new_obj_pk,
        user_pk, sub_obj_list, permission_check_func=None
):
-   logger.info(six.text_type('Deep copy {} from {} to {}.').format(model_name, obj_pk, new_obj_pk))
+   logger.info('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream
    model = getattr(importlib.import_module(model_module), model_name, None)

@@ -167,7 +167,7 @@ class TestSwaggerGeneration():
        # replace ISO dates w/ the same value so we don't generate
        # needless diffs
        data = re.sub(
-           r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+Z',
+           r'[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\s)[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+(Z|\+[0-9]{2}:[0-9]{2})?',
            r'2018-02-01T08:00:00.000000Z',
            data
        )
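The widened pattern also matches space-separated timestamps and explicit UTC offsets, so reruns against either serializer style normalize to the same fixed stamp. A quick check of both shapes with plain `re`, no AWX fixtures:

    import re

    PATTERN = r'[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\s)[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+(Z|\+[0-9]{2}:[0-9]{2})?'
    FIXED = '2018-02-01T08:00:00.000000Z'

    for stamp in ('2018-06-01T12:34:56.000001Z',
                  '2018-06-01 12:34:56.000001+00:00'):
        assert re.sub(PATTERN, FIXED, stamp) == FIXED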
@@ -1,6 +1,5 @@
from django.contrib.auth.models import User

-import six

from awx.main.models import (
    Organization,
@@ -150,7 +149,7 @@ def create_survey_spec(variables=None, default_type='integer', required=True, mi
        vars_list = variables
    else:
        vars_list = [variables]
-   if isinstance(variables[0], six.string_types):
+   if isinstance(variables[0], str):
        slogan = variables[0]
    else:
        slogan = variables[0].get('question_name', 'something')

@@ -2,7 +2,6 @@ from unittest import mock
import pytest
import json

-import six

from awx.api.versioning import reverse
from awx.main.utils import timestamp_apiformat
@@ -107,7 +106,7 @@ def test_content(hosts, fact_scans, get, user, fact_ansible_json, monkeypatch_js

    assert fact_known.host_id == response.data['host']
    # TODO: Just make response.data['facts'] when we're only dealing with postgres, or if jsonfields ever fixes this bug
-   assert fact_ansible_json == (json.loads(response.data['facts']) if isinstance(response.data['facts'], six.text_type) else response.data['facts'])
+   assert fact_ansible_json == (json.loads(response.data['facts']) if isinstance(response.data['facts'], str) else response.data['facts'])
    assert timestamp_apiformat(fact_known.timestamp) == response.data['timestamp']
    assert fact_known.module == response.data['module']

@@ -119,7 +118,7 @@ def _test_search_by_module(hosts, fact_scans, get, user, fact_json, module_name)
    (fact_known, response) = setup_common(hosts, fact_scans, get, user, module_name=module_name, get_params=params)

    # TODO: Just make response.data['facts'] when we're only dealing with postgres, or if jsonfields ever fixes this bug
-   assert fact_json == (json.loads(response.data['facts']) if isinstance(response.data['facts'], six.text_type) else response.data['facts'])
+   assert fact_json == (json.loads(response.data['facts']) if isinstance(response.data['facts'], str) else response.data['facts'])
    assert timestamp_apiformat(fact_known.timestamp) == response.data['timestamp']
    assert module_name == response.data['module']


@@ -281,6 +281,36 @@ def test_host_filter_unicode(post, admin_user, organization):
    assert si.host_filter == u'ansible_facts__ansible_distribution=レッドハット'


+@pytest.mark.django_db
+@pytest.mark.parametrize("lookup", ['icontains', 'has_keys'])
+def test_host_filter_invalid_ansible_facts_lookup(post, admin_user, organization, lookup):
+    resp = post(
+        reverse('api:inventory_list'),
+        data={
+            'name': 'smart inventory', 'kind': 'smart',
+            'organization': organization.pk,
+            'host_filter': u'ansible_facts__ansible_distribution__{}=cent'.format(lookup)
+        },
+        user=admin_user,
+        expect=400
+    )
+    assert 'ansible_facts does not support searching with __{}'.format(lookup) in json.dumps(resp.data)
+
+
+@pytest.mark.django_db
+def test_host_filter_ansible_facts_exact(post, admin_user, organization):
+    post(
+        reverse('api:inventory_list'),
+        data={
+            'name': 'smart inventory', 'kind': 'smart',
+            'organization': organization.pk,
+            'host_filter': 'ansible_facts__ansible_distribution__exact="CentOS"'
+        },
+        user=admin_user,
+        expect=201
+    )
+
+
@pytest.mark.parametrize("role_field,expected_status_code", [
    (None, 403),
    ('admin_role', 201),

@@ -17,7 +17,6 @@ from awx.api.versioning import reverse
from awx.conf.models import Setting
from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException

-import six
TEST_GIF_LOGO = 'data:image/gif;base64,R0lGODlhIQAjAPIAAP//////AP8AAMzMAJmZADNmAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh+QQJCgAHACwAAAAAIQAjAAADo3i63P4wykmrvTjrzZsxXfR94WMQBFh6RECuixHMLyzPQ13ewZCvow9OpzEAjIBj79cJJmU+FceIVEZ3QRozxBttmyOBwPBtisdX4Bha3oxmS+llFIPHQXQKkiSEXz9PeklHBzx3hYNyEHt4fmmAhHp8Nz45KgV5FgWFOFEGmwWbGqEfniChohmoQZ+oqRiZDZhEgk81I4mwg4EKVbxzrDHBEAkAIfkECQoABwAsAAAAACEAIwAAA6V4utz+MMpJq724GpP15p1kEAQYQmOwnWjgrmxjuMEAx8rsDjZ+fJvdLWQAFAHGWo8FRM54JqIRmYTigDrDMqZTbbbMj0CgjTLHZKvPQH6CTx+a2vKR0XbbOsoZ7SphG057gjl+c0dGgzeGNiaBiSgbBQUHBV08NpOVlkMSk0FKjZuURHiiOJxQnSGfQJuoEKREejK0dFRGjoiQt7iOuLx0rgxYEQkAIfkECQoABwAsAAAAACEAIwAAA7h4utxnxslJDSGR6nrz/owxYB64QUEwlGaVqlB7vrAJscsd3Lhy+wBArGEICo3DUFH4QDqK0GMy51xOgcGlEAfJ+iAFie62chR+jYKaSAuQGOqwJp7jGQRDuol+F/jxZWsyCmoQfwYwgoM5Oyg1i2w0A2WQIW2TPYOIkleQmy+UlYygoaIPnJmapKmqKiusMmSdpjxypnALtrcHioq3ury7hGm3dnVosVpMWFmwREZbddDOSsjVswcJACH5BAkKAAcALAAAAAAhACMAAAOxeLrc/jDKSZUxNS9DCNYV54HURQwfGRlDEFwqdLVuGjOsW9/Odb0wnsUAKBKNwsMFQGwyNUHckVl8bqI4o43lA26PNkv1S9DtNuOeVirw+aTI3qWAQwnud1vhLSnQLS0GeFF+GoVKNF0fh4Z+LDQ6Bn5/MTNmL0mAl2E3j2aclTmRmYCQoKEDiaRDKFhJez6UmbKyQowHtzy1uEl8DLCnEktrQ2PBD1NxSlXKIW5hz6cJACH5BAkKAAcALAAAAAAhACMAAAOkeLrc/jDKSau9OOvNlTFd9H3hYxAEWDJfkK5LGwTq+g0zDR/GgM+10A04Cm56OANgqTRmkDTmSOiLMgFOTM9AnFJHuexzYBAIijZf2SweJ8ttbbXLmd5+wBiJosSCoGF/fXEeS1g8gHl9hxODKkh4gkwVIwUekESIhA4FlgV3PyCWG52WI2oGnR2lnUWpqhqVEF4Xi7QjhpsshpOFvLosrnpoEAkAIfkECQoABwAsAAAAACEAIwAAA6l4utz+MMpJq71YGpPr3t1kEAQXQltQnk8aBCa7bMMLy4wx1G8s072PL6SrGQDI4zBThCU/v50zCVhidIYgNPqxWZkDg0AgxB2K4vEXbBSvr1JtZ3uOext0x7FqovF6OXtfe1UzdjAxhINPM013ChtJER8FBQeVRX8GlpggFZWWfjwblTiigGZnfqRmpUKbljKxDrNMeY2eF4R8jUiSur6/Z8GFV2WBtwwJACH5BAkKAAcALAAAAAAhACMAAAO6eLrcZi3KyQwhkGpq8f6ONWQgaAxB8JTfg6YkO50pzD5xhaurhCsGAKCnEw6NucNDCAkyI8ugdAhFKpnJJdMaeiofBejowUseCr9GYa0j1GyMdVgjBxoEuPSZXWKf7gKBeHtzMms0gHgGfDIVLztmjScvNZEyk28qjT40b5aXlHCbDgOhnzedoqOOlKeopaqrCy56sgtotbYKhYW6e7e9tsHBssO6eSTIm1peV0iuFUZDyU7NJnmcuQsJACH5BAkKAAcALAAAAAAhACMAAAOteLrc/jDKSZsxNS9DCNYV54Hh4H0kdAXBgKaOwbYX/Miza1vrVe8KA2AoJL5gwiQgeZz4GMXlcHl8xozQ3kW3KTajL9zsBJ1+sV2fQfALem+XAlRApxu4ioI1UpC76zJ4fRqDBzI+LFyFhH1iiS59fkgziW07jjRAG5QDeECOLk2Tj6KjnZafW6hAej6Smgevr6yysza2tiCuMasUF2Yov2gZUUQbU8YaaqjLpQkAOw==' # NOQA
TEST_PNG_LOGO = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACEAAAAjCAYAAAAaLGNkAAAAAXNSR0IB2cksfwAAAdVpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpDb21wcmVzc2lvbj4xPC90aWZmOkNvbXByZXNzaW9uPgogICAgICAgICA8dGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPjI8L3RpZmY6UGhvdG9tZXRyaWNJbnRlcnByZXRhdGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Cjl0tmoAAAHVSURBVFgJ7VZRsoMgDNTOu5E9U+/Ud6Z6JssGNg2oNKD90xkHCNnNkgTbYbieKwNXBn6bgSXQ4+16xi5UDiqDN3Pecr6+1fM5DHh7n1NEIPjjoRLKzOjG3qQ5dRtEy2LCjh/Gz2wDZE2nZYKkrxdn/kY9XQQkGCGqqDY5IgJFkEKgBCzDNGXhTKEye7boFRH6IPJj5EshiNCSjV4R4eSx7zhmR2tcdIuwmWiMeao7e0JHViZEWUI5aP8a9O+rx74D6sGEiJftiX3YeueIiFXg2KrhpqzjVC3dPZFYJZ7NOwwtNwM8R0UkLfH0sT5qck+OlkMq0BucKr0iWG7gpAQksD9esM1z3Lnf6SHjLh67nnKEGxC/iomWhByTeXOQJGHHcKxwHhHKnt1HIdYtmexkIb/HOURWTSJqn2gKMDG0bDUc/D0iAseovxUBoylmQCug6IVhSv+4DIeKI94jAr4AjiSEgQ25JYB+YWT9BZ94AM8erwgFkRifaArA6U0G5KT0m//z26REZuK9okgrT6VwE1jTHjbVzyNAyRwTEPOtuiex9FVBNZCkruaA4PZqFp1u8Rpww9/6rcK5y0EkAxRiZJt79PWOVYWGRE9pbJhavMengMflGyumk0akMsQnAAAAAElFTkSuQmCC' # NOQA
@@ -78,7 +77,7 @@ def test_awx_task_env_validity(get, patch, admin, value, expected):

    if expected == 200:
        resp = get(url, user=admin)
-       assert resp.data['AWX_TASK_ENV'] == dict((k, six.text_type(v)) for k, v in value.items())
+       assert resp.data['AWX_TASK_ENV'] == dict((k, str(v)) for k, v in value.items())


@pytest.mark.django_db

@@ -3,15 +3,14 @@ import pytest
from unittest import mock
import json
import os
-import six
import tempfile
import shutil
+import urllib.parse
from datetime import timedelta
from unittest.mock import PropertyMock

# Django
from django.core.urlresolvers import resolve
-from django.utils.six.moves.urllib.parse import urlparse
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder
@@ -523,7 +522,7 @@ def _request(verb):
        if 'format' not in kwargs and 'content_type' not in kwargs:
            kwargs['format'] = 'json'

-       view, view_args, view_kwargs = resolve(urlparse(url)[2])
+       view, view_args, view_kwargs = resolve(urllib.parse.urlparse(url)[2])
        request = getattr(APIRequestFactory(), verb)(url, **kwargs)
        if isinstance(kwargs.get('cookies', None), dict):
            for key, value in kwargs['cookies'].items():
@@ -730,7 +729,7 @@ def get_db_prep_save(self, value, connection, **kwargs):
        return None
    # default values come in as strings; only non-strings should be
    # run through `dumps`
-   if not isinstance(value, six.string_types):
+   if not isinstance(value, str):
        value = dumps(value)

    return value
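With Python 2 out of the picture, the `six.moves` shim for URL parsing gives way to the standard library directly; index `[2]` of the parse result is the path component. A standalone equivalent:

    import urllib.parse

    url = 'http://testserver/api/v2/inventories/?page=2'
    parsed = urllib.parse.urlparse(url)
    assert parsed[2] == parsed.path == '/api/v2/inventories/'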
@@ -7,6 +7,50 @@ from awx.main.models import (Job, JobEvent, ProjectUpdate, ProjectUpdateEvent,
                             SystemJobEvent)


+@pytest.mark.django_db
+@mock.patch('awx.main.consumers.emit_channel_notification')
+def test_parent_changed(emit):
+    j = Job()
+    j.save()
+    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start')
+    assert JobEvent.objects.count() == 1
+    for e in JobEvent.objects.all():
+        assert e.changed is False
+
+    JobEvent.create_from_data(
+        job_id=j.pk,
+        parent_uuid='abc123',
+        event='runner_on_ok',
+        event_data={
+            'res': {'changed': ['localhost']}
+        }
+    )
+    assert JobEvent.objects.count() == 2
+    for e in JobEvent.objects.all():
+        assert e.changed is True
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
+@mock.patch('awx.main.consumers.emit_channel_notification')
+def test_parent_failed(emit, event):
+    j = Job()
+    j.save()
+    JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start')
+    assert JobEvent.objects.count() == 1
+    for e in JobEvent.objects.all():
+        assert e.failed is False
+
+    JobEvent.create_from_data(
+        job_id=j.pk,
+        parent_uuid='abc123',
+        event=event
+    )
+    assert JobEvent.objects.count() == 2
+    for e in JobEvent.objects.all():
+        assert e.failed is True
+
+
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_job_event_websocket_notifications(emit):

@@ -2,7 +2,6 @@

import pytest
from unittest import mock
-import six

from django.core.exceptions import ValidationError

@@ -249,7 +248,7 @@ def test_inventory_update_name(inventory, inventory_source):

@pytest.mark.django_db
def test_inventory_name_with_unicode(inventory, inventory_source):
-   inventory.name = six.u('オオオ')
+   inventory.name = 'オオオ'
    inventory.save()
    iu = inventory_source.update()
    assert iu.name.startswith(inventory.name)

@@ -1,5 +1,4 @@
import pytest
-import six

from awx.main.models import JobTemplate, Job, JobHostSummary, WorkflowJob

@@ -71,12 +70,12 @@ def test_job_host_summary_representation(host):
        host=host, job=job,
        changed=1, dark=2, failures=3, ok=4, processed=5, skipped=6
    )
-   assert 'single-host changed=1 dark=2 failures=3 ok=4 processed=5 skipped=6' == six.text_type(jhs)
+   assert 'single-host changed=1 dark=2 failures=3 ok=4 processed=5 skipped=6' == str(jhs)

    # Representation should be robust to deleted related items
    jhs = JobHostSummary.objects.get(pk=jhs.id)
    host.delete()
-   assert 'N/A changed=1 dark=2 failures=3 ok=4 processed=5 skipped=6' == six.text_type(jhs)
+   assert 'N/A changed=1 dark=2 failures=3 ok=4 processed=5 skipped=6' == str(jhs)


@pytest.mark.django_db

@@ -1,6 +1,5 @@
import itertools
import pytest
-import six

# CRUM
from crum import impersonate
@@ -74,7 +73,7 @@ class TestCreateUnifiedJob:
        new_creds = []
        for cred in jt_linked.credentials.all():
            new_creds.append(Credential.objects.create(
-               name=six.text_type(cred.name) + six.text_type('_new'),
+               name=str(cred.name) + '_new',
                credential_type=cred.credential_type,
                inputs=cred.inputs
            ))
@@ -112,15 +111,15 @@ class TestMetaVars:
        for key in user_vars:
            assert key not in job.awx_meta_vars()

-   def test_workflow_job_metavars(self, admin_user):
+   def test_workflow_job_metavars(self, admin_user, job_template):
        workflow_job = WorkflowJob.objects.create(
            name='workflow-job',
            created_by=admin_user
        )
-       job = Job.objects.create(
-           name='fake-job',
-           launch_type='workflow'
-       )
+       node = workflow_job.workflow_nodes.create(unified_job_template=job_template)
+       job_kv = node.get_job_kwargs()
+       job = node.unified_job_template.create_unified_job(**job_kv)
+
        workflow_job.workflow_nodes.create(job=job)
        data = job.awx_meta_vars()
        assert data['awx_user_id'] == admin_user.id

@@ -82,14 +82,14 @@ def test_multi_group_with_shared_dependency(instance_factory, default_instance_g

@pytest.mark.django_db
def test_workflow_job_no_instancegroup(workflow_job_template_factory, default_instance_group, mocker):
    wfjt = workflow_job_template_factory('anicedayforawalk').workflow_job_template
    wfj = WorkflowJob.objects.create(workflow_job_template=wfjt)
    wfj.status = "pending"
    wfj.save()
    with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(wfj, None, [], None)
    assert wfj.instance_group is None


@pytest.mark.django_db

@@ -1,13 +1,11 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.

-import itertools
-
import pytest
from django.core.exceptions import ValidationError

from awx.main.utils import decrypt_field
-from awx.main.models import Credential, CredentialType, V1Credential
+from awx.main.models import Credential, CredentialType

from rest_framework import serializers

@@ -206,10 +204,11 @@ def test_vault_validation(organization, inputs, valid):


@pytest.mark.django_db
-@pytest.mark.parametrize('become_method, valid', list(zip(
-    dict(V1Credential.FIELDS['become_method'].choices).keys(),
-    itertools.repeat(True)
-)) + [('invalid-choice', False)])
+@pytest.mark.parametrize('become_method, valid', [
+    ('', True),
+    ('sudo', True),
+    ('custom-plugin', True),
+])
def test_choices_validity(become_method, valid, organization):
    inputs = {'become_method': become_method}
    cred_type = CredentialType.defaults['ssh']()
@@ -345,6 +344,10 @@ def test_credential_get_input(organization_factory):
            'id': 'vault_id',
            'type': 'string',
            'secret': False
+       }, {
+           'id': 'secret',
+           'type': 'string',
+           'secret': True,
        }]
    }
)
@@ -372,6 +375,12 @@ def test_credential_get_input(organization_factory):
        cred.get_input('field_not_on_credential_type')
    # verify that the provided default is used for undefined inputs
    assert cred.get_input('field_not_on_credential_type', default='bar') == 'bar'
+   # verify expected exception is raised when attempting to access an unset secret
+   # input without providing a default
+   with pytest.raises(AttributeError):
+       cred.get_input('secret')
+   # verify that the provided default is used for undefined inputs
+   assert cred.get_input('secret', default='fiz') == 'fiz'
    # verify return values for encrypted secret fields are decrypted
    assert cred.inputs['vault_password'].startswith('$encrypted$')
    assert cred.get_input('vault_password') == 'testing321'
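The new assertions pin down the `Credential.get_input` contract for secret fields: a defined-but-unset input raises `AttributeError` unless the caller passes `default=`. A hedged stub of that contract (the real method also decrypts `$encrypted$` values; this standalone class is illustrative only):

    class MiniCredential:
        def __init__(self, inputs):
            self.inputs = inputs

        def get_input(self, key, **kwargs):
            if key in self.inputs:
                return self.inputs[key]   # AWX also decrypts secrets here
            if 'default' in kwargs:
                return kwargs['default']  # explicit default beats an error
            raise AttributeError(key)     # unset input and no fallback

    cred = MiniCredential({'vault_id': 'abc'})
    assert cred.get_input('secret', default='fiz') == 'fiz'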
@@ -3,6 +3,7 @@ import multiprocessing
import random
import signal
import time
+from unittest import mock

from django.utils.timezone import now as tz_now
import pytest
@@ -200,7 +201,9 @@ class TestAutoScaling:
        assert len(self.pool) == 10

        # cleanup should scale down to 8 workers
-       self.pool.cleanup()
+       with mock.patch('awx.main.dispatch.reaper.reap') as reap:
+           self.pool.cleanup()
+           reap.assert_called()
        assert len(self.pool) == 2

    def test_max_scale_up(self):
@@ -248,7 +251,9 @@ class TestAutoScaling:
        time.sleep(1) # wait a moment for sigterm

        # clean up and the dead worker
-       self.pool.cleanup()
+       with mock.patch('awx.main.dispatch.reaper.reap') as reap:
+           self.pool.cleanup()
+           reap.assert_called()
        assert len(self.pool) == 1
        assert self.pool.workers[0].pid == alive_pid
@@ -16,117 +16,117 @@ from awx.api.versioning import reverse
@pytest.mark.django_db
class TestOAuth2Application:

    @pytest.mark.parametrize("user_for_access, can_access_list", [
        (0, [True, True]),
        (1, [True, True]),
        (2, [True, True]),
        (3, [False, False]),
    ])
    def test_can_read(
        self, admin, org_admin, org_member, alice, user_for_access, can_access_list, organization
    ):
        user_list = [admin, org_admin, org_member, alice]
        access = OAuth2ApplicationAccess(user_list[user_for_access])
        app_creation_user_list = [admin, org_admin]
        for user, can_access in zip(app_creation_user_list, can_access_list):
            app = Application.objects.create(
                name='test app for {}'.format(user.username), user=user,
                client_type='confidential', authorization_grant_type='password', organization=organization
            )
-           assert access.can_read(app) is True
+           assert access.can_read(app) is can_access

    def test_admin_only_can_read(self, user, organization):
        user = user('org-admin', False)
        organization.admin_role.members.add(user)
        access = OAuth2ApplicationAccess(user)
        app = Application.objects.create(
            name='test app for {}'.format(user.username), user=user,
            client_type='confidential', authorization_grant_type='password', organization=organization
        )
        assert access.can_read(app) is True

    def test_app_activity_stream(self, org_admin, alice, organization):
        app = Application.objects.create(
            name='test app for {}'.format(org_admin.username), user=org_admin,
            client_type='confidential', authorization_grant_type='password', organization=organization
        )
        access = OAuth2ApplicationAccess(org_admin)
        assert access.can_read(app) is True
        access = ActivityStreamAccess(org_admin)
        activity_stream = ActivityStream.objects.filter(o_auth2_application=app).latest('pk')
        assert access.can_read(activity_stream) is True
        access = ActivityStreamAccess(alice)
        assert access.can_read(app) is False
        assert access.can_read(activity_stream) is False

    def test_token_activity_stream(self, org_admin, alice, organization, post):
        app = Application.objects.create(
            name='test app for {}'.format(org_admin.username), user=org_admin,
            client_type='confidential', authorization_grant_type='password', organization=organization
        )
        response = post(
            reverse('api:o_auth2_application_token_list', kwargs={'pk': app.pk}),
            {'scope': 'read'}, org_admin, expect=201
        )
        token = AccessToken.objects.get(token=response.data['token'])
        access = OAuth2ApplicationAccess(org_admin)
        assert access.can_read(app) is True
        access = ActivityStreamAccess(org_admin)
        activity_stream = ActivityStream.objects.filter(o_auth2_access_token=token).latest('pk')
        assert access.can_read(activity_stream) is True
        access = ActivityStreamAccess(alice)
        assert access.can_read(token) is False
        assert access.can_read(activity_stream) is False

    def test_can_edit_delete_app_org_admin(
        self, admin, org_admin, org_member, alice, organization
    ):
        user_list = [admin, org_admin, org_member, alice]
        can_access_list = [True, True, False, False]
        for user, can_access in zip(user_list, can_access_list):
            app = Application.objects.create(
-               name='test app for {}'.format(org_admin.username), user=org_admin,
+               name='test app for {}'.format(user.username), user=org_admin,
                client_type='confidential', authorization_grant_type='password', organization=organization
            )
            access = OAuth2ApplicationAccess(user)
            assert access.can_change(app, {}) is can_access
            assert access.can_delete(app) is can_access

    def test_can_edit_delete_app_admin(
        self, admin, org_admin, org_member, alice, organization
    ):
        user_list = [admin, org_admin, org_member, alice]
        can_access_list = [True, True, False, False]
        for user, can_access in zip(user_list, can_access_list):
            app = Application.objects.create(
-               name='test app for {}'.format(org_admin.username), user=org_admin,
+               name='test app for {}'.format(user.username), user=admin,
                client_type='confidential', authorization_grant_type='password', organization=organization
            )
            access = OAuth2ApplicationAccess(user)
            assert access.can_change(app, {}) is can_access
            assert access.can_delete(app) is can_access

    def test_superuser_can_always_create(self, admin, org_admin, org_member, alice, organization):
        access = OAuth2ApplicationAccess(admin)
        for user in [admin, org_admin, org_member, alice]:
            assert access.can_add({
                'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
                'authorization_grant_type': 'password', 'organization': organization.id
            })

    def test_normal_user_cannot_create(self, admin, org_admin, org_member, alice, organization):
        for access_user in [org_member, alice]:
            access = OAuth2ApplicationAccess(access_user)
            for user in [admin, org_admin, org_member, alice]:
-               assert access.can_add({
+               assert not access.can_add({
                    'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
                    'authorization_grant_type': 'password', 'organization': organization.id
                })


@pytest.mark.django_db
@@ -113,7 +113,7 @@ class TestInventoryInventorySourcesUpdate:

        with mocker.patch.object(InventoryInventorySourcesUpdate, 'get_object', return_value=obj):
            with mocker.patch.object(InventoryInventorySourcesUpdate, 'get_serializer_context', return_value=None):
-               with mocker.patch('awx.api.views.InventoryUpdateSerializer') as serializer_class:
+               with mocker.patch('awx.api.views.InventoryUpdateDetailSerializer') as serializer_class:
                    serializer = serializer_class.return_value
                    serializer.to_representation.return_value = {}

@@ -19,7 +19,6 @@ from django.utils.encoding import smart_str, smart_bytes
from awx.main.expect import run, isolated_manager

from django.conf import settings
-import six

HERE, FILENAME = os.path.split(__file__)

@@ -107,7 +106,7 @@ def test_cancel_callback_error():


@pytest.mark.timeout(3) # https://github.com/ansible/tower/issues/2391#issuecomment-401946895
-@pytest.mark.parametrize('value', ['abc123', six.u('Iñtërnâtiônàlizætiøn')])
+@pytest.mark.parametrize('value', ['abc123', 'Iñtërnâtiônàlizætiøn'])
def test_env_vars(value):
    stdout = StringIO()
    status, rc = run.run_pexpect(
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
import pytest
-import six

from django.core.exceptions import ValidationError
from rest_framework.serializers import ValidationError as DRFValidationError
@@ -152,8 +151,7 @@ def test_cred_type_injectors_schema(injectors, valid):
    )
    field = CredentialType._meta.get_field('injectors')
    if valid is False:
-       with pytest.raises(ValidationError, message=six.text_type(
-               "Injector was supposed to throw a validation error, data: {}").format(injectors)):
+       with pytest.raises(ValidationError, message="Injector was supposed to throw a validation error, data: {}".format(injectors)):
            field.clean(injectors, type_)
    else:
        field.clean(injectors, type_)
@@ -14,7 +14,6 @@ from backports.tempfile import TemporaryDirectory
import fcntl
from unittest import mock
import pytest
import six
import yaml

from django.conf import settings
@@ -1562,7 +1561,7 @@ class TestJobCredentials(TestJobExecution):
        self.task.run(self.pk)

    def test_custom_environment_injectors_with_unicode_content(self):
        value = six.u('Iñtërnâtiônàlizætiøn')
        value = 'Iñtërnâtiônàlizætiøn'
        some_cloud = CredentialType(
            kind='cloud',
            name='SomeCloud',
@@ -1656,6 +1655,7 @@ class TestJobCredentials(TestJobExecution):
                'password': 'secret'
            }
        )
        azure_rm_credential.inputs['secret'] = ''
        azure_rm_credential.inputs['secret'] = encrypt_field(azure_rm_credential, 'secret')
        self.instance.credentials.add(azure_rm_credential)

@@ -2089,6 +2089,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
                'host': 'https://keystone.example.org'
            }
        )
        cred.inputs['ssh_key_data'] = ''
        cred.inputs['ssh_key_data'] = encrypt_field(
            cred, 'ssh_key_data'
        )
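
Both hunks above use the same setup idiom: put the plaintext into credential.inputs, then immediately overwrite it in place with its encrypted form so the code under test only ever sees ciphertext. A runnable toy version (FakeCred and the base64 "cipher" are for illustration only; awx's real encrypt_field uses Fernet):

import base64

class FakeCred:
    def __init__(self):
        self.inputs = {}

def encrypt_field(instance, field_name):
    # toy stand-in: real code encrypts; this merely encodes for illustration
    raw = instance.inputs[field_name].encode('utf-8')
    return '$encrypted$' + base64.b64encode(raw).decode('ascii')

cred = FakeCred()
cred.inputs['secret'] = 'hunter2'                        # plaintext first
cred.inputs['secret'] = encrypt_field(cred, 'secret')    # then replaced in place
assert cred.inputs['secret'].startswith('$encrypted$')
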
@@ -1,32 +0,0 @@
import os
import os.path

import pytest

from awx.main.utils.ansible import could_be_playbook, could_be_inventory

HERE, _ = os.path.split(__file__)


@pytest.mark.parametrize('filename', os.listdir(os.path.join(HERE, 'playbooks', 'valid')))
def test_could_be_playbook(filename):
    path = os.path.join(HERE, 'playbooks', 'valid')
    assert could_be_playbook(HERE, path, filename).endswith(filename)


@pytest.mark.parametrize('filename', os.listdir(os.path.join(HERE, 'playbooks', 'invalid')))
def test_is_not_playbook(filename):
    path = os.path.join(HERE, 'playbooks', 'invalid')
    assert could_be_playbook(HERE, path, filename) is None


@pytest.mark.parametrize('filename', os.listdir(os.path.join(HERE, 'inventories', 'valid')))
def test_could_be_inventory(filename):
    path = os.path.join(HERE, 'inventories', 'valid')
    assert could_be_inventory(HERE, path, filename).endswith(filename)


@pytest.mark.parametrize('filename', os.listdir(os.path.join(HERE, 'inventories', 'invalid')))
def test_is_not_inventory(filename):
    path = os.path.join(HERE, 'inventories', 'invalid')
    assert could_be_inventory(HERE, path, filename) is None

awx/main/tests/unit/utils/test_ansible.py (new file, 33 lines)
@@ -0,0 +1,33 @@
import os
import os.path

import pytest

from awx.main.tests import data
from awx.main.utils.ansible import could_be_playbook, could_be_inventory

DATA = os.path.join(os.path.dirname(data.__file__), 'ansible_utils')


@pytest.mark.parametrize('filename', os.listdir(os.path.join(DATA, 'playbooks', 'valid')))
def test_could_be_playbook(filename):
    path = os.path.join(DATA, 'playbooks', 'valid')
    assert could_be_playbook(DATA, path, filename).endswith(filename)


@pytest.mark.parametrize('filename', os.listdir(os.path.join(DATA, 'playbooks', 'invalid')))
def test_is_not_playbook(filename):
    path = os.path.join(DATA, 'playbooks', 'invalid')
    assert could_be_playbook(DATA, path, filename) is None


@pytest.mark.parametrize('filename', os.listdir(os.path.join(DATA, 'inventories', 'valid')))
def test_could_be_inventory(filename):
    path = os.path.join(DATA, 'inventories', 'valid')
    assert could_be_inventory(DATA, path, filename).endswith(filename)


@pytest.mark.parametrize('filename', os.listdir(os.path.join(DATA, 'inventories', 'invalid')))
def test_is_not_inventory(filename):
    path = os.path.join(DATA, 'inventories', 'invalid')
    assert could_be_inventory(DATA, path, filename) is None
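
The old test module is deleted and recreated with its fixture files moved from the test package into a shared data directory, keeping the same pattern throughout: parametrize over whatever files exist on disk so that adding a fixture file adds a test case. A generic sketch of that pattern (DATA_DIR is a placeholder path, not the awx layout):

import os
import pytest

DATA_DIR = os.path.join(os.path.dirname(__file__), 'data', 'playbooks', 'valid')

# Guarded listdir so collection does not crash when the directory is absent.
@pytest.mark.parametrize('filename', os.listdir(DATA_DIR) if os.path.isdir(DATA_DIR) else [])
def test_each_fixture_file(filename):
    # one generated test per file found in the data directory
    assert os.path.isfile(os.path.join(DATA_DIR, filename))
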
@@ -2,6 +2,8 @@

# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
import pytest

from awx.conf.models import Setting
from awx.main.utils import encryption

@@ -45,6 +47,16 @@ def test_encrypt_field_with_empty_value():
    assert encrypted is None


def test_encrypt_field_with_undefined_attr_raises_expected_exception():
    with pytest.raises(AttributeError):
        encryption.encrypt_field({}, 'undefined_attr')


def test_decrypt_field_with_undefined_attr_raises_expected_exception():
    with pytest.raises(AttributeError):
        encryption.decrypt_field({}, 'undefined_attr')


class TestSurveyReversibilityValue:
    '''
    Tests to enforce the contract with survey password question encrypted values
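
The two new tests above pin down an error contract: asking the encryption helpers for a field that exists nowhere on the object surfaces as AttributeError, not KeyError (the implementation change that guarantees this appears further down in encryption.py). In miniature, assuming that lookup order:

import pytest

def lookup(instance, field_name):
    # mirror of the helper's lookup order: inputs dict first, attribute second
    try:
        return instance.inputs[field_name]
    except (TypeError, AttributeError):
        return getattr(instance, field_name)
    except KeyError:
        raise AttributeError(field_name)

def test_undefined_attr_is_attribute_error():
    with pytest.raises(AttributeError):
        lookup({}, 'undefined_attr')   # {} has no .inputs and no such attribute
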
@@ -9,7 +9,6 @@ from awx.main.utils.filters import SmartFilter, ExternalLoggerEnabled
# Django
from django.db.models import Q

import six


@pytest.mark.parametrize('params, logger_name, expected', [
@@ -106,12 +105,13 @@ class TestSmartFilterQueryFromString():
        ('a__b__c=false', Q(**{u"a__b__c": False})),
        ('a__b__c=null', Q(**{u"a__b__c": None})),
        ('ansible_facts__a="true"', Q(**{u"ansible_facts__contains": {u"a": u"true"}})),
        ('ansible_facts__a__exact="true"', Q(**{u"ansible_facts__contains": {u"a": u"true"}})),
        #('"a__b\"__c"="true"', Q(**{u"a__b\"__c": "true"})),
        #('a__b\"__c="true"', Q(**{u"a__b\"__c": "true"})),
    ])
    def test_query_generated(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)

    @pytest.mark.parametrize("filter_string", [
        'ansible_facts__facts__facts__blank='
@@ -138,7 +138,7 @@ class TestSmartFilterQueryFromString():
    ])
    def test_unicode(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)

    @pytest.mark.parametrize("filter_string,q_expected", [
        ('(a=b)', Q(**{u"a": u"b"})),
@@ -154,7 +154,7 @@ class TestSmartFilterQueryFromString():
    ])
    def test_boolean_parenthesis(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)

    @pytest.mark.parametrize("filter_string,q_expected", [
        ('ansible_facts__a__b__c[]=3', Q(**{u"ansible_facts__contains": {u"a": {u"b": {u"c": [3]}}}})),
@@ -177,7 +177,7 @@ class TestSmartFilterQueryFromString():
    ])
    def test_contains_query_generated(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)

    @pytest.mark.parametrize("filter_string,q_expected", [
        #('a__b__c[]="true"', Q(**{u"a__b__c__contains": u"\"true\""})),
@@ -187,7 +187,7 @@ class TestSmartFilterQueryFromString():
    ])
    def test_contains_query_generated_unicode(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)

    @pytest.mark.parametrize("filter_string,q_expected", [
        ('ansible_facts__a=null', Q(**{u"ansible_facts__contains": {u"a": None}})),
@@ -195,7 +195,7 @@ class TestSmartFilterQueryFromString():
    ])
    def test_contains_query_generated_null(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)


    @pytest.mark.parametrize("filter_string,q_expected", [
@@ -213,7 +213,7 @@ class TestSmartFilterQueryFromString():
    ])
    def test_search_related_fields(self, mock_get_host_model, filter_string, q_expected):
        q = SmartFilter.query_from_string(filter_string)
        assert six.text_type(q) == six.text_type(q_expected)
        assert str(q) == str(q_expected)


    '''

@@ -1,4 +1,3 @@
import six

from awx.main.models import Job, JobEvent

@@ -15,7 +14,7 @@ def test_log_from_job_event_object():

    # Check entire body of data for any exceptions from getattr on event object
    for fd in data_for_log:
        if not isinstance(data_for_log[fd], six.string_types):
        if not isinstance(data_for_log[fd], str):
            continue
        assert 'Exception' not in data_for_log[fd], 'Exception delivered in data: {}'.format(data_for_log[fd])

@@ -14,7 +14,6 @@ import urllib.parse
import threading
import contextlib
import tempfile
import six
import psutil
from functools import reduce, wraps
from io import StringIO
@@ -82,7 +81,7 @@ def get_object_or_403(klass, *args, **kwargs):


def to_python_boolean(value, allow_none=False):
    value = six.text_type(value)
    value = str(value)
    if value.lower() in ('true', '1', 't'):
        return True
    elif value.lower() in ('false', '0', 'f'):
@@ -90,7 +89,7 @@ def to_python_boolean(value, allow_none=False):
    elif allow_none and value.lower() in ('none', 'null'):
        return None
    else:
        raise ValueError(_(u'Unable to convert "%s" to boolean') % six.text_type(value))
        raise ValueError(_(u'Unable to convert "%s" to boolean') % value)


def region_sorting(region):
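
to_python_boolean now coerces with str() instead of six.text_type(), with no behavior change on Python 3. Reassembled from the two hunks above as a runnable sketch (the elided return False line is inferred from context, and the gettext _() wrapper is dropped for self-containment):

def to_python_boolean(value, allow_none=False):
    value = str(value)
    if value.lower() in ('true', '1', 't'):
        return True
    elif value.lower() in ('false', '0', 'f'):
        return False
    elif allow_none and value.lower() in ('none', 'null'):
        return None
    raise ValueError('Unable to convert "%s" to boolean' % value)

assert to_python_boolean('T') is True
assert to_python_boolean(0) is False
assert to_python_boolean('null', allow_none=True) is None
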
@@ -339,7 +338,7 @@ def update_scm_url(scm_type, url, username=True, password=True,
        netloc = u''
    netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
    if parts.port:
        netloc = u':'.join([netloc, six.text_type(parts.port)])
        netloc = u':'.join([netloc, str(parts.port)])
    new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path,
                                       parts.query, parts.fragment])
    if scp_format and parts.scheme == 'git+ssh':
@@ -376,7 +375,7 @@ def _convert_model_field_for_display(obj, field_name, password_fields=None):
    if password_fields is None:
        password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
    if field_name in password_fields or (
        isinstance(field_val, six.string_types) and
        isinstance(field_val, str) and
        field_val.startswith('$encrypted$')
    ):
        return u'hidden'
@@ -498,7 +497,7 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
            if isinstance(override_field_val, (set, list, QuerySet)):
                getattr(obj2, field_name).add(*override_field_val)
                continue
            if override_field_val.__class__.__name__ is 'ManyRelatedManager':
            if override_field_val.__class__.__name__ == 'ManyRelatedManager':
                src_field_value = override_field_val
                dest_field = getattr(obj2, field_name)
                dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
@@ -623,7 +622,7 @@ def parse_yaml_or_json(vars_str, silent_failure=True):
    '''
    if isinstance(vars_str, dict):
        return vars_str
    elif isinstance(vars_str, six.string_types) and vars_str == '""':
    elif isinstance(vars_str, str) and vars_str == '""':
        return {}

    try:
@@ -883,7 +882,7 @@ def wrap_args_with_proot(args, cwd, **kwargs):
        path = os.path.realpath(path)
        new_args.extend(['--bind', '%s' % (path,), '%s' % (path,)])
    if kwargs.get('isolated'):
        if 'ansible-playbook' in args:
        if '/bin/ansible-playbook' in ' '.join(args):
            # playbook runs should cwd to the SCM checkout dir
            new_args.extend(['--chdir', os.path.join(kwargs['private_data_dir'], 'project')])
        else:
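
The wrap_args_with_proot change is worth pausing on: the old test, 'ansible-playbook' in args, only matched an exact argv element, while isolated runs invoke the tool by absolute path. Joining argv first turns it into a substring check:

args = ['/usr/bin/ansible-playbook', '-i', 'hosts', 'site.yml']
assert 'ansible-playbook' not in args               # exact-element test misses the path
assert '/bin/ansible-playbook' in ' '.join(args)    # substring test matches it
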
@@ -3,7 +3,6 @@ import hashlib
import logging
from collections import namedtuple

import six
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from django.utils.encoding import smart_str, smart_bytes
@@ -63,7 +62,13 @@ def encrypt_field(instance, field_name, ask=False, subfield=None):
    '''
    Return content of the given instance and field name encrypted.
    '''
    value = getattr(instance, field_name)
    try:
        value = instance.inputs[field_name]
    except (TypeError, AttributeError):
        value = getattr(instance, field_name)
    except KeyError:
        raise AttributeError(field_name)

    if isinstance(value, dict) and subfield is not None:
        value = value[subfield]
    if value is None:
@@ -98,7 +103,13 @@ def decrypt_field(instance, field_name, subfield=None):
    '''
    Return content of the given instance and field name decrypted.
    '''
    value = getattr(instance, field_name)
    try:
        value = instance.inputs[field_name]
    except (TypeError, AttributeError):
        value = getattr(instance, field_name)
    except KeyError:
        raise AttributeError(field_name)

    if isinstance(value, dict) and subfield is not None:
        value = value[subfield]
    value = smart_str(value)
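
The encrypt_field/decrypt_field change above re-routes value lookup through the credential's inputs dict when one exists, falls back to plain attribute access otherwise, and converts a missing key into AttributeError. A self-contained sketch of the three paths (FakeCred and Legacy are illustrative classes):

class FakeCred:
    inputs = {'password': 'from-inputs'}
    password = 'from-attribute'

class Legacy:
    password = 'from-attribute'       # no inputs dict at all

def field_value(instance, field_name):
    try:
        return instance.inputs[field_name]          # dict-backed fields win
    except (TypeError, AttributeError):
        return getattr(instance, field_name)        # legacy attribute fallback
    except KeyError:
        raise AttributeError(field_name)            # unknown field -> AttributeError

assert field_value(FakeCred(), 'password') == 'from-inputs'
assert field_value(Legacy(), 'password') == 'from-attribute'
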
@@ -132,6 +143,6 @@ def encrypt_dict(data, fields):


def is_encrypted(value):
    if not isinstance(value, six.string_types):
    if not isinstance(value, str):
        return False
    return value.startswith('$encrypted$') and len(value) > len('$encrypted$')

@@ -8,10 +8,9 @@ from pyparsing import (
    CharsNotIn,
    ParseException,
)
import logging
from logging import Filter, _nameToLevel

import six

from django.apps import apps
from django.db import models
from django.conf import settings
@@ -20,6 +19,8 @@ from awx.main.utils.common import get_search_fields

__all__ = ['SmartFilter', 'ExternalLoggerEnabled']

logger = logging.getLogger('awx.main.utils')


class FieldFromSettings(object):
    """
@@ -154,12 +155,12 @@ class SmartFilter(object):
            self.result = Host.objects.filter(**kwargs)

        def strip_quotes_traditional_logic(self, v):
            if type(v) is six.text_type and v.startswith('"') and v.endswith('"'):
            if type(v) is str and v.startswith('"') and v.endswith('"'):
                return v[1:-1]
            return v

        def strip_quotes_json_logic(self, v):
            if type(v) is six.text_type and v.startswith('"') and v.endswith('"') and v != u'"null"':
            if type(v) is str and v.startswith('"') and v.endswith('"') and v != u'"null"':
                return v[1:-1]
            return v

@@ -172,10 +173,26 @@ class SmartFilter(object):
        relationship referred to, to see if it's a jsonb type.
        '''
        def _json_path_to_contains(self, k, v):
            from awx.main.fields import JSONBField  # avoid a circular import
            if not k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP):
                v = self.strip_quotes_traditional_logic(v)
                return (k, v)

            for match in JSONBField.get_lookups().keys():
                match = '__{}'.format(match)
                if k.endswith(match):
                    if match == '__exact':
                        # appending __exact is basically a no-op, because that's
                        # what the query means if you leave it off
                        k = k[:-len(match)]
                    else:
                        logger.error(
                            'host_filter:{} does not support searching with {}'.format(
                                SmartFilter.SEARCHABLE_RELATIONSHIP,
                                match
                            )
                        )

            # Strip off leading relationship key
            if k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP + '__'):
                strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP) + 2
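
The new branch above treats a trailing __exact as a no-op, since that is the implicit lookup anyway, and logs an error for any other JSON lookup suffix rather than applying it. A compact sketch of just the suffix-stripping rule (the lookups tuple is a stand-in for JSONBField.get_lookups(), and this version raises where the original logs):

def normalize_json_key(k, lookups=('exact', 'contains', 'icontains')):
    for match in ('__{}'.format(m) for m in lookups):
        if k.endswith(match):
            if match == '__exact':
                return k[:-len(match)]        # implicit lookup: just drop it
            raise ValueError('unsupported JSON lookup: {}'.format(match))
    return k

assert normalize_json_key('ansible_facts__a__exact') == 'ansible_facts__a'
assert normalize_json_key('ansible_facts__a') == 'ansible_facts__a'
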
@@ -238,7 +255,7 @@ class SmartFilter(object):
            # value
            # ="something"
            if t_len > (v_offset + 2) and t[v_offset] == "\"" and t[v_offset + 2] == "\"":
                v = u'"' + six.text_type(t[v_offset + 1]) + u'"'
                v = u'"' + str(t[v_offset + 1]) + u'"'
                #v = t[v_offset + 1]
            # empty ""
            elif t_len > (v_offset + 1):
@@ -307,9 +324,9 @@ class SmartFilter(object):
        * handle key with __ in it
        '''
        filter_string_raw = filter_string
        filter_string = six.text_type(filter_string)
        filter_string = str(filter_string)

        unicode_spaces = list(set(six.text_type(c) for c in filter_string if c.isspace()))
        unicode_spaces = list(set(str(c) for c in filter_string if c.isspace()))
        unicode_spaces_other = unicode_spaces + [u'(', u')', u'=', u'"']
        atom = CharsNotIn(unicode_spaces_other)
        atom_inside_quotes = CharsNotIn(u'"')

@@ -7,7 +7,6 @@ import json
import time
import logging

import six

from django.conf import settings

@@ -40,7 +39,7 @@ class LogstashFormatter(LogstashFormatterVersion1):
            data = copy(raw_data['ansible_facts'])
        else:
            data = copy(raw_data)
        if isinstance(data, six.string_types):
        if isinstance(data, str):
            data = json.loads(data)
        data_for_log = {}

@@ -8,7 +8,6 @@ import requests
import time
import socket
import select
import six
from urllib import parse as urlparse
from concurrent.futures import ThreadPoolExecutor
from requests.exceptions import RequestException
@@ -211,7 +210,7 @@ def _encode_payload_for_socket(payload):
    encoded_payload = payload
    if isinstance(encoded_payload, dict):
        encoded_payload = json.dumps(encoded_payload, ensure_ascii=False)
    if isinstance(encoded_payload, six.text_type):
    if isinstance(encoded_payload, str):
        encoded_payload = encoded_payload.encode('utf-8')
    return encoded_payload

@@ -237,7 +236,7 @@ class TCPHandler(BaseHandler):
        except Exception as e:
            ret = SocketResult(False, "Error sending message from %s: %s" %
                               (TCPHandler.__name__,
                                ' '.join(six.text_type(arg) for arg in e.args)))
                                ' '.join(str(arg) for arg in e.args)))
            logger.exception(ret.reason)
        finally:
            sok.close()

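
_encode_payload_for_socket funnels everything down to bytes: dicts are JSON-serialized to text first, text is UTF-8 encoded, and bytes pass through untouched. The same normalization in isolation:

import json

def encode_payload(payload):
    if isinstance(payload, dict):
        payload = json.dumps(payload, ensure_ascii=False)   # dict -> text
    if isinstance(payload, str):
        payload = payload.encode('utf-8')                   # text -> bytes
    return payload                                          # bytes pass through

assert encode_payload({'a': 1}) == b'{"a": 1}'
assert encode_payload('héllo') == 'héllo'.encode('utf-8')
assert encode_payload(b'raw') == b'raw'
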
@@ -54,7 +54,7 @@ class MemGroup(MemObject):
        return '<_in-memory-group_ `{}`>'.format(self.name)

    def add_child_group(self, group):
        assert group.name is not 'all', 'group name is all'
        assert group.name != 'all', 'group name is all'
        assert isinstance(group, MemGroup), 'not MemGroup instance'
        logger.debug('Adding child group %s to parent %s', group.name, self.name)
        if group not in self.children:

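
This hunk and the copy_m2m_relationships one above fix the same bug class: is compares object identity, so string comparisons with is/is not only work by accident of CPython's string interning, and CPython 3.8+ emits a SyntaxWarning for them. Equality is what was meant:

name = ''.join(['al', 'l'])        # builds 'all' without any interning guarantee
assert name == 'all'               # equality: always correct for strings
# `name is 'all'` may be False here even though the text matches, which is
# exactly why `group.name is not 'all'` was replaced with `!= 'all'`.
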
Some files were not shown because too many files have changed in this diff.