More pep8 goodness

Matthew Jones 2015-02-04 14:43:46 -05:00
parent fbf0ebf4d9
commit 1d76c1cd06
15 changed files with 83 additions and 69 deletions

View File

@@ -62,8 +62,9 @@ class ModelAccessPermission(permissions.BasePermission):
     def check_put_permissions(self, request, view, obj=None):
         if not obj:
-            return True # FIXME: For some reason this needs to return True
-                        # because it is first called with obj=None?
+            # FIXME: For some reason this needs to return True
+            # because it is first called with obj=None?
+            return True
         if getattr(view, 'is_variable_data', False):
             return check_user_access(request.user, view.model, 'change', obj,
                                      dict(variables=request.DATA))
@@ -76,8 +77,10 @@ class ModelAccessPermission(permissions.BasePermission):
     def check_delete_permissions(self, request, view, obj=None):
         if not obj:
-            return True # FIXME: For some reason this needs to return True
-                        # because it is first called with obj=None?
+            # FIXME: For some reason this needs to return True
+            # because it is first called with obj=None?
+            return True
         return check_user_access(request.user, view.model, 'delete', obj)

     def check_permissions(self, request, view, obj=None):
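
Relocating the FIXME is the usual fix for a two-line inline comment: the second line of the old comment only lined up through hand-tuned spaces, which pep8's continuation-line checks flag. A minimal sketch (names invented):

    def check(obj=None):
        if not obj:
            # FIXME-style notes read better as block comments above the
            # statement; they wrap freely without alignment tricks.
            return True
        return False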

View File

@@ -1592,7 +1592,7 @@ class ScheduleSerializer(BaseSerializer):
             raise serializers.ValidationError('RRULE require in rrule')
         if len(match_multiple_rrule) > 1:
             raise serializers.ValidationError('Multiple RRULE is not supported')
-        if not 'interval' in rrule_value.lower():
+        if 'interval' not in rrule_value.lower():
             raise serializers.ValidationError('INTERVAL required in rrule')
         if 'tzid' in rrule_value.lower():
             raise serializers.ValidationError('TZID is not supported')
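
Most of the substantive edits in this commit have this E713 shape: "not x in y" becomes "x not in y". Both spellings compile to the same membership test; pep8 simply prefers the second as the more readable form. A small illustration (the rrule string is invented):

    rrule_value = 'DTSTART:20150101T000000Z RRULE:FREQ=DAILY;INTERVAL=1'
    if not 'interval' in rrule_value.lower():   # flagged by pep8 E713
        pass
    if 'interval' not in rrule_value.lower():   # preferred, same result
        pass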

View File

@@ -1,6 +1,8 @@
 # Copyright (c) 2014 AnsibleWorks, Inc.
 # All Rights Reserved.
+
+# noqa
 from django.conf.urls import include, patterns, url as original_url

 def url(regex, view, kwargs=None, name=None, prefix=''):

View File

@@ -12,6 +12,7 @@ from awx.main.models import Project
 class OptionEnforceError(Exception):
     def __init__(self, value):
         self.value = value
+
     def __str__(self):
         return repr(self.value)
@@ -35,39 +36,39 @@ class BaseCommandInstance(BaseCommand):
     @staticmethod
     def generate_option_hostname():
         return make_option('--hostname',
-            dest='hostname',
-            default='',
-            help='Find instance by specified hostname.')
+                           dest='hostname',
+                           default='',
+                           help='Find instance by specified hostname.')

     @staticmethod
     def generate_option_hostname_set():
         return make_option('--hostname',
-            dest='hostname',
-            default='',
-            help='Hostname to assign to the new instance.')
+                           dest='hostname',
+                           default='',
+                           help='Hostname to assign to the new instance.')

     @staticmethod
     def generate_option_primary():
         return make_option('--primary',
-            action='store_true',
-            default=False,
-            dest='primary',
-            help='Register instance as primary.')
+                           action='store_true',
+                           default=False,
+                           dest='primary',
+                           help='Register instance as primary.')

     @staticmethod
     def generate_option_secondary():
         return make_option('--secondary',
-            action='store_true',
-            default=False,
-            dest='secondary',
-            help='Register instance as secondary.')
+                           action='store_true',
+                           default=False,
+                           dest='secondary',
+                           help='Register instance as secondary.')

     @staticmethod
     def generate_option_uuid():
         return make_option('--uuid',
-            dest='uuid',
-            default='',
-            help='Find instance by specified uuid.')
+                           dest='uuid',
+                           default='',
+                           help='Find instance by specified uuid.')

     def include_options_roles(self):
         BaseCommand.option_list += ( BaseCommandInstance.generate_option_primary(), BaseCommandInstance.generate_option_secondary(), )
@@ -83,14 +84,19 @@ class BaseCommandInstance(BaseCommand):
     def get_option_hostname(self):
         return self.option_hostname
+
     def get_option_uuid(self):
         return self.option_uuid
+
     def is_option_primary(self):
         return self.option_primary
+
     def is_option_secondary(self):
         return self.option_secondary
+
     def get_UUID(self):
         return self.UUID
+
     # for the enforce_unique_find policy
     def get_unique_fields(self):
         return self.unique_fields
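
All five option builders get the same treatment: continuation arguments re-aligned under the opening parenthesis, which is what pep8's continuation-line checks (E127/E128) expect for a visual indent. A minimal sketch with an invented option name:

    from optparse import make_option

    opt = make_option('--example',
                      dest='example',
                      default='',
                      help='Arguments aligned with the opening delimiter.')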

View File

@@ -48,8 +48,8 @@ class Command(NoArgsCommand):
         #jobs_qs = jobs_qs.filter(created__lte=self.cutoff)
         for job in Job.objects.all():
             job_display = '"%s" (started %s, %d host summaries, %d events)' % \
-                (unicode(job), unicode(job.created),
-                 job.job_host_summaries.count(), job.job_events.count())
+                          (unicode(job), unicode(job.created),
+                           job.job_host_summaries.count(), job.job_events.count())
             if job.status in ('pending', 'waiting', 'running'):
                 action_text = 'would skip' if self.dry_run else 'skipping'
                 self.logger.debug('%s %s job %s', action_text, job.status, job_display)

View File

@@ -111,7 +111,7 @@ class MemGroup(MemObject):
         # don't add to child groups if already there
         for g in self.children:
             if g.name == name:
-               return g
+                return g
         logger.debug('Adding child group %s to group %s', group.name, self.name)
         self.children.append(group)
         return group
@@ -122,7 +122,7 @@ class MemGroup(MemObject):
         logger.debug('Adding child group %s to parent %s', group.name, self.name)
         if group not in self.children:
             self.children.append(group)
-        if not self in group.parents:
+        if self not in group.parents:
             group.parents.append(self)

     def add_host(self, host):
@@ -202,7 +202,7 @@ class BaseLoader(object):
             logger.debug('Filtering host %s', host_name)
             return None
         host = None
-        if not host_name in self.all_group.all_hosts:
+        if host_name not in self.all_group.all_hosts:
             host = MemHost(host_name, self.source_dir, port)
             self.all_group.all_hosts[host_name] = host
         return self.all_group.all_hosts[host_name]
@@ -258,7 +258,7 @@ class BaseLoader(object):
         if self.group_filter_re and not self.group_filter_re.match(name):
             logger.debug('Filtering group %s', name)
             return None
-        if not name in self.all_group.all_groups:
+        if name not in self.all_group.all_groups:
             group = MemGroup(name, self.source_dir)
             if not child:
                 all_group.add_child_group(group)
@@ -557,10 +557,12 @@ class Command(NoArgsCommand):
         self.logger = logging.getLogger('awx.main.commands.inventory_import')
         self.logger.setLevel(log_levels.get(self.verbosity, 0))
         handler = logging.StreamHandler()
+
         class Formatter(logging.Formatter):
             def format(self, record):
                 record.relativeSeconds = record.relativeCreated / 1000.0
                 return logging.Formatter.format(self, record)
+
         formatter = Formatter('%(relativeSeconds)9.3f %(levelname)-8s %(message)s')
         handler.setFormatter(formatter)
         self.logger.addHandler(handler)
@@ -593,7 +595,7 @@ class Command(NoArgsCommand):
                     inventory=self.inventory,
                     active=True)
             except InventorySource.DoesNotExist:
-                raise CommandError('Inventory source with id=%s not found' % \
+                raise CommandError('Inventory source with id=%s not found' %
                                    inventory_source_id)
             self.inventory_update = None
         # Otherwise, create a new inventory source to capture this invocation
@@ -1252,8 +1254,8 @@ class Command(NoArgsCommand):
                 queries_this_import = connection.queries[queries_before:]
                 sqltime = sum(float(x['time']) for x in queries_this_import)
                 self.logger.warning('Inventory import required %d queries '
-                    'taking %0.3fs', len(queries_this_import),
-                    sqltime)
+                                    'taking %0.3fs', len(queries_this_import),
+                                    sqltime)
         except Exception, e:
             if isinstance(e, KeyboardInterrupt):
                 status = 'canceled'

View File

@@ -49,6 +49,7 @@ class CallbackReceiver(object):
                 except Exception, e:
                     pass
             return _handler
+
         def check_pre_handle(data):
             event = data.get('event', '')
             if event == 'playbook_on_play_start':
@@ -111,18 +112,19 @@ class CallbackReceiver(object):
             job_parent_events = last_parent_events.get(message['job_id'], {})
             if message['event'] in ('playbook_on_play_start', 'playbook_on_stats', 'playbook_on_vars_prompt'):
                 parent = job_parent_events.get('playbook_on_start', None)
-            elif message['event'] in ('playbook_on_notify', 'playbook_on_setup',
-                                      'playbook_on_task_start',
-                                      'playbook_on_no_hosts_matched',
-                                      'playbook_on_no_hosts_remaining',
-                                      'playbook_on_import_for_host',
-                                      'playbook_on_not_import_for_host'):
+            elif message['event'] in ('playbook_on_notify',
+                                      'playbook_on_setup',
+                                      'playbook_on_task_start',
+                                      'playbook_on_no_hosts_matched',
+                                      'playbook_on_no_hosts_remaining',
+                                      'playbook_on_import_for_host',
+                                      'playbook_on_not_import_for_host'):
                 parent = job_parent_events.get('playbook_on_play_start', None)
             elif message['event'].startswith('runner_on_'):
                 list_parents = []
                 list_parents.append(job_parent_events.get('playbook_on_setup', None))
                 list_parents.append(job_parent_events.get('playbook_on_task_start', None))
-                list_parents = sorted(filter(lambda x: x is not None, list_parents), cmp=lambda x, y: y.id-x.id)
+                list_parents = sorted(filter(lambda x: x is not None, list_parents), cmp=lambda x, y: y.id - x.id)
                 parent = list_parents[0] if len(list_parents) > 0 else None
             else:
                 parent = None
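
One caveat on the reformatted sort: the cmp= keyword accepted by sorted() exists only on Python 2, which is what this codebase targets. A hedged sketch of the same "newest parent event first" ordering in key= form, with a hypothetical stand-in for the event objects:

    class Event(object):          # hypothetical stand-in for a job event
        def __init__(self, id):
            self.id = id

    list_parents = [Event(3), None, Event(7)]
    list_parents = sorted((p for p in list_parents if p is not None),
                          key=lambda x: x.id, reverse=True)
    # list_parents[0].id == 7, matching cmp=lambda x, y: y.id - x.id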

View File

@@ -149,11 +149,11 @@ def get_tasks():
     # TODO: Replace this when we can grab all objects in a sane way.
     graph_jobs = [j for j in Job.objects.filter(status__in=RELEVANT_JOBS)]
     graph_inventory_updates = [iu for iu in
-        InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS)]
+                               InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS)]
     graph_project_updates = [pu for pu in
-        ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)]
+                             ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)]
     graph_system_jobs = [sj for sj in
-        SystemJob.objects.filter(status__in=RELEVANT_JOBS)]
+                        SystemJob.objects.filter(status__in=RELEVANT_JOBS)]
     all_actions = sorted(graph_jobs + graph_inventory_updates +
                          graph_project_updates + graph_system_jobs,
                          key=lambda task: task.created)
@@ -197,8 +197,7 @@ def rebuild_graph(message):
     # Check running tasks and make sure they are active in celery
     print_log("Active celery tasks: " + str(active_tasks))
     for task in list(running_tasks):
-        if (task.celery_task_id not in active_tasks and
-                not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
+        if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
             # NOTE: Pull status again and make sure it didn't finish in
             # the meantime?
             task.status = 'failed'
@@ -214,14 +213,14 @@ def rebuild_graph(message):
     # Create and process dependencies for new tasks
     for task in new_tasks:
         print_log("Checking dependencies for: %s" % str(task))
-        task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks) #TODO: other 'new' tasks? Need to investigate this scenario
+        task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks) # TODO: other 'new' tasks? Need to investigate this scenario
         print_log("New dependencies: %s" % str(task_dependencies))
         for dep in task_dependencies:
             # We recalculate the created time for the moment to ensure the
             # dependencies are always sorted in the right order relative to
             # the dependent task.
             time_delt = len(task_dependencies) - task_dependencies.index(dep)
-            dep.created = task.created - datetime.timedelta(seconds=1+time_delt)
+            dep.created = task.created - datetime.timedelta(seconds=1 + time_delt)
             dep.status = 'waiting'
             dep.save()
             waiting_tasks.insert(waiting_tasks.index(task), dep)
@@ -255,9 +254,9 @@ def process_graph(graph, task_capacity):
     ready_nodes = filter(lambda x: x['node_object'].status != 'running', leaf_nodes)
     remaining_volume = task_capacity - running_impact
     print_log('Running Nodes: %s; Capacity: %s; Running Impact: %s; '
-        'Remaining Capacity: %s' %
-        (str(running_nodes), str(task_capacity),
-         str(running_impact), str(remaining_volume)))
+              'Remaining Capacity: %s' %
+              (str(running_nodes), str(task_capacity),
+               str(running_impact), str(remaining_volume)))
     print_log("Ready Nodes: %s" % str(ready_nodes))
     for task_node in ready_nodes:
         node_obj = task_node['node_object']

View File

@@ -24,12 +24,12 @@ class Command(BaseCommand):
     option_list = BaseCommand.option_list + (
         make_option('--stat',
-            action='store',
-            dest='stat',
-            type="string",
-            default="jobs_running",
-            help='Select which stat to get information for'),
-        )
+                    action='store',
+                    dest='stat',
+                    type="string",
+                    default="jobs_running",
+                    help='Select which stat to get information for'),
+    )

     def job_stats(self, state):
         return UnifiedJob.objects.filter(status=state).count()

View File

@@ -141,7 +141,7 @@ class BaseModel(models.Model):
             except ValidationError, e:
                 errors[f.name] = e.messages
         if errors:
-           raise ValidationError(errors)
+            raise ValidationError(errors)

     def update_fields(self, **kwargs):
         save = kwargs.pop('save', True)

View File

@@ -357,8 +357,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique):
                 qs = qs.exclude(pk=model_class_pk)
             if qs.exists():
                 key = NON_FIELD_ERRORS
-                errors.setdefault(key, []).append( \
-                    self.unique_error_message(model_class, unique_check))
+                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
         if errors:
             raise ValidationError(errors)

View File

@@ -229,6 +229,7 @@ class Inventory(CommonModel):
         # deepest level within the tree.
         root_group_pks = set(self.root_groups.values_list('pk', flat=True))
         group_depths = {} # pk: max_depth
+
         def update_group_depths(group_pk, current_depth=0):
             max_depth = group_depths.get(group_pk, -1)
             if current_depth > max_depth:
@@ -541,6 +542,7 @@ class Group(CommonModelNameNotUnique):
         from awx.main.tasks import update_inventory_computed_fields, bulk_inventory_element_delete
         from awx.main.utils import ignore_inventory_computed_fields
         from awx.main.signals import disable_activity_stream
+
         def mark_actual():
             all_group_hosts = Group.hosts.through.objects.select_related("host", "group").filter(group__inventory=self.inventory)
             group_hosts = {'groups': {}, 'hosts': {}}
@@ -1036,7 +1038,7 @@ class InventorySourceOptions(BaseModel):
         if invalid_filters:
             raise ValidationError('Invalid filter expression%s: %s' %
                                   ('' if len(invalid_filters) == 1 else 's',
-                                  ', '.join(invalid_filters)))
+                                   ', '.join(invalid_filters)))
         return instance_filters

     def clean_group_by(self):
@@ -1055,7 +1057,7 @@ class InventorySourceOptions(BaseModel):
         if invalid_choices:
             raise ValidationError('Invalid group by choice%s: %s' %
                                   ('' if len(invalid_choices) == 1 else 's',
-                                  ', '.join(invalid_choices)))
+                                   ', '.join(invalid_choices)))
         return ','.join(choices)

View File

@@ -266,7 +266,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions):
                             (survey_element['variable'], data[survey_element['variable']], survey_element['max']))
                 if type(data[survey_element['variable']]) not in (float, int):
                     errors.append("Value %s for %s expected to be a numeric type" % (data[survey_element['variable']],
-                        survey_element['variable']))
+                                                                                     survey_element['variable']))
             elif survey_element['type'] == 'multiselect':
                 if survey_element['variable'] in data:
                     if type(data[survey_element['variable']]) != list:
@@ -446,7 +446,7 @@ class Job(UnifiedJob, JobOptions):
             dependencies.append(self.project.create_project_update(launch_type='dependency'))
         if inventory_sources.count(): # and not has_setup_failures? Probably handled as an error scenario in the task runner
             for source in inventory_sources:
-                if not source in inventory_sources_found and source.needs_update_on_launch:
+                if source not in inventory_sources_found and source.needs_update_on_launch:
                     dependencies.append(source.create_inventory_update(launch_type='dependency'))
         return dependencies
@@ -491,12 +491,11 @@ class JobHostSummary(CreatedModifiedModel):
         editable=False,
     )
     host = models.ForeignKey('Host',
-        related_name='job_host_summaries',
-        null=True,
-        default=None,
-        on_delete=models.SET_NULL,
-        editable=False,
-    )
+                             related_name='job_host_summaries',
+                             null=True,
+                             default=None,
+                             on_delete=models.SET_NULL,
+                             editable=False)
     host_name = models.CharField(
         max_length=1024,

View File

@@ -569,7 +569,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
         # out the time that elapsed, do so.
         if self.started and self.finished and not self.elapsed:
             td = self.finished - self.started
-            elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / (10**6 * 1.0)
+            elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
         else:
             elapsed = 0.0
         if self.elapsed != elapsed:
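
The reformatted expression is the long-hand equivalent of timedelta.total_seconds(), available since Python 2.7. A quick sanity check of the arithmetic:

    import datetime

    td = datetime.timedelta(days=1, seconds=5, microseconds=250000)
    manual = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
    assert manual == td.total_seconds() == 86405.25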

View File

@@ -14,4 +14,4 @@
 # W391 - Blank line at end of file
 # W293 - Blank line contains whitespace
 ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E302,E303,E501,W291,W391,W293
-exclude=awx/lib/site-packages,awx/ui
+exclude=awx/lib/site-packages,awx/ui,awx/api/urls.py,awx/main/migrations
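
For reference, the same policy expressed on the command line (the pep8 tool's --ignore and --exclude flags mirror these config keys; the awx/ target path is an assumption):

    pep8 --ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E302,E303,E501,W291,W391,W293 \
         --exclude=awx/lib/site-packages,awx/ui,awx/api/urls.py,awx/main/migrations awx/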