move code linting to a stricter pep8-esque auto-formatting tool, black
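For context, black rewraps code purely by line length: hand-wrapped, paren-aligned continuation lines are collapsed onto a single line when they fit the configured limit, and calls that do not fit are exploded into a four-space hanging indent with a trailing comma. A minimal before/after sketch of the effect (illustrative only, not taken from this commit; judging by the long single-line calls in the diff below, the repository evidently configures a line length well above black's default 88 columns):

import argparse

parser = argparse.ArgumentParser()

# Before black: manually wrapped, aligned under the opening parenthesis.
parser.add_argument('--days', dest='days', type=int, default=90,
                    help='Remove events more than N days old')

# After black: collapsed onto one line because it fits the configured limit;
# a call that does not fit is instead exploded with one argument per line.
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode')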
@@ -8,12 +8,11 @@ class Command(BaseCommand):
    help = "Find the slowest tasks and hosts for a Job Template's most recent runs."

    def add_arguments(self, parser):
        parser.add_argument('--template', dest='jt', type=int,
                            help='ID of the Job Template to profile')
        parser.add_argument('--threshold', dest='threshold', type=float, default=30,
                            help='Only show tasks that took at least this many seconds (defaults to 30)')
        parser.add_argument('--history', dest='history', type=float, default=25,
                            help='The number of historic jobs to look at')
        parser.add_argument('--template', dest='jt', type=int, help='ID of the Job Template to profile')
        parser.add_argument(
            '--threshold', dest='threshold', type=float, default=30, help='Only show tasks that took at least this many seconds (defaults to 30)'
        )
        parser.add_argument('--history', dest='history', type=float, default=25, help='The number of historic jobs to look at')
        parser.add_argument('--ignore', action='append', help='ignore a specific action (e.g., --ignore git)')

    def handle(self, *args, **options):
@@ -6,28 +6,19 @@ from django.core.management.base import BaseCommand


class Command(BaseCommand):

    def handle(self, *args, **options):
        with connection.cursor() as cursor:
            start = {}
            for relation in (
                'main_jobevent', 'main_inventoryupdateevent',
                'main_projectupdateevent', 'main_adhoccommandevent'
            ):
            for relation in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent'):
                cursor.execute(f"SELECT MAX(id) FROM {relation};")
                start[relation] = cursor.fetchone()[0] or 0
            clear = False
            while True:
                lines = []
                for relation in (
                    'main_jobevent', 'main_inventoryupdateevent',
                    'main_projectupdateevent', 'main_adhoccommandevent'
                ):
                for relation in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent'):
                    lines.append(relation)
                    minimum = start[relation]
                    cursor.execute(
                        f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;"
                    )
                    cursor.execute(f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;")
                    events = cursor.fetchone()[0] or 0
                    lines.append(f'↳ last minute {events}')
                    lines.append('')
@@ -37,4 +28,4 @@ class Command(BaseCommand):
            for line in lines:
                print(line)
            clear = True
            time.sleep(.25)
            time.sleep(0.25)
@@ -11,8 +11,7 @@ class Command(BaseCommand):
    """Returns license type, e.g., 'enterprise', 'open', 'none'"""

    def add_arguments(self, parser):
        parser.add_argument('--data', dest='data', action='store_true',
                            help='verbose, prints the actual (sanitized) license')
        parser.add_argument('--data', dest='data', action='store_true', help='verbose, prints the actual (sanitized) license')

    def handle(self, *args, **options):
        super(Command, self).__init__()
@@ -4,7 +4,6 @@ from django.core.management.commands.makemigrations import Command as MakeMigrat


class Command(MakeMigrations):

    def execute(self, *args, **options):
        settings = connections['default'].settings_dict.copy()
        settings['ENGINE'] = 'sqlite3'
@@ -15,22 +15,18 @@ from awx.main.models import ActivityStream


class Command(BaseCommand):
    '''
    """
    Management command to purge old activity stream events.
    '''
    """

    help = 'Remove old activity stream events from the database'

    def add_arguments(self, parser):
        parser.add_argument('--days', dest='days', type=int, default=90, metavar='N',
                            help='Remove activity stream events more than N days old')
        parser.add_argument('--dry-run', dest='dry_run', action='store_true',
                            default=False, help='Dry run mode (show items that would '
                                                'be removed)')
        parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
        parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')

    def init_logging(self):
        log_levels = dict(enumerate([logging.ERROR, logging.INFO,
                                     logging.DEBUG, 0]))
        log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
        self.logger = logging.getLogger('awx.main.commands.cleanup_activitystream')
        self.logger.setLevel(log_levels.get(self.verbosity, 0))
        handler = logging.StreamHandler()
@@ -12,53 +12,29 @@ from django.db import transaction
from django.utils.timezone import now

# AWX
from awx.main.models import (
    Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
    SystemJob, WorkflowJob, Notification
)
from awx.main.signals import (
    disable_activity_stream,
    disable_computed_fields
)
from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification
from awx.main.signals import disable_activity_stream, disable_computed_fields

from awx.main.utils.deletion import AWXCollector, pre_delete


class Command(BaseCommand):
    '''
    """
    Management command to cleanup old jobs and project updates.
    '''
    """

    help = 'Remove old jobs, project and inventory updates from the database.'

    def add_arguments(self, parser):
        parser.add_argument('--days', dest='days', type=int, default=90, metavar='N',
                            help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
        parser.add_argument('--dry-run', dest='dry_run', action='store_true',
                            default=False, help='Dry run mode (show items that would '
                                                'be removed)')
        parser.add_argument('--jobs', dest='only_jobs', action='store_true',
                            default=False,
                            help='Remove jobs')
        parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands',
                            action='store_true', default=False,
                            help='Remove ad hoc commands')
        parser.add_argument('--project-updates', dest='only_project_updates',
                            action='store_true', default=False,
                            help='Remove project updates')
        parser.add_argument('--inventory-updates', dest='only_inventory_updates',
                            action='store_true', default=False,
                            help='Remove inventory updates')
        parser.add_argument('--management-jobs', default=False,
                            action='store_true', dest='only_management_jobs',
                            help='Remove management jobs')
        parser.add_argument('--notifications', dest='only_notifications',
                            action='store_true', default=False,
                            help='Remove notifications')
        parser.add_argument('--workflow-jobs', default=False,
                            action='store_true', dest='only_workflow_jobs',
                            help='Remove workflow jobs')

        parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
        parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
        parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
        parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
        parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
        parser.add_argument('--inventory-updates', dest='only_inventory_updates', action='store_true', default=False, help='Remove inventory updates')
        parser.add_argument('--management-jobs', default=False, action='store_true', dest='only_management_jobs', help='Remove management jobs')
        parser.add_argument('--notifications', dest='only_notifications', action='store_true', default=False, help='Remove notifications')
        parser.add_argument('--workflow-jobs', default=False, action='store_true', dest='only_workflow_jobs', help='Remove workflow jobs')

    def cleanup_jobs(self):
        skipped, deleted = 0, 0
@@ -83,7 +59,7 @@ class Command(BaseCommand):
                just_deleted = models_deleted['main.Job']
                deleted += just_deleted
            else:
                just_deleted = 0 # break from loop, this is dry run
                just_deleted = 0  # break from loop, this is dry run
                deleted = qs.count()

            if just_deleted == 0:
@@ -96,9 +72,7 @@ class Command(BaseCommand):
        skipped, deleted = 0, 0
        ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff)
        for ad_hoc_command in ad_hoc_commands.iterator():
            ad_hoc_command_display = '"%s" (%d events)' % \
                                     (str(ad_hoc_command),
                                      ad_hoc_command.ad_hoc_command_events.count())
            ad_hoc_command_display = '"%s" (%d events)' % (str(ad_hoc_command), ad_hoc_command.ad_hoc_command_events.count())
            if ad_hoc_command.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s ad hoc command %s', action_text, ad_hoc_command.status, ad_hoc_command_display)
@@ -179,8 +153,7 @@ class Command(BaseCommand):
        return skipped, deleted

    def init_logging(self):
        log_levels = dict(enumerate([logging.ERROR, logging.INFO,
                                     logging.DEBUG, 0]))
        log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
        self.logger = logging.getLogger('awx.main.commands.cleanup_jobs')
        self.logger.setLevel(log_levels.get(self.verbosity, 0))
        handler = logging.StreamHandler()
@@ -192,9 +165,7 @@ class Command(BaseCommand):
        skipped, deleted = 0, 0
        workflow_jobs = WorkflowJob.objects.filter(created__lt=self.cutoff)
        for workflow_job in workflow_jobs.iterator():
            workflow_job_display = '"{}" ({} nodes)'.format(
                str(workflow_job),
                workflow_job.workflow_nodes.count())
            workflow_job_display = '"{}" ({} nodes)'.format(str(workflow_job), workflow_job.workflow_nodes.count())
            if workflow_job.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s job %s', action_text, workflow_job.status, workflow_job_display)
@@ -214,8 +185,8 @@ class Command(BaseCommand):
        notifications = Notification.objects.filter(created__lt=self.cutoff)
        for notification in notifications.iterator():
            notification_display = '"{}" (started {}, {} type, {} sent)'.format(
                str(notification), str(notification.created),
                notification.notification_type, notification.notifications_sent)
                str(notification), str(notification.created), notification.notification_type, notification.notifications_sent
            )
            if notification.status in ('pending',):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s notification %s', action_text, notification.status, notification_display)
@@ -240,8 +211,7 @@ class Command(BaseCommand):
            self.cutoff = now() - datetime.timedelta(days=self.days)
        except OverflowError:
            raise CommandError('--days specified is too large. Try something less than 99999 (about 270 years).')
        model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates',
                       'management_jobs', 'workflow_jobs', 'notifications')
        model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates', 'management_jobs', 'workflow_jobs', 'notifications')
        models_to_cleanup = set()
        for m in model_names:
            if options.get('only_%s' % m, False):
@@ -6,10 +6,8 @@ from django.contrib.sessions.models import Session


class Command(BaseCommand):

    def init_logging(self):
        log_levels = dict(enumerate([logging.ERROR, logging.INFO,
                                     logging.DEBUG, 0]))
        log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
        self.logger = logging.getLogger('awx.main.commands.cleanup_sessions')
        self.logger.setLevel(log_levels.get(self.verbosity, 0))
        handler = logging.StreamHandler()
@@ -7,10 +7,8 @@ from oauth2_provider.models import RefreshToken


class Command(BaseCommand):

    def init_logging(self):
        log_levels = dict(enumerate([logging.ERROR, logging.INFO,
                                     logging.DEBUG, 0]))
        log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
        self.logger = logging.getLogger('awx.main.commands.cleanup_tokens')
        self.logger.setLevel(log_levels.get(self.verbosity, 0))
        handler = logging.StreamHandler()
@@ -9,7 +9,8 @@ from awx.api.serializers import OAuth2TokenSerializer

class Command(BaseCommand):
    """Command that creates an OAuth2 token for a certain user. Returns the value of created token."""
    help='Creates an OAuth2 token for a user.'

    help = 'Creates an OAuth2 token for a user.'

    def add_arguments(self, parser):
        parser.add_argument('--user', dest='user', type=str)
@@ -22,7 +23,7 @@ class Command(BaseCommand):
            user = User.objects.get(username=options['user'])
        except ObjectDoesNotExist:
            raise CommandError('The user does not exist.')
        config = {'user': user, 'scope':'write'}
        config = {'user': user, 'scope': 'write'}
        serializer_obj = OAuth2TokenSerializer()

        class FakeRequest(object):
@@ -4,16 +4,13 @@
from django.core.management.base import BaseCommand
from django.conf import settings
from crum import impersonate
from awx.main.models import (
    User, Organization, Project, Inventory, CredentialType,
    Credential, Host, JobTemplate, ExecutionEnvironment
)
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate, ExecutionEnvironment
from awx.main.signals import disable_computed_fields


class Command(BaseCommand):
    """Create preloaded data, intended for new installs
    """
    """Create preloaded data, intended for new installs"""

    help = 'Creates a preload tower data if there is none.'

    def handle(self, *args, **kwargs):
@@ -29,44 +26,42 @@ class Command(BaseCommand):
            if not Organization.objects.exists():
                o = Organization.objects.create(name='Default')

                p = Project(name='Demo Project',
                            scm_type='git',
                            scm_url='https://github.com/ansible/ansible-tower-samples',
                            scm_update_on_launch=True,
                            scm_update_cache_timeout=0,
                            organization=o)
                p = Project(
                    name='Demo Project',
                    scm_type='git',
                    scm_url='https://github.com/ansible/ansible-tower-samples',
                    scm_update_on_launch=True,
                    scm_update_cache_timeout=0,
                    organization=o,
                )
                p.save(skip_update=True)

                ssh_type = CredentialType.objects.filter(namespace='ssh').first()
                c = Credential.objects.create(credential_type=ssh_type,
                                              name='Demo Credential',
                                              inputs={
                                                  'username': superuser.username
                                              },
                                              created_by=superuser)
                c = Credential.objects.create(
                    credential_type=ssh_type, name='Demo Credential', inputs={'username': superuser.username}, created_by=superuser
                )

                c.admin_role.members.add(superuser)

                public_galaxy_credential = Credential(name='Ansible Galaxy',
                                                      managed_by_tower=True,
                                                      credential_type=CredentialType.objects.get(kind='galaxy'),
                                                      inputs={'url': 'https://galaxy.ansible.com/'})
                public_galaxy_credential = Credential(
                    name='Ansible Galaxy',
                    managed_by_tower=True,
                    credential_type=CredentialType.objects.get(kind='galaxy'),
                    inputs={'url': 'https://galaxy.ansible.com/'},
                )
                public_galaxy_credential.save()
                o.galaxy_credentials.add(public_galaxy_credential)

                i = Inventory.objects.create(name='Demo Inventory',
                                             organization=o,
                                             created_by=superuser)
                i = Inventory.objects.create(name='Demo Inventory', organization=o, created_by=superuser)

                Host.objects.create(name='localhost',
                                    inventory=i,
                                    variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
                                    created_by=superuser)
                Host.objects.create(
                    name='localhost',
                    inventory=i,
                    variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
                    created_by=superuser,
                )

                jt = JobTemplate.objects.create(name='Demo Job Template',
                                                playbook='hello_world.yml',
                                                project=p,
                                                inventory=i)
                jt = JobTemplate.objects.create(name='Demo Job Template', playbook='hello_world.yml', project=p, inventory=i)
                jt.credentials.add(c)

                print('Default organization added.')
@@ -74,8 +69,7 @@ class Command(BaseCommand):
            changed = True

        default_ee = settings.AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE
        ee, created = ExecutionEnvironment.objects.get_or_create(name='Default EE', defaults={'image': default_ee,
                                                                                              'managed_by_tower': True})
        ee, created = ExecutionEnvironment.objects.get_or_create(name='Default EE', defaults={'image': default_ee, 'managed_by_tower': True})

        if created:
            changed = True
@@ -13,14 +13,10 @@ class Command(BaseCommand):
    Deprovision a Tower cluster node
    """

    help = (
        'Remove instance from the database. '
        'Specify `--hostname` to use this command.'
    )
    help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'

    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str,
                            help='Hostname used during provisioning')
        parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')

    @transaction.atomic
    def handle(self, *args, **options):
@@ -37,4 +33,3 @@ class Command(BaseCommand):
            print('(changed: True)')
        else:
            print('No instance found matching name {}'.format(hostname))
@@ -12,7 +12,8 @@ from django.core.exceptions import ObjectDoesNotExist

class Command(BaseCommand):
    """Expire Django auth sessions for a user/all users"""
    help='Expire Django auth sessions. Will expire all auth sessions if --user option is not supplied.'

    help = 'Expire Django auth sessions. Will expire all auth sessions if --user option is not supplied.'

    def add_arguments(self, parser):
        parser.add_argument('--user', dest='user', type=str)
@@ -7,21 +7,19 @@ from django.utils.timezone import now


class Command(BaseCommand):
    '''
    """
    Gather AWX analytics data
    '''
    """

    help = 'Gather AWX analytics data'

    def add_arguments(self, parser):
        parser.add_argument('--dry-run', dest='dry-run', action='store_true',
                            help='Gather analytics without shipping. Works even if analytics are disabled in settings.')
        parser.add_argument('--ship', dest='ship', action='store_true',
                            help='Enable to ship metrics to the Red Hat Cloud')
        parser.add_argument('--since', dest='since', action='store',
                            help='Start date for collection')
        parser.add_argument('--until', dest='until', action='store',
                            help='End date for collection')
        parser.add_argument(
            '--dry-run', dest='dry-run', action='store_true', help='Gather analytics without shipping. Works even if analytics are disabled in settings.'
        )
        parser.add_argument('--ship', dest='ship', action='store_true', help='Enable to ship metrics to the Red Hat Cloud')
        parser.add_argument('--since', dest='since', action='store', help='Start date for collection')
        parser.add_argument('--until', dest='until', action='store', help='End date for collection')

    def init_logging(self):
        self.logger = logging.getLogger('awx.main.analytics')
@@ -50,7 +48,7 @@ class Command(BaseCommand):
        if opt_ship and opt_dry_run:
            self.logger.error('Both --ship and --dry-run cannot be processed at the same time.')
            return
        tgzfiles = gather(collection_type='manual' if not opt_dry_run else 'dry-run', since = since, until = until)
        tgzfiles = gather(collection_type='manual' if not opt_dry_run else 'dry-run', since=since, until=until)
        if tgzfiles:
            for tgz in tgzfiles:
                self.logger.info(tgz)
@@ -14,6 +14,7 @@ from awx.conf.models import Setting

class Command(BaseCommand):
    """Generate and store a randomized RSA key for SSH traffic to isolated instances"""

    help = 'Generates and stores a randomized RSA key for SSH traffic to isolated instances'

    def handle(self, *args, **kwargs):
@@ -21,25 +22,17 @@ class Command(BaseCommand):
            print(settings.AWX_ISOLATED_PUBLIC_KEY)
            return

        key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=4096,
            backend=default_backend()
        )
        key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())
        Setting.objects.create(
            key='AWX_ISOLATED_PRIVATE_KEY',
            value=key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            )
                encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()
            ),
        ).save()
        pemfile = Setting.objects.create(
            key='AWX_ISOLATED_PUBLIC_KEY',
            value=smart_str(key.public_key().public_bytes(
                encoding=serialization.Encoding.OpenSSH,
                format=serialization.PublicFormat.OpenSSH
            )) + " generated-by-awx@%s" % datetime.datetime.utcnow().isoformat()
            value=smart_str(key.public_key().public_bytes(encoding=serialization.Encoding.OpenSSH, format=serialization.PublicFormat.OpenSSH))
            + " generated-by-awx@%s" % datetime.datetime.utcnow().isoformat(),
        )
        pemfile.save()
        print(pemfile.value)
@@ -9,10 +9,7 @@ from django.db.models import Count
from django.core.management.base import BaseCommand

# AWX
from awx.main.models import (
    Job,
    Instance
)
from awx.main.models import Job, Instance


DEFAULT_WIDTH = 100
@@ -27,7 +24,7 @@ def clear_screen():
    print(chr(27) + "[2J")


class JobStatus():
class JobStatus:
    def __init__(self, status, color, width):
        self.status = status
        self.color = color
@@ -44,16 +41,12 @@ class JobStatusController:
    RESET = chart_color_lookup('reset')

    def __init__(self, width):
        self.plots = [
            JobStatus('pending', 'red', width),
            JobStatus('waiting', 'blue', width),
            JobStatus('running', 'green', width)
        ]
        self.plots = [JobStatus('pending', 'red', width), JobStatus('waiting', 'blue', width), JobStatus('running', 'green', width)]
        self.ts_start = int(time.time())

    def tick(self):
        ts = int(time.time()) - self.ts_start
        q = Job.objects.filter(status__in=['pending','waiting','running']).values_list('status').order_by().annotate(Count('status'))
        q = Job.objects.filter(status__in=['pending', 'waiting', 'running']).values_list('status').order_by().annotate(Count('status'))
        status_count = dict(pending=0, waiting=0, running=0)
        for status, count in q:
            status_count[status] = count
@@ -86,12 +79,11 @@ class Command(BaseCommand):
    help = "Plot pending, waiting, running jobs over time on the terminal"

    def add_arguments(self, parser):
        parser.add_argument('--refresh', dest='refresh', type=float, default=1.0,
                            help='Time between refreshes of the graph and data in seconds (defaults to 1.0)')
        parser.add_argument('--width', dest='width', type=int, default=DEFAULT_WIDTH,
                            help=f'Width of the graph (defaults to {DEFAULT_WIDTH})')
        parser.add_argument('--height', dest='height', type=int, default=DEFAULT_HEIGHT,
                            help=f'Height of the graph (defaults to {DEFAULT_HEIGHT})')
        parser.add_argument(
            '--refresh', dest='refresh', type=float, default=1.0, help='Time between refreshes of the graph and data in seconds (defaults to 1.0)'
        )
        parser.add_argument('--width', dest='width', type=int, default=DEFAULT_WIDTH, help=f'Width of the graph (defaults to {DEFAULT_WIDTH})')
        parser.add_argument('--height', dest='height', type=int, default=DEFAULT_HEIGHT, help=f'Height of the graph (defaults to {DEFAULT_HEIGHT})')

    def handle(self, *args, **options):
        refresh_seconds = options['refresh']
@@ -114,4 +106,3 @@ class Command(BaseCommand):
            print(draw)
            sys.stdout.write(status_line)
            time.sleep(refresh_seconds)
@@ -22,21 +22,13 @@ from django.utils.encoding import smart_text
from rest_framework.exceptions import PermissionDenied

# AWX inventory imports
from awx.main.models.inventory import (
    Inventory,
    InventorySource,
    InventoryUpdate,
    Host
)
from awx.main.models.inventory import Inventory, InventorySource, InventoryUpdate, Host
from awx.main.utils.mem_inventory import MemInventory, dict_to_mem_data
from awx.main.utils.safe_yaml import sanitize_jinja

# other AWX imports
from awx.main.models.rbac import batch_role_ancestor_rebuilding
from awx.main.utils import (
    ignore_inventory_computed_fields,
    get_licenser
)
from awx.main.utils import ignore_inventory_computed_fields, get_licenser
from awx.main.signals import disable_activity_stream
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
from awx.main.utils.pglock import advisory_lock
@@ -67,12 +59,12 @@ def functioning_dir(path):


class AnsibleInventoryLoader(object):
    '''
    """
    Given executable `source` (directory, executable, or file) this will
    use the ansible-inventory CLI utility to convert it into in-memory
    representational objects. Example:
    /usr/bin/ansible/ansible-inventory -i hosts --list
    '''
    """

    def __init__(self, source, venv_path=None, verbosity=0):
        self.source = source
@@ -88,17 +80,11 @@ class AnsibleInventoryLoader(object):
        venv_exe = os.path.join(self.venv_path, 'bin', 'ansible-inventory')
        if os.path.exists(venv_exe):
            return venv_exe
        elif os.path.exists(
            os.path.join(self.venv_path, 'bin', 'ansible')
        ):
        elif os.path.exists(os.path.join(self.venv_path, 'bin', 'ansible')):
            # if bin/ansible exists but bin/ansible-inventory doesn't, it's
            # probably a really old version of ansible that doesn't support
            # ansible-inventory
            raise RuntimeError(
                "{} does not exist (please upgrade to ansible >= 2.4)".format(
                    venv_exe
                )
            )
            raise RuntimeError("{} does not exist (please upgrade to ansible >= 2.4)".format(venv_exe))
        return shutil.which('ansible-inventory')

    def get_base_args(self):
@@ -126,8 +112,7 @@ class AnsibleInventoryLoader(object):
        stderr = smart_text(stderr)

        if proc.returncode != 0:
            raise RuntimeError('%s failed (rc=%d) with stdout:\n%s\nstderr:\n%s' % (
                'ansible-inventory', proc.returncode, stdout, stderr))
            raise RuntimeError('%s failed (rc=%d) with stdout:\n%s\nstderr:\n%s' % ('ansible-inventory', proc.returncode, stdout, stderr))

        for line in stderr.splitlines():
            logger.error(line)
@@ -149,63 +134,78 @@ class AnsibleInventoryLoader(object):


class Command(BaseCommand):
    '''
    """
    Management command to import inventory from a directory, ini file, or
    dynamic inventory script.
    '''
    """

    help = 'Import or sync external inventory sources'

    def add_arguments(self, parser):
        parser.add_argument('--inventory-name', dest='inventory_name',
                            type=str, default=None, metavar='n',
                            help='name of inventory to sync')
        parser.add_argument('--inventory-id', dest='inventory_id', type=int,
                            default=None, metavar='i',
                            help='id of inventory to sync')
        parser.add_argument('--venv', dest='venv', type=str, default=None,
                            help='absolute path to the AWX custom virtualenv to use')
        parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False,
                            help='overwrite the destination hosts and groups')
        parser.add_argument('--overwrite-vars', dest='overwrite_vars',
                            action='store_true', default=False,
                            help='overwrite (rather than merge) variables')
        parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False,
                            help='DEPRECATED legacy option, has no effect')
        parser.add_argument('--custom', dest='custom', action='store_true', default=False,
                            help='DEPRECATED indicates a custom inventory script, no longer used')
        parser.add_argument('--source', dest='source', type=str, default=None,
                            metavar='s', help='inventory directory, file, or script to load')
        parser.add_argument('--enabled-var', dest='enabled_var', type=str,
                            default=None, metavar='v', help='host variable used to '
                            'set/clear enabled flag when host is online/offline, may '
                            'be specified as "foo.bar" to traverse nested dicts.')
        parser.add_argument('--enabled-value', dest='enabled_value', type=str,
                            default=None, metavar='v', help='value of host variable '
                            'specified by --enabled-var that indicates host is '
                            'enabled/online.')
        parser.add_argument('--group-filter', dest='group_filter', type=str,
                            default=None, metavar='regex', help='regular expression '
                            'to filter group name(s); only matches are imported.')
        parser.add_argument('--host-filter', dest='host_filter', type=str,
                            default=None, metavar='regex', help='regular expression '
                            'to filter host name(s); only matches are imported.')
        parser.add_argument('--exclude-empty-groups', dest='exclude_empty_groups',
                            action='store_true', default=False, help='when set, '
                            'exclude all groups that have no child groups, hosts, or '
                            'variables.')
        parser.add_argument('--instance-id-var', dest='instance_id_var', type=str,
                            default=None, metavar='v', help='host variable that '
                            'specifies the unique, immutable instance ID, may be '
                            'specified as "foo.bar" to traverse nested dicts.')
        parser.add_argument('--inventory-name', dest='inventory_name', type=str, default=None, metavar='n', help='name of inventory to sync')
        parser.add_argument('--inventory-id', dest='inventory_id', type=int, default=None, metavar='i', help='id of inventory to sync')
        parser.add_argument('--venv', dest='venv', type=str, default=None, help='absolute path to the AWX custom virtualenv to use')
        parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False, help='overwrite the destination hosts and groups')
        parser.add_argument('--overwrite-vars', dest='overwrite_vars', action='store_true', default=False, help='overwrite (rather than merge) variables')
        parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False, help='DEPRECATED legacy option, has no effect')
        parser.add_argument(
            '--custom', dest='custom', action='store_true', default=False, help='DEPRECATED indicates a custom inventory script, no longer used'
        )
        parser.add_argument('--source', dest='source', type=str, default=None, metavar='s', help='inventory directory, file, or script to load')
        parser.add_argument(
            '--enabled-var',
            dest='enabled_var',
            type=str,
            default=None,
            metavar='v',
            help='host variable used to ' 'set/clear enabled flag when host is online/offline, may ' 'be specified as "foo.bar" to traverse nested dicts.',
        )
        parser.add_argument(
            '--enabled-value',
            dest='enabled_value',
            type=str,
            default=None,
            metavar='v',
            help='value of host variable ' 'specified by --enabled-var that indicates host is ' 'enabled/online.',
        )
        parser.add_argument(
            '--group-filter',
            dest='group_filter',
            type=str,
            default=None,
            metavar='regex',
            help='regular expression ' 'to filter group name(s); only matches are imported.',
        )
        parser.add_argument(
            '--host-filter',
            dest='host_filter',
            type=str,
            default=None,
            metavar='regex',
            help='regular expression ' 'to filter host name(s); only matches are imported.',
        )
        parser.add_argument(
            '--exclude-empty-groups',
            dest='exclude_empty_groups',
            action='store_true',
            default=False,
            help='when set, ' 'exclude all groups that have no child groups, hosts, or ' 'variables.',
        )
        parser.add_argument(
            '--instance-id-var',
            dest='instance_id_var',
            type=str,
            default=None,
            metavar='v',
            help='host variable that ' 'specifies the unique, immutable instance ID, may be ' 'specified as "foo.bar" to traverse nested dicts.',
        )

    def set_logging_level(self, verbosity):
        log_levels = dict(enumerate([logging.WARNING, logging.INFO,
                                     logging.DEBUG, 0]))
        log_levels = dict(enumerate([logging.WARNING, logging.INFO, logging.DEBUG, 0]))
        logger.setLevel(log_levels.get(verbosity, 0))

    def _get_instance_id(self, variables, default=''):
        '''
        """
        Retrieve the instance ID from the given dict of host variables.

        The instance ID variable may be specified as 'foo.bar', in which case
@@ -216,7 +216,7 @@ class Command(BaseCommand):
        Multiple ID variables may be specified as 'foo.bar,foobar', so that
        it will first try to find 'bar' inside of 'foo', and if unable,
        will try to find 'foobar' as a fallback
        '''
        """
        instance_id = default
        if getattr(self, 'instance_id_var', None):
            for single_instance_id in self.instance_id_var.split(','):
@@ -232,14 +232,14 @@ class Command(BaseCommand):
        return smart_text(instance_id)

    def _get_enabled(self, from_dict, default=None):
        '''
        """
        Retrieve the enabled state from the given dict of host variables.

        The enabled variable may be specified as 'foo.bar', in which case
        the lookup will traverse into nested dicts, equivalent to:

        from_dict.get('foo', {}).get('bar', default)
        '''
        """
        enabled = default
        if getattr(self, 'enabled_var', None):
            default = object()
@@ -266,8 +266,7 @@ class Command(BaseCommand):
    def get_source_absolute_path(source):
        if not os.path.exists(source):
            raise IOError('Source does not exist: %s' % source)
        source = os.path.join(os.getcwd(), os.path.dirname(source),
                              os.path.basename(source))
        source = os.path.join(os.getcwd(), os.path.dirname(source), os.path.basename(source))
        source = os.path.normpath(os.path.abspath(source))
        return source
@@ -284,15 +283,14 @@ class Command(BaseCommand):
            self._batch_add_m2m_cache[key] = []

    def _build_db_instance_id_map(self):
        '''
        """
        Find any hosts in the database without an instance_id set that may
        still have one available via host variables.
        '''
        """
        self.db_instance_id_map = {}
        if self.instance_id_var:
            host_qs = self.inventory_source.hosts.all()
            host_qs = host_qs.filter(instance_id='',
                                     variables__contains=self.instance_id_var.split('.')[0])
            host_qs = host_qs.filter(instance_id='', variables__contains=self.instance_id_var.split('.')[0])
            for host in host_qs:
                instance_id = self._get_instance_id(host.variables_dict)
                if not instance_id:
@@ -300,38 +298,36 @@ class Command(BaseCommand):
                self.db_instance_id_map[instance_id] = host.pk

    def _build_mem_instance_id_map(self):
        '''
        """
        Update instance ID for each imported host and define a mapping of
        instance IDs to MemHost instances.
        '''
        """
        self.mem_instance_id_map = {}
        if self.instance_id_var:
            for mem_host in self.all_group.all_hosts.values():
                instance_id = self._get_instance_id(mem_host.variables)
                if not instance_id:
                    logger.warning('Host "%s" has no "%s" variable(s)',
                                   mem_host.name, self.instance_id_var)
                    logger.warning('Host "%s" has no "%s" variable(s)', mem_host.name, self.instance_id_var)
                    continue
                mem_host.instance_id = instance_id
                self.mem_instance_id_map[instance_id] = mem_host.name

    def _existing_host_pks(self):
        '''Returns cached set of existing / previous host primary key values
        """Returns cached set of existing / previous host primary key values
        this is the starting set, meaning that it is pre-modification
        by deletions and other things done in the course of this import
        '''
        """
        if not hasattr(self, '_cached_host_pk_set'):
            self._cached_host_pk_set = frozenset(
                self.inventory_source.hosts.values_list('pk', flat=True))
            self._cached_host_pk_set = frozenset(self.inventory_source.hosts.values_list('pk', flat=True))
        return self._cached_host_pk_set

    def _delete_hosts(self):
        '''
        """
        For each host in the database that is NOT in the local list, delete
        it. When importing from a cloud inventory source attached to a
        specific group, only delete hosts beneath that group. Delete each
        host individually so signal handlers will run.
        '''
        """
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
        hosts_qs = self.inventory_source.hosts
@@ -341,38 +337,36 @@ class Command(BaseCommand):
            all_instance_ids = list(self.mem_instance_id_map.keys())
            instance_ids = []
            for offset in range(0, len(all_instance_ids), self._batch_size):
                instance_ids = all_instance_ids[offset:(offset + self._batch_size)]
                instance_ids = all_instance_ids[offset : (offset + self._batch_size)]
                for host_pk in hosts_qs.filter(instance_id__in=instance_ids).values_list('pk', flat=True):
                    del_host_pks.discard(host_pk)
                for host_pk in set([v for k,v in self.db_instance_id_map.items() if k in instance_ids]):
                for host_pk in set([v for k, v in self.db_instance_id_map.items() if k in instance_ids]):
                    del_host_pks.discard(host_pk)
            all_host_names = list(set(self.mem_instance_id_map.values()) - set(self.all_group.all_hosts.keys()))
        else:
            all_host_names = list(self.all_group.all_hosts.keys())
        for offset in range(0, len(all_host_names), self._batch_size):
            host_names = all_host_names[offset:(offset + self._batch_size)]
            host_names = all_host_names[offset : (offset + self._batch_size)]
            for host_pk in hosts_qs.filter(name__in=host_names).values_list('pk', flat=True):
                del_host_pks.discard(host_pk)
        # Now delete all remaining hosts in batches.
        all_del_pks = sorted(list(del_host_pks))
        for offset in range(0, len(all_del_pks), self._batch_size):
            del_pks = all_del_pks[offset:(offset + self._batch_size)]
            del_pks = all_del_pks[offset : (offset + self._batch_size)]
            for host in hosts_qs.filter(pk__in=del_pks):
                host_name = host.name
                host.delete()
                logger.debug('Deleted host "%s"', host_name)
        if settings.SQL_DEBUG:
            logger.warning('host deletions took %d queries for %d hosts',
                           len(connection.queries) - queries_before,
                           len(all_del_pks))
            logger.warning('host deletions took %d queries for %d hosts', len(connection.queries) - queries_before, len(all_del_pks))

    def _delete_groups(self):
        '''
        """
        # If overwrite is set, for each group in the database that is NOT in
        # the local list, delete it. When importing from a cloud inventory
        # source attached to a specific group, only delete children of that
        # group. Delete each group individually so signal handlers will run.
        '''
        """
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
        groups_qs = self.inventory_source.groups.all()
@@ -380,30 +374,28 @@ class Command(BaseCommand):
            del_group_pks = set(groups_qs.values_list('pk', flat=True))
            all_group_names = list(self.all_group.all_groups.keys())
            for offset in range(0, len(all_group_names), self._batch_size):
                group_names = all_group_names[offset:(offset + self._batch_size)]
                group_names = all_group_names[offset : (offset + self._batch_size)]
                for group_pk in groups_qs.filter(name__in=group_names).values_list('pk', flat=True):
                    del_group_pks.discard(group_pk)
            # Now delete all remaining groups in batches.
            all_del_pks = sorted(list(del_group_pks))
            for offset in range(0, len(all_del_pks), self._batch_size):
                del_pks = all_del_pks[offset:(offset + self._batch_size)]
                del_pks = all_del_pks[offset : (offset + self._batch_size)]
                for group in groups_qs.filter(pk__in=del_pks):
                    group_name = group.name
                    with ignore_inventory_computed_fields():
                        group.delete()
                    logger.debug('Group "%s" deleted', group_name)
        if settings.SQL_DEBUG:
            logger.warning('group deletions took %d queries for %d groups',
                           len(connection.queries) - queries_before,
                           len(all_del_pks))
            logger.warning('group deletions took %d queries for %d groups', len(connection.queries) - queries_before, len(all_del_pks))

    def _delete_group_children_and_hosts(self):
        '''
        """
        Clear all invalid child relationships for groups and all invalid host
        memberships. When importing from a cloud inventory source attached to
        a specific group, only clear relationships for hosts and groups that
        are beneath the inventory source group.
        '''
        """
        # FIXME: Optimize performance!
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
@@ -432,12 +424,11 @@ class Command(BaseCommand):
            # Removal list is complete - now perform the removals
            del_child_group_pks = list(set(db_children_name_pk_map.values()))
            for offset in range(0, len(del_child_group_pks), self._batch_size):
                child_group_pks = del_child_group_pks[offset:(offset + self._batch_size)]
                child_group_pks = del_child_group_pks[offset : (offset + self._batch_size)]
                for db_child in db_children.filter(pk__in=child_group_pks):
                    group_group_count += 1
                    db_group.children.remove(db_child)
                    logger.debug('Group "%s" removed from group "%s"',
                                 db_child.name, db_group.name)
                    logger.debug('Group "%s" removed from group "%s"', db_child.name, db_group.name)
            # FIXME: Inventory source group relationships
            # Delete group/host relationships not present in imported data.
            db_hosts = db_group.hosts
@@ -451,37 +442,38 @@ class Command(BaseCommand):
                mem_hosts = self.all_group.all_groups[db_group.name].hosts
                all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
                for offset in range(0, len(all_mem_host_names), self._batch_size):
                    mem_host_names = all_mem_host_names[offset:(offset + self._batch_size)]
                    mem_host_names = all_mem_host_names[offset : (offset + self._batch_size)]
                    for db_host_pk in db_hosts.filter(name__in=mem_host_names).values_list('pk', flat=True):
                        del_host_pks.discard(db_host_pk)
                all_mem_instance_ids = [h.instance_id for h in mem_hosts if h.instance_id]
                for offset in range(0, len(all_mem_instance_ids), self._batch_size):
                    mem_instance_ids = all_mem_instance_ids[offset:(offset + self._batch_size)]
                    mem_instance_ids = all_mem_instance_ids[offset : (offset + self._batch_size)]
                    for db_host_pk in db_hosts.filter(instance_id__in=mem_instance_ids).values_list('pk', flat=True):
                        del_host_pks.discard(db_host_pk)
                all_db_host_pks = [v for k,v in self.db_instance_id_map.items() if k in all_mem_instance_ids]
                all_db_host_pks = [v for k, v in self.db_instance_id_map.items() if k in all_mem_instance_ids]
                for db_host_pk in all_db_host_pks:
                    del_host_pks.discard(db_host_pk)
                # Removal list is complete - now perform the removals
                del_host_pks = list(del_host_pks)
                for offset in range(0, len(del_host_pks), self._batch_size):
                    del_pks = del_host_pks[offset:(offset + self._batch_size)]
                    del_pks = del_host_pks[offset : (offset + self._batch_size)]
                    for db_host in db_hosts.filter(pk__in=del_pks):
                        group_host_count += 1
                        if db_host not in db_group.hosts.all():
                            continue
                        db_group.hosts.remove(db_host)
                        logger.debug('Host "%s" removed from group "%s"',
                                     db_host.name, db_group.name)
                        logger.debug('Host "%s" removed from group "%s"', db_host.name, db_group.name)
        if settings.SQL_DEBUG:
            logger.warning('group-group and group-host deletions took %d queries for %d relationships',
                           len(connection.queries) - queries_before,
                           group_group_count + group_host_count)
            logger.warning(
                'group-group and group-host deletions took %d queries for %d relationships',
                len(connection.queries) - queries_before,
                group_group_count + group_host_count,
            )

    def _update_inventory(self):
        '''
        """
        Update inventory variables from "all" group.
        '''
        """
        # TODO: We disable variable overwrite here in case user-defined inventory variables get
        # mangled. But we still need to figure out a better way of processing multiple inventory
        # update variables mixing with each other.
@@ -496,24 +488,24 @@ class Command(BaseCommand):
            logger.debug('Inventory variables unmodified')

    def _create_update_groups(self):
        '''
        """
        For each group in the local list, create it if it doesn't exist in the
        database. Otherwise, update/replace database variables from the
        imported data. Associate with the inventory source group if importing
        from cloud inventory source.
        '''
        """
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
        all_group_names = sorted(self.all_group.all_groups.keys())
        root_group_names = set()
        for k,v in self.all_group.all_groups.items():
        for k, v in self.all_group.all_groups.items():
            if not v.parents:
                root_group_names.add(k)
            if len(v.parents) == 1 and v.parents[0].name == 'all':
                root_group_names.add(k)
        existing_group_names = set()
        for offset in range(0, len(all_group_names), self._batch_size):
            group_names = all_group_names[offset:(offset + self._batch_size)]
            group_names = all_group_names[offset : (offset + self._batch_size)]
            for group in self.inventory.groups.filter(name__in=group_names):
                mem_group = self.all_group.all_groups[group.name]
                db_variables = group.variables_dict
@@ -537,20 +529,14 @@ class Command(BaseCommand):
                continue
            mem_group = self.all_group.all_groups[group_name]
            group_desc = mem_group.variables.pop('_awx_description', 'imported')
            group = self.inventory.groups.update_or_create(
                name=group_name,
                defaults={
                    'variables':json.dumps(mem_group.variables),
                    'description':group_desc
                }
            )[0]
            group = self.inventory.groups.update_or_create(name=group_name, defaults={'variables': json.dumps(mem_group.variables), 'description': group_desc})[
                0
            ]
            logger.debug('Group "%s" added', group.name)
            self._batch_add_m2m(self.inventory_source.groups, group)
        self._batch_add_m2m(self.inventory_source.groups, flush=True)
        if settings.SQL_DEBUG:
            logger.warning('group updates took %d queries for %d groups',
                           len(connection.queries) - queries_before,
                           len(self.all_group.all_groups))
            logger.warning('group updates took %d queries for %d groups', len(connection.queries) - queries_before, len(self.all_group.all_groups))

    def _update_db_host_from_mem_host(self, db_host, mem_host):
        # Update host variables.
@@ -604,12 +590,12 @@ class Command(BaseCommand):
        self._batch_add_m2m(self.inventory_source.hosts, db_host)

    def _create_update_hosts(self):
        '''
        """
        For each host in the local list, create it if it doesn't exist in the
        database. Otherwise, update/replace database variables from the
        imported data. Associate with the inventory source group if importing
        from cloud inventory source.
        '''
        """
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
        host_pks_updated = set()
@@ -617,7 +603,7 @@ class Command(BaseCommand):
        mem_host_instance_id_map = {}
        mem_host_name_map = {}
        mem_host_names_to_update = set(self.all_group.all_hosts.keys())
        for k,v in self.all_group.all_hosts.items():
        for k, v in self.all_group.all_hosts.items():
            mem_host_name_map[k] = v
            instance_id = self._get_instance_id(v.variables)
            if instance_id in self.db_instance_id_map:
@@ -628,8 +614,8 @@ class Command(BaseCommand):
        # Update all existing hosts where we know the PK based on instance_id.
        all_host_pks = sorted(mem_host_pk_map.keys())
        for offset in range(0, len(all_host_pks), self._batch_size):
            host_pks = all_host_pks[offset:(offset + self._batch_size)]
            for db_host in self.inventory.hosts.filter( pk__in=host_pks):
            host_pks = all_host_pks[offset : (offset + self._batch_size)]
            for db_host in self.inventory.hosts.filter(pk__in=host_pks):
                if db_host.pk in host_pks_updated:
                    continue
                mem_host = mem_host_pk_map[db_host.pk]
@@ -640,8 +626,8 @@ class Command(BaseCommand):
        # Update all existing hosts where we know the instance_id.
        all_instance_ids = sorted(mem_host_instance_id_map.keys())
        for offset in range(0, len(all_instance_ids), self._batch_size):
            instance_ids = all_instance_ids[offset:(offset + self._batch_size)]
            for db_host in self.inventory.hosts.filter( instance_id__in=instance_ids):
            instance_ids = all_instance_ids[offset : (offset + self._batch_size)]
            for db_host in self.inventory.hosts.filter(instance_id__in=instance_ids):
                if db_host.pk in host_pks_updated:
                    continue
                mem_host = mem_host_instance_id_map[db_host.instance_id]
@@ -652,8 +638,8 @@ class Command(BaseCommand):
        # Update all existing hosts by name.
        all_host_names = sorted(mem_host_name_map.keys())
        for offset in range(0, len(all_host_names), self._batch_size):
            host_names = all_host_names[offset:(offset + self._batch_size)]
            for db_host in self.inventory.hosts.filter( name__in=host_names):
            host_names = all_host_names[offset : (offset + self._batch_size)]
            for db_host in self.inventory.hosts.filter(name__in=host_names):
                if db_host.pk in host_pks_updated:
                    continue
                mem_host = mem_host_name_map[db_host.name]
@@ -687,27 +673,25 @@ class Command(BaseCommand):
        self._batch_add_m2m(self.inventory_source.hosts, flush=True)

        if settings.SQL_DEBUG:
            logger.warning('host updates took %d queries for %d hosts',
                           len(connection.queries) - queries_before,
                           len(self.all_group.all_hosts))
            logger.warning('host updates took %d queries for %d hosts', len(connection.queries) - queries_before, len(self.all_group.all_hosts))

    @transaction.atomic
    def _create_update_group_children(self):
        '''
        """
        For each imported group, create all parent-child group relationships.
        '''
        """
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
        all_group_names = sorted([k for k,v in self.all_group.all_groups.items() if v.children])
        all_group_names = sorted([k for k, v in self.all_group.all_groups.items() if v.children])
        group_group_count = 0
        for offset in range(0, len(all_group_names), self._batch_size):
            group_names = all_group_names[offset:(offset + self._batch_size)]
            group_names = all_group_names[offset : (offset + self._batch_size)]
            for db_group in self.inventory.groups.filter(name__in=group_names):
                mem_group = self.all_group.all_groups[db_group.name]
                group_group_count += len(mem_group.children)
                all_child_names = sorted([g.name for g in mem_group.children])
                for offset2 in range(0, len(all_child_names), self._batch_size):
                    child_names = all_child_names[offset2:(offset2 + self._batch_size)]
                    child_names = all_child_names[offset2 : (offset2 + self._batch_size)]
                    db_children_qs = self.inventory.groups.filter(name__in=child_names)
                    for db_child in db_children_qs.filter(children__id=db_group.id):
                        logger.debug('Group "%s" already child of group "%s"', db_child.name, db_group.name)
@@ -716,8 +700,7 @@ class Command(BaseCommand):
                        logger.debug('Group "%s" added as child of "%s"', db_child.name, db_group.name)
                self._batch_add_m2m(db_group.children, flush=True)
        if settings.SQL_DEBUG:
            logger.warning('Group-group updates took %d queries for %d group-group relationships',
                           len(connection.queries) - queries_before, group_group_count)
            logger.warning('Group-group updates took %d queries for %d group-group relationships', len(connection.queries) - queries_before, group_group_count)

    @transaction.atomic
    def _create_update_group_hosts(self):
@@ -725,16 +708,16 @@ class Command(BaseCommand):
        # belongs.
        if settings.SQL_DEBUG:
            queries_before = len(connection.queries)
        all_group_names = sorted([k for k,v in self.all_group.all_groups.items() if v.hosts])
        all_group_names = sorted([k for k, v in self.all_group.all_groups.items() if v.hosts])
        group_host_count = 0
        for offset in range(0, len(all_group_names), self._batch_size):
            group_names = all_group_names[offset:(offset + self._batch_size)]
            group_names = all_group_names[offset : (offset + self._batch_size)]
            for db_group in self.inventory.groups.filter(name__in=group_names):
                mem_group = self.all_group.all_groups[db_group.name]
                group_host_count += len(mem_group.hosts)
                all_host_names = sorted([h.name for h in mem_group.hosts if not h.instance_id])
                for offset2 in range(0, len(all_host_names), self._batch_size):
                    host_names = all_host_names[offset2:(offset2 + self._batch_size)]
                    host_names = all_host_names[offset2 : (offset2 + self._batch_size)]
                    db_hosts_qs = self.inventory.hosts.filter(name__in=host_names)
                    for db_host in db_hosts_qs.filter(groups__id=db_group.id):
                        logger.debug('Host "%s" already in group "%s"', db_host.name, db_group.name)
@@ -743,7 +726,7 @@ class Command(BaseCommand):
|
||||
logger.debug('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
all_instance_ids = sorted([h.instance_id for h in mem_group.hosts if h.instance_id])
|
||||
for offset2 in range(0, len(all_instance_ids), self._batch_size):
|
||||
instance_ids = all_instance_ids[offset2:(offset2 + self._batch_size)]
|
||||
instance_ids = all_instance_ids[offset2 : (offset2 + self._batch_size)]
|
||||
db_hosts_qs = self.inventory.hosts.filter(instance_id__in=instance_ids)
|
||||
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
|
||||
logger.debug('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
@@ -752,14 +735,13 @@ class Command(BaseCommand):
|
||||
logger.debug('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
self._batch_add_m2m(db_group.hosts, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('Group-host updates took %d queries for %d group-host relationships',
|
||||
len(connection.queries) - queries_before, group_host_count)
|
||||
logger.warning('Group-host updates took %d queries for %d group-host relationships', len(connection.queries) - queries_before, group_host_count)
|
||||
|
||||
def load_into_database(self):
|
||||
'''
|
||||
"""
|
||||
Load inventory from in-memory groups to the database, overwriting or
|
||||
merging as appropriate.
|
||||
'''
|
||||
"""
|
||||
# FIXME: Attribute changes to superuser?
|
||||
# Perform __in queries in batches (mainly for unit tests using SQLite).
|
||||
self._batch_size = 500
|
||||
@@ -782,9 +764,7 @@ class Command(BaseCommand):
|
||||
if remote_license_type is None:
|
||||
raise PermissionDenied('Unexpected Error: Tower inventory plugin missing needed metadata!')
|
||||
if local_license_type != remote_license_type:
|
||||
raise PermissionDenied('Tower server licenses must match: source: {} local: {}'.format(
|
||||
remote_license_type, local_license_type
|
||||
))
|
||||
raise PermissionDenied('Tower server licenses must match: source: {} local: {}'.format(remote_license_type, local_license_type))
|
||||
|
||||
def check_license(self):
license_info = get_licenser().validate()
@@ -875,7 +855,6 @@ class Command(BaseCommand):
raise CommandError('Inventory with %s = %s returned multiple results' % list(q.items())[0])
logger.info('Updating inventory %d: %s' % (inventory.pk, inventory.name))


# Create ad-hoc inventory source and inventory update objects
with ignore_inventory_computed_fields():
source = Command.get_source_absolute_path(raw_source)
@@ -888,15 +867,10 @@ class Command(BaseCommand):
overwrite_vars=bool(options.get('overwrite_vars', False)),
)
inventory_update = inventory_source.create_inventory_update(
_eager_fields=dict(
job_args=json.dumps(sys.argv),
job_env=dict(os.environ.items()),
job_cwd=os.getcwd())
_eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
)

data = AnsibleInventoryLoader(
source=source, venv_path=venv_path, verbosity=verbosity
).load()
data = AnsibleInventoryLoader(source=source, venv_path=venv_path, verbosity=verbosity).load()

logger.debug('Finished loading from source: %s', source)

@@ -992,12 +966,10 @@ class Command(BaseCommand):
self.inventory_update.save()

logger.info('Processing JSON output...')
inventory = MemInventory(
group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
inventory = MemInventory(group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
inventory = dict_to_mem_data(data, inventory=inventory)

logger.info('Loaded %d groups, %d hosts', len(inventory.all_group.all_groups),
len(inventory.all_group.all_hosts))
logger.info('Loaded %d groups, %d hosts', len(inventory.all_group.all_groups), len(inventory.all_group.all_hosts))

if self.exclude_empty_groups:
inventory.delete_empty_groups()
@@ -1036,8 +1008,7 @@ class Command(BaseCommand):
queries_before2 = len(connection.queries)
self.inventory.update_computed_fields()
if settings.SQL_DEBUG:
logger.warning('update computed fields took %d queries',
len(connection.queries) - queries_before2)
logger.warning('update computed fields took %d queries', len(connection.queries) - queries_before2)

# Check if the license is valid.
# If the license is not valid, a CommandError will be thrown,
@@ -1057,17 +1028,13 @@ class Command(BaseCommand):
raise e

if settings.SQL_DEBUG:
logger.warning('Inventory import completed for %s in %0.1fs',
self.inventory_source.name, time.time() - begin)
logger.warning('Inventory import completed for %s in %0.1fs', self.inventory_source.name, time.time() - begin)
else:
logger.info('Inventory import completed for %s in %0.1fs',
self.inventory_source.name, time.time() - begin)
logger.info('Inventory import completed for %s in %0.1fs', self.inventory_source.name, time.time() - begin)

# If we're in debug mode, then log the queries and time
# used to do the operation.
if settings.SQL_DEBUG:
queries_this_import = connection.queries[queries_before:]
sqltime = sum(float(x['time']) for x in queries_this_import)
logger.warning('Inventory import required %d queries '
'taking %0.3fs', len(queries_this_import),
sqltime)
logger.warning('Inventory import required %d queries ' 'taking %0.3fs', len(queries_this_import), sqltime)

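The reflowed warning above relies on Python joining adjacent string literals at compile time, so 'Inventory import required %d queries ' 'taking %0.3fs' is a single format string before logging ever sees it. A minimal sketch of the mechanism:

    # adjacent literals fuse into one string before %-formatting runs
    msg = 'Inventory import required %d queries ' 'taking %0.3fs'
    assert msg == 'Inventory import required %d queries taking %0.3fs'
    print(msg % (42, 1.234))
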
@@ -22,8 +22,7 @@ class Ungrouped(object):


class Command(BaseCommand):
"""List instances from the Tower database
"""
"""List instances from the Tower database"""

def handle(self, *args, **options):
super(Command, self).__init__()

@@ -10,18 +10,18 @@ class Command(BaseCommand):
"""

def add_arguments(self, parser):
parser.add_argument('--threshold', dest='threshold', type=float, default=2.0,
help='The minimum query duration in seconds (default=2). Use 0 to disable.')
parser.add_argument('--minutes', dest='minutes', type=float, default=5,
help='How long to record for in minutes (default=5)')
parser.add_argument(
'--threshold', dest='threshold', type=float, default=2.0, help='The minimum query duration in seconds (default=2). Use 0 to disable.'
)
parser.add_argument('--minutes', dest='minutes', type=float, default=5, help='How long to record for in minutes (default=5)')

def handle(self, **options):
profile_sql.delay(
threshold=options['threshold'], minutes=options['minutes']
)
profile_sql.delay(threshold=options['threshold'], minutes=options['minutes'])
if options['threshold'] > 0:
print(f"SQL profiling initiated with a threshold of {options['threshold']} second(s) and a"
f" duration of {options['minutes']} minute(s), any queries that meet criteria can"
f" be found in /var/log/tower/profile/.")
print(
f"SQL profiling initiated with a threshold of {options['threshold']} second(s) and a"
f" duration of {options['minutes']} minute(s), any queries that meet criteria can"
f" be found in /var/log/tower/profile/."
)
else:
print("SQL profiling disabled.")

@@ -16,16 +16,11 @@ class Command(BaseCommand):
Register this instance with the database for HA tracking.
"""

help = (
'Add instance to the database. '
'Specify `--hostname` to use this command.'
)
help = 'Add instance to the database. ' 'Specify `--hostname` to use this command.'

def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str,
help='Hostname used during provisioning')
parser.add_argument('--is-isolated', dest='is_isolated', action='store_true',
help='Specify whether the instance is isolated')
parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')
parser.add_argument('--is-isolated', dest='is_isolated', action='store_true', help='Specify whether the instance is isolated')

def _register_hostname(self, hostname):
if not hostname:

@@ -10,13 +10,8 @@ from django.db.models.signals import post_save
from awx.conf import settings_registry
from awx.conf.models import Setting
from awx.conf.signals import on_post_save_setting
from awx.main.models import (
UnifiedJob, Credential, NotificationTemplate, Job, JobTemplate, WorkflowJob,
WorkflowJobTemplate, OAuth2Application
)
from awx.main.utils.encryption import (
encrypt_field, decrypt_field, encrypt_value, decrypt_value, get_encryption_key
)
from awx.main.models import UnifiedJob, Credential, NotificationTemplate, Job, JobTemplate, WorkflowJob, WorkflowJobTemplate, OAuth2Application
from awx.main.utils.encryption import encrypt_field, decrypt_field, encrypt_value, decrypt_value, get_encryption_key


class Command(BaseCommand):
@@ -41,8 +36,7 @@ class Command(BaseCommand):
for nt in NotificationTemplate.objects.iterator():
CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2]) for x in NotificationTemplate.NOTIFICATION_TYPES])
notification_class = CLASS_FOR_NOTIFICATION_TYPE[nt.notification_type]
for field in filter(lambda x: notification_class.init_parameters[x]['type'] == "password",
notification_class.init_parameters):
for field in filter(lambda x: notification_class.init_parameters[x]['type'] == "password", notification_class.init_parameters):
nt.notification_configuration[field] = decrypt_field(nt, 'notification_configuration', subfield=field, secret_key=self.old_key)
nt.notification_configuration[field] = encrypt_field(nt, 'notification_configuration', subfield=field, secret_key=self.new_key)
nt.save()
@@ -51,26 +45,14 @@ class Command(BaseCommand):
for credential in Credential.objects.iterator():
for field_name in credential.credential_type.secret_fields:
if field_name in credential.inputs:
credential.inputs[field_name] = decrypt_field(
credential,
field_name,
secret_key=self.old_key
)
credential.inputs[field_name] = encrypt_field(
credential,
field_name,
secret_key=self.new_key
)
credential.inputs[field_name] = decrypt_field(credential, field_name, secret_key=self.old_key)
credential.inputs[field_name] = encrypt_field(credential, field_name, secret_key=self.new_key)
credential.save()

def _unified_jobs(self):
for uj in UnifiedJob.objects.iterator():
if uj.start_args:
uj.start_args = decrypt_field(
uj,
'start_args',
secret_key=self.old_key
)
uj.start_args = decrypt_field(uj, 'start_args', secret_key=self.old_key)
uj.start_args = encrypt_field(uj, 'start_args', secret_key=self.new_key)
uj.save()

@@ -97,15 +79,8 @@ class Command(BaseCommand):
if jt.survey_spec.get('spec', []):
for field in jt.survey_spec['spec']:
if field.get('type') == 'password' and field.get('default', ''):
raw = decrypt_value(
get_encryption_key('value', None, secret_key=self.old_key),
field['default']
)
field['default'] = encrypt_value(
raw,
pk=None,
secret_key=self.new_key
)
raw = decrypt_value(get_encryption_key('value', None, secret_key=self.old_key), field['default'])
field['default'] = encrypt_value(raw, pk=None, secret_key=self.new_key)
changed = True
if changed:
jt.save(update_fields=["survey_spec"])
@@ -118,10 +93,7 @@ class Command(BaseCommand):
extra_vars = json.loads(job.extra_vars)
if not extra_vars.get(key):
continue
raw = decrypt_value(
get_encryption_key('value', None, secret_key=self.old_key),
extra_vars[key]
)
raw = decrypt_value(get_encryption_key('value', None, secret_key=self.old_key), extra_vars[key])
extra_vars[key] = encrypt_value(raw, pk=None, secret_key=self.new_key)
job.extra_vars = json.dumps(extra_vars)
changed = True

@@ -112,19 +112,22 @@ class RegisterQueue:


class Command(BaseCommand):

def add_arguments(self, parser):
parser.add_argument('--queuename', dest='queuename', type=str,
help='Queue to create/update')
parser.add_argument('--hostnames', dest='hostnames', type=str,
help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)')
parser.add_argument('--controller', dest='controller', type=str,
default='', help='The controlling group (makes this an isolated group)')
parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0,
help='The percentage of active instances that will be assigned to this group'),
parser.add_argument('--instance_minimum', dest='instance_minimum', type=int, default=0,
help='The minimum number of instance that will be retained for this group from available instances')

parser.add_argument('--queuename', dest='queuename', type=str, help='Queue to create/update')
parser.add_argument(
'--hostnames', dest='hostnames', type=str, help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)'
)
parser.add_argument('--controller', dest='controller', type=str, default='', help='The controlling group (makes this an isolated group)')
parser.add_argument(
'--instance_percent', dest='instance_percent', type=int, default=0, help='The percentage of active instances that will be assigned to this group'
),
parser.add_argument(
'--instance_minimum',
dest='instance_minimum',
type=int,
default=0,
help='The minimum number of instance that will be retained for this group from available instances',
)

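Note the comma black preserves as '),' after the '--instance_percent' call: it turns that statement into a discarded one-element tuple. argparse still registers the option, so nothing breaks, but the comma is almost certainly unintentional. A minimal sketch of what actually executes:

    import argparse

    parser = argparse.ArgumentParser()
    # the trailing comma builds a tuple around the Action object and throws it away
    result = (parser.add_argument('--instance_percent', type=int, default=0),)
    assert isinstance(result, tuple) and len(result) == 1
    assert parser.parse_args(['--instance_percent', '7']).instance_percent == 7
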
def handle(self, **options):
queuename = options.get('queuename')

@@ -10,13 +10,12 @@ class Command(BaseCommand):

help = (
"Remove an instance (specified by --hostname) from the specified queue (instance group).\n"
"In order remove the queue, use the `unregister_queue` command.")
"In order remove the queue, use the `unregister_queue` command."
)

def add_arguments(self, parser):
parser.add_argument('--queuename', dest='queuename', type=str,
help='Queue to be removed from')
parser.add_argument('--hostname', dest='hostname', type=str,
help='Host to remove from queue')
parser.add_argument('--queuename', dest='queuename', type=str, help='Queue to be removed from')
parser.add_argument('--hostname', dest='hostname', type=str, help='Host to remove from queue')

def handle(self, *arg, **options):
if not options.get('queuename'):

@@ -10,17 +10,10 @@ from django.utils import timezone
from django.core.management.base import BaseCommand

from awx.main.models.events import emit_event_detail
from awx.main.models import (
UnifiedJob,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob
)
from awx.main.models import UnifiedJob, Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob


class JobStatusLifeCycle():
class JobStatusLifeCycle:
def emit_job_status(self, job, status):
# {"status": "successful", "project_id": 13, "unified_job_id": 659, "group_name": "jobs"}
job.websocket_emit_status(status)
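The JobStatusLifeCycle change is purely cosmetic: empty parentheses after a class name add nothing, and black strips them. A minimal sketch showing the two forms are identical:

    class WithParens():  # old spelling
        pass

    class WithoutParens:  # black's spelling
        pass

    assert WithParens.__bases__ == WithoutParens.__bases__ == (object,)
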
@@ -65,10 +58,10 @@ class ReplayJobEvents(JobStatusLifeCycle):
time.sleep(seconds)

def replay_elapsed(self):
return (self.now() - self.replay_start)
return self.now() - self.replay_start

def recording_elapsed(self, created):
return (created - self.recording_start)
return created - self.recording_start

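Likewise, parentheses that merely wrap a whole return value are redundant, and the reformatted methods drop them; both forms compile to the same bytecode. A minimal sketch:

    import dis

    def with_parens(a, b):
        return (a - b)

    def without_parens(a, b):
        return a - b

    # identical instruction streams, so identical behavior
    assert [i.opname for i in dis.get_instructions(with_parens)] == [i.opname for i in dis.get_instructions(without_parens)]
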
def replay_offset(self, created, speed):
return self.replay_elapsed().total_seconds() - (self.recording_elapsed(created).total_seconds() * (1.0 / speed))
@@ -156,12 +149,12 @@ class ReplayJobEvents(JobStatusLifeCycle):
self.sleep(replay_diff)
else:
stats['events_late']['total'] += 1
stats['events_late']['lateness_total'] += (replay_diff * -1)
stats['events_late']['lateness_total'] += replay_diff * -1
if verbosity >= 3:
print("\treplay: too far behind to sleep {} seconds".format(replay_diff))
else:
replay_offset = self.replay_offset(je_current.created, speed)
stats['events_late']['lateness_total'] += (replay_offset * -1)
stats['events_late']['lateness_total'] += replay_offset * -1
stats['events_late']['total'] += 1
if verbosity >= 3:
print("\treplay: behind by {} seconds".format(replay_offset))
@@ -211,18 +204,23 @@ class Command(BaseCommand):
return range(start, stop, step)

def add_arguments(self, parser):
parser.add_argument('--job_id', dest='job_id', type=int, metavar='j',
help='Id of the job to replay (job or adhoc)')
parser.add_argument('--speed', dest='speed', type=float, metavar='s',
help='Speedup factor.')
parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k',
default='0:-1:1', help='Range of events to skip')
parser.add_argument('--random-seed', dest='random_seed', type=int, metavar='r',
default=0, help='Random number generator seed to use when determining job_event index to emit final job status')
parser.add_argument('--final-status-delay', dest='final_status_delay', type=float, metavar='f',
default=0, help='Delay between event and final status emit')
parser.add_argument('--debug', dest='debug', type=bool, metavar='d',
default=False, help='Enable step mode to control emission of job events one at a time.')
parser.add_argument('--job_id', dest='job_id', type=int, metavar='j', help='Id of the job to replay (job or adhoc)')
parser.add_argument('--speed', dest='speed', type=float, metavar='s', help='Speedup factor.')
parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k', default='0:-1:1', help='Range of events to skip')
parser.add_argument(
'--random-seed',
dest='random_seed',
type=int,
metavar='r',
default=0,
help='Random number generator seed to use when determining job_event index to emit final job status',
)
parser.add_argument(
'--final-status-delay', dest='final_status_delay', type=float, metavar='f', default=0, help='Delay between event and final status emit'
)
parser.add_argument(
'--debug', dest='debug', type=bool, metavar='d', default=False, help='Enable step mode to control emission of job events one at a time.'
)

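One pre-existing quirk worth flagging in the block above (black does not change it): '--debug' uses type=bool, and argparse applies bool() to the raw option string, where any non-empty string is truthy. Passing '--debug False' therefore still enables debug; action='store_true' is the usual alternative. A minimal sketch of the pitfall:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', type=bool, default=False)
    assert parser.parse_args(['--debug', 'False']).debug is True  # surprising but correct
    assert parser.parse_args([]).debug is False
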
def handle(self, *args, **options):
job_id = options.get('job_id')
@@ -234,5 +232,4 @@ class Command(BaseCommand):
skip = self._parse_slice_range(options.get('skip_range'))

replayer = ReplayJobEvents()
replayer.run(job_id, speed=speed, verbosity=verbosity, skip_range=skip, random_seed=random_seed,
final_status_delay=final_status_delay, debug=debug)
replayer.run(job_id, speed=speed, verbosity=verbosity, skip_range=skip, random_seed=random_seed, final_status_delay=final_status_delay, debug=debug)

@@ -16,7 +16,8 @@ def revoke_tokens(token_list):

class Command(BaseCommand):
"""Command that revokes OAuth2 access tokens."""
help='Revokes OAuth2 access tokens. Use --all to revoke access and refresh tokens.'

help = 'Revokes OAuth2 access tokens. Use --all to revoke access and refresh tokens.'

def add_arguments(self, parser):
parser.add_argument('--user', dest='user', type=str, help='revoke OAuth2 tokens for a specific username')

@@ -9,16 +9,16 @@ from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker


class Command(BaseCommand):
'''
"""
Save Job Callback receiver
Runs as a management command and receives job save events. It then hands
them off to worker processors (see Worker) which writes them to the database
'''
"""

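As elsewhere in this commit, the docstring delimiters flip from ''' to """: black normalizes string quotes to double quotes, and PEP 257 recommends triple double quotes for docstrings anyway. The interpreter treats both identically; a minimal sketch:

    def f():
        '''single-quoted docstring'''

    def g():
        """double-quoted docstring"""

    assert f.__doc__ == 'single-quoted docstring'
    assert g.__doc__ == 'double-quoted docstring'
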
help = 'Launch the job callback receiver'

def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running dispatchers')

def handle(self, *arg, **options):
if options.get('status'):

@@ -24,13 +24,14 @@ class Command(BaseCommand):
help = 'Launch the task dispatcher'

def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true',
help='print the UUIDs of any tasked managed by this dispatcher')
parser.add_argument('--reload', dest='reload', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes;'
'running jobs will run to completion first'))
parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasked managed by this dispatcher')
parser.add_argument(
'--reload',
dest='reload',
action='store_true',
help=('cause the dispatcher to recycle all of its worker processes;' 'running jobs will run to completion first'),
)

def handle(self, *arg, **options):
if options.get('status'):
@@ -57,12 +58,7 @@ class Command(BaseCommand):

try:
queues = ['tower_broadcast_all', get_local_queuename()]
consumer = AWXConsumerPG(
'dispatcher',
TaskWorker(),
queues,
AutoscalePool(min_workers=4)
)
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')

@@ -27,8 +27,7 @@ class Command(BaseCommand):
help = 'Launch the websocket broadcaster'

def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running broadcast websocket')
parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running broadcast websocket')

@classmethod
def display_len(cls, s):
@@ -58,7 +57,7 @@ class Command(BaseCommand):
def get_connection_status(cls, me, hostnames, data):
host_stats = [('hostname', 'state', 'start time', 'duration (sec)')]
for h in hostnames:
connection_color = '91' # red
connection_color = '91'  # red
h_safe = safe_name(h)
prefix = f'awx_{h_safe}'
connection_state = data.get(f'{prefix}_connection', 'N/A')
@@ -67,7 +66,7 @@ class Command(BaseCommand):
if connection_state is None:
connection_state = 'unknown'
if connection_state == 'connected':
connection_color = '92' # green
connection_color = '92'  # green
connection_started = data.get(f'{prefix}_connection_start', 'Error')
if connection_started != 'Error':
connection_started = datetime.datetime.fromtimestamp(connection_started)

@@ -9,19 +9,14 @@ from awx.main.models import UnifiedJob


class Command(BaseCommand):
'''
"""
Emits some simple statistics suitable for external monitoring
'''
"""

help = 'Display some simple statistics'

def add_arguments(self, parser):
parser.add_argument('--stat',
action='store',
dest='stat',
type=str,
default="jobs_running",
help='Select which stat to get information for')
parser.add_argument('--stat', action='store', dest='stat', type=str, default="jobs_running", help='Select which stat to get information for')

def job_stats(self, state):
return UnifiedJob.objects.filter(status=state).count()

@@ -13,11 +13,11 @@ from awx.main.isolated.manager import set_pythonpath

class Command(BaseCommand):
"""Tests SSH connectivity between a controller and target isolated node"""

help = 'Tests SSH connectivity between a controller and target isolated node'

def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str,
help='Hostname of an isolated node')
parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname of an isolated node')

def handle(self, *args, **options):
hostname = options.get('hostname')
@@ -27,10 +27,7 @@ class Command(BaseCommand):
try:
path = tempfile.mkdtemp(prefix='awx_isolated_ssh', dir=settings.AWX_PROOT_BASE_PATH)
ssh_key = None
if all([
getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)
]):
if all([getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True, getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)]):
ssh_key = settings.AWX_ISOLATED_PRIVATE_KEY
env = dict(os.environ.items())
env['ANSIBLE_HOST_KEY_CHECKING'] = str(settings.AWX_ISOLATED_HOST_KEY_CHECKING)

@@ -14,11 +14,11 @@ class Command(BaseCommand):
help = (
"Remove specified queue (instance group) from database.\n"
"Instances inside of queue will continue to exist, \n"
"but jobs will no longer be processed by queue.")
"but jobs will no longer be processed by queue."
)

def add_arguments(self, parser):
parser.add_argument('--queuename', dest='queuename', type=str,
help='Queue to create/update')
parser.add_argument('--queuename', dest='queuename', type=str, help='Queue to create/update')

@transaction.atomic
def handle(self, *args, **options):

@@ -23,10 +23,8 @@ class UpdatePassword(object):

class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--username', dest='username', action='store', type=str, default=None,
help='username to change the password for')
parser.add_argument('--password', dest='password', action='store', type=str, default=None,
help='new password for user')
parser.add_argument('--username', dest='username', action='store', type=str, default=None, help='username to change the password for')
parser.add_argument('--password', dest='password', action='store', type=str, default=None, help='new password for user')

def handle(self, *args, **options):
if not options['username']: