Compare commits

300 Commits
1.0.8 ... 2.1.0

Author SHA1 Message Date
softwarefactory-project-zuul[bot]
a28f8c43cb Merge pull request #2569 from shanemcd/devel
Bump version to 2.1.0

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-11-01 15:53:59 +00:00
Shane McDonald
fbec6a60bf Bump version to 2.1.0 2018-11-01 11:37:28 -04:00
softwarefactory-project-zuul[bot]
6a4f3c8758 Merge pull request #2566 from shanemcd/devel
Bump version to 2.0.2

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-11-01 14:12:00 +00:00
Shane McDonald
04625f566b Bump version to 2.0.2 2018-11-01 09:50:02 -04:00
softwarefactory-project-zuul[bot]
895a567ed1 Merge pull request #2558 from ansible/org-view-ui
updated fixtures to use proper organization linking

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-31 18:13:40 +00:00
Daniel Sami
e152b30fc1 linting fixes 2018-10-31 13:55:50 -04:00
softwarefactory-project-zuul[bot]
a1fe60da78 Merge pull request #2174 from matburt/jobtemplate_sharding
Implement Job Template Sharding/Splitting/Slicing

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-31 15:49:51 +00:00
softwarefactory-project-zuul[bot]
92f0893764 Merge pull request #2555 from ryanpetrello/old-access-cleanup
remove an old, unused migration file

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-31 15:33:37 +00:00
Ryan Petrello
479448ff09 remove an old, unused migration file 2018-10-31 11:15:09 -04:00
Jake McDermott
62a36e3704 update job slice count help text 2018-10-31 11:04:14 -04:00
kialam
2d286c5f68 Redirect to WF Details page after prompt for slice JT. 2018-10-31 11:04:14 -04:00
AlanCoding
f435e577b2 Adjust slicing tooltip text 2018-10-31 11:04:14 -04:00
AlanCoding
236b332a8b bump migration number 2018-10-31 11:04:13 -04:00
kialam
a7028df828 Fix one failing unit test. 2018-10-31 11:04:13 -04:00
kialam
a59017ceef Fix eslint errors. 2018-10-31 11:04:13 -04:00
AlanCoding
affacb8ab5 revert change of including slice wfj ids in recent_jobs list 2018-10-31 11:04:13 -04:00
AlanCoding
37f9024940 fix slicing task_impact and script gen bugs 2018-10-31 11:04:13 -04:00
kialam
f72fca5fcf Fix unit tests after "slice" rename.
- Update Jobs List unit tests with new schema and test cases.
- Update Job Details unit tests with new schema and test cases.
- Test both for expected behavior when handling a regular non-sliced job.
2018-10-31 11:04:13 -04:00
kialam
21aeda0f45 Add unit tests for Job Details
- Test `getSplitJobDetails` method.
- Fix failing tests.
- Rename unit tests.
2018-10-31 11:04:12 -04:00
kialam
65a0e5ed45 Fix failing tests. 2018-10-31 11:04:12 -04:00
kialam
571e34bf79 Begin adding unit tests for split jobs
- Test split job tag method within Jobs List Controller.
2018-10-31 11:04:12 -04:00
AlanCoding
6dc58af8e1 slicing rename test cleanup and bugfix 2018-10-31 11:04:12 -04:00
AlanCoding
bbd3edba47 rename to slicing and schema tweaks 2018-10-31 11:04:12 -04:00
Matthew Jones
46d6dce738 Mass rename of shard -> split 2018-10-31 11:04:12 -04:00
AlanCoding
475a701f78 Allow use of credential password prompting with split JTs
also
*update test to work with new JT callback call pattern
*fix spelling in template
2018-10-31 11:04:11 -04:00
AlanCoding
dccd7f2e9d do not split JT callback jobs 2018-10-31 11:04:11 -04:00
kialam
47711bc007 add package-lock.json to gitignore 2018-10-31 11:04:11 -04:00
kialam
04eec61387 Redirect to WF details page when a Split Job is launched 2018-10-31 11:04:11 -04:00
kialam
ef4a2cbebb Add Job Splitting feature to UI 2018-10-31 11:04:11 -04:00
AlanCoding
c8d76dbe78 update migration after rebase 2018-10-31 11:04:11 -04:00
Matthew Jones
61a706274b Adding architecture doc for job sharding 2018-10-31 11:04:10 -04:00
AlanCoding
20226f8984 Polish split jobs API info & add fields to UI
*clarify help text and squash migrations
*adds new internal_limit field to Job model for faster reference
*if field is non-blank, populate shard params in summary_fields
*add summary information to UI job/wfj details, JT selector
2018-10-31 11:04:10 -04:00
AlanCoding
7ff04dafd3 Fix IntegrityError deleting job splitting JT
misc:
*show sharded jobs in recent_jobs
*test updates
2018-10-31 11:04:10 -04:00
AlanCoding
f9bdb1da15 Job splitting access logic and more feature development
*allow sharding with prompts and schedules
*modify create_unified_job contract to pass class & parent_field name
*make parent field name instance method & set sharded UJT field
*access methods made compatible with job sharding
*move shard job special logic from task manager to workflows
*save sharded job prompts to workflow job exclusively
*allow using sharded jobs in workflows
2018-10-31 11:04:10 -04:00
AlanCoding
dab678c5cc Implement splitting logic in inventory & job task code 2018-10-31 11:04:10 -04:00
Matthew Jones
44ffcf86de Properly take prompted inventory into account
This also will rename shard jobs to add an index to the job name
2018-10-31 11:04:10 -04:00
Matthew Jones
8a18984be1 Spawn concrete workflow jobs from a job template launch 2018-10-31 11:04:09 -04:00
Matthew Jones
0b1776098b Implement model/view/launch paradigm for shard/split job templates 2018-10-31 11:04:09 -04:00
Daniel Sami
5da13683ce updated fixtures to use proper organization linking 2018-10-31 11:03:44 -04:00
Ryan Petrello
89c2038ea3 Merge pull request #2557 from ryanpetrello/fix-busted-docker-compose
pin docker-compose to a working version
2018-10-31 11:01:50 -04:00
Ryan Petrello
a1012b365c pin docker-compose to a working version 2018-10-31 10:47:45 -04:00
softwarefactory-project-zuul[bot]
484ef1b6a8 Merge pull request #2548 from wenottingham/mark-it-zero
Re-add markdown, which is used for rendering API help.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-30 16:47:46 +00:00
Bill Nottingham
7fc269b65a Re-add markdown, which is used for rendering API help. 2018-10-30 12:10:00 -04:00
softwarefactory-project-zuul[bot]
ddda6b3d21 Merge pull request #2542 from jakemcdermott/fix-smoke
fix smoke test

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-28 00:32:26 +00:00
Jake McDermott
80adbf9c03 fix smoke test 2018-10-27 02:09:18 -04:00
softwarefactory-project-zuul[bot]
264f35d259 Merge pull request #2239 from AlanCoding/multi_pass_cancel
Do 2-pass cancel for workflow jobs

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-26 14:43:14 +00:00
AlanCoding
e513f8fe31 do 2-pass cancel for workflow jobs 2018-10-26 10:28:30 -04:00
softwarefactory-project-zuul[bot]
b9f35e5b50 Merge pull request #2536 from ryanpetrello/deprecated_auth_token_middleware
remove DeprecatedAuthTokenMiddleware

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-26 14:27:29 +00:00
softwarefactory-project-zuul[bot]
002f463ffd Merge pull request #2274 from AlanCoding/callback_debugging
Reduce default verbosity of dev-specific callback logging

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-26 14:23:24 +00:00
Ryan Petrello
28512e042b remove DeprecatedAuthTokenMiddleware 2018-10-26 10:11:53 -04:00
AlanCoding
482395eb6a reduce default verbosity of devel-specific callback logging 2018-10-26 10:03:46 -04:00
softwarefactory-project-zuul[bot]
e1d44d6d14 Merge pull request #2529 from AlanCoding/split_personality
Apply docker-compose fix to cluster target too

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-26 13:52:32 +00:00
AlanCoding
19030b9d5f apply docker-compose fix to cluster target too 2018-10-26 09:36:11 -04:00
softwarefactory-project-zuul[bot]
3e4738d948 Merge pull request #2430 from dmt/devel
Fix installer volume definitions

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 22:12:25 +00:00
softwarefactory-project-zuul[bot]
94083f55c7 Merge pull request #2510 from Intermax-Cloudsourcing/awx-web-dockerfile-tmp
Empties /tmp in awx_web Dockerfile

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 21:59:42 +00:00
Daniel Temme
6ecd18b2e2 make volume concatenation work
The second list gets interpreted as part of the else block, effectively
dropping it. Separating both list definitions with braces seems to work.

# Conflicts:
#	installer/roles/local_docker/tasks/standalone.yml
2018-10-25 17:54:10 -04:00
Daniel Temme
4e9c705997 Partial revert for "Bugfix for ca_trust_dir"
# Conflicts:
#	installer/roles/local_docker/tasks/standalone.yml

# Conflicts:
#	installer/roles/local_docker/tasks/standalone.yml
2018-10-25 17:53:12 -04:00
softwarefactory-project-zuul[bot]
1803a76a4d Merge pull request #2485 from wwt/fix-tiller-namespace
Pass tiller namespace down to helm task

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 21:40:08 +00:00
softwarefactory-project-zuul[bot]
86ca1875f1 Merge pull request #2486 from wwt/remove-rabbit-cluster-name
Remove .cluster.local from service name for rabbitmq

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 21:37:54 +00:00
wilmardo
bf5c259d92 Empties /tmp in web Dockerfile 2018-10-25 17:12:26 -04:00
softwarefactory-project-zuul[bot]
9bca937fad Merge pull request #2287 from AlanCoding/files_are_in_the_computer
automatically delete project files in entire cluster

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 20:53:20 +00:00
AlanCoding
526ca3ae42 automatically delete project files in entire cluster 2018-10-25 16:36:58 -04:00
softwarefactory-project-zuul[bot]
695c7ade86 Merge pull request #2523 from ivuk/fix-variable-names
Update variable names for local Docker daemon installation

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 17:30:44 +00:00
Igor Vuk
c133b35162 Update variable names for local Docker daemon installation
Signed-off-by: Igor Vuk <parcijala@gmail.com>
2018-10-25 12:47:25 -04:00
softwarefactory-project-zuul[bot]
afb3c0e31e Merge pull request #2498 from AlanCoding/relaunch_fix
Fix bug with relaunching with changed JT

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-25 16:40:16 +00:00
AlanCoding
8965f1934e fix bug with relaunching with changed JT 2018-10-25 11:45:47 -04:00
softwarefactory-project-zuul[bot]
556040fb8b Merge pull request #2497 from AlanCoding/fix_two_creds2
Fix server error using 2 creds of same type

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-24 19:14:32 +00:00
AlanCoding
8b3e49cb24 fix server error using 2 creds of same type 2018-10-24 14:59:01 -04:00
softwarefactory-project-zuul[bot]
331e272be0 Merge pull request #2504 from kialam/scheduler-template-fix-datepicker
Restore Date Picker field in Scheduler template.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-24 18:46:16 +00:00
kialam
9e7808f2c9 Restore Date Picker field in Scheduler template. 2018-10-24 14:29:46 -04:00
softwarefactory-project-zuul[bot]
0bb2de24f3 Merge pull request #2513 from AlanCoding/filter_things
Allow UI to filter by type again

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-24 17:57:49 +00:00
AlanCoding
72ce7b194f allow UI to filter by type again 2018-10-24 13:35:04 -04:00
softwarefactory-project-zuul[bot]
85958c51a8 Merge pull request #2517 from dmsimard/preload_data
Let users disable create_preload_data if it isn't necessary

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-24 15:53:57 +00:00
David Moreau Simard
1dd44df471 Let users disable create_preload_data if it isn't necessary
The demo things might not be desirable in a production environment.
2018-10-24 11:36:33 -04:00
softwarefactory-project-zuul[bot]
b132f855a0 Merge pull request #2508 from shanemcd/devel
Fix permissions when running dev container as non-root user

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-24 14:56:33 +00:00
Shane McDonald
a361b5da6e Fix permissions when running dev container as non-root user
I wanted to pass `--user` to `docker-compose up`, but that option doesn't exist. To get around this, I had to record the uid on the host (CURRENT_UID), interpolate the variable in tools/docker-compose.yml, and detect that inside the container. I then piggy-backed on the /etc/passwd hack we use for scenarios with unpredictable uids.
2018-10-24 10:30:04 -04:00
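The /etc/passwd hack referenced above is a common pattern for containers that run as an arbitrary, unpredictable UID: if the current UID has no passwd entry, append one at startup so user lookups keep working. A minimal sketch of the pattern, assuming a writable /etc/passwd and illustrative username/home values (this is not the AWX entrypoint itself):

```python
# Hedged sketch of the /etc/passwd pattern for containers running under an
# arbitrary UID (illustrative; not AWX's actual entrypoint script).
import os
import pwd

def ensure_passwd_entry(username="awx", home="/var/lib/awx", shell="/bin/bash"):
    uid = os.getuid()
    try:
        pwd.getpwuid(uid)        # entry already exists, nothing to do
        return
    except KeyError:
        pass
    gid = os.getgid()
    # Requires /etc/passwd to have been made writable for this append to succeed.
    with open("/etc/passwd", "a") as passwd_file:
        passwd_file.write("{}:x:{}:{}::{}:{}\n".format(username, uid, gid, home, shell))

if __name__ == "__main__":
    ensure_passwd_entry()
```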
Shane McDonald
7df63830ed Remove reference to file that doesn't exist anymore 2018-10-24 10:30:03 -04:00
softwarefactory-project-zuul[bot]
b3cf93256b Merge pull request #2520 from ryanpetrello/fix-flake8
fix flake8

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-24 14:28:32 +00:00
Ryan Petrello
c695ba2e10 fix flake8 2018-10-24 10:11:53 -04:00
softwarefactory-project-zuul[bot]
c44160933d Merge pull request #2514 from farcaller/patch-1
Fix a typo

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-23 17:24:39 +00:00
Vladimir Pouzanov
9ae3e1c40f Fix a typo 2018-10-23 18:01:00 +01:00
softwarefactory-project-zuul[bot]
c7c5a9d2f7 Merge pull request #2512 from wenottingham/some-less-assembly-required
Remove some obsolete requirements.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-23 15:24:55 +00:00
Bill Nottingham
a56a231869 Remove some obsolete requirements.
Bump cryptography to latest.
2018-10-23 10:37:36 -04:00
softwarefactory-project-zuul[bot]
5087ca7f62 Merge pull request #2494 from ryanpetrello/drop-old-celery-tables
drop old celery/djcelery tables we no longer need

Reviewed-by: Ryan Petrello
             https://github.com/ryanpetrello
2018-10-22 19:26:46 +00:00
Ryan Petrello
3b7336c570 drop old celery/djcelery tables we no longer need 2018-10-22 09:20:10 -04:00
softwarefactory-project-zuul[bot]
9b413afb2e Merge pull request #2492 from ansible/workflow-visualizer-search
fix to search for exact search matches

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-19 14:49:58 +00:00
Daniel Sami
eec05eac3c Merge branch 'devel' into workflow-visualizer-search 2018-10-19 10:33:51 -04:00
softwarefactory-project-zuul[bot]
41671b5868 Merge pull request #2493 from ryanpetrello/celery-inventory-delete-retry
implement simple retries for wayward inventory deletes

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-19 14:30:04 +00:00
Daniel Sami
0bbf1d7014 Merge branch 'devel' into workflow-visualizer-search 2018-10-19 10:26:12 -04:00
Daniel Sami
c5ce62e11d added functionality to validate that search is complete before continuing 2018-10-19 10:23:50 -04:00
Ryan Petrello
9316c9ea3e implement simple retries for wayward inventory deletes 2018-10-19 10:10:52 -04:00
Daniel Sami
427b8bdabb lint fix 2018-10-19 10:01:50 -04:00
softwarefactory-project-zuul[bot]
cce470a5f8 Merge pull request #2487 from ryanpetrello/improved-amqp-cancel
fix a bug that breaks job cancellation on single node jobs

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-19 13:53:57 +00:00
Daniel Sami
92baea2ee6 fix to search for exact search matches 2018-10-19 09:43:13 -04:00
Ryan Petrello
3be9113d6b fix a bug that breaks job cancel on single node jobs
1.  Install awx w/ a single node.
2.  Start a long-running job.
3.  Forcibly kill the `awx-manage run_dispatcher` process (e.g.,
    SIGKILL) and do not start it again.
4.  The job remains in running - without a second cluster to discover
    the job, it is never reaped.
5.  This PR allows you to cancel the job from the UI+API.
2018-10-19 09:10:33 -04:00
softwarefactory-project-zuul[bot]
785c6fe846 Merge pull request #2475 from ryanpetrello/more-celery-hardening
make the dispatcher more fault-tolerant to prolonged database outages

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-19 00:22:56 +00:00
Ryan Petrello
0d29bbfdc6 make the dispatcher more fault-tolerant to prolonged database outages 2018-10-18 20:00:07 -04:00
softwarefactory-project-zuul[bot]
ce8117ef19 Merge pull request #2356 from ansible/updateProjectList
Update project list

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 21:43:13 +00:00
John Mitchell
bb921af146 fix badge updating and xss e2e test for projects list updates 2018-10-18 17:23:52 -04:00
John Mitchell
5e0ecc7f43 fix projects list search selectors 2018-10-18 17:23:52 -04:00
John Mitchell
73dc58e810 update project badge selector 2018-10-18 17:23:52 -04:00
John Mitchell
89344c2eee update project list selectors 2018-10-18 17:23:52 -04:00
John Mitchell
d61cd519d7 fix panel title and badge for new projects list 2018-10-18 17:23:51 -04:00
John Mitchell
8057438c67 add back in old-style project list json and relevant factories 2018-10-18 17:23:51 -04:00
John Mitchell
110671532d fix lint error with projects list route 2018-10-18 17:23:51 -04:00
Haokun-Chen
92ac3054c6 refactor projects list, clean up dependencies and old list generators and factory methods 2018-10-18 17:23:49 -04:00
softwarefactory-project-zuul[bot]
c95c2a4580 Merge pull request #2455 from wenottingham/into-the-deep-azure-yonder
Update Azure deps in Ansible venv to match Ansible 2.7 requirements.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 21:05:32 +00:00
Bill Nottingham
2c01476eca Don't explicitly remove certifi. 2018-10-18 16:41:33 -04:00
Bill Nottingham
8adbc8a026 Update Azure requirements to match Ansible 2.7 requirements.
Add comments for Ansible requirements to note where they're used.

Remove our custom docutils fork, as the fix was merged upstream.
2018-10-18 16:41:33 -04:00
softwarefactory-project-zuul[bot]
98b8d7fb69 Merge pull request #2483 from ansible/workflow-visualizer-search
added search for visualizer nodes

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 19:48:10 +00:00
Daniel Sami
477551325d Merge branch 'workflow-visualizer-search' of https://github.com/ansible/awx into workflow-visualizer-search 2018-10-18 15:07:08 -04:00
Daniel Sami
fdedc472d1 lint fix 2018-10-18 15:06:37 -04:00
James Evans
88819ada6b Remove .cluster.local from service name for rabbitmq
FQDNs are not required for service discovery, and having the FQDN in the
name prevents the discovery from working in clusters not named
cluster.local.
2018-10-18 14:00:05 -05:00
Daniel Sami
f3ee93b67f Merge branch 'devel' into workflow-visualizer-search 2018-10-18 14:48:36 -04:00
Daniel Sami
b4549e5581 added search for visualizer nodes 2018-10-18 14:38:10 -04:00
softwarefactory-project-zuul[bot]
3afed6adb7 Merge pull request #2383 from ansible/updateSettingsNav
add additional settings sub navigation

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 17:55:26 +00:00
John Mitchell
1bc78fd429 fix selectors for settings sub pane 2018-10-18 13:37:45 -04:00
John Mitchell
6ce1b50751 update config e2e tests to fix syntax and linting issues 2018-10-18 13:37:45 -04:00
John Hill
6c87b88e2c Updating configuration/settings page 2018-10-18 13:37:45 -04:00
John Hill
10f21b8817 Updating e2e tests to match new settings nav 2018-10-18 13:37:45 -04:00
John Mitchell
0d1b25131d fix scope location of json fields of settings auth form 2018-10-18 13:37:45 -04:00
John Mitchell
d2118b8d25 fix activity stream settings links 2018-10-18 13:37:44 -04:00
John Mitchell
b852caaaa3 update configuration controllers to fix syntax warnings 2018-10-18 13:37:44 -04:00
John Mitchell
b0dd10b538 sidenav sub pane feedback
make height the same as side nav items
no tooltip for collapsed settings
2018-10-18 13:37:44 -04:00
John Mitchell
8f4aa5511b update side nav settings pane show hide hover logic 2018-10-18 13:37:44 -04:00
John Mitchell
4b26ac06ba fix open/close on settings nav item hover 2018-10-18 13:37:44 -04:00
John Mitchell
4dc6452dea updating suit name and variabilize colors for sub nav pane 2018-10-18 13:37:43 -04:00
John Mitchell
5a17acb131 working commit 2018-10-18 13:37:43 -04:00
Haokun-Chen
6cfd9dbfe4 refactor configuration (settings)
sub-nav added
2018-10-18 13:37:41 -04:00
softwarefactory-project-zuul[bot]
110c5a8e84 Merge pull request #2431 from Numblesix/devel
Added some Doc for ca_trust_dir

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 16:15:46 +00:00
Yanis Guenane
b185c1e0a2 Merge branch 'devel' into devel 2018-10-18 18:00:16 +02:00
softwarefactory-project-zuul[bot]
f1a4a62304 Merge pull request #2432 from Numblesix/ldap-doc
Added some Doc for FREEipa

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 14:53:14 +00:00
Yanis Guenane
9f3e3bad54 Merge branch 'devel' into ldap-doc 2018-10-18 16:38:31 +02:00
James Evans
4198227116 Pass tiller namespace down to helm task 2018-10-18 09:34:13 -05:00
softwarefactory-project-zuul[bot]
56525bc34f Merge pull request #2476 from AlanCoding/rm_changelog
Remove changelog

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 14:15:21 +00:00
Yanis Guenane
3f2068e74e Merge branch 'devel' into ldap-doc 2018-10-18 15:53:34 +02:00
AlanCoding
6117f8297e remove changelog 2018-10-18 09:52:08 -04:00
softwarefactory-project-zuul[bot]
8953d06905 Merge pull request #2456 from wenottingham/insert-obvious-unchained-joke-here
Update to latest django subminor to pick up assorted fixes.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-18 13:50:55 +00:00
Numblesix
bf39a2a747 Added some Doc for FREEipa 2018-10-18 09:31:24 -04:00
Bill Nottingham
f27ec8cd89 Update Django version in version check. 2018-10-18 09:23:59 -04:00
Bill Nottingham
aec3244f52 Update to latest django subminor to pick up assorted fixes. 2018-10-18 09:23:57 -04:00
softwarefactory-project-zuul[bot]
07aaad53aa Merge pull request #2037 from ikke-t/ikke-t-selinux-fix
fixes selinux permissions for awx data.

Reviewed-by: Shane McDonald <me@shanemcd.com>
             https://github.com/shanemcd
2018-10-17 19:05:21 +00:00
Ilkka Tengvall
42a0192425 Merge branch 'devel' into ikke-t-selinux-fix 2018-10-17 21:44:48 +03:00
softwarefactory-project-zuul[bot]
ac08033d3e Merge pull request #2472 from ryanpetrello/callback-receiver-log
use the proper logger for the callback receiver

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-17 15:35:32 +00:00
Numblesix
6d0fed6d9a Added some Doc for ca_trust_dir 2018-10-17 11:32:26 -04:00
Ryan Petrello
53ae05094e use the proper logger for the callback receiver 2018-10-17 10:56:29 -04:00
softwarefactory-project-zuul[bot]
78c4d5005e Merge pull request #2461 from ryanpetrello/upgrade-celery-and-kombu
upgrade to the latest kombu + celery

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-17 13:49:37 +00:00
Ryan Petrello
79002ae563 upgrade to the latest kombu + celery 2018-10-16 16:14:58 -04:00
softwarefactory-project-zuul[bot]
6c868c7552 Merge pull request #2449 from ryanpetrello/noisy-check-migrations
silence the noisy error that's printed w/ `awx-manage check_migrations`

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-16 18:04:05 +00:00
Ryan Petrello
6e4f3efc4b silence the noisy error that's printed w/ awx-manage check_migrations 2018-10-16 13:48:03 -04:00
softwarefactory-project-zuul[bot]
ce9da4edb7 Merge pull request #2454 from ryanpetrello/more-celery-cleanup
allow users to specify BROKER_URL with passwords that contain : and @

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-16 16:30:33 +00:00
Ryan Petrello
6ff1fe8548 allow users to specify BROKER_URL with passwords that contain : and @ 2018-10-16 11:56:57 -04:00
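The underlying problem is that `:` and `@` delimit userinfo and host in an AMQP URL, so a password containing them makes naive string assembly ambiguous; percent-encoding the credentials sidesteps this. A small generic illustration of the idea (not AWX's settings code):

```python
# Hedged illustration: percent-encode AMQP credentials so ':' and '@' in the
# password cannot be confused with the userinfo/host delimiters of the URL.
from urllib.parse import quote

def broker_url(user, password, host, port=5672, vhost="awx"):
    return "amqp://{}:{}@{}:{}/{}".format(
        quote(user, safe=""), quote(password, safe=""), host, port, quote(vhost, safe="")
    )

print(broker_url("guest", "p@ss:word", "localhost"))
# amqp://guest:p%40ss%3Aword@localhost:5672/awx
```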
softwarefactory-project-zuul[bot]
140b85688f Merge pull request #2451 from matburt/fixup_test_userlaunch
Force openshift user behavior for uids over 2500

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-16 15:47:20 +00:00
Matthew Jones
0477581dea Fix up flake8 2018-10-16 11:30:07 -04:00
Matthew Jones
d5c557c639 Proper parameterization for scm tests 2018-10-16 11:30:06 -04:00
Matthew Jones
8e60cb1270 Purge an unneeded ansible 2.4 version check 2018-10-16 11:30:05 -04:00
chris meyers
906eb98d8e fixes dispatcher test that inadvertently accesses db
* Logger inadvertently triggered by dispatcher tests that do not need DB
access. Mock settings to sidestep DB access.
2018-10-16 11:30:04 -04:00
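The technique described is to hand the code under test a mocked settings object so the logger's lookup never reaches the database. A generic pytest-style illustration (the setting name and helper below are only examples, not the actual AWX test):

```python
# Hedged sketch: supply a mocked settings object so a logging code path that
# would normally read a DB-backed setting never touches the database.
from unittest import mock

def log_level_from_settings(settings):
    # stand-in for code whose logger reads a database-backed setting
    return settings.LOG_AGGREGATOR_LEVEL

def test_logging_does_not_hit_db():
    fake_settings = mock.Mock(LOG_AGGREGATOR_LEVEL="WARNING")
    assert log_level_from_settings(fake_settings) == "WARNING"
```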
Matthew Jones
119b9475ea Force openshift user behavior for uids over 2500 2018-10-16 11:30:04 -04:00
softwarefactory-project-zuul[bot]
12c8994faf Merge pull request #2450 from ryanpetrello/iso-deprovision-fix
don't call rabbitmqctl forget_cluster_node for isolated instances

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-16 14:10:35 +00:00
Ryan Petrello
f3e73bbed8 don't call rabbitmqctl forget_cluster_node for isolated instances 2018-10-16 09:47:53 -04:00
softwarefactory-project-zuul[bot]
e2a1b7902c Merge pull request #2439 from jmferrer/change_openshift_vars_path
Change openshift vars path.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-16 12:17:51 +00:00
jmferrer
d65a3fa037 Restore per-deployment requirements. 2018-10-16 09:59:11 +02:00
jmferrer
f6600887bc Merge branch 'devel' of https://github.com/jmferrer/awx into change_openshift_vars_path 2018-10-16 09:55:05 +02:00
Michael Abashian
96c18fa311 Merge pull request #2141 from mabashian/remove-system-tracking
Removes system tracking code from the UI
2018-10-15 18:55:34 -06:00
mabashian
9645e5bcd3 Remove accidental portalMode inclusion that resulted from merge conflict 2018-10-15 18:50:58 -04:00
mabashian
0a09d98fe8 Removes system tracking code from the UI. Moves import of shared out to app.js 2018-10-15 18:50:58 -04:00
Ryan Petrello
1224e2c889 Merge pull request #2440 from ryanpetrello/fix-list-based-survey-choices
remove over-eager survey choices validation
2018-10-15 17:05:06 -04:00
softwarefactory-project-zuul[bot]
c8e6fa3bb3 Merge pull request #2438 from ryanpetrello/dispatcher-quit-race
don't attempt to recover special QUIT messages in the worker pool recovery code

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-15 20:40:20 +00:00
Ryan Petrello
00cae104b3 remove over-eager survey choices validation
it looks like choices can also be a list and _maybe_ comma delimited;
clearly there's a lot of history here; let's verify and test what's _really_ supported and _then_ add any necessary validation
2018-10-15 16:40:17 -04:00
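Since `choices` may apparently arrive either as a list or as a delimited string, any future validation would first need to normalize both shapes. A hedged sketch of such a normalizer (purely illustrative; as the commit says, what is really supported still has to be verified):

```python
# Hedged sketch: normalize survey "choices" given either as a list of strings
# or as a single newline/comma delimited string (illustrative only).
def normalize_choices(choices):
    if isinstance(choices, (list, tuple)):
        items = [str(c) for c in choices]
    elif isinstance(choices, str):
        # split on newlines first, fall back to commas
        sep = "\n" if "\n" in choices else ","
        items = choices.split(sep)
    else:
        raise ValueError("choices must be a list or a delimited string")
    return [item.strip() for item in items if item.strip()]

assert normalize_choices("red, green, blue") == ["red", "green", "blue"]
assert normalize_choices(["red", "green"]) == ["red", "green"]
```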
jmferrer
f27a34cd1c Change openshift vars path. 2018-10-15 18:27:49 +02:00
Ryan Petrello
720a634702 don't attempt to recover special QUIT messages in the worker pool
when `--reload` is sent to the dispatcher, it sends a special QUIT
message to each worker in the pool so that it will exit gracefully at
the next opportunity

when a worker process exits unexpectedly, the dispatcher attempts to
recover its queued messages and sends them to another worker in the
pool; in this scenario, we should _never_ re-enqueue these special
QUIT messages (because the process doesn't need to quit, it's already
gone)

To reproduce this race condition:

1.  Launch an adhoc that does `sleep 60`
2.  Run `awx-manage run_dispatcher --reload` to enqueue a `QUIT` message
    into the worker's queue
3.  Find the pid of the worker running the `sleep 60` and `SIGKILL` it.
4.  Observe that dispatcher attempts to requeue the `QUIT` message and
    logs a confusing error.
2018-10-15 12:17:52 -04:00
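The rule the fix describes is simple: when recovering a dead worker's queued messages, re-enqueue real work but drop the special QUIT sentinel, because the process it was meant to stop is already gone. A minimal, generic sketch of that rule (not the dispatcher's actual pool code):

```python
# Hedged sketch of the recovery rule described above: re-enqueue a dead
# worker's pending messages, but never the special QUIT sentinel.
QUIT = "QUIT"  # sentinel asking a worker to exit at the next opportunity

def recover_messages(dead_worker_queue, healthy_workers):
    for message in dead_worker_queue:
        if message == QUIT:
            # the worker is already gone; resending QUIT would only confuse
            # (and potentially stop) a healthy worker
            continue
        target = min(healthy_workers, key=lambda w: len(w["queue"]))
        target["queue"].append(message)

workers = [{"name": "w1", "queue": []}, {"name": "w2", "queue": ["job-42"]}]
recover_messages(["job-7", QUIT], workers)
assert workers[0]["queue"] == ["job-7"]
```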
Chris Meyers
c722e50595 Merge pull request #2425 from chrismeyersfsu/fix-ldap_group_type
fix issue with ldap queries containing unicode
2018-10-15 10:49:43 -05:00
softwarefactory-project-zuul[bot]
1cecfd9771 Merge pull request #2437 from ryanpetrello/fix-3039
fix a typo on the JT add page that breaks the custom venv field

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-15 15:19:08 +00:00
Ryan Petrello
011c8ae822 fix a typo on the JT add page that breaks the custom venv field 2018-10-15 11:04:31 -04:00
softwarefactory-project-zuul[bot]
73f54b2237 Merge pull request #2373 from marshmalien/always_nodes_ui
Display WF always nodes in conjunction with success and failure

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-12 22:43:32 +00:00
softwarefactory-project-zuul[bot]
0a964b2bf6 Merge pull request #2266 from ansible/celery-tastes-bad
replace the celery-based task queue with a kombu-based implementation

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-12 18:40:54 +00:00
softwarefactory-project-zuul[bot]
fa18b94725 Merge pull request #2429 from ryanpetrello/more-shippable-cleanup
more shippable -> zuul cleanup

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-12 16:10:53 +00:00
softwarefactory-project-zuul[bot]
5ab6255c67 Merge pull request #2424 from ryanpetrello/oauth-toolkit-upgrade
update to the latest stable 1.1 django-oauth-toolkit

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-12 15:54:36 +00:00
Ryan Petrello
ac80bc874a more shippable -> zuul cleanup 2018-10-12 11:50:29 -04:00
chris meyers
2e98446394 fix issue with ldap queries containing unicode 2018-10-12 10:33:01 -04:00
softwarefactory-project-zuul[bot]
c4afbbc2ca Merge pull request #2420 from dmt/devel
fix indentation for register variable

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-12 14:27:19 +00:00
Ryan Petrello
517043e209 update to the latest stable 1.1 django-oauth-toolkit
see: https://github.com/jazzband/django-oauth-toolkit/pull/629
2018-10-12 10:21:57 -04:00
Marliana Lara
e7c52bc5e7 Merge pull request #8 from dsesami/always_nodes_ui_tests
Always nodes ui tests
2018-10-12 10:20:10 -04:00
Daniel Sami
c25d208465 added browser close at end, waits for spinners 2018-10-12 10:18:49 -04:00
Daniel Temme
921231fe3d fix indentation for register variable 2018-10-12 11:13:42 +02:00
softwarefactory-project-zuul[bot]
6721ea54e9 Merge pull request #1956 from droopy4096/devel
allow nginx config extension

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 22:38:36 +00:00
softwarefactory-project-zuul[bot]
99a42e91fe Merge pull request #2235 from ChrisRo89/devel
Extracted more variables which are related to rabbitmq/postgresql from tasks to defaults

Reviewed-by: Shane McDonald <me@shanemcd.com>
             https://github.com/shanemcd
2018-10-11 21:54:38 +00:00
softwarefactory-project-zuul[bot]
9a580ba644 Merge pull request #2416 from fantashley/fix-openshift-auth
Fix openshift auth broken by undefined vars

Reviewed-by: Ashley Nelson <fantashley@gmail.com>
             https://github.com/fantashley
2018-10-11 21:51:20 +00:00
softwarefactory-project-zuul[bot]
74fcdabc22 Merge pull request #2156 from Decstasy/patch-1
Bugfix for ca_trust_dir

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 21:31:45 +00:00
Ashley Nelson
9bec7cf3b0 Fix openshift auth broken by undefined vars
Signed-off-by: Ashley Nelson <fantashley@gmail.com>
2018-10-11 16:25:55 -05:00
softwarefactory-project-zuul[bot]
f9e402658b Merge pull request #2414 from ryanpetrello/readme-updates
some minor README updates

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:53:19 +00:00
softwarefactory-project-zuul[bot]
9570981c7f Merge pull request #2351 from jakemcdermott/enhancement-2515
add views for organization permissions and roles

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:41:08 +00:00
softwarefactory-project-zuul[bot]
f79debac42 Merge pull request #2164 from atgreen/devel
Fix token based openshift logins during installation - fixes #489

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:36:39 +00:00
softwarefactory-project-zuul[bot]
a9f3eeef05 Merge pull request #2131 from walkafwalka/docker_install_awx_hostnames
Add inventory vars to set docker install hostnames

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:29:32 +00:00
softwarefactory-project-zuul[bot]
6eb1feffcd Merge pull request #2117 from walkafwalka/allow_awx_login_autocomplete
Allow autocomplete on the AWX login page

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:23:37 +00:00
softwarefactory-project-zuul[bot]
6f55cde6d3 Merge pull request #2091 from stoned/force_boolean_eval
force boolean evaluation

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:17:48 +00:00
softwarefactory-project-zuul[bot]
48511b6c33 Merge pull request #2281 from AlanCoding/consistent2
Always allow resource creation via global list

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:12:39 +00:00
softwarefactory-project-zuul[bot]
771daefcfd Merge pull request #2411 from fantashley/statefulset_servicename
Add serviceName to Kubernetes StatefulSet spec

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 20:06:50 +00:00
Ryan Petrello
1167361128 some minor README updates 2018-10-11 16:05:29 -04:00
softwarefactory-project-zuul[bot]
3a4cc0d464 Merge pull request #1911 from AlanCoding/spec_it_out
Much more comprehensive validation of survey specs

Reviewed-by: Shane McDonald <me@shanemcd.com>
             https://github.com/shanemcd
2018-10-11 20:00:43 +00:00
Jake McDermott
78901ab48e add organization permissions view 2018-10-11 14:21:44 -04:00
Jake McDermott
938bf1b531 add organizations tab to team permissions screen 2018-10-11 14:21:29 -04:00
Marliana Lara
27da141889 Address review comments 2018-10-11 13:13:01 -04:00
Ashley Nelson
2bf2412759 Add serviceName to Kubernetes StatefulSet spec
Signed-off-by: Ashley Nelson <fantashley@gmail.com>
2018-10-11 11:49:08 -05:00
Daniel Sami
1e3c229460 lint fixes 2018-10-11 12:24:55 -04:00
AlanCoding
cfa93b52b7 Always allow resource creation via global list 2018-10-11 12:21:45 -04:00
Christian.Rohr
96ad2b2b28 Extracted more variables which are related to rabbitmq 2018-10-11 12:16:01 -04:00
softwarefactory-project-zuul[bot]
f53a1fedf6 Merge pull request #2410 from ryanpetrello/update-azure-inv-script
update Azure inventory script to latest from Ansible

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 16:15:15 +00:00
Daniel Sami
8fceaf8810 Tests for UI workflow always nodes 2018-10-11 12:14:00 -04:00
Anthony Green
c39370dbd0 Fix token based openshift logins 2018-10-11 12:10:41 -04:00
AlanCoding
bdc7efb274 humble beginnings of survey question type validation 2018-10-11 12:10:40 -04:00
Ryan Petrello
10c76e2337 update Azure inventory script to latest from Ansible
rebased version of https://github.com/ansible/awx/pull/2234
2018-10-11 11:47:55 -04:00
Ryan Petrello
ff1e8cc356 replace celery task decorators with a kombu-based publisher
this commit implements the bulk of `awx-manage run_dispatcher`, a new
command that binds to RabbitMQ via kombu and balances messages across
a pool of workers that are similar to celeryd workers in spirit.
Specifically, this includes:

- a new decorator, `awx.main.dispatch.task`, which can be used to
  decorate functions or classes so that they can be designated as
  "Tasks"
- support for fanout/broadcast tasks (at this point in time, only
  `conf.Setting` memcached flushes use this functionality)
- support for job reaping
- support for success/failure hooks for job runs (i.e.,
  `handle_work_success` and `handle_work_error`)
- support for an auto-scaling worker pool that scales processes up and down
  on demand
- minimal support for RPC, such as status checks and pool recycle/reload
2018-10-11 10:53:30 -04:00
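The `awx.main.dispatch.task` decorator mentioned above registers a callable by name and publishes invocations over RabbitMQ via kombu, where pool workers pick them up and run them. A rough, hedged sketch of the publish side of such a decorator (simplified and generic; the queue/exchange names here are made up, and the real implementation also covers classes, fanout tasks, and success/failure hooks):

```python
# Hedged sketch of a kombu-based task publisher in the spirit of the commit
# above (simplified; not AWX's actual awx.main.dispatch implementation).
import json
import uuid

from kombu import Connection, Exchange, Queue

DISPATCH_EXCHANGE = Exchange("dispatch_sketch", type="direct")
DISPATCH_QUEUE = Queue("dispatch_sketch", exchange=DISPATCH_EXCHANGE,
                       routing_key="dispatch_sketch")
TASK_REGISTRY = {}  # name -> callable, consumed by the worker side (not shown)


def task(fn):
    """Register fn by dotted name and give it an apply_async() publisher."""
    name = "{}.{}".format(fn.__module__, fn.__name__)
    TASK_REGISTRY[name] = fn

    def apply_async(args=None, broker_url="amqp://guest:guest@localhost:5672//"):
        body = {"uuid": str(uuid.uuid4()), "task": name, "args": args or []}
        with Connection(broker_url) as conn:
            producer = conn.Producer()
            producer.publish(json.dumps(body),
                             exchange=DISPATCH_EXCHANGE,
                             routing_key="dispatch_sketch",
                             declare=[DISPATCH_QUEUE])

    fn.apply_async = apply_async
    return fn


@task
def add(a, b):
    return a + b

# add(1, 2) still runs locally; add.apply_async([1, 2]) publishes a message
# that a pool worker would look up in TASK_REGISTRY and execute.
```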
Ryan Petrello
da74f1d01f refactor and test the callback receiver as a base for a task dispatcher 2018-10-11 10:53:26 -04:00
softwarefactory-project-zuul[bot]
8ad46436df Merge pull request #2125 from wenottingham/the-first-purge
Purge inventory script requirements from the AWX virtual environment.

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-11 14:14:36 +00:00
Bill Nottingham
be01bed34b Purge inventory script requirements from the AWX virtual environment.
boto is still used by AWX itself.
2018-10-11 09:45:41 -04:00
softwarefactory-project-zuul[bot]
8a763d6cf8 Merge pull request #2372 from rooftopcellist/update_version
update awx version to 2.0.1

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 16:59:43 +00:00
adamscmRH
1165dcfa07 update awx version to 2.0.1 2018-10-10 12:31:14 -04:00
softwarefactory-project-zuul[bot]
f9928eef70 Merge pull request #2395 from shanemcd/devel
Fix fallout from #2392

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 16:29:45 +00:00
Shane McDonald
ee1d5e43b9 Fix fallout from https://github.com/ansible/awx/pull/2392
There were some upstream changes that I overwrote but shouldn’t have.
2018-10-10 11:41:34 -04:00
softwarefactory-project-zuul[bot]
e94e79d57a Merge pull request #2400 from ryanpetrello/swagger-job
build swagger docs as part of CI

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 14:52:30 +00:00
Ryan Petrello
f87a09c46a build swagger docs as part of CI 2018-10-10 10:27:54 -04:00
softwarefactory-project-zuul[bot]
535e16c6cf Merge pull request #2396 from jakemcdermott/update-npm-install
don't update package lock file by default, update readmes

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 14:03:10 +00:00
Jake McDermott
5ae19fd9c2 update development documentation 2018-10-10 09:23:47 -04:00
Jake McDermott
7d5f6aa49d don't update lock file by default 2018-10-10 09:23:37 -04:00
softwarefactory-project-zuul[bot]
c0fc3a74ee Merge pull request #2393 from ansible/non-root-docker-tests
Run tests in Docker as non-root user

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 13:20:00 +00:00
Yanis Guenane
22c831ff31 Merge branch 'devel' into non-root-docker-tests 2018-10-10 14:22:09 +02:00
softwarefactory-project-zuul[bot]
17dc6bf5a1 Merge pull request #2394 from wenottingham/ocean's-node-8
update node requirements in CONTRIBUTING.md to match INSTALL.md

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 10:07:50 +00:00
Yanis Guenane
e7fb82ffe7 Merge branch 'devel' into ocean's-node-8 2018-10-10 11:47:27 +02:00
softwarefactory-project-zuul[bot]
70ae546dee Merge pull request #2391 from wwitzel3/devel
use latest asgi_amqp version

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-10 09:40:21 +00:00
Yanis Guenane
5d22fc2bd7 Merge branch 'devel' into non-root-docker-tests 2018-10-10 09:44:01 +02:00
Ilkka Tengvall
b4919f9ebd Merge branch 'devel' into ikke-t-selinux-fix 2018-10-10 08:23:46 +03:00
Bill Nottingham
9033b3f2a5 update node requirements in CONTRIBUTING.md to match INSTALL.md 2018-10-09 19:54:05 -04:00
Shane McDonald
de60165a49 Fix broken defaults in awx installer 2018-10-09 19:15:32 -04:00
Daniel Sami
b02677a8d0 Initial commit for UI tests for always nodes 2018-10-09 16:32:24 -04:00
Wayne Witzel III
b8c1724880 use latest asgi_amqp version 2018-10-09 15:34:07 -04:00
softwarefactory-project-zuul[bot]
6baa2a109d Merge pull request #2392 from shanemcd/devel
Port downstream installer changes

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-09 19:18:24 +00:00
Shane McDonald
7a5cfd05a3 Run tests in Docker as non-root user 2018-10-09 15:16:01 -04:00
Shane McDonald
b9279ebd5e Port downstream installer changes 2018-10-09 14:39:39 -04:00
Marliana Lara
1b25dd0127 Fix ui-lint error 2018-10-09 14:21:59 -04:00
softwarefactory-project-zuul[bot]
49396178ca Merge pull request #2363 from AlanCoding/validate_env_vars
Validate ANSIBLE_ injectors on save and increase verbosity

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-09 18:11:47 +00:00
AlanCoding
a4dfd96a8d Validate ANSIBLE_ injectors on save and increase verbosity 2018-10-09 13:46:51 -04:00
Marliana Lara
a2f4e36e47 Show all wf options when node is not a root node
* Edge type of root node is always "always"
* If node is not a root node, show all options: always, success, fail
* Remove edge conflict logic
2018-10-09 11:30:53 -04:00
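The rule spelled out above reduces to: a root node's edge type is always "always", and any non-root node may use any of the three edge types. A tiny sketch of that rule in isolation (the actual change lives in the workflow visualizer UI code):

```python
# Hedged sketch of the edge-type rule described in the commit above
# (the real logic lives in the workflow visualizer UI, not in Python).
def allowed_edge_types(is_root_node):
    if is_root_node:
        return ["always"]                      # root nodes can only be "always"
    return ["always", "success", "failure"]    # non-root nodes get all options

assert allowed_edge_types(True) == ["always"]
assert allowed_edge_types(False) == ["always", "success", "failure"]
```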
adamscmRH
ad566cc651 tests for always_nodes 2018-10-09 11:30:53 -04:00
adamscmRH
4d9523afa4 lift always node mutex restriction 2018-10-09 11:30:49 -04:00
softwarefactory-project-zuul[bot]
40602875e0 Merge pull request #2381 from msurovcak/patch-1
trivial: update teardown command

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-09 15:12:31 +00:00
Martin Surovcak
d0572cf170 trivial: update teardown command 2018-10-08 16:29:07 +02:00
softwarefactory-project-zuul[bot]
1edede213e Merge pull request #2309 from matburt/zuul_job_configuration
Add an initial check and gate job configuration for zuul

Reviewed-by: https://github.com/softwarefactory-project-zuul[bot]
2018-10-08 13:09:24 +00:00
Matthew Jones
e0c7a7bece Mention zuul in contributing 2018-10-05 14:32:47 -04:00
Matthew Jones
640f9474fc Remove shippable configuration 2018-10-05 14:26:03 -04:00
Ryan Petrello
29b90b700e minor docker-compose fix 2018-10-05 13:40:10 -04:00
Matthew Jones
f7c5289195 Clean up CI compose test invocation 2018-10-05 13:40:09 -04:00
Ryan Petrello
ee11341430 more make clean tinkering 2018-10-05 13:40:08 -04:00
Matthew Jones
56263a5fea Force ui cleanup in the test environment
Also allow using the system make
2018-10-05 13:40:07 -04:00
Jake McDermott
89e41f7524 replace phantomjs with headless chrome 2018-10-05 13:40:06 -04:00
Matthew Jones
3a8bacb8ef Add an initial check and gate job configuration for zuul
Updates for running ui tests and linters
2018-10-05 13:39:59 -04:00
Ryan Petrello
f328f8cad4 Merge pull request #2375 from ryanpetrello/fix-busted-notifications
fix busted notification tests
2018-10-05 09:39:35 -05:00
Ryan Petrello
7752446067 fix busted notification tests 2018-10-05 10:18:27 -04:00
Michael Abashian
96bc0f1578 Merge pull request #2348 from mabashian/2316-wf-width
Ensure workflow graph width is 100% of container
2018-10-05 08:00:56 -05:00
mabashian
9f25fdd079 Ensure workflow graph width is 100% of container 2018-10-05 06:42:53 -06:00
Shane McDonald
7249b21214 Merge pull request #2368 from backeby/fix
Fixed typo Ansbile->Ansible
2018-10-04 16:04:43 -05:00
Alan Rominger
2d642b95ae Merge pull request #2369 from AlanCoding/flake8_setup
Fix flake8 errors in setup.py
2018-10-04 12:45:34 -04:00
AlanCoding
b94d5c7f20 fix flake8 errors in setup.py 2018-10-04 12:25:07 -04:00
André Backeby
02c23fc1c6 Fixed typo Ansbile->Ansible 2018-10-04 15:07:52 +02:00
Shane McDonald
b75f8ceca6 Do not default to pulling latest from DockerHub 2018-10-03 17:50:07 -05:00
Shane McDonald
bfc74497b0 Fix error in image_build role
I think I derped up and committed something in an old stash.
2018-10-03 14:44:26 -05:00
Matthew Jones
c8c982428d Merge pull request #2332 from shanemcd/devel
Updates to versioning system.
2018-10-03 14:28:05 -04:00
Matthew Jones
e6dbf71252 Merge pull request #2341 from wwitzel3/views-breakout
Views breakout
2018-10-01 10:14:53 -04:00
Shane McDonald
3701567ad7 Revert "first-parent requires git >= 1.8.4"
This reverts commit 1af0ee2f8c.

# Conflicts:
#	installer/roles/image_build/templates/Dockerfile.j2
2018-09-28 15:48:33 -04:00
Shane McDonald
86140dec08 Revert "Fix sdist builder image"
This reverts commit 97472cb91b.

# Conflicts:
#	installer/roles/image_build/tasks/main.yml
2018-09-28 15:48:33 -04:00
Shane McDonald
50fe0392ed Updates to versioning system.
https://github.com/ansible/awx/issues?q=%22--first-parent%22
2018-09-28 15:48:33 -04:00
Wayne Witzel III
f18c965a8a fix test patches 2018-09-28 15:18:59 -04:00
Wayne Witzel III
f874e55051 split out mixins in views 2018-09-28 12:48:06 -04:00
Wayne Witzel III
1dcd2b1883 make views.py a directory based module 2018-09-28 12:29:12 -04:00
Ryan Petrello
7684579464 Merge pull request #2336 from ryanpetrello/fix-notification-race
send test notifications after the transaction closes to avoid a race
2018-09-28 10:02:02 -04:00
Ryan Petrello
16e89ed081 send test notifications after the transaction closes to avoid a race 2018-09-28 09:43:10 -04:00
Shane McDonald
62e3b9e3b6 Driveby cleanup: use built-in Make variable 2018-09-26 21:27:07 -04:00
Ryan Petrello
dc3f81920e Merge pull request #2302 from AlanCoding/verbose_data
create_preload_data: log no-op operation, remove unnecessary credential
2018-09-26 16:35:18 -04:00
Ryan Petrello
8a66213dbe Merge pull request #2298 from ryanpetrello/fix-oauth2-deprecated-token-header
properly support deprecated `Authorization: Token xyz`
2018-09-24 15:15:13 -04:00
Ryan Petrello
23d4122574 properly support deprecated Authorization: Token xyz 2018-09-24 14:50:33 -04:00
AlanCoding
5900af726b log no-op operation and changed status 2018-09-21 15:23:02 -04:00
Shane McDonald
9fc4c03e5b Merge pull request #2197 from Spredzy/minor_fixes_contributing.md
CONTRIBUTING.md: Fixing ToC indentation and wrong links 2018-09-20 17:51:00 -04:00
2018-09-20 17:51:00 -04:00
Dmytro Makovey
f8d2a32756 merge and resolve conflict 2018-09-18 11:35:35 -07:00
Shane McDonald
0bb1b0ed45 Merge pull request #2272 from ansible/delete-shrinkwrap
delete old npm-shrinkwrap lock file
2018-09-18 14:01:16 -04:00
John Mitchell
3b11219fff delete old npm-shrinkwrap lock file 2018-09-18 13:42:02 -04:00
Shane McDonald
1b4c3f56fa Merge pull request #2113 from kialam/upgrade-node-lts
Upgrade Node and NPM to LTS
2018-09-18 12:46:30 -04:00
Shane McDonald
6c5334c7d3 Update docs for new Node and NPM version requirements 2018-09-18 12:37:41 -04:00
Shane McDonald
1371e394de Update Node version in dev container image 2018-09-18 12:37:20 -04:00
Shane McDonald
ec67feef2f Bump npm version in package.json
This is what’s served out of the 8.x LTS yum repos.
2018-09-18 12:18:21 -04:00
Shane McDonald
89e656b2a4 Update Node version in sdist builder 2018-09-18 12:17:52 -04:00
Ryan Petrello
5910b8c562 Merge pull request #2265 from shanemcd/devel
Merge remote-tracking branch 'downstream/release_3.3.0' into devel
2018-09-18 08:37:02 -04:00
Yanis Guenane
aa717a2728 CONTRIBUTING.md: Fixing ToC indentation and wrong links
The Table of Contents entry for 'Running the environment' was wrongly indented.

Also, some links pointed to anchors that did not exist. The commit fixes that.

Signed-off-by: Yanis Guenane <yguenane@redhat.com>
2018-08-28 10:53:17 +02:00
kialam
42f01b7f05 Use latest version of nvd3 instead
- Replace forked version in favor of latest version from NOVUS.
2018-08-13 14:49:25 -04:00
kialam
6cf1fb3c10 Update node and npm to LTS version 2018-08-13 14:46:00 -04:00
Dennis U
a294a6f06e Bugfix for ca_trust_dir
Changed syntax, as ca_trust_dir was not correctly mounted in the awx_web container, and added a command to update the CA trust inside the awx_web container after creation.
2018-08-09 14:07:29 +02:00
walkafwalka
d2ab7bd54d Add inventory vars to set docker install hostnames
Signed-off-by: walkafwalka <41709139+walkafwalka@users.noreply.github.com>
2018-08-04 01:49:07 -07:00
walkafwalka
e02e8994ad Allow autocomplete on the AWX login page
Signed-off-by: walkafwalka <41709139+walkafwalka@users.noreply.github.com>
2018-08-01 00:21:38 +00:00
Stoned Elipot
ada2d65547 force boolean evaluation 2018-07-25 19:10:31 +02:00
Ilkka Tengvall
0443bd3099 fixes selinux permissions for awx data.
fixes issue #2036 and  #1896
2018-07-02 09:22:36 +03:00
Dmytro Makovey
adaa164a19 allow nginx config extension 2018-06-05 08:16:08 -07:00
299 changed files with 21226 additions and 18400 deletions

.gitignore

@@ -1,3 +1,4 @@
# Tags
.tags
.tags1
@@ -52,6 +53,7 @@ __pycache__
**/node_modules/**
/tmp
**/npm-debug.log*
**/package-lock.json
# UI build flag files
awx/ui/.deps_built
@@ -112,7 +114,6 @@ local/
*.mo
requirements/vendor
.i18n_built
VERSION
.idea/*
# AWX python libs populated by requirements.txt

CONTRIBUTING.md

@@ -2,11 +2,11 @@
Hi there! We're excited to have you as a contributor.
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project) .
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
## Table of contents
* [Things to know prior to submitting code](#things-to-know-prior-to-contributing-code)
* [Things to know prior to submitting code](#things-to-know-prior-to-submitting-code)
* [Setting up your development environment](#setting-up-your-development-environment)
* [Prerequisites](#prerequisites)
* [Docker](#docker)
@@ -17,14 +17,14 @@ Have questions about this document or anything not covered here? Come chat with
* [Create local settings](#create-local-settings)
* [Build the base image](#build-the-base-image)
* [Build the user interface](#build-the-user-interface)
# [Running the environment](#running-the-environment)
* [Running the environment](#running-the-environment)
* [Start the containers](#start-the-containers)
* [Start from the container shell](#start-from-the-container-shell)
* [Post Build Steps](#post-build-steps)
* [Start a shell](#start-the-shell)
* [Create a superuser](#create-a-superuser)
* [Load the data](#load-the-data)
* [Building API Documentation](#build-documentation)
* [Start a shell](#start-a-shell)
* [Create a superuser](#create-a-superuser)
* [Load the data](#load-the-data)
* [Building API Documentation](#build-api-documentation)
* [Accessing the AWX web interface](#accessing-the-awx-web-interface)
* [Purging containers and images](#purging-containers-and-images)
* [What should I work on?](#what-should-i-work-on)
@@ -86,8 +86,8 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
The AWX UI requires the following:
- Node 6.x LTS version
- NPM 3.x LTS
- Node 8.x LTS
- NPM 6.x LTS
### Build the environment
@@ -145,7 +145,7 @@ Start the development containers by running the following:
(host)$ make docker-compose
```
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django, celery, and the front end build process.
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django and the front end build process.
If you start a second terminal session, you can take a look at the running containers using the `docker ps` command. For example:
@@ -174,7 +174,7 @@ The first time you start the environment, database migrations need to run in ord
```bash
awx_1 | Operations to perform:
awx_1 | Synchronize unmigrated apps: solo, api, staticfiles, debug_toolbar, messages, channels, django_extensions, ui, rest_framework, polymorphic
awx_1 | Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
awx_1 | Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
awx_1 | Synchronizing apps without migrations:
awx_1 | Creating tables...
awx_1 | Running deferred SQL...
@@ -329,7 +329,7 @@ We like to keep our commit history clean, and will require resubmission of pull
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
All submitted PRs will have the linter and unit tests run against them, and the status reported in the PR.
All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
## Reporting Issues

(unidentified file)

@@ -38,7 +38,7 @@ Export all objects
Clean up remnants of the old AWX install:
```docker rm -f $(ps -aq)``` # remove all old awx containers
```docker rm -f $(docker ps -aq)``` # remove all old awx containers
```make clean-ui``` # clean up ui artifacts

INSTALL.md

@@ -62,8 +62,8 @@ Before you can run a deployment, you'll need the following installed in your loc
- [docker-py](https://github.com/docker/docker-py) Python module
- [GNU Make](https://www.gnu.org/software/make/)
- [Git](https://git-scm.com/) Requires Version 1.8.4+
- [Node 6.x LTS version](https://nodejs.org/en/download/)
- [NPM 3.x LTS](https://docs.npmjs.com/)
- [Node 8.x LTS version](https://nodejs.org/en/download/)
- [NPM 6.x LTS](https://docs.npmjs.com/)
### System Requirements
@@ -119,12 +119,12 @@ To complete a deployment to OpenShift, you will obviously need access to an Open
You will also need to have the `oc` command in your PATH. The `install.yml` playbook will call out to `oc` when logging into, and creating objects on the cluster.
The default resource requests per-pod requires:
The default resource requests per-deployment requires:
> Memory: 6GB
> CPU: 3 cores
This can be tuned by overriding the variables found in [/installer/openshift/defaults/main.yml](/installer/openshift/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
This can be tuned by overriding the variables found in [/installer/roles/kubernetes/defaults/main.yml](/installer/roles/kubernetes/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
For more detail on how resource requests are formed see: [https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources](https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources)
@@ -236,7 +236,7 @@ Using /etc/ansible/ansible.cfg as config file
}
Operations to perform:
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
Synchronizing apps without migrations:
Creating tables...
Running deferred SQL...
@@ -426,7 +426,7 @@ If you choose to use the official images then the remote host will be the one to
> As mentioned above, in [Prerequisites](#prerequisites-1), the prerequisites are required on the remote host.
> When deploying to a remote host, the playook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
> When deploying to a remote host, the playbook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
#### Inventory variables
@@ -449,6 +449,10 @@ Before starting the build process, review the [inventory](./installer/inventory)
When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
*ca_trust_dir*
> If you're using a non trusted CA, provide a path where the untrusted Certs are stored on your Host.
#### Docker registry
If you wish to tag and push built images to a Docker registry, set the following variables in the inventory file:
@@ -548,7 +552,7 @@ Using /etc/ansible/ansible.cfg as config file
}
Operations to perform:
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
Synchronizing apps without migrations:
Creating tables...
Running deferred SQL...

Makefile

@@ -12,10 +12,7 @@ MANAGEMENT_COMMAND ?= awx-manage
IMAGE_REPOSITORY_AUTH ?=
IMAGE_REPOSITORY_BASE ?= https://gcr.io
VERSION=$(shell git describe --long --first-parent)
VERSION3=$(shell git describe --long --first-parent | sed 's/\-g.*//')
VERSION3DOT=$(shell git describe --long --first-parent | sed 's/\-g.*//' | sed 's/\-/\./')
RELEASE_VERSION=$(shell git describe --long --first-parent | sed 's@\([0-9.]\{1,\}\).*@\1@')
VERSION := $(shell cat VERSION)
# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -30,8 +27,6 @@ DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
# Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
CURWD = $(shell pwd)
# Determine appropriate shasum command
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
@@ -48,20 +43,9 @@ DATE := $(shell date -u +%Y%m%d%H%M)
NAME ?= awx
GIT_REMOTE_URL = $(shell git config --get remote.origin.url)
ifeq ($(OFFICIAL),yes)
VERSION_TARGET ?= $(RELEASE_VERSION)
else
VERSION_TARGET ?= $(VERSION3DOT)
endif
# TAR build parameters
ifeq ($(OFFICIAL),yes)
SDIST_TAR_NAME=$(NAME)-$(RELEASE_VERSION)
WHEEL_NAME=$(NAME)-$(RELEASE_VERSION)
else
SDIST_TAR_NAME=$(NAME)-$(VERSION3DOT)
WHEEL_NAME=$(NAME)-$(VERSION3DOT)
endif
SDIST_TAR_NAME=$(NAME)-$(VERSION)
WHEEL_NAME=$(NAME)-$(VERSION)
SDIST_COMMAND ?= sdist
WHEEL_COMMAND ?= bdist_wheel
@@ -75,7 +59,7 @@ UI_RELEASE_FLAG_FILE = awx/ui/.release_built
I18N_FLAG_FILE = .i18n_built
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange dbshell runserver celeryd \
develop refresh adduser migrate dbchange dbshell runserver \
receiver test test_unit test_ansible test_coverage coverage_html \
dev_build release_build release_clean sdist \
ui-docker-machine ui-docker ui-release ui-devel \
@@ -112,7 +96,6 @@ clean: clean-ui clean-dist
rm -rf requirements/vendor
rm -rf tmp
rm -rf $(I18N_FLAG_FILE)
rm -f VERSION
mkdir tmp
rm -rf build $(NAME)-$(VERSION) *.egg-info
find . -type f -regex ".*\.py[co]$$" -delete
@@ -182,7 +165,7 @@ requirements_awx: virtualenv_awx
else \
cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
fi
$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
#$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
requirements_awx_dev:
$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt
@@ -250,7 +233,7 @@ server_noattach:
tmux new-session -d -s awx 'exec make uwsgi'
tmux rename-window 'AWX'
tmux select-window -t awx:0
tmux split-window -v 'exec make celeryd'
tmux split-window -v 'exec make dispatcher'
tmux new-window 'exec make daphne'
tmux select-window -t awx:1
tmux rename-window 'WebSockets'
@@ -282,12 +265,6 @@ honcho:
fi; \
honcho start -f tools/docker-compose/Procfile
flower:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//
collectstatic:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
@@ -298,7 +275,7 @@ uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:/bin/sh -c '[ -f /tmp/celery_pid ] && kill -1 `cat /tmp/celery_pid` || true'"
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:awx-manage run_dispatcher --reload"
daphne:
@if [ "$(VENV_BASE)" ]; then \
@@ -319,13 +296,13 @@ runserver:
fi; \
$(PYTHON) manage.py runserver
# Run to start the background celery worker for development.
celeryd:
rm -f /tmp/celery_pid
# Run to start the background task dispatcher for development.
dispatcher:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -n celery@$(COMPOSE_HOST) --pidfile /tmp/celery_pid
$(PYTHON) manage.py run_dispatcher
# Run to start the zeromq callback receiver
receiver:
@@ -372,7 +349,7 @@ check: flake8 pep8 # pyflakes pylint
awx-link:
cp -R /tmp/awx.egg-info /awx_devel/ || true
sed -i "s/placeholder/$(shell git describe --long | sed 's/\./\\./g')/" /awx_devel/awx.egg-info/PKG-INFO
cp /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
cp -f /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
@@ -381,7 +358,7 @@ test:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
py.test -n auto $(TEST_DIRS)
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'
test_combined: test_ansible test
@@ -483,7 +460,7 @@ $(I18N_FLAG_FILE): $(UI_DEPS_FLAG_FILE)
ui-deps: $(UI_DEPS_FLAG_FILE)
$(UI_DEPS_FLAG_FILE):
$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
$(NPM_BIN) --unsafe-perm --prefix awx/ui install --no-save awx/ui
touch $(UI_DEPS_FLAG_FILE)
ui-docker-machine: $(UI_DEPS_FLAG_FILE)
@@ -569,17 +546,27 @@ docker-isolated:
else \
docker exec "tools_isolated_1" bash -c "mkdir -p /root/.ssh && rm -f /root/.ssh/authorized_keys && echo $$(docker exec -t tools_awx_1 cat /root/.ssh/id_rsa.pub) >> /root/.ssh/authorized_keys"; \
fi
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
# Docker Compose Development environment
docker-compose: docker-auth
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
docker-compose-cluster: docker-auth
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
docker-compose-test: docker-auth
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
docker-compose-runtest:
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
docker-compose-build-swagger:
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger
docker-compose-clean:
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm -w /awx_devel --service-ports awx make clean
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
docker-compose-build: awx-devel-build
@@ -605,13 +592,13 @@ docker-refresh: docker-clean docker-compose
# Docker Development Environment with Elastic Stack Connected
docker-compose-elk: docker-auth
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose-cluster-elk: docker-auth
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
minishift-dev:
ansible-playbook -i localhost, -e devtree_directory=$(CURWD) tools/clusterdevel/start_minishift_dev.yml
ansible-playbook -i localhost, -e devtree_directory=$(CURDIR) tools/clusterdevel/start_minishift_dev.yml
clean-elk:
@@ -626,5 +613,4 @@ psql-container:
docker run -it --net tools_default --rm postgres:9.6 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
VERSION:
@echo $(VERSION_TARGET) > $@
@echo "awx: $(VERSION_TARGET)"
@echo "awx: $(VERSION)"


@@ -1,7 +1,6 @@
[![Run Status](https://api.shippable.com/projects/591c82a22f895107009e8b35/badge?branch=devel)](https://app.shippable.com/github/ansible/awx)
[![Gated by Zuul](https://zuul-ci.org/gated.svg)](https://ansible.softwarefactory-project.io/zuul/status)
AWX
===
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
@@ -11,6 +10,8 @@ To learn more about using AWX, and Tower, view the [Tower docs site](http://docs
The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
The AWX logos and branding assets are covered by [our trademark guidelines](https://github.com/ansible/awx-logos/blob/master/TRADEMARKS.md).
Contributing
------------

VERSION Normal file

@@ -0,0 +1 @@
2.1.0


@@ -12,14 +12,6 @@ __version__ = get_distribution('awx').version
__all__ = ['__version__']
# Isolated nodes do not have celery installed
try:
from .celery import app as celery_app # noqa
__all__.append('celery_app')
except ImportError:
pass
# Check for the presence/absence of "devonly" module to determine if running
# from a source code checkout or release package.
try:


@@ -63,12 +63,15 @@ class Metadata(metadata.SimpleMetadata):
verbose_name = smart_text(opts.verbose_name)
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
for model_field in serializer.Meta.model._meta.fields:
if field.field_name == model_field.name:
field_info['filterable'] = True
break
if field.field_name == 'type':
field_info['filterable'] = True
else:
field_info['filterable'] = False
for model_field in serializer.Meta.model._meta.fields:
if field.field_name == model_field.name:
field_info['filterable'] = True
break
else:
field_info['filterable'] = False
# Indicate if a field has a default value.
# FIXME: Still isn't showing all default values?


@@ -3011,7 +3011,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous', 'custom_virtualenv')
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
@@ -3028,6 +3028,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk})
@@ -3123,7 +3124,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch',
'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'allow_simultaneous', 'artifacts', 'scm_revision',
'instance_group', 'diff_mode')
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
@@ -3590,6 +3591,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
'job_template', 'is_sliced_job',
'-execution_node', '-event_processing_finished', '-controller_node',)
def get_related(self, obj):
@@ -3598,6 +3600,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
if obj.job_template_id:
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
@@ -4535,13 +4539,13 @@ class SchedulePreviewSerializer(BaseSerializer):
# - COUNT > 999
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = ".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = ".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = ".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(".*?(RRULE\:)", rrule_value)
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):


@@ -26,6 +26,9 @@ string of `?all=1` to return all hosts, including disabled ones.
Specify a query string of `?towervars=1` to add variables
to the hostvars of each host that specifies its enabled state and database ID.
Specify a query string of `?subset=slice2of5` to produce an inventory that
has a restricted number of hosts according to the rules of job slicing.
To apply multiple query strings, join them with the `&` character, like `?hostvars=1&all=1`.
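As a quick illustration of combining these query strings (the endpoint path and token below are assumptions, not taken from this page), a request for the second of five slices with hostvars included might look like:
```python
# Hypothetical example: fetch slice 2 of 5 from an inventory's script endpoint,
# combining the query strings described above.
import requests

resp = requests.get(
    "https://awx.example.com/api/v2/inventories/1/script/",  # assumed endpoint path
    params={"hostvars": 1, "all": 1, "subset": "slice2of5"},
    headers={"Authorization": "Bearer <token>"},              # assumed auth header
)
resp.raise_for_status()
print(resp.json())  # host/group data restricted to the requested slice
```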
## Host Response


@@ -8,6 +8,7 @@ from awx.api.views import (
JobTemplateDetail,
JobTemplateLaunch,
JobTemplateJobsList,
JobTemplateSliceWorkflowJobsList,
JobTemplateCallback,
JobTemplateSchedulesList,
JobTemplateSurveySpec,
@@ -28,6 +29,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/$', JobTemplateDetail.as_view(), name='job_template_detail'),
url(r'^(?P<pk>[0-9]+)/launch/$', JobTemplateLaunch.as_view(), name='job_template_launch'),
url(r'^(?P<pk>[0-9]+)/jobs/$', JobTemplateJobsList.as_view(), name='job_template_jobs_list'),
url(r'^(?P<pk>[0-9]+)/slice_workflow_jobs/$', JobTemplateSliceWorkflowJobsList.as_view(), name='job_template_slice_workflow_jobs_list'),
url(r'^(?P<pk>[0-9]+)/callback/$', JobTemplateCallback.as_view(), name='job_template_callback'),
url(r'^(?P<pk>[0-9]+)/schedules/$', JobTemplateSchedulesList.as_view(), name='job_template_schedules_list'),
url(r'^(?P<pk>[0-9]+)/survey_spec/$', JobTemplateSurveySpec.as_view(), name='job_template_survey_spec'),


@@ -18,8 +18,8 @@ import six
# Django
from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Count, F
from django.db import IntegrityError, transaction
from django.db.models import Q, Count
from django.db import IntegrityError, transaction, connection
from django.shortcuts import get_object_or_404
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
@@ -92,7 +92,15 @@ from awx.api.serializers import * # noqa
from awx.api.metadata import RoleMetadata, JobTypeMetadata
from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.tasks import run_job_complete
from awx.api.exceptions import ActiveJobConflict
from awx.api.views.mixin import (
ActivityStreamEnforcementMixin,
SystemTrackingEnforcementMixin,
WorkflowsEnforcementMixin,
UnifiedJobDeletionMixin,
InstanceGroupMembershipMixin,
RelatedJobsPreventDeleteMixin,
OrganizationCountsMixin,
)
logger = logging.getLogger('awx.api.views')
@@ -110,157 +118,6 @@ def api_exception_handler(exc, context):
return exception_handler(exc, context)
class ActivityStreamEnforcementMixin(object):
'''
Mixin to check that license supports activity streams.
'''
def check_permissions(self, request):
ret = super(ActivityStreamEnforcementMixin, self).check_permissions(request)
if not feature_enabled('activity_streams'):
raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
return ret
class SystemTrackingEnforcementMixin(object):
'''
Mixin to check that license supports system tracking.
'''
def check_permissions(self, request):
ret = super(SystemTrackingEnforcementMixin, self).check_permissions(request)
if not feature_enabled('system_tracking'):
raise LicenseForbids(_('Your license does not permit use of system tracking.'))
return ret
class WorkflowsEnforcementMixin(object):
'''
Mixin to check that license supports workflows.
'''
def check_permissions(self, request):
ret = super(WorkflowsEnforcementMixin, self).check_permissions(request)
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
raise LicenseForbids(_('Your license does not allow use of workflows.'))
return ret
class UnifiedJobDeletionMixin(object):
'''
Special handling when deleting a running unified job object.
'''
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
try:
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
except self.model.unified_job_node.RelatedObjectDoesNotExist:
pass
# Still allow deletion of new status, because these can be manually created
if obj.status in ACTIVE_STATES and obj.status != 'new':
raise PermissionDenied(detail=_("Cannot delete running job resource."))
elif not obj.event_processing_finished:
# Prohibit deletion if job events are still coming in
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
# less than 1 minute has passed since job finished and events are not in
return Response({"error": _("Job has not finished processing events.")},
status=status.HTTP_400_BAD_REQUEST)
else:
# if it has been > 1 minute, events are probably lost
logger.warning('Allowing deletion of {} through the API without all events '
'processed.'.format(obj.log_format))
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class InstanceGroupMembershipMixin(object):
'''
Manages signaling celery to reload its queue configuration on Instance Group membership changes
'''
def attach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
sub_id, res = self.attach_validate(request)
if status.is_success(response.status_code):
if self.parent_model is Instance:
inst_name = ig_obj.hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name not in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.append(inst_name)
ig_obj.save(update_fields=['policy_instance_list'])
return response
def is_valid_relation(self, parent, sub, created=False):
if sub.is_isolated():
return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
if self.parent_model is InstanceGroup:
ig_obj = self.get_parent_object()
if ig_obj.controller_id is not None:
return {'error': _('Isolated instance group membership may not be managed via the API.')}
return None
def unattach_validate(self, request):
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
if res:
return (sub_id, res)
sub = get_object_or_400(self.model, pk=sub_id)
attach_errors = self.is_valid_relation(None, sub)
if attach_errors:
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
return (sub_id, res)
def unattach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
if status.is_success(response.status_code):
sub_id = request.data.get('id', None)
if self.parent_model is Instance:
inst_name = self.get_parent_object().hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
ig_obj.save(update_fields=['policy_instance_list'])
return response
class RelatedJobsPreventDeleteMixin(object):
def perform_destroy(self, obj):
self.check_related_active_jobs(obj)
return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)
def check_related_active_jobs(self, obj):
active_jobs = obj.get_active_jobs()
if len(active_jobs) > 0:
raise ActiveJobConflict(active_jobs)
time_cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
recent_jobs = obj._get_related_jobs().filter(finished__gte = time_cutoff)
for unified_job in recent_jobs.get_real_instances():
if not unified_job.event_processing_finished:
raise PermissionDenied(_(
'Related job {} is still processing events.'
).format(unified_job.log_format))
class ApiRootView(APIView):
permission_classes = (AllowAny,)
@@ -742,7 +599,7 @@ class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetac
search_fields = ('hostname',)
class ScheduleList(ListAPIView):
class ScheduleList(ListCreateAPIView):
view_name = _("Schedules")
model = Schedule
@@ -887,92 +744,6 @@ class AuthView(APIView):
return Response(data)
class OrganizationCountsMixin(object):
def get_serializer_context(self, *args, **kwargs):
full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
if self.request is None:
return full_context
db_results = {}
org_qs = self.model.accessible_objects(self.request.user, 'read_role')
org_id_list = org_qs.values('id')
if len(org_id_list) == 0:
if self.request.method == 'POST':
full_context['related_field_counts'] = {}
return full_context
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
project_qs = Project.accessible_objects(self.request.user, 'read_role')
# Produce counts of Foreign Key relationships
db_results['inventories'] = inv_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
db_results['teams'] = Team.accessible_objects(
self.request.user, 'read_role').values('organization').annotate(
Count('organization')).order_by('organization')
JT_project_reference = 'project__organization'
JT_inventory_reference = 'inventory__organization'
db_results['job_templates_project'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').exclude(
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
Count(JT_project_reference)).order_by(JT_project_reference)
db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
Count(JT_inventory_reference)).order_by(JT_inventory_reference)
db_results['projects'] = project_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
# Other members and admins of organization are always viewable
db_results['users'] = org_qs.annotate(
users=Count('member_role__members', distinct=True),
admins=Count('admin_role__members', distinct=True)
).values('id', 'users', 'admins')
count_context = {}
for org in org_id_list:
org_id = org['id']
count_context[org_id] = {
'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
'admins': 0, 'projects': 0}
for res, count_qs in db_results.items():
if res == 'job_templates_project':
org_reference = JT_project_reference
elif res == 'job_templates_inventory':
org_reference = JT_inventory_reference
elif res == 'users':
org_reference = 'id'
else:
org_reference = 'organization'
for entry in count_qs:
org_id = entry[org_reference]
if org_id in count_context:
if res == 'users':
count_context[org_id]['admins'] = entry['admins']
count_context[org_id]['users'] = entry['users']
continue
count_context[org_id][res] = entry['%s__count' % org_reference]
# Combine the counts for job templates by project and inventory
for org in org_id_list:
org_id = org['id']
count_context[org_id]['job_templates'] = 0
for related_path in ['job_templates_project', 'job_templates_inventory']:
if related_path in count_context[org_id]:
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
full_context['related_field_counts'] = count_context
return full_context
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
model = Organization
@@ -1520,7 +1291,7 @@ class ProjectUpdateNotificationsList(SubListAPIView):
search_fields = ('subject', 'notification_type', 'body',)
class ProjectUpdateScmInventoryUpdates(SubListCreateAPIView):
class ProjectUpdateScmInventoryUpdates(SubListAPIView):
view_name = _("Project Update SCM Inventory Updates")
model = InventoryUpdate
@@ -1713,10 +1484,11 @@ class OAuth2TokenActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIVi
search_fields = ('changes',)
class UserTeamsList(ListAPIView):
class UserTeamsList(SubListAPIView):
model = User
model = Team
serializer_class = TeamSerializer
parent_model = User
def get_queryset(self):
u = get_object_or_404(User, pk=self.kwargs['pk'])
@@ -1894,7 +1666,7 @@ class CredentialTypeDetail(RetrieveUpdateDestroyAPIView):
return super(CredentialTypeDetail, self).destroy(request, *args, **kwargs)
class CredentialTypeCredentialList(SubListAPIView):
class CredentialTypeCredentialList(SubListCreateAPIView):
model = Credential
parent_model = CredentialType
@@ -2680,6 +2452,16 @@ class InventoryScriptView(RetrieveAPIView):
hostvars = bool(request.query_params.get('hostvars', ''))
towervars = bool(request.query_params.get('towervars', ''))
show_all = bool(request.query_params.get('all', ''))
subset = request.query_params.get('subset', '')
if subset:
if not isinstance(subset, six.string_types):
raise ParseError(_('Inventory subset argument must be a string.'))
if subset.startswith('slice'):
slice_number, slice_count = Inventory.parse_slice_params(subset)
else:
raise ParseError(_('Subset does not use any supported syntax.'))
else:
slice_number, slice_count = 1, 1
if hostname:
hosts_q = dict(name=hostname)
if not show_all:
@@ -2689,7 +2471,8 @@ class InventoryScriptView(RetrieveAPIView):
return Response(obj.get_script_data(
hostvars=hostvars,
towervars=towervars,
show_all=show_all
show_all=show_all,
slice_number=slice_number, slice_count=slice_count
))
@@ -3140,9 +2923,14 @@ class JobTemplateLaunch(RetrieveAPIView):
return Response(data, status=status.HTTP_400_BAD_REQUEST)
else:
data = OrderedDict()
data['job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
if isinstance(new_job, WorkflowJob):
data['workflow_job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
data.update(WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
else:
data['job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
headers = {'Location': new_job.get_absolute_url(request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
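To show what the new branch above means for API consumers (the URL and token below are assumptions): launching a sliced job template now returns a workflow job, so callers should check for `workflow_job` before falling back to `job`.
```python
# Hypothetical launch call illustrating the new response handling above.
import requests

resp = requests.post(
    "https://awx.example.com/api/v2/job_templates/42/launch/",  # assumed endpoint
    headers={"Authorization": "Bearer <token>"},                 # assumed auth header
    json={},
)
resp.raise_for_status()
data = resp.json()
# Sliced templates now return a WorkflowJob; plain templates still return a Job.
launched_id = data.get("workflow_job") or data.get("job")
print(launched_id, data.get("ignored_fields"))
```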
@@ -3188,6 +2976,16 @@ class JobTemplateSurveySpec(GenericAPIView):
obj_permission_type = 'admin'
serializer_class = EmptySerializer
ALLOWED_TYPES = {
'text': six.string_types,
'textarea': six.string_types,
'password': six.string_types,
'multiplechoice': six.string_types,
'multiselect': six.string_types,
'integer': int,
'float': float
}
def get(self, request, *args, **kwargs):
obj = self.get_object()
if not feature_enabled('surveys'):
@@ -3214,7 +3012,8 @@ class JobTemplateSurveySpec(GenericAPIView):
obj.save(update_fields=['survey_spec'])
return Response()
def _validate_spec_data(self, new_spec, old_spec):
@staticmethod
def _validate_spec_data(new_spec, old_spec):
schema_errors = {}
for field, expect_type, type_label in [
('name', six.string_types, 'string'),
@@ -3235,40 +3034,75 @@ class JobTemplateSurveySpec(GenericAPIView):
variable_set = set()
old_spec_dict = JobTemplate.pivot_spec(old_spec)
for idx, survey_item in enumerate(new_spec["spec"]):
context = dict(
idx=six.text_type(idx),
survey_item=survey_item
)
# General element validation
if not isinstance(survey_item, dict):
return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
if "type" not in survey_item:
return Response(dict(error=_("'type' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
if "question_name" not in survey_item:
return Response(dict(error=_("'question_name' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
if "variable" not in survey_item:
return Response(dict(error=_("'variable' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
for field_name in ['type', 'question_name', 'variable', 'required']:
if field_name not in survey_item:
return Response(dict(error=_("'{field_name}' missing from survey question {idx}").format(
field_name=field_name, **context
)), status=status.HTTP_400_BAD_REQUEST)
val = survey_item[field_name]
allow_types = six.string_types
type_label = 'string'
if field_name == 'required':
allow_types = bool
type_label = 'boolean'
if not isinstance(val, allow_types):
return Response(dict(error=_("'{field_name}' in survey question {idx} expected to be {type_label}.").format(
field_name=field_name, type_label=type_label, **context
)))
if survey_item['variable'] in variable_set:
return Response(dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % {
'item': survey_item['variable'], 'survey': str(idx)}), status=status.HTTP_400_BAD_REQUEST)
else:
variable_set.add(survey_item['variable'])
if "required" not in survey_item:
return Response(dict(error=_("'required' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
if survey_item["type"] == "password" and "default" in survey_item:
if not isinstance(survey_item['default'], six.string_types):
# Type-specific validation
# validate question type <-> default type
qtype = survey_item["type"]
if qtype not in JobTemplateSurveySpec.ALLOWED_TYPES:
return Response(dict(error=_(
"'{survey_item[type]}' in survey question {idx} is not one of '{allowed_types}' allowed question types."
).format(
allowed_types=', '.join(JobTemplateSurveySpec.ALLOWED_TYPES.keys()), **context
)))
if 'default' in survey_item:
if not isinstance(survey_item['default'], JobTemplateSurveySpec.ALLOWED_TYPES[qtype]):
type_label = 'string'
if qtype in ['integer', 'float']:
type_label = qtype
return Response(dict(error=_(
"Value {question_default} for '{variable_name}' expected to be a string."
"Default value {survey_item[default]} in survey question {idx} expected to be {type_label}."
).format(
question_default=survey_item["default"], variable_name=survey_item["variable"])
), status=status.HTTP_400_BAD_REQUEST)
type_label=type_label, **context
)), status=status.HTTP_400_BAD_REQUEST)
# additional type-specific properties, the UI provides these even
# if not applicable to the question, TODO: request that they not do this
for key in ['min', 'max']:
if key in survey_item:
if survey_item[key] is not None and (not isinstance(survey_item[key], int)):
return Response(dict(error=_(
"The {min_or_max} limit in survey question {idx} expected to be integer."
).format(min_or_max=key, **context)))
if qtype in ['multiplechoice', 'multiselect'] and 'choices' not in survey_item:
return Response(dict(error=_(
"Survey question {idx} of type {survey_item[type]} must specify choices.".format(**context)
)))
# Process encryption substitution
if ("default" in survey_item and isinstance(survey_item['default'], six.string_types) and
survey_item['default'].startswith('$encrypted$')):
# Submission expects the existence of encrypted DB value to replace given default
if survey_item["type"] != "password":
if qtype != "password":
return Response(dict(error=_(
"$encrypted$ is a reserved keyword for password question defaults, "
"survey question {question_position} is type {question_type}."
).format(
question_position=str(idx), question_type=survey_item["type"])
), status=status.HTTP_400_BAD_REQUEST)
"survey question {idx} is type {survey_item[type]}."
).format(**context)), status=status.HTTP_400_BAD_REQUEST)
old_element = old_spec_dict.get(survey_item['variable'], {})
encryptedish_default_exists = False
if 'default' in old_element:
@@ -3280,10 +3114,10 @@ class JobTemplateSurveySpec(GenericAPIView):
encryptedish_default_exists = True
if not encryptedish_default_exists:
return Response(dict(error=_(
"$encrypted$ is a reserved keyword, may not be used for new default in position {question_position}."
).format(question_position=str(idx))), status=status.HTTP_400_BAD_REQUEST)
"$encrypted$ is a reserved keyword, may not be used for new default in position {idx}."
).format(**context)), status=status.HTTP_400_BAD_REQUEST)
survey_item['default'] = old_element['default']
elif survey_item["type"] == "password" and 'default' in survey_item:
elif qtype == "password" and 'default' in survey_item:
# Submission provides new encrypted default
survey_item['default'] = encrypt_value(survey_item['default'])
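For reference, a hypothetical survey spec (illustrative only; not taken from the diff) of the shape these checks accept: every question carries `type`, `question_name`, `variable` and `required`, defaults match `ALLOWED_TYPES`, and choice-type questions include `choices`.
```python
# Hypothetical survey spec that passes the validation above (names and values
# are illustrative only).
survey_spec = {
    "name": "Deploy options",
    "description": "Prompts shown at launch",
    "spec": [
        {
            "type": "integer",             # must be one of ALLOWED_TYPES
            "question_name": "How many batches?",
            "variable": "batch_count",     # variables must be unique
            "required": True,              # must be a boolean
            "default": 5,                  # must match ALLOWED_TYPES['integer']
            "min": 1,
            "max": 50,
        },
        {
            "type": "multiplechoice",
            "question_name": "Target environment",
            "variable": "env",
            "required": True,
            "choices": ["dev", "staging", "prod"],  # required for choice types
            "default": "dev",
        },
    ],
}
```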
@@ -3354,8 +3188,8 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
def is_valid_relation(self, parent, sub, created=False):
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
return {"error": _("Cannot assign multiple {credential_type} credentials.".format(
credential_type=sub.unique_hash(display=True)))}
return {"error": _("Cannot assign multiple {credential_type} credentials.").format(
credential_type=sub.unique_hash(display=True))}
kind = sub.credential_type.kind
if kind not in ('ssh', 'vault', 'cloud', 'net'):
return {'error': _('Cannot assign a Credential of kind `{}`.').format(kind)}
@@ -3544,10 +3378,11 @@ class JobTemplateCallback(GenericAPIView):
if extra_vars is not None and job_template.ask_variables_on_launch:
extra_vars_redacted, removed = extract_ansible_vars(extra_vars)
kv['extra_vars'] = extra_vars_redacted
kv['_prevent_slicing'] = True # will only run against 1 host, so no point
with transaction.atomic():
job = job_template.create_job(**kv)
# Send a signal to celery that the job should be started.
# Send a signal to signify that the job should be started.
result = job.signal_start(inventory_sources_already_updated=inventory_sources_already_updated)
if not result:
data = dict(msg=_('Error starting job!'))
@@ -3575,6 +3410,15 @@ class JobTemplateJobsList(SubListCreateAPIView):
return methods
class JobTemplateSliceWorkflowJobsList(SubListCreateAPIView):
model = WorkflowJob
serializer_class = WorkflowJobListSerializer
parent_model = JobTemplate
relationship = 'slice_workflow_jobs'
parent_key = 'job_template'
class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
model = InstanceGroup
@@ -3667,10 +3511,6 @@ class WorkflowJobTemplateNodeChildrenBaseList(WorkflowsEnforcementMixin, Enforce
return getattr(parent, self.relationship).all()
def is_valid_relation(self, parent, sub, created=False):
mutex_list = ('success_nodes', 'failure_nodes') if self.relationship == 'always_nodes' else ('always_nodes',)
for relation in mutex_list:
if getattr(parent, relation).all().exists():
return {'Error': _('Cannot associate {0} when {1} have been associated.').format(self.relationship, relation)}
if created:
return None
@@ -3871,6 +3711,8 @@ class WorkflowJobRelaunch(WorkflowsEnforcementMixin, GenericAPIView):
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.is_sliced_job and not obj.job_template_id:
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
new_workflow_job = obj.create_relaunch_workflow_job()
new_workflow_job.signal_start()
@@ -4960,7 +4802,7 @@ class NotificationTemplateTest(GenericAPIView):
if not notification:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
else:
send_notifications.delay([notification.id])
connection.on_commit(lambda: send_notifications.delay([notification.id]))
data = OrderedDict()
data['notification'] = notification.id
data.update(NotificationSerializer(notification, context=self.get_serializer_context()).to_representation(notification))

awx/api/views/mixin.py Normal file

@@ -0,0 +1,275 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
import dateutil
import logging
from django.db.models import (
Count,
F,
)
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from awx.main.constants import ACTIVE_STATES
from awx.main.utils import get_object_or_400
from awx.main.models.ha import (
Instance,
InstanceGroup,
)
from awx.main.models.organization import Team
from awx.main.models.projects import Project
from awx.main.models.inventory import Inventory
from awx.main.models.jobs import JobTemplate
from awx.conf.license import (
feature_enabled,
LicenseForbids,
)
from awx.api.exceptions import ActiveJobConflict
logger = logging.getLogger('awx.api.views.mixin')
class ActivityStreamEnforcementMixin(object):
'''
Mixin to check that license supports activity streams.
'''
def check_permissions(self, request):
ret = super(ActivityStreamEnforcementMixin, self).check_permissions(request)
if not feature_enabled('activity_streams'):
raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
return ret
class SystemTrackingEnforcementMixin(object):
'''
Mixin to check that license supports system tracking.
'''
def check_permissions(self, request):
ret = super(SystemTrackingEnforcementMixin, self).check_permissions(request)
if not feature_enabled('system_tracking'):
raise LicenseForbids(_('Your license does not permit use of system tracking.'))
return ret
class WorkflowsEnforcementMixin(object):
'''
Mixin to check that license supports workflows.
'''
def check_permissions(self, request):
ret = super(WorkflowsEnforcementMixin, self).check_permissions(request)
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
raise LicenseForbids(_('Your license does not allow use of workflows.'))
return ret
class UnifiedJobDeletionMixin(object):
'''
Special handling when deleting a running unified job object.
'''
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
try:
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
except self.model.unified_job_node.RelatedObjectDoesNotExist:
pass
# Still allow deletion of new status, because these can be manually created
if obj.status in ACTIVE_STATES and obj.status != 'new':
raise PermissionDenied(detail=_("Cannot delete running job resource."))
elif not obj.event_processing_finished:
# Prohibit deletion if job events are still coming in
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
# less than 1 minute has passed since job finished and events are not in
return Response({"error": _("Job has not finished processing events.")},
status=status.HTTP_400_BAD_REQUEST)
else:
# if it has been > 1 minute, events are probably lost
logger.warning('Allowing deletion of {} through the API without all events '
'processed.'.format(obj.log_format))
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class InstanceGroupMembershipMixin(object):
'''
This mixin overloads attach/detach so that it calls InstanceGroup.save(),
triggering a background recalculation of policy-based instance group
membership.
'''
def attach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
sub_id, res = self.attach_validate(request)
if status.is_success(response.status_code):
if self.parent_model is Instance:
ig_obj = get_object_or_400(self.model, pk=sub_id)
inst_name = ig_obj.hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name not in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.append(inst_name)
ig_obj.save(update_fields=['policy_instance_list'])
return response
def is_valid_relation(self, parent, sub, created=False):
if sub.is_isolated():
return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
if self.parent_model is InstanceGroup:
ig_obj = self.get_parent_object()
if ig_obj.controller_id is not None:
return {'error': _('Isolated instance group membership may not be managed via the API.')}
return None
def unattach_validate(self, request):
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
if res:
return (sub_id, res)
sub = get_object_or_400(self.model, pk=sub_id)
attach_errors = self.is_valid_relation(None, sub)
if attach_errors:
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
return (sub_id, res)
def unattach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
if status.is_success(response.status_code):
sub_id = request.data.get('id', None)
if self.parent_model is Instance:
inst_name = self.get_parent_object().hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
ig_obj.save(update_fields=['policy_instance_list'])
return response
class RelatedJobsPreventDeleteMixin(object):
def perform_destroy(self, obj):
self.check_related_active_jobs(obj)
return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)
def check_related_active_jobs(self, obj):
active_jobs = obj.get_active_jobs()
if len(active_jobs) > 0:
raise ActiveJobConflict(active_jobs)
time_cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
recent_jobs = obj._get_related_jobs().filter(finished__gte = time_cutoff)
for unified_job in recent_jobs.get_real_instances():
if not unified_job.event_processing_finished:
raise PermissionDenied(_(
'Related job {} is still processing events.'
).format(unified_job.log_format))
class OrganizationCountsMixin(object):
def get_serializer_context(self, *args, **kwargs):
full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
if self.request is None:
return full_context
db_results = {}
org_qs = self.model.accessible_objects(self.request.user, 'read_role')
org_id_list = org_qs.values('id')
if len(org_id_list) == 0:
if self.request.method == 'POST':
full_context['related_field_counts'] = {}
return full_context
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
project_qs = Project.accessible_objects(self.request.user, 'read_role')
# Produce counts of Foreign Key relationships
db_results['inventories'] = inv_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
db_results['teams'] = Team.accessible_objects(
self.request.user, 'read_role').values('organization').annotate(
Count('organization')).order_by('organization')
JT_project_reference = 'project__organization'
JT_inventory_reference = 'inventory__organization'
db_results['job_templates_project'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').exclude(
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
Count(JT_project_reference)).order_by(JT_project_reference)
db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
Count(JT_inventory_reference)).order_by(JT_inventory_reference)
db_results['projects'] = project_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
# Other members and admins of organization are always viewable
db_results['users'] = org_qs.annotate(
users=Count('member_role__members', distinct=True),
admins=Count('admin_role__members', distinct=True)
).values('id', 'users', 'admins')
count_context = {}
for org in org_id_list:
org_id = org['id']
count_context[org_id] = {
'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
'admins': 0, 'projects': 0}
for res, count_qs in db_results.items():
if res == 'job_templates_project':
org_reference = JT_project_reference
elif res == 'job_templates_inventory':
org_reference = JT_inventory_reference
elif res == 'users':
org_reference = 'id'
else:
org_reference = 'organization'
for entry in count_qs:
org_id = entry[org_reference]
if org_id in count_context:
if res == 'users':
count_context[org_id]['admins'] = entry['admins']
count_context[org_id]['users'] = entry['users']
continue
count_context[org_id][res] = entry['%s__count' % org_reference]
# Combine the counts for job templates by project and inventory
for org in org_id_list:
org_id = org['id']
count_context[org_id]['job_templates'] = 0
for related_path in ['job_templates_project', 'job_templates_inventory']:
if related_path in count_context[org_id]:
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
full_context['related_field_counts'] = count_context
return full_context


@@ -1,25 +0,0 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from django.conf import settings # noqa
try:
import awx.devonly # noqa
MODE = 'development'
except ImportError: # pragma: no cover
MODE = 'production'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)
app = Celery('awx')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
if __name__ == '__main__':
app.start()


@@ -2,11 +2,13 @@
from collections import namedtuple
import contextlib
import logging
import re
import sys
import threading
import time
import StringIO
import traceback
import urllib
import six
@@ -60,6 +62,15 @@ SETTING_CACHE_DEFAULTS = True
__all__ = ['SettingsWrapper', 'get_settings_to_cache', 'SETTING_CACHE_NOTSET']
def normalize_broker_url(value):
parts = value.rsplit('@', 1)
match = re.search('(amqp://[^:]+:)(.*)', parts[0])
if match:
prefix, password = match.group(1), match.group(2)
parts[0] = prefix + urllib.quote(password)
return '@'.join(parts)
@contextlib.contextmanager
def _ctit_db_wrapper(trans_safe=False):
'''
@@ -115,7 +126,8 @@ def _ctit_db_wrapper(trans_safe=False):
bottom_stack.close()
# Log the combined stack
if trans_safe:
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
if 'check_migrations' not in sys.argv:
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
else:
logger.error('Error modifying something related to database settings.\n{}'.format(tb_string))
finally:
@@ -444,7 +456,16 @@ class SettingsWrapper(UserSettingsHolder):
value = self._get_local(name)
if value is not empty:
return value
return self._get_default(name)
value = self._get_default(name)
# sometimes users specify RabbitMQ passwords that contain
# unescaped : and @ characters that confused urlparse, e.g.,
# amqp://guest:a@ns:ibl3#@localhost:5672//
#
# detect these scenarios, and automatically escape the user's
# password so it just works
if name == 'BROKER_URL':
value = normalize_broker_url(value)
return value
def _set_local(self, name, value):
field = self.registry.get_setting_field(name)
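As a standalone illustration of the `normalize_broker_url` helper above (the original targets Python 2 and uses `urllib.quote`; the import shim below only exists to make this sketch runnable on either major version), using the example password from the code comment:
```python
# Standalone illustration of the broker URL normalization above; the try/except
# import only exists so this sketch runs on Python 2 or 3.
import re
try:
    from urllib import quote        # Python 2, as used by the original code
except ImportError:
    from urllib.parse import quote  # Python 3

def normalize_broker_url(value):
    # Split off the host portion at the last '@', then percent-encode whatever
    # follows 'amqp://<user>:' so characters like ':', '@' and '#' are escaped.
    parts = value.rsplit('@', 1)
    match = re.search('(amqp://[^:]+:)(.*)', parts[0])
    if match:
        prefix, password = match.group(1), match.group(2)
        parts[0] = prefix + quote(password)
    return '@'.join(parts)

print(normalize_broker_url('amqp://guest:a@ns:ibl3#@localhost:5672//'))
# -> amqp://guest:a%40ns%3Aibl3%23@localhost:5672//
```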


@@ -1789,7 +1789,7 @@ class WorkflowJobNodeAccess(BaseAccess):
def filtered_queryset(self):
return self.model.objects.filter(
workflow_job__workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(
self.user, 'read_role'))
@check_superuser
@@ -1915,7 +1915,7 @@ class WorkflowJobAccess(BaseAccess):
def filtered_queryset(self):
return WorkflowJob.objects.filter(
workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(
self.user, 'read_role'))
def can_add(self, data):
@@ -1947,9 +1947,11 @@ class WorkflowJobAccess(BaseAccess):
if self.user.is_superuser:
return True
wfjt = obj.workflow_job_template
template = obj.workflow_job_template
if not template and obj.job_template_id:
template = obj.job_template
# only superusers can relaunch orphans
if not wfjt:
if not template:
return False
# If job was launched by another user, it could have survey passwords
@@ -1967,7 +1969,7 @@ class WorkflowJobAccess(BaseAccess):
return False
# execute permission to WFJT is mandatory for any relaunch
return (self.user in wfjt.execute_role)
return (self.user in template.execute_role)
def can_recreate(self, obj):
node_qs = obj.workflow_job_nodes.all().prefetch_related('inventory', 'credentials', 'unified_job_template')


@@ -29,3 +29,11 @@ STANDARD_INVENTORY_UPDATE_ENV = {
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
CENSOR_VALUE = '************'
ENV_BLACKLIST = frozenset((
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
'AWX_HOST', 'PROJECT_REVISION'
))


@@ -0,0 +1,5 @@
from django.conf import settings
def get_local_queuename():
return settings.CLUSTER_HOST_ID.encode('utf-8')


@@ -0,0 +1,58 @@
import logging
import socket
from django.conf import settings
from awx.main.dispatch import get_local_queuename
from kombu import Connection, Queue, Exchange, Producer, Consumer
logger = logging.getLogger('awx.main.dispatch')
class Control(object):
services = ('dispatcher', 'callback_receiver')
result = None
def __init__(self, service, host=None):
if service not in self.services:
raise RuntimeError('{} must be in {}'.format(service, self.services))
self.service = service
self.queuename = host or get_local_queuename()
self.queue = Queue(self.queuename, Exchange(self.queuename), routing_key=self.queuename)
def publish(self, msg, conn, **kwargs):
producer = Producer(
exchange=self.queue.exchange,
channel=conn,
routing_key=self.queuename
)
producer.publish(msg, expiration=5, **kwargs)
def status(self, *args, **kwargs):
return self.control_with_reply('status', *args, **kwargs)
def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs)
def control_with_reply(self, command, timeout=5):
logger.warn('checking {} {} for {}'.format(self.service, command, self.queuename))
reply_queue = Queue(name="amq.rabbitmq.reply-to")
self.result = None
with Connection(settings.BROKER_URL) as conn:
with Consumer(conn, reply_queue, callbacks=[self.process_message], no_ack=True):
self.publish({'control': command}, conn, reply_to='amq.rabbitmq.reply-to')
try:
conn.drain_events(timeout=timeout)
except socket.timeout:
logger.error('{} did not reply within {}s'.format(self.service, timeout))
raise
return self.result
def control(self, msg, **kwargs):
with Connection(settings.BROKER_URL) as conn:
self.publish(msg, conn)
def process_message(self, body, message):
self.result = body
message.ack()
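A minimal usage sketch for the control helper above (the module path is an assumption, since the file header is not visible in this hunk, and a reachable broker at `settings.BROKER_URL` is required):
```python
# Hypothetical usage of the Control helper shown above, run from a Django-aware
# shell (e.g. `awx-manage shell`) on an AWX node.
from awx.main.dispatch.control import Control  # assumed module path

ctl = Control('dispatcher')    # or Control('callback_receiver')
print(ctl.status(timeout=5))   # asks the service on the local queue to report status
print(ctl.running(timeout=5))  # lists the UUIDs of tasks it is currently running
```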

awx/main/dispatch/pool.py Normal file

@@ -0,0 +1,405 @@
import logging
import os
import sys
import random
import traceback
from uuid import uuid4
import collections
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from Queue import Full as QueueFull, Empty as QueueEmpty
from django.conf import settings
from django.db import connection as django_connection, connections
from django.core.cache import cache as django_cache
from jinja2 import Template
import psutil
from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper
logger = logging.getLogger('awx.main.dispatch')
class PoolWorker(object):
'''
Used to track a worker child process and its pending and finished messages.
This class makes use of two distinct multiprocessing.Queues to track state:
- self.queue: this is a queue which represents pending messages that should
be handled by this worker process; as new AMQP messages come
in, a pool will put() them into this queue; the child
process that is forked will get() from this queue and handle
received messages in an endless loop
- self.finished: this is a queue which the worker process uses to signal
that it has finished processing a message
When a message is put() onto this worker, it is tracked in
self.managed_tasks.
Periodically, the worker will call .calculate_managed_tasks(), which will
cause messages in self.finished to be removed from self.managed_tasks.
In this way, self.managed_tasks represents a view of the messages assigned
to a specific process. The message at [0] is the least-recently inserted
message, and it represents what the worker is running _right now_
(self.current_task).
A worker is "busy" when it has at least one message in self.managed_tasks.
It is "idle" when self.managed_tasks is empty.
'''
def __init__(self, queue_size, target, args):
self.messages_sent = 0
self.messages_finished = 0
self.managed_tasks = collections.OrderedDict()
self.finished = MPQueue(queue_size)
self.queue = MPQueue(queue_size)
self.process = Process(target=target, args=(self.queue, self.finished) + args)
self.process.daemon = True
def start(self):
self.process.start()
def put(self, body):
uuid = '?'
if isinstance(body, dict):
if not body.get('uuid'):
body['uuid'] = str(uuid4())
uuid = body['uuid']
logger.debug('delivered {} to worker[{}] qsize {}'.format(
uuid, self.pid, self.qsize
))
self.managed_tasks[uuid] = body
self.queue.put(body, block=True, timeout=5)
self.messages_sent += 1
self.calculate_managed_tasks()
def quit(self):
'''
Send a special control message to the worker that tells it to exit
gracefully.
'''
self.queue.put('QUIT')
@property
def pid(self):
return self.process.pid
@property
def qsize(self):
return self.queue.qsize()
@property
def alive(self):
return self.process.is_alive()
@property
def mb(self):
if self.alive:
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
return '0'
@property
def exitcode(self):
return str(self.process.exitcode)
def calculate_managed_tasks(self):
# look to see if any tasks were finished
finished = []
for _ in range(self.finished.qsize()):
try:
finished.append(self.finished.get(block=False))
except QueueEmpty:
break # qsize is not always _totally_ up to date
# if any tasks were finished, remove them from the managed tasks for
# this worker
for uuid in finished:
self.messages_finished += 1
del self.managed_tasks[uuid]
@property
def current_task(self):
self.calculate_managed_tasks()
# the task at [0] is the one that's running right now (or is about to
# be running)
if len(self.managed_tasks):
return self.managed_tasks[self.managed_tasks.keys()[0]]
return None
@property
def orphaned_tasks(self):
orphaned = []
if not self.alive:
# if this process had a running task that never finished,
# requeue its error callbacks
current_task = self.current_task
if isinstance(current_task, dict):
orphaned.extend(current_task.get('errbacks', []))
# if this process has any pending messages, requeue them
for _ in range(self.qsize):
try:
message = self.queue.get(block=False)
if message != 'QUIT':
orphaned.append(message)
except QueueEmpty:
break # qsize is not always _totally_ up to date
if len(orphaned):
logger.error(
'requeuing {} messages from gone worker pid:{}'.format(
len(orphaned), self.pid
)
)
return orphaned
@property
def busy(self):
self.calculate_managed_tasks()
return len(self.managed_tasks) > 0
@property
def idle(self):
return not self.busy
class WorkerPool(object):
'''
Creates a pool of forked PoolWorkers.
As WorkerPool.write(...) is called (generally, by a kombu consumer
implementation when it receives an AMQP message), messages are passed to
one of the multiprocessing Queues where some work can be done on them.
class MessagePrinter(awx.main.dispatch.worker.BaseWorker):
def perform_work(self, body):
print body
pool = WorkerPool(min_workers=4) # spawn four worker processes
pool.init_workers(MessagePrinter().work_loop)
pool.write(
0, # preferred worker 0
'Hello, World!'
)
'''
debug_meta = ''
def __init__(self, min_workers=None, queue_size=None):
self.name = settings.CLUSTER_HOST_ID
self.pid = os.getpid()
self.min_workers = min_workers or settings.JOB_EVENT_WORKERS
self.queue_size = queue_size or settings.JOB_EVENT_MAX_QUEUE_SIZE
self.workers = []
def __len__(self):
return len(self.workers)
def init_workers(self, target, *target_args):
self.target = target
self.target_args = target_args
for idx in range(self.min_workers):
self.up()
def up(self):
idx = len(self.workers)
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
worker = PoolWorker(self.queue_size, self.target, (idx,) + self.target_args)
self.workers.append(worker)
try:
worker.start()
except Exception:
logger.exception('could not fork')
else:
logger.warn('scaling up worker pid:{}'.format(worker.pid))
return idx, worker
def debug(self, *args, **kwargs):
self.cleanup()
tmpl = Template(
'{{ pool.name }}[pid:{{ pool.pid }}] workers total={{ workers|length }} {{ meta }} \n'
'{% for w in workers %}'
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
' sent={{ w.messages_sent }}'
' finished={{ w.messages_finished }}'
' qsize={{ w.managed_tasks|length }}'
' rss={{ w.mb }}MB'
'{% for task in w.managed_tasks.values() %}'
'\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
'{{ task["uuid"] }} '
'{% if "task" in task %}'
'{{ task["task"].rsplit(".", 1)[-1] }}'
# don't print kwargs, they often contain launch-time secrets
'(*{{ task.get("args", []) }})'
'{% endif %}'
'{% endfor %}'
'{% if not w.managed_tasks|length %}'
' [IDLE]'
'{% endif %}'
'\n'
'{% endfor %}'
)
return tmpl.render(pool=self, workers=self.workers, meta=self.debug_meta)
def write(self, preferred_queue, body):
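# build an ordering over the workers that tries the preferred queue first, then the rest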
queue_order = sorted(range(len(self.workers)), cmp=lambda x, y: -1 if x==preferred_queue else 0)
write_attempt_order = []
for queue_actual in queue_order:
try:
self.workers[queue_actual].put(body)
return queue_actual
except QueueFull:
pass
except Exception:
tb = traceback.format_exc()
logger.warn("could not write to queue %s" % preferred_queue)
logger.warn("detail: {}".format(tb))
write_attempt_order.append(preferred_queue)
logger.warn("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
return None
def stop(self, signum):
try:
for worker in self.workers:
os.kill(worker.pid, signum)
except Exception:
logger.exception('could not kill {}'.format(worker.pid))
class AutoscalePool(WorkerPool):
'''
An extended pool implementation that automatically scales workers up and
down based on demand
'''
def __init__(self, *args, **kwargs):
self.max_workers = kwargs.pop('max_workers', None)
super(AutoscalePool, self).__init__(*args, **kwargs)
if self.max_workers is None:
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
if settings_absmem is not None:
total_memory_gb = int(settings_absmem)
else:
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
# 5 workers per GB of total memory
self.max_workers = (total_memory_gb * 5)
@property
def should_grow(self):
if len(self.workers) < self.min_workers:
# If we don't have at least min_workers, add more
return True
# If every worker is busy doing something, add more
return all([w.busy for w in self.workers])
@property
def full(self):
return len(self.workers) == self.max_workers
@property
def debug_meta(self):
return 'min={} max={}'.format(self.min_workers, self.max_workers)
def cleanup(self):
"""
Perform some internal accounting and cleanup. This is run on
every cluster node heartbeat:
1. Discover worker processes that exited, and recover messages they
were handling.
2. Clean up unnecessary, idle workers.
3. Check to see if the database says this node is running any tasks
that aren't actually running. If so, reap them.
"""
orphaned = []
for w in self.workers[::]:
if not w.alive:
# the worker process has exited
# 1. take the task it was running and enqueue the error
# callbacks
# 2. take any pending tasks delivered to its queue and
# send them to another worker
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
if w.current_task:
if w.current_task != 'QUIT':
try:
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
reaper.reap_job(j, 'failed')
except Exception:
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
orphaned.extend(w.orphaned_tasks)
self.workers.remove(w)
elif w.idle and len(self.workers) > self.min_workers:
# the process has an empty queue (it's idle) and we have
# more processes in the pool than we need (> min)
# send this process a message so it will exit gracefully
# at the next opportunity
logger.warn('scaling down worker pid:{}'.format(w.pid))
w.quit()
self.workers.remove(w)
for m in orphaned:
# if all the workers are dead, spawn at least one
if not len(self.workers):
self.up()
idx = random.choice(range(len(self.workers)))
self.write(idx, m)
# if the database says a job is running on this node, but it's *not*,
# then reap it
running_uuids = []
for worker in self.workers:
worker.calculate_managed_tasks()
running_uuids.extend(worker.managed_tasks.keys())
try:
reaper.reap(excluded_uuids=running_uuids)
except Exception:
# we _probably_ failed here due to DB connectivity issues, so
# don't use our logger (it accesses the database for configuration)
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
def up(self):
if self.full:
# if we can't spawn more workers, just toss this message into a
# random worker's backlog
idx = random.choice(range(len(self.workers)))
return idx, self.workers[idx]
else:
return super(AutoscalePool, self).up()
def write(self, preferred_queue, body):
try:
# when the cluster heartbeat occurs, clean up internally
if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
self.cleanup()
if self.should_grow:
self.up()
# we don't care about "preferred queue" round robin distribution, just
# find the first non-busy worker and claim it
workers = self.workers[:]
random.shuffle(workers)
for w in workers:
if not w.busy:
w.put(body)
break
else:
return super(AutoscalePool, self).write(preferred_queue, body)
except Exception:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
logger.exception('failed to write inbound message')
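For context, the classes added in this file are driven by the consumer and dispatcher command later in this diff; a condensed sketch of that wiring, using only names that appear in those files:

from django.conf import settings
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker

queuename = get_local_queuename()
with Connection(settings.BROKER_URL) as conn:
    consumer = AWXConsumer(
        'dispatcher',
        conn,
        TaskWorker(),
        [Queue(queuename, Exchange(queuename), routing_key=queuename)],
        AutoscalePool(min_workers=4)
    )
    # each AMQP message reaches AWXConsumer.process_task(), which calls
    # pool.write(index, body); AutoscalePool.cleanup() runs on the cluster
    # heartbeat to scale workers and reap orphaned jobs
    consumer.run()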


@@ -0,0 +1,128 @@
import inspect
import logging
import sys
from uuid import uuid4
from django.conf import settings
from kombu import Connection, Exchange, Producer
logger = logging.getLogger('awx.main.dispatch')
def serialize_task(f):
return '.'.join([f.__module__, f.__name__])
class task:
"""
Used to decorate a function or class so that it can be run asynchronously
via the task dispatcher. Tasks can be simple functions:
@task()
def add(a, b):
return a + b
...or classes that define a `run` method:
@task()
class Adder:
def run(self, a, b):
return a + b
# Tasks can be run synchronously...
assert add(1, 1) == 2
assert Adder().run(1, 1) == 2
# ...or published to a queue:
add.apply_async([1, 1])
Adder.apply_async([1, 1])
# Tasks can also define a specific target queue or exchange type:
@task(queue='slow-tasks')
def snooze():
time.sleep(10)
@task(queue='tower_broadcast', exchange_type='fanout')
def announce():
print "Run this everywhere!"
"""
def __init__(self, queue=None, exchange_type=None):
self.queue = queue
self.exchange_type = exchange_type
def __call__(self, fn=None):
queue = self.queue
exchange_type = self.exchange_type
class PublisherMixin(object):
queue = None
@classmethod
def delay(cls, *args, **kwargs):
return cls.apply_async(args, kwargs)
@classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
task_id = uuid or str(uuid4())
args = args or []
kwargs = kwargs or {}
queue = (
queue or
getattr(cls.queue, 'im_func', cls.queue) or
settings.CELERY_DEFAULT_QUEUE
)
obj = {
'uuid': task_id,
'args': args,
'kwargs': kwargs,
'task': cls.name
}
obj.update(**kw)
if callable(queue):
queue = queue()
if not settings.IS_TESTING(sys.argv):
with Connection(settings.BROKER_URL) as conn:
exchange = Exchange(queue, type=exchange_type or 'direct')
producer = Producer(conn)
logger.debug('publish {}({}, queue={})'.format(
cls.name,
task_id,
queue
))
producer.publish(obj,
serializer='json',
compression='bzip2',
exchange=exchange,
declare=[exchange],
delivery_mode="persistent",
routing_key=queue)
return (obj, queue)
# If the object we're wrapping *is* a class (e.g., RunJob), return
# a *new* class that inherits from the wrapped class *and* BaseTask
# In this way, the new class returned by our decorator is the class
# being decorated *plus* PublisherMixin so cls.apply_async() and
# cls.delay() work
bases = []
ns = {'name': serialize_task(fn), 'queue': queue}
if inspect.isclass(fn):
bases = list(fn.__bases__)
ns.update(fn.__dict__)
cls = type(
fn.__name__,
tuple(bases + [PublisherMixin]),
ns
)
if inspect.isclass(fn):
return cls
# if the object being decorated is *not* a class (it's a Python
# function), make fn.apply_async and fn.delay proxy through to the
# PublisherMixin we dynamically created above
setattr(fn, 'name', cls.name)
setattr(fn, 'apply_async', cls.apply_async)
setattr(fn, 'delay', cls.delay)
return fn
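For reference, a rough sketch of the message body apply_async() builds for the add() example above, assuming add() is defined in a module named mymodule (the uuid is illustrative):

{
    'uuid': 'f0b3c0d4-8a1e-4b5f-9d27-0c6e1a2b3c4d',
    'args': [1, 1],
    'kwargs': {},
    'task': 'mymodule.add'
}

The body is then published JSON-serialized and bzip2-compressed to a direct exchange named after the target queue, as shown in the producer.publish() call above.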


@@ -0,0 +1,46 @@
from datetime import timedelta
import logging
from django.db.models import Q
from django.utils.timezone import now as tz_now
from django.contrib.contenttypes.models import ContentType
from awx.main.models import Instance, UnifiedJob, WorkflowJob
logger = logging.getLogger('awx.main.dispatch')
def reap_job(j, status):
j.status = status
j.start_args = '' # blank field to remove encrypted passwords
j.job_explanation += ' '.join((
'Task was marked as running in Tower but was not present in',
'the job queue, so it has been marked as failed.',
))
j.save(update_fields=['status', 'start_args', 'job_explanation'])
if hasattr(j, 'send_notification_templates'):
j.send_notification_templates('failed')
j.websocket_emit_status(status)
logger.error(
'{} is no longer running; reaping'.format(j.log_format)
)
def reap(instance=None, status='failed', excluded_uuids=[]):
'''
Reap all jobs in waiting|running for this instance.
'''
me = instance or Instance.objects.me()
now = tz_now()
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
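# reap jobs owned by this node (as execution or controller node) that are running,
# or that have sat in 'waiting' for more than a minute, excluding workflow jobs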
jobs = UnifiedJob.objects.filter(
(
Q(status='running') |
Q(status='waiting', modified__lte=now - timedelta(seconds=60))
) & (
Q(execution_node=me.hostname) |
Q(controller_node=me.hostname)
) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
).exclude(celery_task_id__in=excluded_uuids)
for j in jobs:
reap_job(j, status)


@@ -0,0 +1,3 @@
from .base import AWXConsumer, BaseWorker # noqa
from .callback import CallbackBrokerWorker # noqa
from .task import TaskWorker # noqa


@@ -0,0 +1,146 @@
# Copyright (c) 2018 Ansible by Red Hat
# All Rights Reserved.
import os
import logging
import signal
from uuid import UUID
from Queue import Empty as QueueEmpty
from kombu import Producer
from kombu.mixins import ConsumerMixin
from awx.main.dispatch.pool import WorkerPool
logger = logging.getLogger('awx.main.dispatch')
def signame(sig):
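# map a signal number (e.g. signal.SIGTERM) back to its 'SIG*' name for logging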
return dict(
(k, v) for v, k in signal.__dict__.items()
if v.startswith('SIG') and not v.startswith('SIG_')
)[sig]
class WorkerSignalHandler:
def __init__(self):
self.kill_now = False
signal.signal(signal.SIGINT, self.exit_gracefully)
def exit_gracefully(self, *args, **kwargs):
self.kill_now = True
class AWXConsumer(ConsumerMixin):
def __init__(self, name, connection, worker, queues=[], pool=None):
self.connection = connection
self.total_messages = 0
self.queues = queues
self.worker = worker
self.pool = pool
if pool is None:
self.pool = WorkerPool()
self.pool.init_workers(self.worker.work_loop)
def get_consumers(self, Consumer, channel):
logger.debug(self.listening_on)
return [Consumer(queues=self.queues, accept=['json'],
callbacks=[self.process_task])]
@property
def listening_on(self):
return 'listening on {}'.format([
'{} [{}]'.format(q.name, q.exchange.type) for q in self.queues
])
def control(self, body, message):
logger.warn(body)
control = body.get('control')
if control in ('status', 'running'):
producer = Producer(
channel=self.connection,
routing_key=message.properties['reply_to']
)
if control == 'status':
msg = '\n'.join([self.listening_on, self.pool.debug()])
elif control == 'running':
msg = []
for worker in self.pool.workers:
worker.calculate_managed_tasks()
msg.extend(worker.managed_tasks.keys())
producer.publish(msg)
elif control == 'reload':
for worker in self.pool.workers:
worker.quit()
else:
logger.error('unrecognized control message: {}'.format(control))
message.ack()
def process_task(self, body, message):
if 'control' in body:
return self.control(body, message)
if len(self.pool):
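# hash the message uuid to a worker index so messages sharing a uuid consistently
# land on the same worker; otherwise fall back to simple round-robin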
if "uuid" in body and body['uuid']:
try:
queue = UUID(body['uuid']).int % len(self.pool)
except Exception:
queue = self.total_messages % len(self.pool)
else:
queue = self.total_messages % len(self.pool)
else:
queue = 0
self.pool.write(queue, body)
self.total_messages += 1
message.ack()
def run(self, *args, **kwargs):
signal.signal(signal.SIGINT, self.stop)
signal.signal(signal.SIGTERM, self.stop)
self.worker.on_start()
super(AWXConsumer, self).run(*args, **kwargs)
def stop(self, signum, frame):
self.should_stop = True # this makes the kombu mixin stop consuming
logger.debug('received {}, stopping'.format(signame(signum)))
self.worker.on_stop()
raise SystemExit()
class BaseWorker(object):
def work_loop(self, queue, finished, idx, *args):
ppid = os.getppid()
signal_handler = WorkerSignalHandler()
while not signal_handler.kill_now:
# if the parent PID changes, this process has been orphaned
# (e.g., the parent segfaulted or was killed), so we should exit too
if os.getppid() != ppid:
break
try:
body = queue.get(block=True, timeout=1)
if body == 'QUIT':
break
except QueueEmpty:
continue
except Exception as e:
logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
continue
try:
self.perform_work(body, *args)
finally:
if 'uuid' in body:
uuid = body['uuid']
logger.debug('task {} is finished'.format(uuid))
finished.put(uuid)
logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
def perform_work(self, body):
raise NotImplementedError()
def on_start(self):
pass
def on_stop(self):
pass


@@ -0,0 +1,130 @@
import logging
import time
import os
import signal
import traceback
from django.conf import settings
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
'''
A worker implementation that deserializes callback event data and persists
it into the database.
The code that *builds* these types of messages is found in the AWX display
callback (`awx.lib.awx_display_callback`).
'''
MAX_RETRIES = 2
def perform_work(self, body):
try:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
if not any([key in body for key in event_map]):
raise Exception('Payload does not have a job identifier')
def _save_event_data():
for key, cls in event_map.items():
if key in body:
cls.create_from_data(**body)
job_identifier = 'unknown job'
job_key = 'unknown'
for key in event_map.keys():
if key in body:
job_identifier = body[key]
job_key = key
break
if settings.DEBUG:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
from pprint import pformat
if body.get('event') == 'EOF':
event_thing = 'EOF event'
else:
event_thing = 'event {}'.format(body.get('counter', 'unknown'))
logger.info('Callback worker received {} for {} {}'.format(
event_thing, job_key[:-len('_id')], job_identifier
))
logger.debug('Body: {}'.format(
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
)[:1024 * 4])
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if hasattr(uj, 'send_notification_templates'):
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
break
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_identifier)
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
return
retries = 0
while retries <= self.MAX_RETRIES:
try:
_save_event_data()
break
except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
os.kill(os.getppid(), signal.SIGINT)
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError:
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
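Roughly, the two payload shapes this worker distinguishes look like the sketches below; the field names come from the code above, and the values are illustrative:

# an ordinary event payload, routed to JobEvent.create_from_data(**body) by its job_id key
{'job_id': 123, 'event': 'runner_on_ok', 'counter': 42}
# the EOF marker sent when stdout closes; never persisted, it only triggers the
# jobs-summary websocket notification and the success/failure notification templates
{'job_id': 123, 'event': 'EOF', 'final_counter': 977}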


@@ -0,0 +1,118 @@
import inspect
import logging
import importlib
import sys
import traceback
import six
from django import db
from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown
from .base import BaseWorker
logger = logging.getLogger('awx.main.dispatch')
class TaskWorker(BaseWorker):
'''
A worker implementation that deserializes task messages and runs native
Python code.
The code that *builds* these types of messages is found in
`awx.main.dispatch.publish`.
'''
@classmethod
def resolve_callable(cls, task):
'''
Transform a dotted notation task into an imported, callable function, e.g.,
awx.main.tasks.delete_inventory
awx.main.tasks.RunProjectUpdate
'''
module, target = task.rsplit('.', 1)
module = importlib.import_module(module)
_call = None
if hasattr(module, target):
_call = getattr(module, target, None)
return _call
def run_callable(self, body):
'''
Given some AMQP message, import the correct Python code and run it.
'''
task = body['task']
uuid = body.get('uuid', '<unknown>')
args = body.get('args', [])
kwargs = body.get('kwargs', {})
_call = TaskWorker.resolve_callable(task)
if inspect.isclass(_call):
# the callable is a class, e.g., RunJob; instantiate and
# return its `run()` method
_call = _call().run
# don't print kwargs, they often contain launch-time secrets
logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
return _call(*args, **kwargs)
def perform_work(self, body):
'''
Import and run code for a task e.g.,
body = {
'args': [8],
'callbacks': [{
'args': [],
'kwargs': {},
'task': u'awx.main.tasks.handle_work_success'
}],
'errbacks': [{
'args': [],
'kwargs': {},
'task': 'awx.main.tasks.handle_work_error'
}],
'kwargs': {},
'task': u'awx.main.tasks.RunProjectUpdate'
}
'''
for conn in db.connections.all():
# If the database connection has a hiccup during a task, close it
# so we can establish a new connection
conn.close_if_unusable_or_obsolete()
result = None
try:
result = self.run_callable(body)
except Exception as exc:
try:
if getattr(exc, 'is_awx_task_error', False):
# Error caused by user / tracked in job output
logger.warning(six.text_type("{}").format(exc))
else:
task = body['task']
args = body.get('args', [])
kwargs = body.get('kwargs', {})
logger.exception('Worker failed to run task {}(*{}, **{})'.format(
task, args, kwargs
))
except Exception:
# It's fairly critical that this code _not_ raise exceptions on logging
# If you configure external logging in a way that _it_ fails, there's
# not a lot we can do here; sys.stderr.write is a final hail mary
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
for callback in body.get('errbacks', []) or []:
callback['uuid'] = body['uuid']
self.perform_work(callback)
for callback in body.get('callbacks', []) or []:
callback['uuid'] = body['uuid']
self.perform_work(callback)
return result
def on_start(self):
dispatch_startup()
def on_stop(self):
inform_cluster_of_shutdown()
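A short sketch of how a published message is turned back into a Python call, using the RunProjectUpdate example from the docstring above (the uuid is illustrative):

body = {'uuid': 'abc123', 'task': 'awx.main.tasks.RunProjectUpdate', 'args': [8], 'kwargs': {}}
result = TaskWorker().run_callable(body)
# resolve_callable() imports awx.main.tasks and returns RunProjectUpdate; because it
# is a class, run_callable() instantiates it and invokes .run(8), while plain
# functions are called directly with the recorded args and kwargs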


@@ -4,11 +4,6 @@
import six
# Celery does not respect exception type when using a serializer different than pickle;
# and awx uses the json serializer
# https://github.com/celery/celery/issues/3586
class _AwxTaskError():
def build_exception(self, task, message=None):
if message is None:
@@ -36,5 +31,3 @@ class _AwxTaskError():
AwxTaskError = _AwxTaskError()


@@ -38,7 +38,7 @@ class IsolatedManager(object):
:param stdout_handle: a file-like object for capturing stdout
:param ssh_key_path: a filepath where SSH key data can be read
:param expect_passwords: a dict of regular expression password prompts
to input values, i.e., {r'Password:\s*?$':
to input values, i.e., {r'Password:*?$':
'some_password'}
:param cancelled_callback: a callable - which returns `True` or `False`
- signifying if the job has been prematurely


@@ -71,7 +71,7 @@ def run_pexpect(args, cwd, env, logfile,
- signifying if the job has been prematurely
cancelled
:param expect_passwords: a dict of regular expression password prompts
to input values, i.e., {r'Password:\s*?$':
to input values, i.e., {r'Password:*?$':
'some_password'}
:param extra_update_fields: a dict used to specify DB fields which should
be updated on the underlying model


@@ -46,7 +46,7 @@ from awx.main.utils.filters import SmartFilter
from awx.main.utils.encryption import encrypt_value, decrypt_value, get_encryption_key
from awx.main.validators import validate_ssh_private_key
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS, ENV_BLACKLIST
from awx.main import utils
@@ -573,10 +573,10 @@ class CredentialInputField(JSONSchemaField):
# string)
match = re.search(
# 'foo' is a dependency of 'bar'
"'" # apostrophe
"([^']+)" # one or more non-apostrophes (first group)
"'[\w ]+'" # one or more words/spaces
"([^']+)", # second group
r"'" # apostrophe
r"([^']+)" # one or more non-apostrophes (first group)
r"'[\w ]+'" # one or more words/spaces
r"([^']+)", # second group
error.message,
)
if match:
@@ -755,7 +755,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
'file': {
'type': 'object',
'patternProperties': {
'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
r'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
},
'additionalProperties': False,
},
@@ -767,7 +767,12 @@ class CredentialTypeInjectorField(JSONSchemaField):
# of underscores, digits, and alphabetics from the portable
# character set. The first character of a name is not
# a digit.
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {'type': 'string'},
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {
'type': 'string',
# The environment variable _value_ can be any ascii,
# but pexpect will choke on any unicode
'pattern': '^[\x00-\x7F]*$'
},
},
'additionalProperties': False,
},
@@ -783,6 +788,19 @@ class CredentialTypeInjectorField(JSONSchemaField):
'additionalProperties': False
}
def validate_env_var_allowed(self, env_var):
if env_var.startswith('ANSIBLE_'):
raise django_exceptions.ValidationError(
_('Environment variable {} may affect Ansible configuration so its '
'use is not allowed in credentials.').format(env_var),
code='invalid', params={'value': env_var},
)
if env_var in ENV_BLACKLIST:
raise django_exceptions.ValidationError(
_('Environment variable {} is blacklisted from use in credentials.').format(env_var),
code='invalid', params={'value': env_var},
)
def validate(self, value, model_instance):
super(CredentialTypeInjectorField, self).validate(
value, model_instance
@@ -834,6 +852,9 @@ class CredentialTypeInjectorField(JSONSchemaField):
setattr(valid_namespace['tower'].filename, template_name, 'EXAMPLE_FILENAME')
for type_, injector in value.items():
if type_ == 'env':
for key in injector.keys():
self.validate_env_var_allowed(key)
for key, tmpl in injector.items():
try:
Environment(


@@ -15,6 +15,8 @@ class Command(BaseCommand):
def handle(self, *args, **kwargs):
# Sanity check: Is there already an organization in the system?
if Organization.objects.count():
print('An organization is already in the system, exiting.')
print('(changed: False)')
return
# Create a default organization as the first superuser found.
@@ -54,3 +56,4 @@ class Command(BaseCommand):
jt.credentials.add(c)
print('Default organization added.')
print('Demo Credential, Inventory, and Job Template added.')
print('(changed: True)')


@@ -41,14 +41,18 @@ class Command(BaseCommand):
with advisory_lock('instance_registration_%s' % hostname):
instance = Instance.objects.filter(hostname=hostname)
if instance.exists():
isolated = instance.first().is_isolated()
instance.delete()
print("Instance Removed")
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
if result != 0:
print("Node deprovisioning may have failed when attempting to "
"remove the RabbitMQ instance {} from the cluster".format(hostname))
else:
if isolated:
print('Successfully deprovisioned {}'.format(hostname))
else:
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
if result != 0:
print("Node deprovisioning may have failed when attempting to "
"remove the RabbitMQ instance {} from the cluster".format(hostname))
else:
print('Successfully deprovisioned {}'.format(hostname))
print('(changed: True)')
else:
print('No instance found matching name {}'.format(hostname))


@@ -938,7 +938,7 @@ class Command(BaseCommand):
self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
self.instance_id_var = options.get('instance_id_var', None)
self.celery_invoked = False if os.getenv('INVENTORY_SOURCE_ID', None) is None else True
self.invoked_from_dispatcher = False if os.getenv('INVENTORY_SOURCE_ID', None) is None else True
# Load inventory and related objects from database.
if self.inventory_name and self.inventory_id:
@@ -1062,7 +1062,7 @@ class Command(BaseCommand):
exc = e
transaction.rollback()
if self.celery_invoked is False:
if self.invoked_from_dispatcher is False:
with ignore_inventory_computed_fields():
self.inventory_update = InventoryUpdate.objects.get(pk=self.inventory_update.pk)
self.inventory_update.result_traceback = tb


@@ -1,231 +1,11 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
import os
import signal
import time
from uuid import UUID
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from Queue import Empty as QueueEmpty
from Queue import Full as QueueFull
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
# Django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from django.db import DatabaseError, OperationalError
from django.db.utils import InterfaceError, InternalError
from django.core.cache import cache as django_cache
from kombu import Connection, Exchange, Queue
# AWX
from awx.main.models import * # noqa
from awx.main.consumers import emit_channel_notification
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class WorkerSignalHandler:
def __init__(self):
self.kill_now = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, *args, **kwargs):
self.kill_now = True
class CallbackBrokerWorker(ConsumerMixin):
MAX_RETRIES = 2
def __init__(self, connection, use_workers=True):
self.connection = connection
self.worker_queues = []
self.total_messages = 0
self.init_workers(use_workers)
def init_workers(self, use_workers=True):
def shutdown_handler(active_workers):
def _handler(signum, frame):
try:
for active_worker in active_workers:
active_worker.terminate()
signal.signal(signum, signal.SIG_DFL)
os.kill(os.getpid(), signum) # Rethrow signal, this time without catching it
except Exception:
logger.exception('Error in shutdown_handler')
return _handler
if use_workers:
for idx in range(settings.JOB_EVENT_WORKERS):
queue_actual = MPQueue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
w = Process(target=self.callback_worker, args=(queue_actual, idx,))
if settings.DEBUG:
logger.info('Starting worker %s' % str(idx))
self.worker_queues.append([0, queue_actual, w])
# It's important to close these _right before_ we fork; we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race
# conditions)
django_connection.close()
django_cache.close()
for _, _, w in self.worker_queues:
w.start()
elif settings.DEBUG:
logger.warn('Started callback receiver (no workers)')
signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues]))
signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues]))
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
Exchange(settings.CALLBACK_QUEUE, type='direct'),
routing_key=settings.CALLBACK_QUEUE)],
accept=['json'],
callbacks=[self.process_task])]
def process_task(self, body, message):
if "uuid" in body and body['uuid']:
try:
queue = UUID(body['uuid']).int % settings.JOB_EVENT_WORKERS
except Exception:
queue = self.total_messages % settings.JOB_EVENT_WORKERS
else:
queue = self.total_messages % settings.JOB_EVENT_WORKERS
self.write_queue_worker(queue, body)
self.total_messages += 1
message.ack()
def write_queue_worker(self, preferred_queue, body):
queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
write_attempt_order = []
for queue_actual in queue_order:
try:
worker_actual = self.worker_queues[queue_actual]
worker_actual[1].put(body, block=True, timeout=5)
worker_actual[0] += 1
return queue_actual
except QueueFull:
pass
except Exception:
import traceback
tb = traceback.format_exc()
logger.warn("Could not write to queue %s" % preferred_queue)
logger.warn("Detail: {}".format(tb))
write_attempt_order.append(preferred_queue)
logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order))
return None
def callback_worker(self, queue_actual, idx):
signal_handler = WorkerSignalHandler()
while not signal_handler.kill_now:
try:
body = queue_actual.get(block=True, timeout=1)
except QueueEmpty:
continue
except Exception as e:
logger.error("Exception on worker thread, restarting: " + str(e))
continue
try:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
if not any([key in body for key in event_map]):
raise Exception('Payload does not have a job identifier')
if settings.DEBUG:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
from pprint import pformat
logger.info('Body: {}'.format(
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
)[:1024 * 4])
def _save_event_data():
for key, cls in event_map.items():
if key in body:
cls.create_from_data(**body)
job_identifier = 'unknown job'
for key in event_map.keys():
if key in body:
job_identifier = body[key]
break
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if hasattr(uj, 'send_notification_templates'):
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
break
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_identifier)
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
continue
retries = 0
while retries <= self.MAX_RETRIES:
try:
_save_event_data()
break
except (OperationalError, InterfaceError, InternalError) as e:
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
os.kill(os.getppid(), signal.SIGINT)
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError as e:
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
break
except Exception as exc:
import traceback
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
from awx.main.dispatch.worker import AWXConsumer, CallbackBrokerWorker
class Command(BaseCommand):
@@ -238,8 +18,22 @@ class Command(BaseCommand):
def handle(self, *arg, **options):
with Connection(settings.BROKER_URL) as conn:
consumer = None
try:
worker = CallbackBrokerWorker(conn)
worker.run()
consumer = AWXConsumer(
'callback_receiver',
conn,
CallbackBrokerWorker(),
[
Queue(
settings.CALLBACK_QUEUE,
Exchange(settings.CALLBACK_QUEUE, type='direct'),
routing_key=settings.CALLBACK_QUEUE
)
]
)
consumer.run()
except KeyboardInterrupt:
print('Terminating Callback Receiver')
if consumer:
consumer.stop()


@@ -0,0 +1,128 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from multiprocessing import Process
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection, connections
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
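# each cluster node binds its own uniquely named queue to the fanout exchange declared
# below, so broadcast messages are delivered to every node's dispatcher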
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
help = 'Launch the task dispatcher'
def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true',
help='print the UUIDs of any tasks managed by this dispatcher')
parser.add_argument('--reload', dest='reload', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes; '
'running jobs will run to completion first'))
def beat(self):
from celery import Celery
from celery.beat import PersistentScheduler
from celery.apps import beat
class AWXScheduler(PersistentScheduler):
def __init__(self, *args, **kwargs):
self.ppid = os.getppid()
super(AWXScheduler, self).__init__(*args, **kwargs)
def setup_schedule(self):
super(AWXScheduler, self).setup_schedule()
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
def tick(self, *args, **kwargs):
if os.getppid() != self.ppid:
# if the parent PID changes, this process has been orphaned
# (e.g., the parent segfaulted or was killed), so we should exit too
raise SystemExit()
return super(AWXScheduler, self).tick(*args, **kwargs)
def apply_async(self, entry, producer=None, advance=True, **kwargs):
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
task = TaskWorker.resolve_callable(entry.task)
result, queue = task.apply_async()
class TaskResult(object):
id = result['uuid']
return TaskResult()
app = Celery()
app.conf.BROKER_URL = settings.BROKER_URL
app.conf.CELERY_TASK_RESULT_EXPIRES = False
beat.Beat(
30,
app,
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
).run()
def handle(self, *arg, **options):
if options.get('status'):
print Control('dispatcher').status()
return
if options.get('running'):
print Control('dispatcher').running()
return
if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'})
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
beat = Process(target=self.beat)
beat.daemon = True
beat.start()
reaper.reap()
consumer = None
with Connection(settings.BROKER_URL) as conn:
try:
bcast = 'tower_broadcast_all'
queues = [
Queue(q, Exchange(q), routing_key=q)
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
]
queues.append(
Queue(
construct_bcast_queue_name(bcast),
exchange=Exchange(bcast, type='fanout'),
routing_key=bcast,
reply=True
)
)
consumer = AWXConsumer(
'dispatcher',
conn,
TaskWorker(),
queues,
AutoscalePool(min_workers=4)
)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()


@@ -1,66 +0,0 @@
import datetime
import os
import signal
import subprocess
import sys
import time
from celery import Celery
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
"""Watch local celery workers"""
help=("Sends a periodic ping to the local celery process over AMQP to ensure "
"it's responsive; this command is only intended to run in an environment "
"where celeryd is running")
#
# Just because celery is _running_ doesn't mean it's _working_; it's
# imperative that celery workers are _actually_ handling AMQP messages on
# their appropriate queues for awx to function. Unfortunately, we've been
# plagued by a variety of bugs in celery that cause it to hang and become
# an unresponsive zombie, such as:
#
# https://github.com/celery/celery/issues/4185
# https://github.com/celery/celery/issues/4457
#
# The goal of this code is to periodically send a broadcast AMQP message to
# the celery process on the local host via celery.app.control.ping;
# If that _fails_, we attempt to determine the pid of the celery process
# and send SIGHUP (which tends to resolve these sorts of issues for us).
#
INTERVAL = 60
def _log(self, msg):
sys.stderr.write(datetime.datetime.utcnow().isoformat())
sys.stderr.write(' ')
sys.stderr.write(msg)
sys.stderr.write('\n')
def handle(self, **options):
app = Celery('awx')
app.config_from_object('django.conf:settings')
while True:
try:
pongs = app.control.ping(['celery@{}'.format(settings.CLUSTER_HOST_ID)], timeout=30)
except Exception:
pongs = []
if not pongs:
self._log('celery is not responsive to ping over local AMQP')
pid = self.getpid()
if pid:
self._log('sending SIGHUP to {}'.format(pid))
os.kill(pid, signal.SIGHUP)
time.sleep(self.INTERVAL)
def getpid(self):
cmd = 'supervisorctl pid tower-processes:awx-celeryd'
if os.path.exists('/supervisor_task.conf'):
cmd = 'supervisorctl -c /supervisor_task.conf pid tower-processes:celery'
try:
return int(subprocess.check_output(cmd, shell=True))
except Exception:
self._log('could not detect celery pid')


@@ -1,25 +1,20 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import base64
import json
import uuid
import logging
import threading
import uuid
import six
import time
import cProfile
import pstats
import os
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save
from django.db.migrations.executor import MigrationExecutor
from django.db import IntegrityError, connection
from django.http import HttpResponse
from django.utils.functional import curry
from django.shortcuts import get_object_or_404, redirect
from django.apps import apps
@@ -209,56 +204,6 @@ class URLModificationMiddleware(object):
request.path_info = new_path
class DeprecatedAuthTokenMiddleware(object):
"""
Used to emulate support for the old Auth Token endpoint to ease the
transition to OAuth2.0. Specifically, this middleware:
1. Intercepts POST requests to `/api/v2/authtoken/` (which now no longer
_actually_ exists in our urls.py)
2. Rewrites `request.path` to `/api/v2/users/N/personal_tokens/`
3. Detects the username and password in the request body (either in JSON,
or form-encoded variables) and builds an appropriate HTTP_AUTHORIZATION
Basic header
"""
def process_request(self, request):
if re.match('^/api/v[12]/authtoken/?$', request.path):
if request.method != 'POST':
return HttpResponse('HTTP {} is not allowed.'.format(request.method), status=405)
try:
payload = json.loads(request.body)
except (ValueError, TypeError):
payload = request.POST
if 'username' not in payload or 'password' not in payload:
return HttpResponse('Unable to login with provided credentials.', status=401)
username = payload['username']
password = payload['password']
try:
pk = User.objects.get(username=username).pk
except ObjectDoesNotExist:
return HttpResponse('Unable to login with provided credentials.', status=401)
new_path = reverse('api:user_personal_token_list', kwargs={
'pk': pk,
'version': 'v2'
})
request._body = ''
request.META['CONTENT_TYPE'] = 'application/json'
request.path = request.path_info = new_path
auth = ' '.join([
'Basic',
base64.b64encode(
six.text_type('{}:{}').format(username, password)
)
])
request.environ['HTTP_AUTHORIZATION'] = auth
logger.warn(
'The Auth Token API (/api/v2/authtoken/) is deprecated and will '
'be replaced with OAuth2.0 in the next version of Ansible Tower '
'(see /api/o/ for more details).'
)
class MigrationRanCheckMiddleware(object):
def process_request(self, request):


@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0049_v330_validate_instance_capacity_adjustment'),
]
operations = [
migrations.RunSQL([
("DROP TABLE IF EXISTS {} CASCADE;".format(table))
])
for table in ('celery_taskmeta', 'celery_tasksetmeta', 'djcelery_crontabschedule',
'djcelery_intervalschedule', 'djcelery_periodictask',
'djcelery_periodictasks', 'djcelery_taskstate', 'djcelery_workerstate',
'djkombu_message', 'djkombu_queue')
]


@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-10-15 16:21
from __future__ import unicode_literals
import awx.main.utils.polymorphic
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0050_v340_drop_celery_tables'),
]
operations = [
migrations.AddField(
model_name='job',
name='job_slice_count',
field=models.PositiveIntegerField(blank=True, default=1, help_text='If run as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job.'),
),
migrations.AddField(
model_name='job',
name='job_slice_number',
field=models.PositiveIntegerField(blank=True, default=0, help_text='If part of a sliced job, the ID of the inventory slice operated on. If not part of a sliced job, this parameter is not used.'),
),
migrations.AddField(
model_name='jobtemplate',
name='job_slice_count',
field=models.PositiveIntegerField(blank=True, default=1, help_text='The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1.'),
),
migrations.AddField(
model_name='workflowjob',
name='is_sliced_job',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='workflowjob',
name='job_template',
field=models.ForeignKey(blank=True, default=None, help_text='If automatically created for a sliced job run, the job template the workflow job was created from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='slice_workflow_jobs', to='main.JobTemplate'),
),
migrations.AlterField(
model_name='unifiedjob',
name='unified_job_template',
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjob_unified_jobs', to='main.UnifiedJobTemplate'),
),
]

File diff suppressed because it is too large.


@@ -136,8 +136,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
else:
return []
@classmethod
def _get_parent_field_name(cls):
def _get_parent_field_name(self):
return ''
@classmethod


@@ -439,15 +439,6 @@ class CredentialType(CommonModelNameNotUnique):
defaults = OrderedDict()
ENV_BLACKLIST = set((
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
'AWX_HOST', 'PROJECT_REVISION'
))
class Meta:
app_label = 'main'
ordering = ('kind', 'name')
@@ -648,8 +639,14 @@ class CredentialType(CommonModelNameNotUnique):
file_label = file_label.split('.')[1]
setattr(tower_namespace.filename, file_label, path)
injector_field = self._meta.get_field('injectors')
for env_var, tmpl in self.injectors.get('env', {}).items():
if env_var.startswith('ANSIBLE_') or env_var in self.ENV_BLACKLIST:
try:
injector_field.validate_env_var_allowed(env_var)
except ValidationError as e:
logger.error(six.text_type(
'Ignoring prohibited env var {}, reason: {}'
).format(env_var, e))
continue
env[env_var] = Template(tmpl).render(**namespace)
safe_env[env_var] = Template(tmpl).render(**safe_namespace)


@@ -32,7 +32,7 @@ __all__ = ('Instance', 'InstanceGroup', 'JobOrigin', 'TowerScheduleState',)
def validate_queuename(v):
# celery and kombu don't play nice with unicode in queue names
# kombu doesn't play nice with unicode in queue names
if v:
try:
'{}'.format(v.decode('utf-8'))


@@ -9,7 +9,6 @@ import copy
from urlparse import urljoin
import os.path
import six
from distutils.version import LooseVersion
# Django
from django.conf import settings
@@ -20,6 +19,9 @@ from django.core.exceptions import ValidationError
from django.utils.timezone import now
from django.db.models import Q
# REST Framework
from rest_framework.exceptions import ParseError
# AWX
from awx.api.versioning import reverse
from awx.main.constants import CLOUD_PROVIDERS
@@ -42,7 +44,7 @@ from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.utils import _inventory_updates, get_ansible_version, region_sorting
from awx.main.utils import _inventory_updates, region_sorting
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate',
@@ -218,67 +220,87 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
group_children.add(from_group_id)
return group_children_map
def get_script_data(self, hostvars=False, towervars=False, show_all=False):
if show_all:
hosts_q = dict()
else:
hosts_q = dict(enabled=True)
@staticmethod
def parse_slice_params(slice_str):
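# e.g. "slice2of5" -> (2, 5): the second of five inventory slices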
m = re.match(r"slice(?P<number>\d+)of(?P<step>\d+)", slice_str)
if not m:
raise ParseError(_('Could not parse subset as slice specification.'))
number = int(m.group('number'))
step = int(m.group('step'))
if number > step:
raise ParseError(_('Slice number must be less than total number of slices.'))
elif number < 1:
raise ParseError(_('Slice number must be 1 or higher.'))
return (number, step)
def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
hosts_kw = dict()
if not show_all:
hosts_kw['enabled'] = True
fetch_fields = ['name', 'id', 'variables']
if towervars:
fetch_fields.append('enabled')
hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
if slice_count > 1:
offset = slice_number - 1
hosts = hosts[offset::slice_count]
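# stride through the name-ordered host list so slice N of M gets hosts
# N-1, N-1+M, N-1+2M, ... giving each slice a disjoint subset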
data = dict()
all_group = data.setdefault('all', dict())
all_hostnames = set(host.name for host in hosts)
if self.variables_dict:
all_group = data.setdefault('all', dict())
all_group['vars'] = self.variables_dict
if self.kind == 'smart':
if len(self.hosts.all()) == 0:
return {}
else:
all_group = data.setdefault('all', dict())
smart_hosts_qs = self.hosts.filter(**hosts_q).all()
smart_hosts = list(smart_hosts_qs.values_list('name', flat=True))
all_group['hosts'] = smart_hosts
all_group['hosts'] = [host.name for host in hosts]
else:
# Add hosts without a group to the all group.
groupless_hosts_qs = self.hosts.filter(groups__isnull=True, **hosts_q)
groupless_hosts = list(groupless_hosts_qs.values_list('name', flat=True))
if groupless_hosts:
all_group = data.setdefault('all', dict())
all_group['hosts'] = groupless_hosts
# Keep track of hosts that are members of a group
grouped_hosts = set([])
# Build in-memory mapping of groups and their hosts.
group_hosts_kw = dict(group__inventory_id=self.id, host__inventory_id=self.id)
if 'enabled' in hosts_q:
group_hosts_kw['host__enabled'] = hosts_q['enabled']
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id', 'host__name')
group_hosts_qs = Group.hosts.through.objects.filter(
group__inventory_id=self.id,
host__inventory_id=self.id
).values_list('group_id', 'host_id', 'host__name')
group_hosts_map = {}
for group_id, host_id, host_name in group_hosts_qs:
if host_name not in all_hostnames:
continue # host might not be in current shard
group_hostnames = group_hosts_map.setdefault(group_id, [])
group_hostnames.append(host_name)
grouped_hosts.add(host_name)
# Build in-memory mapping of groups and their children.
group_parents_qs = Group.parents.through.objects.filter(
from_group__inventory_id=self.id,
to_group__inventory_id=self.id,
)
group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name',
'to_group_id')
).values_list('from_group_id', 'from_group__name', 'to_group_id')
group_children_map = {}
for from_group_id, from_group_name, to_group_id in group_parents_qs:
group_children = group_children_map.setdefault(to_group_id, [])
group_children.append(from_group_name)
# Now use in-memory maps to build up group info.
for group in self.groups.all():
for group in self.groups.only('name', 'id', 'variables'):
group_info = dict()
group_info['hosts'] = group_hosts_map.get(group.id, [])
group_info['children'] = group_children_map.get(group.id, [])
group_info['vars'] = group.variables_dict
data[group.name] = group_info
# Add ungrouped hosts to all group
all_group['hosts'] = [host.name for host in hosts if host.name not in grouped_hosts]
# Remove any empty groups
for group_name in list(data.keys()):
if not data.get(group_name, {}).get('hosts', []):
data.pop(group_name)
if hostvars:
data.setdefault('_meta', dict())
data['_meta'].setdefault('hostvars', dict())
for host in self.hosts.filter(**hosts_q):
for host in hosts:
data['_meta']['hostvars'][host.name] = host.variables_dict
if towervars:
tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(),
@@ -1578,12 +1600,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMix
"Instead, configure the corresponding source project to update on launch."))
return self.update_on_launch
def clean_overwrite_vars(self): # TODO: remove when Ansible 2.4 becomes unsupported, obviously
if self.source == 'scm' and not self.overwrite_vars:
if get_ansible_version() < LooseVersion('2.5'):
raise ValidationError(_("SCM type sources must set `overwrite_vars` to `true` until Ansible 2.5."))
return self.overwrite_vars
def clean_source_path(self):
if self.source != 'scm' and self.source_path:
raise ValidationError(_("Cannot set source_path if not SCM type."))
@@ -1631,8 +1647,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
null=True
)
@classmethod
def _get_parent_field_name(cls):
def _get_parent_field_name(self):
return 'inventory_source'
@classmethod


@@ -277,6 +277,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
default=False,
allows_field='credentials'
)
job_slice_count = models.PositiveIntegerField(
blank=True,
default=1,
help_text=_("The number of jobs to slice into at runtime. "
"Will cause the Job Template to launch a workflow if value is greater than 1."),
)
admin_role = ImplicitRoleField(
parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role']
)
@@ -295,7 +302,8 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in JobOptions._meta.fields) | set(
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials']
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials',
'job_slice_number', 'job_slice_count']
)
@property
@@ -320,6 +328,31 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
'''
return self.create_unified_job(**kwargs)
def create_unified_job(self, **kwargs):
prevent_slicing = kwargs.pop('_prevent_slicing', False)
slice_event = bool(self.job_slice_count > 1 and (not prevent_slicing))
if slice_event:
# A Slice Job Template will generate a WorkflowJob rather than a Job
from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobNode
kwargs['_unified_job_class'] = WorkflowJobTemplate._get_unified_job_class()
kwargs['_parent_field_name'] = "job_template"
kwargs.setdefault('_eager_fields', {})
kwargs['_eager_fields']['is_sliced_job'] = True
job = super(JobTemplate, self).create_unified_job(**kwargs)
if slice_event:
try:
wj_config = job.launch_config
except JobLaunchConfig.DoesNotExist:
wj_config = JobLaunchConfig()
actual_inventory = wj_config.inventory if wj_config.inventory else self.inventory
for idx in xrange(min(self.job_slice_count,
actual_inventory.hosts.count())):
create_kwargs = dict(workflow_job=job,
unified_job_template=self,
ancestor_artifacts=dict(job_slice=idx + 1))
WorkflowJobNode.objects.create(**create_kwargs)
return job
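
Put together, the method above turns a launch of a sliced job template into a workflow job with one node per slice, capped by the inventory's host count. A standalone sketch of just that fan-out decision (plain Python, not the model code itself; host counts are hypothetical):

def plan_slices(job_slice_count, host_count):
    # Mirrors the fan-out above: one workflow node per slice, never more
    # nodes than hosts, and slice indices are 1-based.
    if job_slice_count <= 1:
        return []  # ordinary launch, no workflow wrapper
    return [idx + 1 for idx in range(min(job_slice_count, host_count))]

# A 3-way slice over a 2-host inventory only yields two nodes:
assert plan_slices(3, 2) == [1, 2]
assert plan_slices(4, 10) == [1, 2, 3, 4]
assert plan_slices(1, 10) == []
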
def get_absolute_url(self, request=None):
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
@@ -452,7 +485,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
RelatedJobsMixin
'''
def _get_related_jobs(self):
return Job.objects.filter(job_template=self)
return UnifiedJob.objects.filter(unified_job_template=self)
class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskManagerJobMixin):
@@ -501,10 +534,21 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
on_delete=models.SET_NULL,
help_text=_('The SCM Refresh task used to make sure the playbooks were available for the job run'),
)
job_slice_number = models.PositiveIntegerField(
blank=True,
default=0,
help_text=_("If part of a sliced job, the ID of the inventory slice operated on. "
"If not part of sliced job, parameter is not used."),
)
job_slice_count = models.PositiveIntegerField(
blank=True,
default=1,
help_text=_("If ran as part of sliced jobs, the total number of slices. "
"If 1, job is not part of a sliced job."),
)
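
These two fields only record a job's position in the slice set; host selection itself happens when the inventory script is generated (see get_script_data earlier, which skips group members that are not in the current slice's host list). A rough illustration, assuming a simple round-robin split, which may differ from the exact partitioning the inventory code applies:

def hosts_for_slice(all_hosts, slice_number, slice_count):
    # Hypothetical round-robin partition; slice_number is 1-based, matching
    # job_slice_number above, and slice_count of 1 means "not sliced".
    if slice_count <= 1:
        return list(all_hosts)
    return list(all_hosts)[slice_number - 1::slice_count]

hosts = ['h1', 'h2', 'h3', 'h4', 'h5']
assert hosts_for_slice(hosts, 1, 2) == ['h1', 'h3', 'h5']
assert hosts_for_slice(hosts, 2, 2) == ['h2', 'h4']
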
@classmethod
def _get_parent_field_name(cls):
def _get_parent_field_name(self):
return 'job_template'
@classmethod
@@ -545,6 +589,15 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
def event_class(self):
return JobEvent
def copy_unified_job(self, **new_prompts):
# Needed for job slice relaunch consistency, do not re-spawn workflow job
# target same slice as original job
new_prompts['_prevent_slicing'] = True
new_prompts.setdefault('_eager_fields', {})
new_prompts['_eager_fields']['job_slice_number'] = self.job_slice_number
new_prompts['_eager_fields']['job_slice_count'] = self.job_slice_count
return super(Job, self).copy_unified_job(**new_prompts)
@property
def ask_diff_mode_on_launch(self):
if self.job_template is not None:
@@ -638,6 +691,9 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
count_hosts = 2
else:
count_hosts = Host.objects.filter(inventory__jobs__pk=self.pk).count()
if self.job_slice_count > 1:
# Integer division intentional
count_hosts = (count_hosts + self.job_slice_count - self.job_slice_number) / self.job_slice_count
return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
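
The division above spreads the inventory across slices so the per-slice host counts still sum to the whole; for example, 10 hosts split three ways yields 4, 3 and 3. A small check of that arithmetic:

def sliced_host_count(count_hosts, job_slice_count, job_slice_number):
    # Same division as above (Python 2 `/` truncates for ints; `//` makes it
    # explicit here); the property then caps the result by forks and adds 1.
    return (count_hosts + job_slice_count - job_slice_number) // job_slice_count

assert [sliced_host_count(10, 3, n) for n in (1, 2, 3)] == [4, 3, 3]
assert sum(sliced_host_count(10, 3, n) for n in (1, 2, 3)) == 10
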
@property

View File

@@ -466,6 +466,14 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
models.Q(ProjectUpdate___project=self)
)
def delete(self, *args, **kwargs):
path_to_delete = self.get_project_path(check_if_exists=False)
r = super(Project, self).delete(*args, **kwargs)
if self.scm_type and path_to_delete: # non-manual, concrete path
from awx.main.tasks import delete_project_files
delete_project_files.delay(path_to_delete)
return r
class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManagerProjectUpdateMixin):
'''
@@ -488,8 +496,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
default='check',
)
@classmethod
def _get_parent_field_name(cls):
def _get_parent_field_name(self):
return 'project'
@classmethod

View File

@@ -153,7 +153,7 @@ class Schedule(CommonModel, LaunchTimeConfig):
if 'until=' in rrule.lower():
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
# to the proper UTC date
match_until = re.match(".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
if not len(match_until.group('utcflag')):
# rrule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000

View File

@@ -7,9 +7,11 @@ import json
import logging
import os
import re
import socket
import subprocess
import tempfile
from collections import OrderedDict
import six
# Django
from django.conf import settings
@@ -27,11 +29,9 @@ from rest_framework.exceptions import ParseError
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# Django-Celery
from djcelery.models import TaskMeta
# AWX
from awx.main.models.base import * # noqa
from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
from awx.main.utils import (
encrypt_dict, decrypt_field, _inventory_updates,
@@ -309,13 +309,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
'''
raise NotImplementedError # Implement in subclass.
@classmethod
def _get_unified_job_field_names(cls):
'''
Return field names that should be copied from template to new job.
'''
raise NotImplementedError # Implement in subclass.
@property
def notification_templates(self):
'''
@@ -338,19 +331,33 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
unified_job_class = self._get_unified_job_class()
fields = self._get_unified_job_field_names()
unallowed_fields = set(kwargs.keys()) - set(fields)
if unallowed_fields:
logger.warn('Fields {} are not allowed as overrides.'.format(unallowed_fields))
map(kwargs.pop, unallowed_fields)
parent_field_name = None
if "_unified_job_class" in kwargs:
# Special case where spawned job is different type than usual
# Only used for slice jobs
unified_job_class = kwargs.pop("_unified_job_class")
fields = unified_job_class._get_unified_job_field_names() & fields
parent_field_name = kwargs.pop('_parent_field_name')
unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs)
unallowed_fields = set(kwargs.keys()) - set(fields)
validated_kwargs = kwargs.copy()
if unallowed_fields:
if parent_field_name is None:
logger.warn(six.text_type('Fields {} are not allowed as overrides to spawn {} from {}.').format(
six.text_type(', ').join(unallowed_fields), unified_job, self
))
map(validated_kwargs.pop, unallowed_fields)
unified_job = copy_model_by_class(self, unified_job_class, fields, validated_kwargs)
if eager_fields:
for fd, val in eager_fields.items():
setattr(unified_job, fd, val)
# Set the unified job template back-link on the job
parent_field_name = unified_job_class._get_parent_field_name()
# NOTE: slice workflow jobs _get_parent_field_name method
# is not correct until this is set
if not parent_field_name:
parent_field_name = unified_job._get_parent_field_name()
setattr(unified_job, parent_field_name, self)
# For JobTemplate-based jobs with surveys, add passwords to list for perma-redaction
@@ -364,24 +371,25 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
unified_job.save()
# Labels and credentials copied here
if kwargs.get('credentials'):
if validated_kwargs.get('credentials'):
Credential = UnifiedJob._meta.get_field('credentials').related_model
cred_dict = Credential.unique_dict(self.credentials.all())
prompted_dict = Credential.unique_dict(kwargs['credentials'])
prompted_dict = Credential.unique_dict(validated_kwargs['credentials'])
# combine prompted credentials with JT
cred_dict.update(prompted_dict)
kwargs['credentials'] = [cred for cred in cred_dict.values()]
validated_kwargs['credentials'] = [cred for cred in cred_dict.values()]
kwargs['credentials'] = validated_kwargs['credentials']
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
copy_m2m_relationships(self, unified_job, fields, kwargs=kwargs)
copy_m2m_relationships(self, unified_job, fields, kwargs=validated_kwargs)
if 'extra_vars' in kwargs:
unified_job.handle_extra_data(kwargs['extra_vars'])
if 'extra_vars' in validated_kwargs:
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
if not getattr(self, '_deprecated_credential_launch', False):
# Create record of provided prompts for relaunch and rescheduling
unified_job.create_config_from_prompts(kwargs)
unified_job.create_config_from_prompts(kwargs, parent=self)
return unified_job
@@ -546,7 +554,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
default=None,
editable=False,
related_name='%(class)s_unified_jobs',
on_delete=models.SET_NULL,
on_delete=polymorphic.SET_NULL,
)
launch_type = models.CharField(
max_length=20,
@@ -694,8 +702,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
def supports_isolation(cls):
return False
@classmethod
def _get_parent_field_name(cls):
def _get_parent_field_name(self):
return 'unified_job_template' # Override in subclasses.
@classmethod
@@ -828,7 +835,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
'''
unified_job_class = self.__class__
unified_jt_class = self._get_unified_job_template_class()
parent_field_name = unified_job_class._get_parent_field_name()
parent_field_name = self._get_parent_field_name()
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
create_data = {}
@@ -847,10 +854,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
setattr(unified_job, fd, val)
unified_job.save()
# Labels copied here
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
copy_m2m_relationships(self, unified_job, fields)
return unified_job
@@ -866,16 +873,18 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
except JobLaunchConfig.DoesNotExist:
return None
def create_config_from_prompts(self, kwargs):
def create_config_from_prompts(self, kwargs, parent=None):
'''
Create a launch configuration entry for this job, given prompts.
Returns None if it cannot be created.
'''
if self.unified_job_template is None:
return None
JobLaunchConfig = self._meta.get_field('launch_config').related_model
config = JobLaunchConfig(job=self)
valid_fields = self.unified_job_template.get_ask_mapping().keys()
if parent is None:
parent = getattr(self, self._get_parent_field_name())
if parent is None:
return
valid_fields = parent.get_ask_mapping().keys()
# Special cases allowed for workflows
if hasattr(self, 'extra_vars'):
valid_fields.extend(['survey_passwords', 'extra_vars'])
@@ -892,8 +901,9 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
setattr(config, key, value)
config.save()
job_creds = (set(kwargs.get('credentials', [])) -
set(self.unified_job_template.credentials.all()))
job_creds = set(kwargs.get('credentials', []))
if 'credentials' in [field.name for field in parent._meta.get_fields()]:
job_creds = job_creds - set(parent.credentials.all())
if job_creds:
config.credentials.add(*job_creds)
return config
@@ -1112,14 +1122,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
pass
return None
@property
def celery_task(self):
try:
if self.celery_task_id:
return TaskMeta.objects.get(task_id=self.celery_task_id)
except TaskMeta.DoesNotExist:
pass
def get_passwords_needed_to_start(self):
return []
@@ -1224,29 +1226,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
return (True, opts)
def start_celery_task(self, opts, error_callback, success_callback, queue):
kwargs = {
'link_error': error_callback,
'link': success_callback,
'queue': None,
'task_id': None,
}
if not self.celery_task_id:
raise RuntimeError("Expected celery_task_id to be set on model.")
kwargs['task_id'] = self.celery_task_id
task_class = self._get_task_class()
kwargs['queue'] = queue
task_class().apply_async([self.pk], opts, **kwargs)
def start(self, error_callback, success_callback, **kwargs):
'''
Start the task running via Celery.
'''
(res, opts) = self.pre_start(**kwargs)
if res:
self.start_celery_task(opts, error_callback, success_callback)
return res
def signal_start(self, **kwargs):
"""Notify the task runner system to begin work on this task."""
@@ -1282,46 +1261,35 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
# Done!
return True
@property
def actually_running(self):
# returns True if the job is running in the appropriate dispatcher process
running = False
if all([
self.status == 'running',
self.celery_task_id,
self.execution_node
]):
# If the job is marked as running, but the dispatcher
# doesn't know about it (or the dispatcher doesn't reply),
# then cancel the job
timeout = 5
try:
running = self.celery_task_id in ControlDispatcher(
'dispatcher', self.execution_node
).running(timeout=timeout)
except socket.timeout:
logger.error(six.text_type(
'could not reach dispatcher on {} within {}s'
).format(self.execution_node, timeout))
running = False
return running
@property
def can_cancel(self):
return bool(self.status in CAN_CANCEL)
def _force_cancel(self):
# Update the status to 'canceled' if we can detect that the job
# really isn't running (i.e. celery has crashed or forcefully
# killed the worker).
task_statuses = ('STARTED', 'SUCCESS', 'FAILED', 'RETRY', 'REVOKED')
try:
taskmeta = self.celery_task
if not taskmeta or taskmeta.status not in task_statuses:
return
from celery import current_app
i = current_app.control.inspect()
for v in (i.active() or {}).values():
if taskmeta.task_id in [x['id'] for x in v]:
return
for v in (i.reserved() or {}).values():
if taskmeta.task_id in [x['id'] for x in v]:
return
for v in (i.revoked() or {}).values():
if taskmeta.task_id in [x['id'] for x in v]:
return
for v in (i.scheduled() or {}).values():
if taskmeta.task_id in [x['id'] for x in v]:
return
instance = self.__class__.objects.get(pk=self.pk)
if instance.can_cancel:
instance.status = 'canceled'
update_fields = ['status']
if not instance.job_explanation:
instance.job_explanation = 'Forced cancel'
update_fields.append('job_explanation')
instance.save(update_fields=update_fields)
self.websocket_emit_status("canceled")
except Exception: # FIXME: Log this exception!
if settings.DEBUG:
raise
def _build_job_explanation(self):
if not self.job_explanation:
return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
@@ -1340,13 +1308,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
if self.status in ('pending', 'waiting', 'new'):
self.status = 'canceled'
cancel_fields.append('status')
if self.status == 'running' and not self.actually_running:
self.status = 'canceled'
cancel_fields.append('status')
if job_explanation is not None:
self.job_explanation = job_explanation
cancel_fields.append('job_explanation')
self.save(update_fields=cancel_fields)
self.websocket_emit_status("canceled")
if settings.BROKER_URL.startswith('amqp://'):
self._force_cancel()
return self.cancel_flag
@property
@@ -1402,7 +1371,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
r['{}_user_last_name'.format(name)] = created_by.last_name
return r
def get_celery_queue_name(self):
def get_queue_name(self):
return self.controller_node or self.execution_node or settings.CELERY_DEFAULT_QUEUE
def is_isolated(self):

View File

@@ -9,6 +9,7 @@ import logging
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
#from django import settings as tower_settings
# AWX
@@ -206,13 +207,24 @@ class WorkflowJobNode(WorkflowNodeBase):
workflow_pk=self.pk,
error_text=errors))
data.update(accepted_fields) # missing fields are handled in the scheduler
try:
# config saved on the workflow job itself
wj_config = self.workflow_job.launch_config
except ObjectDoesNotExist:
wj_config = None
if wj_config:
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later
data.update(accepted_fields)
# build ancestor artifacts, save them to node model for later
aa_dict = {}
is_root_node = True
for parent_node in self.get_parent_nodes():
is_root_node = False
aa_dict.update(parent_node.ancestor_artifacts)
if parent_node.job and hasattr(parent_node.job, 'artifacts'):
aa_dict.update(parent_node.job.artifacts)
if aa_dict:
if aa_dict and not is_root_node:
self.ancestor_artifacts = aa_dict
self.save(update_fields=['ancestor_artifacts'])
# process password list
@@ -240,6 +252,12 @@ class WorkflowJobNode(WorkflowNodeBase):
data['extra_vars'] = extra_vars
# ensure that unified jobs created by WorkflowJobs are marked
data['_eager_fields'] = {'launch_type': 'workflow'}
# Extra processing in the case that this is a slice job
if 'job_slice' in self.ancestor_artifacts and is_root_node:
data['_eager_fields']['allow_simultaneous'] = True
data['_eager_fields']['job_slice_number'] = self.ancestor_artifacts['job_slice']
data['_eager_fields']['job_slice_count'] = self.workflow_job.workflow_job_nodes.count()
data['_prevent_slicing'] = True
return data
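
For a slice node, then, the kwargs returned here carry the slice bookkeeping as eager fields on the spawned job. Roughly, with hypothetical values:

# Illustrative kwargs for slice 2 of 3 (hypothetical values; real kwargs also
# carry the accepted prompt fields and merged extra_vars):
data = {
    '_eager_fields': {
        'launch_type': 'workflow',
        'allow_simultaneous': True,   # slice jobs may run in parallel
        'job_slice_number': 2,        # from ancestor_artifacts['job_slice']
        'job_slice_count': 3,         # number of nodes on the workflow job
    },
    '_prevent_slicing': True,         # the spawned job must not slice again
}
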
@@ -261,6 +279,12 @@ class WorkflowJobOptions(BaseModel):
def workflow_nodes(self):
raise NotImplementedError()
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
['name', 'description', 'schedule', 'survey_passwords', 'labels']
)
def _create_workflow_nodes(self, old_node_list, user=None):
node_links = {}
for old_node in old_node_list:
@@ -288,7 +312,7 @@ class WorkflowJobOptions(BaseModel):
def create_relaunch_workflow_job(self):
new_workflow_job = self.copy_unified_job()
if self.workflow_job_template is None:
if self.unified_job_template_id is None:
new_workflow_job.copy_nodes_from_original(original=self)
return new_workflow_job
@@ -331,12 +355,6 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
def _get_unified_job_class(cls):
return WorkflowJob
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
['name', 'description', 'schedule', 'survey_passwords', 'labels']
)
@classmethod
def _get_unified_jt_copy_names(cls):
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
@@ -433,13 +451,28 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
default=None,
on_delete=models.SET_NULL,
)
job_template = models.ForeignKey(
'JobTemplate',
related_name='slice_workflow_jobs',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
help_text=_("If automatically created for a sliced job run, the job template "
"the workflow job was created from."),
)
is_sliced_job = models.BooleanField(
default=False
)
@property
def workflow_nodes(self):
return self.workflow_job_nodes
@classmethod
def _get_parent_field_name(cls):
def _get_parent_field_name(self):
if self.job_template_id:
# This is a workflow job which is a container for slice jobs
return 'job_template'
return 'workflow_job_template'
@classmethod
@@ -482,8 +515,8 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
def preferred_instance_groups(self):
return []
'''
A WorkflowJob is a virtual job. It doesn't result in a celery task.
'''
def start_celery_task(self, opts, error_callback, success_callback, queue):
return None
@property
def actually_running(self):
# WorkflowJobs don't _actually_ run anything in the dispatcher, so
# there's no point in asking the dispatcher if it knows about this task
return self.status == 'running'

View File

@@ -50,6 +50,7 @@ class WorkflowDAG(SimpleDAG):
return [n['node_object'] for n in nodes_found]
def cancel_node_jobs(self):
cancel_finished = True
for n in self.nodes:
obj = n['node_object']
job = obj.job
@@ -57,7 +58,9 @@ class WorkflowDAG(SimpleDAG):
if not job:
continue
elif job.can_cancel:
cancel_finished = False
job.cancel()
return cancel_finished
def is_workflow_done(self):
root_nodes = self.get_root_nodes()

View File

@@ -2,7 +2,7 @@
# All Rights Reserved
# Python
from datetime import datetime, timedelta
from datetime import timedelta
import logging
import uuid
import json
@@ -11,18 +11,13 @@ import random
from sets import Set
# Django
from django.conf import settings
from django.core.cache import cache
from django.db import transaction, connection, DatabaseError
from django.db import transaction, connection
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as tz_now, utc
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.utils.timezone import now as tz_now
# AWX
from awx.main.models import (
AdHocCommand,
Instance,
InstanceGroup,
InventorySource,
InventoryUpdate,
@@ -30,21 +25,15 @@ from awx.main.models import (
Project,
ProjectUpdate,
SystemJob,
UnifiedJob,
WorkflowJob,
)
from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.main.utils.pglock import advisory_lock
from awx.main.utils import get_type_for_model
from awx.main.signals import disable_activity_stream
from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.utils import decrypt_field
# Celery
from celery import Celery
from celery.app.control import Inspect
logger = logging.getLogger('awx.main.scheduler')
@@ -85,79 +74,6 @@ class TaskManager():
key=lambda task: task.created)
return all_tasks
'''
Tasks that are running and SHOULD have a celery task.
{
'execution_node': [j1, j2,...],
'execution_node': [j3],
...
}
'''
def get_running_tasks(self):
execution_nodes = {}
waiting_jobs = []
now = tz_now()
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter((Q(status='running') |
Q(status='waiting', modified__lte=now - timedelta(seconds=60))) &
~Q(polymorphic_ctype_id=workflow_ctype_id))
for j in jobs:
if j.execution_node:
execution_nodes.setdefault(j.execution_node, []).append(j)
else:
waiting_jobs.append(j)
return (execution_nodes, waiting_jobs)
'''
Tasks that are currently running in celery
Transform:
{
"celery@ec2-54-204-222-62.compute-1.amazonaws.com": [],
"celery@ec2-54-163-144-168.compute-1.amazonaws.com": [{
...
"id": "5238466a-f8c7-43b3-9180-5b78e9da8304",
...
}, {
...,
}, ...]
}
to:
{
"ec2-54-204-222-62.compute-1.amazonaws.com": [
"5238466a-f8c7-43b3-9180-5b78e9da8304",
"5238466a-f8c7-43b3-9180-5b78e9da8306",
...
]
}
'''
def get_active_tasks(self):
if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):
app = Celery('awx')
app.config_from_object('django.conf:settings')
inspector = Inspect(app=app)
active_task_queues = inspector.active()
else:
logger.warn("Ignoring celery task inspector")
active_task_queues = None
queues = None
if active_task_queues is not None:
queues = {}
for queue in active_task_queues:
active_tasks = set()
map(lambda at: active_tasks.add(at['id']), active_task_queues[queue])
# celery worker name is of the form celery@myhost.com
queue_name = queue.split('@')
queue_name = queue_name[1 if len(queue_name) > 1 else 0]
queues[queue_name] = active_tasks
else:
return (None, None)
return (active_task_queues, queues)
def get_latest_project_update_tasks(self, all_sorted_tasks):
project_ids = Set()
@@ -187,8 +103,15 @@ class TaskManager():
def spawn_workflow_graph_jobs(self, workflow_jobs):
for workflow_job in workflow_jobs:
if workflow_job.cancel_flag:
logger.debug('Not spawning jobs for %s because it is pending cancelation.', workflow_job.log_format)
continue
dag = WorkflowDAG(workflow_job)
spawn_nodes = dag.bfs_nodes_to_run()
if spawn_nodes:
logger.info('Spawning jobs for %s', workflow_job.log_format)
else:
logger.debug('No nodes to spawn for %s', workflow_job.log_format)
for spawn_node in spawn_nodes:
if spawn_node.unified_job_template is None:
continue
@@ -196,8 +119,13 @@ class TaskManager():
job = spawn_node.unified_job_template.create_unified_job(**kv)
spawn_node.job = job
spawn_node.save()
logger.info('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
if job._resources_sufficient_for_launch():
can_start = job.signal_start()
if workflow_job.start_args:
start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
else:
start_args = {}
can_start = job.signal_start(**start_args)
if not can_start:
job.job_explanation = _("Job spawned from workflow could not start because it "
"was not in the right state or required manual credentials")
@@ -213,23 +141,30 @@ class TaskManager():
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
# See comment in tasks.py::RunWorkflowJob::run()
def process_finished_workflow_jobs(self, workflow_jobs):
result = []
for workflow_job in workflow_jobs:
dag = WorkflowDAG(workflow_job)
if workflow_job.cancel_flag:
workflow_job.status = 'canceled'
workflow_job.save()
dag.cancel_node_jobs()
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
logger.debug('Canceling spawned jobs of %s due to cancel flag.', workflow_job.log_format)
cancel_finished = dag.cancel_node_jobs()
if cancel_finished:
logger.info('Marking %s as canceled, all spawned jobs have concluded.', workflow_job.log_format)
workflow_job.status = 'canceled'
workflow_job.start_args = '' # blank field to remove encrypted passwords
workflow_job.save(update_fields=['status', 'start_args'])
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
else:
is_done, has_failed = dag.is_workflow_done()
if not is_done:
continue
logger.info('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
result.append(workflow_job.id)
workflow_job.status = 'failed' if has_failed else 'successful'
workflow_job.save()
new_status = 'failed' if has_failed else 'successful'
logger.debug(six.text_type("Transitioning {} to {} status.").format(workflow_job.log_format, new_status))
workflow_job.status = new_status
workflow_job.start_args = '' # blank field to remove encrypted passwords
workflow_job.save(update_fields=['status', 'start_args'])
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
return result
@@ -256,9 +191,6 @@ class TaskManager():
rampart_group.name, task.log_format))
return
error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
success_handler = handle_work_success.s(task_actual=task_actual)
task.status = 'waiting'
(start_status, opts) = task.pre_start()
@@ -300,11 +232,23 @@ class TaskManager():
def post_commit():
task.websocket_emit_status(task.status)
if task.status != 'failed':
task.start_celery_task(opts,
error_callback=error_handler,
success_callback=success_handler,
queue=task.get_celery_queue_name())
if task.status != 'failed' and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
callbacks=[{
'task': handle_work_success.name,
'kwargs': {'task_actual': task_actual}
}],
errbacks=[{
'task': handle_work_error.name,
'args': [task.celery_task_id],
'kwargs': {'subtasks': [task_actual] + dependencies}
}],
)
connection.on_commit(post_commit)
@@ -483,7 +427,7 @@ class TaskManager():
logger.debug(six.text_type("Dependent {} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
def process_pending_tasks(self, pending_tasks):
running_workflow_templates = set([wf.workflow_job_template_id for wf in self.get_running_workflow_jobs()])
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
for task in pending_tasks:
self.process_dependencies(task, self.generate_dependencies(task))
if self.is_job_blocked(task):
@@ -493,12 +437,12 @@ class TaskManager():
found_acceptable_queue = False
idle_instance_that_fits = None
if isinstance(task, WorkflowJob):
if task.workflow_job_template_id in running_workflow_templates:
if task.unified_job_template_id in running_workflow_templates:
if not task.allow_simultaneous:
logger.debug(six.text_type("{} is blocked from running, workflow already running").format(task.log_format))
continue
else:
running_workflow_templates.add(task.workflow_job_template_id)
running_workflow_templates.add(task.unified_job_template_id)
self.start_task(task, None, task.get_jobs_fail_chain(), None)
continue
for rampart_group in preferred_instance_groups:
@@ -529,105 +473,6 @@ class TaskManager():
if not found_acceptable_queue:
logger.debug(six.text_type("{} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
def fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time,
isolated=False):
for task in node_jobs:
if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
if isinstance(task, WorkflowJob):
continue
if task.modified > celery_task_start_time:
continue
new_status = 'failed'
if isolated:
new_status = 'error'
task.status = new_status
task.start_args = '' # blank field to remove encrypted passwords
if isolated:
# TODO: cancel and reap artifacts of lost jobs from heartbeat
task.job_explanation += ' '.join((
'Task was marked as running in Tower but its ',
'controller management daemon was not present in',
'the job queue, so it has been marked as failed.',
'Task may still be running, but contactability is unknown.'
))
else:
task.job_explanation += ' '.join((
'Task was marked as running in Tower but was not present in',
'the job queue, so it has been marked as failed.',
))
try:
task.save(update_fields=['status', 'start_args', 'job_explanation'])
except DatabaseError:
logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
continue
if hasattr(task, 'send_notification_templates'):
task.send_notification_templates('failed')
task.websocket_emit_status(new_status)
logger.error("{}Task {} has no record in celery. Marking as failed".format(
'Isolated ' if isolated else '', task.log_format))
def cleanup_inconsistent_celery_tasks(self):
'''
Rectify tower db <-> celery inconsistent view of jobs state
'''
last_cleanup = cache.get('last_celery_task_cleanup') or datetime.min.replace(tzinfo=utc)
if (tz_now() - last_cleanup).seconds < settings.AWX_INCONSISTENT_TASK_INTERVAL:
return
logger.debug("Failing inconsistent running jobs.")
celery_task_start_time = tz_now()
active_task_queues, active_queues = self.get_active_tasks()
cache.set('last_celery_task_cleanup', tz_now())
if active_queues is None:
logger.error('Failed to retrieve active tasks from celery')
return None
'''
Only consider failing tasks on instances for which we obtained a task
list from celery for.
'''
running_tasks, waiting_tasks = self.get_running_tasks()
all_celery_task_ids = []
for node, node_jobs in active_queues.iteritems():
all_celery_task_ids.extend(node_jobs)
self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids, celery_task_start_time)
for node, node_jobs in running_tasks.iteritems():
isolated = False
if node in active_queues:
active_tasks = active_queues[node]
else:
'''
Node task list not found in celery. We may branch into cases:
- instance is unknown to tower, system is improperly configured
- instance is reported as down, then fail all jobs on the node
- instance is an isolated node, then check running tasks
among all allowed controller nodes for management process
- valid healthy instance not included in celery task list
probably a netsplit case, leave it alone
'''
instance = Instance.objects.filter(hostname=node).first()
if instance is None:
logger.error("Execution node Instance {} not found in database. "
"The node is currently executing jobs {}".format(
node, [j.log_format for j in node_jobs]))
active_tasks = []
elif instance.capacity == 0:
active_tasks = []
elif instance.rampart_groups.filter(controller__isnull=False).exists():
active_tasks = all_celery_task_ids
isolated = True
else:
continue
self.fail_jobs_if_not_in_celery(
node_jobs, active_tasks, celery_task_start_time,
isolated=isolated
)
def calculate_capacity_consumed(self, tasks):
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
@@ -673,6 +518,14 @@ class TaskManager():
running_workflow_tasks = self.get_running_workflow_jobs()
finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
previously_running_workflow_tasks = running_workflow_tasks
running_workflow_tasks = []
for workflow_job in previously_running_workflow_tasks:
if workflow_job.status == 'running':
running_workflow_tasks.append(workflow_job)
else:
logger.debug('Removed %s from job spawning consideration.', workflow_job.log_format)
self.spawn_workflow_graph_jobs(running_workflow_tasks)
self.process_tasks(all_sorted_tasks)
@@ -687,7 +540,6 @@ class TaskManager():
return
logger.debug("Starting Scheduler")
self.cleanup_inconsistent_celery_tasks()
finished_wfjs = self._schedule()
# Operations whose queries rely on modifications made during the atomic scheduling session

View File

@@ -2,30 +2,24 @@
# Python
import logging
# Celery
from celery import shared_task
# AWX
from awx.main.scheduler import TaskManager
from awx.main.dispatch.publish import task
logger = logging.getLogger('awx.main.scheduler')
# TODO: move logic to UnifiedJob model and use bind=True feature of celery.
# Would we need the request loop then? I think so. Even if we get the in-memory
# updated model, the call to schedule() may get stale data.
@shared_task()
@task()
def run_job_launch(job_id):
TaskManager().schedule()
@shared_task()
@task()
def run_job_complete(job_id):
TaskManager().schedule()
@shared_task()
@task()
def run_task_manager():
logger.debug("Running Tower task manager.")
TaskManager().schedule()

View File

@@ -13,12 +13,11 @@ import logging
import os
import re
import shutil
import six
import stat
import sys
import tempfile
import time
import traceback
import six
import urlparse
from distutils.version import LooseVersion as Version
import yaml
@@ -28,12 +27,6 @@ try:
except Exception:
psutil = None
# Celery
from kombu import Queue, Exchange
from kombu.common import Broadcast
from celery import Task, shared_task
from celery.signals import celeryd_init, worker_shutdown
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
@@ -58,10 +51,12 @@ from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.expect import run, isolated_manager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
check_proot_installed, build_proot_temp_dir, get_licenser,
wrap_args_with_proot, OutputEventFilter, OutputVerboseFilter, ignore_inventory_computed_fields,
ignore_inventory_group_removal, get_type_for_model, extract_ansible_vars)
ignore_inventory_group_removal, extract_ansible_vars)
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
Try upgrading OpenSSH or providing your private key in a different format. \
logger = logging.getLogger('awx.main.tasks')
def log_celery_failure(self, exc, task_id, args, kwargs, einfo):
try:
if getattr(exc, 'is_awx_task_error', False):
# Error caused by user / tracked in job output
logger.warning(six.text_type("{}").format(exc))
elif isinstance(self, BaseTask):
logger.exception(six.text_type(
'{!s} {!s} execution encountered exception.')
.format(get_type_for_model(self.model), args[0]))
else:
logger.exception(six.text_type('Task {} encountered exception.').format(self.name), exc_info=exc)
except Exception:
# It's fairly critical that this code _not_ raise exceptions on logging
# If you configure external logging in a way that _it_ fails, there's
# not a lot we can do here; sys.stderr.write is a final hail mary
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
@celeryd_init.connect
def celery_startup(conf=None, **kwargs):
#
# When celeryd starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, celeryd starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
if kwargs['instance'].hostname != 'celery@{}'.format(settings.CLUSTER_HOST_ID):
error = six.text_type('celery -n {} does not match settings.CLUSTER_HOST_ID={}').format(
instance.hostname, settings.CLUSTER_HOST_ID
)
logger.error(error)
raise RuntimeError(error)
(changed, tower_instance) = Instance.objects.get_or_register()
if changed:
logger.info(six.text_type("Registered tower node '{}'").format(tower_instance.hostname))
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.info("Syncing Schedules")
for sch in Schedule.objects.all():
@@ -144,34 +94,44 @@ def celery_startup(conf=None, **kwargs):
except Exception:
logger.exception(six.text_type("Failed to rebuild schedule {}.").format(sch))
# set the queues we want to bind to dynamically at startup
queues = []
me = Instance.objects.me()
for q in [me.hostname] + settings.AWX_CELERY_QUEUES_STATIC:
q = q.encode('utf-8')
queues.append(Queue(q, Exchange(q), routing_key=q))
for q in settings.AWX_CELERY_BCAST_QUEUES_STATIC:
queues.append(Broadcast(q.encode('utf-8')))
conf.CELERY_QUEUES = list(set(queues))
# Expedite the first heartbeat run so a node comes online quickly.
cluster_node_heartbeat.apply([])
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
if Instance.objects.me().is_controller():
awx_isolated_heartbeat()
@worker_shutdown.connect
def inform_cluster_of_shutdown(*args, **kwargs):
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning(six.text_type('Normal shutdown signal for instance {}, '
'removed self from capacity pool.').format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
def apply_cluster_membership_policies(self):
@task()
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
@@ -280,20 +240,36 @@ def apply_cluster_membership_policies(self):
logger.info('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@shared_task(exchange='tower_broadcast_all', bind=True)
def handle_setting_changes(self, setting_keys):
@task(queue='tower_broadcast_all', exchange_type='fanout')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
logger.warn('Processing cache changes, task args: {0.args!r} kwargs: {0.kwargs!r}'.format(
self.request))
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
@task(queue='tower_broadcast_all', exchange_type='fanout')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.info(six.text_type('Success removing project files {}').format(project_path))
except Exception:
logger.exception(six.text_type('Could not remove project directory {}').format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug(six.text_type('Success removing {}').format(lock_file))
except Exception:
logger.exception(six.text_type('Could not remove lock file {}').format(lock_file))
@task()
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
@@ -322,8 +298,8 @@ def send_notifications(notification_list, job_id=None):
logger.exception(six.text_type('Error saving notification {} result.').format(notification.id))
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
def run_administrative_checks(self):
@task()
def run_administrative_checks():
logger.warn("Running administrative checks.")
if not settings.TOWER_ADMIN_ALERTS:
return
@@ -344,8 +320,8 @@ def run_administrative_checks(self):
fail_silently=True)
@shared_task(bind=True)
def purge_old_stdout_files(self):
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT,f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
@@ -353,8 +329,8 @@ def purge_old_stdout_files(self):
logger.info(six.text_type("Removing {}").format(os.path.join(settings.JOBOUTPUT_ROOT,f)))
@shared_task(bind=True)
def cluster_node_heartbeat(self):
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all_non_isolated())
@@ -397,9 +373,13 @@ def cluster_node_heartbeat(self):
this_inst.version))
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(['uwsgi', 'celery', 'beat', 'callback'], communicate=False)
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
@@ -424,8 +404,8 @@ def cluster_node_heartbeat(self):
logger.exception(six.text_type('Error marking {} as lost').format(other_inst.hostname))
@shared_task(bind=True)
def awx_isolated_heartbeat(self):
@task(queue=get_local_queuename)
def awx_isolated_heartbeat():
local_hostname = settings.CLUSTER_HOST_ID
logger.debug("Controlling node checking for any isolated management tasks.")
poll_interval = settings.AWX_ISOLATED_PERIODIC_CHECK
@@ -452,8 +432,8 @@ def awx_isolated_heartbeat(self):
isolated_manager.IsolatedManager.health_check(isolated_instance_qs, awx_application_version)
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
def awx_periodic_scheduler(self):
@task()
def awx_periodic_scheduler():
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
@@ -503,8 +483,8 @@ def awx_periodic_scheduler(self):
state.save()
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
def handle_work_success(self, result, task_actual):
@task()
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
@@ -517,7 +497,7 @@ def handle_work_success(self, result, task_actual):
run_job_complete.delay(instance.id)
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
@task()
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
@@ -558,7 +538,7 @@ def handle_work_error(task_id, *args, **kwargs):
pass
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
@task()
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
'''
Signal handler and wrapper around inventory.update_computed_fields to
@@ -578,7 +558,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
raise
@shared_task(queue=settings.CELERY_DEFAULT_QUEUE)
@task()
def update_host_smart_inventory_memberships():
try:
with transaction.atomic():
@@ -603,8 +583,8 @@ def update_host_smart_inventory_memberships():
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE, max_retries=5)
def delete_inventory(self, inventory_id, user_id):
@task()
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
@@ -629,7 +609,9 @@ def delete_inventory(self, inventory_id, user_id):
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
self.retry(countdown=10)
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
@@ -650,8 +632,7 @@ def with_path_cleanup(f):
return _wrapped
class BaseTask(Task):
name = None
class BaseTask(object):
model = None
event_model = None
abstract = True
@@ -844,7 +825,12 @@ class BaseTask(Task):
return False
def build_inventory(self, instance, **kwargs):
json_data = json.dumps(instance.inventory.get_script_data(hostvars=True))
script_params = dict(hostvars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
json_data = json.dumps(script_data)
handle, path = tempfile.mkstemp(dir=kwargs.get('private_data_dir', None))
f = os.fdopen(handle, 'w')
f.write('#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nprint %r\n' % json_data)
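
The temp file written here is a tiny executable that just prints the pre-rendered JSON, so slicing is applied once, when the script is generated, rather than on every ansible-playbook inventory call. For a sliced job the generated file could look roughly like this (hypothetical two-host inventory, slice 1 of 2; Python 2 print statement, matching the %r template above):

#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Hypothetical output of build_inventory() for slice 1 of 2 over hosts h1, h2;
# the job executes this file to obtain its inventory JSON.
print '{"all": {"hosts": ["h1"]}, "_meta": {"hostvars": {"h1": {}}}}'
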
@@ -945,14 +931,11 @@ class BaseTask(Task):
if instance.cancel_flag:
instance = self.update_model(instance.pk, status='canceled')
if instance.status != 'running':
if hasattr(settings, 'CELERY_UNIT_TEST'):
return
else:
# Stop the task chain and prevent starting the job if it has
# already been canceled.
instance = self.update_model(pk)
status = instance.status
raise RuntimeError('not starting %s task' % instance.status)
# Stop the task chain and prevent starting the job if it has
# already been canceled.
instance = self.update_model(pk)
status = instance.status
raise RuntimeError('not starting %s task' % instance.status)
if not os.path.exists(settings.AWX_PROOT_BASE_PATH):
raise RuntimeError('AWX_PROOT_BASE_PATH=%s does not exist' % settings.AWX_PROOT_BASE_PATH)
@@ -1085,8 +1068,6 @@ class BaseTask(Task):
logger.exception(six.text_type('{} Final run hook errored.').format(instance.log_format))
instance.websocket_emit_status(status)
if status != 'successful':
# Raising an exception will mark the job as 'failed' in celery
# and will stop a task chain from continuing to execute
if status == 'canceled':
raise AwxTaskError.TaskCancel(instance, rc)
else:
@@ -1109,12 +1090,12 @@ class BaseTask(Task):
return ''
@task()
class RunJob(BaseTask):
'''
Celery task to run a job using ansible-playbook.
Run a job using ansible-playbook.
'''
name = 'awx.main.tasks.run_job'
model = Job
event_model = JobEvent
event_data_key = 'job_id'
@@ -1404,7 +1385,6 @@ class RunJob(BaseTask):
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
if job.project and job.project.scm_type:
job_request_id = '' if self.request.id is None else self.request.id
pu_ig = job.instance_group
pu_en = job.execution_node
if job.is_isolated() is True:
@@ -1417,16 +1397,14 @@ class RunJob(BaseTask):
status='running',
instance_group = pu_ig,
execution_node=pu_en,
celery_task_id=job_request_id))
celery_task_id=job.celery_task_id))
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
task_instance = project_update_task()
task_instance.request.id = job_request_id
task_instance.run(local_project_sync.id)
project_update_task().run(local_project_sync.id)
job = self.update_model(job.pk, scm_revision=job.project.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
@@ -1436,7 +1414,6 @@ class RunJob(BaseTask):
('project_update', local_project_sync.name, local_project_sync.id)))
raise
def final_run_hook(self, job, status, **kwargs):
super(RunJob, self).final_run_hook(job, status, **kwargs)
if job.use_fact_cache:
@@ -1467,9 +1444,9 @@ class RunJob(BaseTask):
update_inventory_computed_fields.delay(inventory.id, True)
@task()
class RunProjectUpdate(BaseTask):
name = 'awx.main.tasks.run_project_update'
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
@@ -1670,7 +1647,6 @@ class RunProjectUpdate(BaseTask):
return getattr(settings, 'PROJECT_UPDATE_IDLE_TIMEOUT', None)
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
project_request_id = '' if self.request.id is None else self.request.id
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
@@ -1693,13 +1669,10 @@ class RunProjectUpdate(BaseTask):
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
celery_task_id=str(project_request_id),
source_project_update=project_update))
source_project_update=project_update,
celery_task_id=project_update.celery_task_id))
try:
task_instance = inv_update_class()
# Runs in the same Celery task as project update
task_instance.request.id = project_request_id
task_instance.run(local_inv_update.id)
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception(six.text_type('{} Unhandled exception updating dependent SCM inventory sources.')
.format(project_update.log_format))
@@ -1804,9 +1777,9 @@ class RunProjectUpdate(BaseTask):
return getattr(settings, 'AWX_PROOT_ENABLED', False)
@task()
class RunInventoryUpdate(BaseTask):
name = 'awx.main.tasks.run_inventory_update'
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
@@ -2024,8 +1997,7 @@ class RunInventoryUpdate(BaseTask):
This dictionary is used by `build_env`, below.
"""
# Run the superclass implementation.
super_ = super(RunInventoryUpdate, self).build_passwords
passwords = super_(inventory_update, **kwargs)
passwords = super(RunInventoryUpdate, self).build_passwords(inventory_update, **kwargs)
# Take key fields from the credential in use and add them to the
# passwords dictionary.
@@ -2188,7 +2160,6 @@ class RunInventoryUpdate(BaseTask):
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (inventory_update.source=='scm' and inventory_update.launch_type!='scm' and source_project):
request_id = '' if self.request.id is None else self.request.id
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
@@ -2196,16 +2167,14 @@ class RunInventoryUpdate(BaseTask):
status='running',
execution_node=inventory_update.execution_node,
instance_group = inventory_update.instance_group,
celery_task_id=request_id))
celery_task_id=inventory_update.celery_task_id))
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
task_instance = project_update_task()
task_instance.request.id = request_id
task_instance.run(local_project_sync.id)
project_update_task().run(local_project_sync.id)
inventory_update.inventory_source.scm_last_revision = local_project_sync.project.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
@@ -2216,12 +2185,12 @@ class RunInventoryUpdate(BaseTask):
raise
@task()
class RunAdHocCommand(BaseTask):
'''
Celery task to run an ad hoc command using ansible.
Run an ad hoc command using ansible.
'''
name = 'awx.main.tasks.run_ad_hoc_command'
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
@@ -2382,9 +2351,9 @@ class RunAdHocCommand(BaseTask):
return getattr(settings, 'AWX_PROOT_ENABLED', False)
@task()
class RunSystemJob(BaseTask):
name = 'awx.main.tasks.run_system_job'
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
@@ -2439,9 +2408,9 @@ def _reconstruct_relationships(copy_mapping):
new_obj.save()
@shared_task(bind=True, queue=settings.CELERY_DEFAULT_QUEUE)
@task()
def deep_copy_model_obj(
self, model_module, model_name, obj_pk, new_obj_pk,
model_module, model_name, obj_pk, new_obj_pk,
user_pk, sub_obj_list, permission_check_func=None
):
logger.info(six.text_type('Deep copy {} from {} to {}.').format(model_name, obj_pk, new_obj_pk))

View File

@@ -119,7 +119,7 @@ class TestSwaggerGeneration():
def test_autogen_response_examples(self, swagger_autogen):
for pattern, node in TestSwaggerGeneration.JSON['paths'].items():
pattern = pattern.replace('{id}', '[0-9]+')
pattern = pattern.replace('{category_slug}', '[a-zA-Z0-9\-]+')
pattern = pattern.replace(r'{category_slug}', r'[a-zA-Z0-9\-]+')
for path, result in swagger_autogen.items():
if re.match('^{}$'.format(pattern), path):
for key, value in result.items():

View File

@@ -359,18 +359,17 @@ def test_create_with_valid_injectors(get, post, admin):
},
'injectors': {
'env': {
'ANSIBLE_MY_CLOUD_TOKEN': '{{api_token}}'
'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
}
}
}, admin)
assert response.status_code == 201
}, admin, expect=201)
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
injectors = response.data['results'][0]['injectors']
assert len(injectors) == 1
assert injectors['env'] == {
'ANSIBLE_MY_CLOUD_TOKEN': '{{api_token}}'
'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
}
@@ -388,7 +387,7 @@ def test_create_with_undefined_template_variable_xfail(post, admin):
}]
},
'injectors': {
'env': {'ANSIBLE_MY_CLOUD_TOKEN': '{{api_tolkien}}'}
'env': {'AWX_MY_CLOUD_TOKEN': '{{api_tolkien}}'}
}
}, admin)
assert response.status_code == 400

View File

@@ -59,7 +59,7 @@ def check_system_tracking_feature_forbidden(response):
assert 'Your license does not permit use of system tracking.' == response.data['detail']
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
@pytest.mark.django_db
@pytest.mark.license_feature
def test_system_tracking_license_get(hosts, get, user):
@@ -70,7 +70,7 @@ def test_system_tracking_license_get(hosts, get, user):
check_system_tracking_feature_forbidden(response)
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
@pytest.mark.django_db
@pytest.mark.license_feature
def test_system_tracking_license_options(hosts, options, user):

View File

@@ -41,7 +41,7 @@ def check_system_tracking_feature_forbidden(response):
assert 'Your license does not permit use of system tracking.' == response.data['detail']
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
@pytest.mark.django_db
@pytest.mark.license_feature
def test_system_tracking_license_get(hosts, get, user):
@@ -52,7 +52,7 @@ def test_system_tracking_license_get(hosts, get, user):
check_system_tracking_feature_forbidden(response)
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled)
@mock.patch('awx.api.views.mixin.feature_enabled', new=mock_feature_disabled)
@pytest.mark.django_db
@pytest.mark.license_feature
def test_system_tracking_license_options(hosts, options, user):

View File

@@ -122,6 +122,22 @@ def test_job_relaunch_on_failed_hosts(post, inventory, project, machine_credenti
assert r.data.get('limit') == hosts
@pytest.mark.django_db
def test_slice_jt_recent_jobs(slice_job_factory, admin_user, get):
workflow_job = slice_job_factory(3, spawn=True)
slice_jt = workflow_job.job_template
r = get(
url=slice_jt.get_absolute_url(),
user=admin_user,
expect=200
)
job_ids = [entry['id'] for entry in r.data['summary_fields']['recent_jobs']]
assert workflow_job.pk not in job_ids
for node in workflow_job.workflow_nodes.all():
job = node.job
assert job.pk in job_ids
@pytest.mark.django_db
def test_block_unprocessed_events(delete, admin_user, mocker):
time_of_finish = parse("Thu Feb 28 09:10:20 2013 -0500")
@@ -141,7 +157,7 @@ def test_block_unprocessed_events(delete, admin_user, mocker):
view = MockView()
time_of_request = time_of_finish + relativedelta(seconds=2)
with mock.patch('awx.api.views.now', lambda: time_of_request):
with mock.patch('awx.api.views.mixin.now', lambda: time_of_request):
r = view.destroy(request)
assert r.status_code == 400
@@ -162,7 +178,7 @@ def test_block_related_unprocessed_events(mocker, organization, project, delete,
)
view = RelatedJobsPreventDeleteMixin()
time_of_request = time_of_finish + relativedelta(seconds=2)
with mock.patch('awx.api.views.now', lambda: time_of_request):
with mock.patch('awx.api.views.mixin.now', lambda: time_of_request):
with pytest.raises(PermissionDenied):
view.perform_destroy(organization)

View File

@@ -6,7 +6,7 @@ import json
from awx.api.serializers import JobLaunchSerializer
from awx.main.models.credential import Credential
from awx.main.models.inventory import Inventory, Host
from awx.main.models.jobs import Job, JobTemplate
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
from awx.api.versioning import reverse
@@ -553,15 +553,15 @@ def test_callback_accept_prompted_extra_var(mocker, survey_spec_factory, job_tem
with mocker.patch('awx.main.access.BaseAccess.check_license'):
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch.object(UnifiedJobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]):
post(
reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
admin_user, expect=201, format='json')
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({
assert UnifiedJobTemplate.create_unified_job.called
assert UnifiedJobTemplate.create_unified_job.call_args == ({
'extra_vars': {'survey_var': 4, 'job_launch_var': 3},
'_eager_fields': {'launch_type': 'callback'},
'limit': 'single-host'},
@@ -579,15 +579,15 @@ def test_callback_ignore_unprompted_extra_var(mocker, survey_spec_factory, job_t
with mocker.patch('awx.main.access.BaseAccess.check_license'):
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch.object(UnifiedJobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]):
post(
reverse('api:job_template_callback', kwargs={'pk':job_template.pk}),
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
admin_user, expect=201, format='json')
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({
assert UnifiedJobTemplate.create_unified_job.called
assert UnifiedJobTemplate.create_unified_job.call_args == ({
'_eager_fields': {'launch_type': 'callback'},
'limit': 'single-host'},
)

View File

@@ -5,10 +5,7 @@ import json
from django.db import connection
from django.test.utils import override_settings
from django.test import Client
from django.core.urlresolvers import resolve
from rest_framework.test import APIRequestFactory
from awx.main.middleware import DeprecatedAuthTokenMiddleware
from awx.main.utils.encryption import decrypt_value, get_encryption_key
from awx.api.versioning import reverse, drf_reverse
from awx.main.models.oauth import (OAuth2Application as Application,
@@ -361,43 +358,3 @@ def test_revoke_refreshtoken(oauth_application, post, get, delete, admin):
new_refresh_token = RefreshToken.objects.all().first()
assert refresh_token == new_refresh_token
assert new_refresh_token.revoked
@pytest.mark.django_db
@pytest.mark.parametrize('fmt', ['json', 'multipart'])
def test_deprecated_authtoken_support(alice, fmt):
kwargs = {
'data': {'username': 'alice', 'password': 'alice'},
'format': fmt
}
request = getattr(APIRequestFactory(), 'post')('/api/v2/authtoken/', **kwargs)
DeprecatedAuthTokenMiddleware().process_request(request)
assert request.path == request.path_info == '/api/v2/users/{}/personal_tokens/'.format(alice.pk)
view, view_args, view_kwargs = resolve(request.path)
resp = view(request, *view_args, **view_kwargs)
assert resp.status_code == 201
assert 'token' in resp.data
assert resp.data['refresh_token'] is None
assert resp.data['scope'] == 'write'
@pytest.mark.django_db
def test_deprecated_authtoken_invalid_username(alice):
kwargs = {
'data': {'username': 'nobody', 'password': 'nobody'},
'format': 'json'
}
request = getattr(APIRequestFactory(), 'post')('/api/v2/authtoken/', **kwargs)
resp = DeprecatedAuthTokenMiddleware().process_request(request)
assert resp.status_code == 401
@pytest.mark.django_db
def test_deprecated_authtoken_missing_credentials(alice):
kwargs = {
'data': {},
'format': 'json'
}
request = getattr(APIRequestFactory(), 'post')('/api/v2/authtoken/', **kwargs)
resp = DeprecatedAuthTokenMiddleware().process_request(request)
assert resp.status_code == 401

View File

@@ -7,6 +7,7 @@ import pytest
import os
from django.conf import settings
from kombu.utils.url import parse_url
# Mock
import mock
@@ -358,3 +359,25 @@ def test_isolated_key_flag_readonly(get, patch, delete, admin):
delete(url, user=admin)
assert settings.AWX_ISOLATED_KEY_GENERATION is True
@pytest.mark.django_db
def test_default_broker_url():
url = parse_url(settings.BROKER_URL)
assert url['transport'] == 'amqp'
assert url['hostname'] == 'rabbitmq'
assert url['userid'] == 'guest'
assert url['password'] == 'guest'
assert url['virtual_host'] == '/'
@pytest.mark.django_db
def test_broker_url_with_special_characters():
settings.BROKER_URL = 'amqp://guest:a@ns:ibl3#@rabbitmq:5672//'
url = parse_url(settings.BROKER_URL)
assert url['transport'] == 'amqp'
assert url['hostname'] == 'rabbitmq'
assert url['port'] == 5672
assert url['userid'] == 'guest'
assert url['password'] == 'a@ns:ibl3#'
assert url['virtual_host'] == '/'

View File

@@ -285,9 +285,7 @@ def test_survey_spec_passwords_with_default_required(job_template_factory, post,
@pytest.mark.django_db
@pytest.mark.parametrize('default, status', [
('SUPERSECRET', 200),
(['some', 'invalid', 'list'], 400),
({'some-invalid': 'dict'}, 400),
(False, 400)
])
def test_survey_spec_default_passwords_are_encrypted(job_template, post, admin_user, default, status):
job_template.survey_enabled = True
@@ -317,7 +315,7 @@ def test_survey_spec_default_passwords_are_encrypted(job_template, post, admin_u
'secret_value': default
}
else:
assert "for 'secret_value' expected to be a string." in str(resp.data)
assert "expected to be string." in str(resp.data)
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@@ -346,37 +344,6 @@ def test_survey_spec_default_passwords_encrypted_on_update(job_template, post, p
assert updated_jt.survey_spec == JobTemplate.objects.get(pk=job_template.pk).survey_spec
# Tests related to survey content validation
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
@pytest.mark.survey
def test_survey_spec_non_dict_error(deploy_jobtemplate, post, admin_user):
"""When a question doesn't follow the standard format, verify error thrown."""
response = post(
url=reverse('api:job_template_survey_spec', kwargs={'pk': deploy_jobtemplate.id}),
data={
"description": "Email of the submitter",
"spec": ["What is your email?"], "name": "Email survey"
},
user=admin_user,
expect=400
)
assert response.data['error'] == "Survey question 0 is not a json object."
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
@pytest.mark.survey
def test_survey_spec_dual_names_error(survey_spec_factory, deploy_jobtemplate, post, user):
response = post(
url=reverse('api:job_template_survey_spec', kwargs={'pk': deploy_jobtemplate.id}),
data=survey_spec_factory(['submitter_email', 'submitter_email']),
user=user('admin', True),
expect=400
)
assert response.data['error'] == "'variable' 'submitter_email' duplicated in survey question 1."
# Test actions that should be allowed with non-survey license
@mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys)
@pytest.mark.django_db

View File

@@ -8,13 +8,13 @@ import tempfile
import shutil
from datetime import timedelta
from six.moves import xrange
from mock import PropertyMock
# Django
from django.core.urlresolvers import resolve
from django.utils.six.moves.urllib.parse import urlparse
from django.utils import timezone
from django.contrib.auth.models import User
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
from jsonbfield.fields import JSONField
@@ -66,17 +66,6 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
return requests
@pytest.fixture(scope="session", autouse=True)
def celery_memory_broker():
'''
FIXME: Not sure how "far" just setting the BROKER_URL will get us.
We may need to influence CELERY's configuration like we do in the old unit tests (see base.py)
Allows django signal code to execute without the need for redis
'''
settings.BROKER_URL='memory://localhost/'
@pytest.fixture
def user():
def u(name, is_superuser=False):
@@ -682,6 +671,24 @@ def job_template_labels(organization, job_template):
return job_template
@pytest.fixture
def jt_linked(job_template_factory, credential, net_credential, vault_credential):
'''
A job template with a reasonably complete set of related objects to
test RBAC and other functionality affected by related objects
'''
objects = job_template_factory(
'testJT', organization='org1', project='proj1', inventory='inventory1',
credential='cred1')
jt = objects.job_template
jt.credentials.add(vault_credential)
jt.save()
# Add AWS cloud credential and network credential
jt.credentials.add(credential)
jt.credentials.add(net_credential)
return jt
@pytest.fixture
def workflow_job_template(organization):
wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization)
@@ -764,3 +771,42 @@ def sqlite_copy_expert(request):
request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
return path
@pytest.fixture
def disable_database_settings(mocker):
m = mocker.patch('awx.conf.settings.SettingsWrapper.all_supported_settings', new_callable=PropertyMock)
m.return_value = []
@pytest.fixture
def slice_jt_factory(inventory):
def r(N, jt_kwargs=None):
for i in range(N):
inventory.hosts.create(name='foo{}'.format(i))
if not jt_kwargs:
jt_kwargs = {}
return JobTemplate.objects.create(
name='slice-jt-from-factory',
job_slice_count=N,
inventory=inventory,
**jt_kwargs
)
return r
@pytest.fixture
def slice_job_factory(slice_jt_factory):
def r(N, jt_kwargs=None, prompts=None, spawn=False):
slice_jt = slice_jt_factory(N, jt_kwargs=jt_kwargs)
if not prompts:
prompts = {}
slice_job = slice_jt.create_unified_job(**prompts)
if spawn:
for node in slice_job.workflow_nodes.all():
# does what the task manager does for spawning workflow jobs
kv = node.get_job_kwargs()
job = node.unified_job_template.create_unified_job(**kv)
node.job = job
node.save()
return slice_job
return r
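These two factories are used by the slicing tests throughout the rest of this diff. A minimal sketch of how a test might combine them (the test name is invented; the counts follow TestSlicingModels below):

import pytest

@pytest.mark.django_db
def test_three_way_slice(slice_job_factory):
    workflow_job = slice_job_factory(3, spawn=True)    # 3 hosts, 3 slices
    assert workflow_job.workflow_nodes.count() == 3
    for node in workflow_job.workflow_nodes.all():
        # spawn=True created one real job per slice, as in the tests below
        assert node.job is not None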

View File

@@ -38,6 +38,33 @@ class TestInventoryScript:
'remote_tower_id': host.id
}
def test_slice_subset(self, inventory):
for i in range(3):
inventory.hosts.create(name='host{}'.format(i))
for i in range(3):
assert inventory.get_script_data(slice_number=i + 1, slice_count=3) == {
'all': {'hosts': ['host{}'.format(i)]}
}
def test_slice_subset_with_groups(self, inventory):
hosts = []
for i in range(3):
host = inventory.hosts.create(name='host{}'.format(i))
hosts.append(host)
g1 = inventory.groups.create(name='contains_all_hosts')
for host in hosts:
g1.hosts.add(host)
g2 = inventory.groups.create(name='contains_two_hosts')
for host in hosts[:2]:
g2.hosts.add(host)
for i in range(3):
expected_data = {
'contains_all_hosts': {'hosts': ['host{}'.format(i)], 'children': [], 'vars': {}},
}
if i < 2:
expected_data['contains_two_hosts'] = {'hosts': ['host{}'.format(i)], 'children': [], 'vars': {}}
assert inventory.get_script_data(slice_number=i + 1, slice_count=3) == expected_data
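These assertions pin down the observable contract of get_script_data with slicing: each of the N slices sees a disjoint, roughly even subset of hosts, and group membership is filtered down to the hosts in that slice. Purely as an illustration of that contract (not the Inventory model's actual code), an even split can be pictured as:

# Illustration of the contract only -- not AWX's implementation.
def split_hosts(hosts, slice_count):
    return [hosts[i::slice_count] for i in range(slice_count)]

assert split_hosts(['host0', 'host1', 'host2'], 3) == [['host0'], ['host1'], ['host2']]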
@pytest.mark.django_db
class TestActiveCount:

View File

@@ -1,7 +1,7 @@
import pytest
import six
from awx.main.models import JobTemplate, Job, JobHostSummary
from awx.main.models import JobTemplate, Job, JobHostSummary, WorkflowJob
from crum import impersonate
@@ -81,3 +81,23 @@ def test_job_host_summary_representation(host):
jhs = JobHostSummary.objects.get(pk=jhs.id)
host.delete()
assert 'N/A changed=1 dark=2 failures=3 ok=4 processed=5 skipped=6' == six.text_type(jhs)
@pytest.mark.django_db
class TestSlicingModels:
def test_slice_workflow_spawn(self, slice_jt_factory):
slice_jt = slice_jt_factory(3)
job = slice_jt.create_unified_job()
assert isinstance(job, WorkflowJob)
assert job.job_template == slice_jt
assert job.unified_job_template == slice_jt
assert job.workflow_nodes.count() == 3
def test_slices_with_JT_and_prompts(self, slice_job_factory):
job = slice_job_factory(3, jt_kwargs={'ask_limit_on_launch': True}, prompts={'limit': 'foobar'}, spawn=True)
assert job.launch_config.prompts_dict() == {'limit': 'foobar'}
for node in job.workflow_nodes.all():
assert node.limit is None # data not saved in node prompts
job = node.job
assert job.limit == 'foobar'

View File

@@ -1,13 +1,16 @@
import itertools
import pytest
import mock
import six
# Django
from django.contrib.contenttypes.models import ContentType
# AWX
from awx.main.models import UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate, Project, WorkflowJob, Schedule
from awx.main.models.ha import InstanceGroup
from awx.main.models import (
UnifiedJobTemplate, Job, JobTemplate, WorkflowJobTemplate,
Project, WorkflowJob, Schedule,
Credential
)
@pytest.mark.django_db
@@ -55,9 +58,7 @@ class TestCreateUnifiedJob:
job_with_links.save()
job_with_links.credentials.add(machine_credential)
job_with_links.credentials.add(net_credential)
with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate._get_unified_job_field_names',
return_value=['inventory', 'credential', 'limit']):
second_job = job_with_links.copy_unified_job()
second_job = job_with_links.copy_unified_job()
# Check that job data matches the original variables
assert second_job.credential == job_with_links.credential
@@ -65,47 +66,20 @@ class TestCreateUnifiedJob:
assert second_job.limit == 'my_server'
assert net_credential in second_job.credentials.all()
@pytest.mark.django_db
class TestIsolatedRuns:
def test_low_capacity_isolated_instance_selected(self):
ig = InstanceGroup.objects.create(name='tower')
iso_ig = InstanceGroup.objects.create(name='thepentagon', controller=ig)
iso_ig.instances.create(hostname='iso1', capacity=50)
i2 = iso_ig.instances.create(hostname='iso2', capacity=200)
job = Job.objects.create(
instance_group=iso_ig,
celery_task_id='something',
)
mock_async = mock.MagicMock()
success_callback = mock.MagicMock()
error_callback = mock.MagicMock()
class MockTaskClass:
apply_async = mock_async
with mock.patch.object(job, '_get_task_class') as task_class:
task_class.return_value = MockTaskClass
job.start_celery_task([], error_callback, success_callback, 'thepentagon')
mock_async.assert_called_with([job.id], [],
link_error=error_callback,
link=success_callback,
queue='thepentagon',
task_id='something')
i2.capacity = 20
i2.save()
with mock.patch.object(job, '_get_task_class') as task_class:
task_class.return_value = MockTaskClass
job.start_celery_task([], error_callback, success_callback, 'thepentagon')
mock_async.assert_called_with([job.id], [],
link_error=error_callback,
link=success_callback,
queue='thepentagon',
task_id='something')
def test_job_relaunch_modifed_jt(self, jt_linked):
# Replace all credentials with a new one of same type
new_creds = []
for cred in jt_linked.credentials.all():
new_creds.append(Credential.objects.create(
name=six.text_type(cred.name) + six.text_type('_new'),
credential_type=cred.credential_type,
inputs=cred.inputs
))
job = jt_linked.create_unified_job()
jt_linked.credentials.clear()
jt_linked.credentials.add(*new_creds)
relaunched_job = job.copy_unified_job()
assert set(relaunched_job.credentials.all()) == set(new_creds)
@pytest.mark.django_db
@@ -178,3 +152,50 @@ def test_event_processing_not_finished():
def test_event_model_undefined():
wj = WorkflowJob.objects.create(name='foobar', status='finished')
assert wj.event_processing_finished
@pytest.mark.django_db
class TestTaskImpact:
@pytest.fixture
def job_host_limit(self, job_template, inventory):
def r(hosts, forks):
for i in range(hosts):
inventory.hosts.create(name='foo' + str(i))
job = Job.objects.create(
name='fake-job',
launch_type='workflow',
job_template=job_template,
inventory=inventory,
forks=forks
)
return job
return r
def test_limit_task_impact(self, job_host_limit):
job = job_host_limit(5, 2)
assert job.task_impact == 2 + 1 # forks becomes constraint
def test_host_task_impact(self, job_host_limit):
job = job_host_limit(3, 5)
assert job.task_impact == 3 + 1 # hosts becomes constraint
def test_shard_task_impact(self, slice_job_factory):
# factory creates one host per slice
workflow_job = slice_job_factory(3, jt_kwargs={'forks': 50}, spawn=True)
# arrange the jobs by their number
jobs = [None for i in range(3)]
for node in workflow_job.workflow_nodes.all():
jobs[node.job.job_slice_number - 1] = node.job
# Even distribution - all jobs run on 1 host
assert [
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
for i in range(3)
] == [1, 1, 1]
assert [job.task_impact for job in jobs] == [2, 2, 2] # plus one base task impact
# Uneven distribution - first job takes the extra host
jobs[0].inventory.hosts.create(name='remainder_foo')
assert [
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
for i in range(3)
] == [2, 1, 1]
assert [job.task_impact for job in jobs] == [3, 2, 2]
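Read together, the assertions in this class encode a capacity rule of roughly min(hosts in the (sliced) inventory, forks) + 1 for a job's task_impact. Sketched below as a reading aid only, not as the model's code:

# Reading aid: the relationship the asserts above encode.
def expected_task_impact(host_count, forks):
    return min(host_count, forks) + 1      # +1 base impact for any job

assert expected_task_impact(5, 2) == 3     # forks is the constraint
assert expected_task_impact(3, 5) == 4     # hosts are the constraint
assert expected_task_impact(1, 50) == 2    # one host per slice in the sliced case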

View File

@@ -7,6 +7,7 @@ from awx.main.models.workflow import WorkflowJob, WorkflowJobNode, WorkflowJobTe
from awx.main.models.jobs import JobTemplate, Job
from awx.main.models.projects import ProjectUpdate
from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.api.versioning import reverse
# Django
from django.test import TransactionTestCase
@@ -196,9 +197,15 @@ class TestWorkflowJobTemplate:
assert test_view.is_valid_relation(node_assoc, nodes[1]) == {'Error': 'Multiple parent relationship not allowed.'}
# test mutex validation
test_view.relationship = 'failure_nodes'
node_assoc_1 = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt)
assert (test_view.is_valid_relation(nodes[2], node_assoc_1) ==
{'Error': 'Cannot associate failure_nodes when always_nodes have been associated.'})
def test_always_success_failure_creation(self, wfjt, admin, get):
wfjt_node = wfjt.workflow_job_template_nodes.all()[1]
node = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt)
wfjt_node.always_nodes.add(node)
assert len(node.get_parent_nodes()) == 1
url = reverse('api:workflow_job_template_node_list') + str(wfjt_node.id) + '/'
resp = get(url, admin)
assert node.id in resp.data['always_nodes']
def test_wfjt_unique_together_with_org(self, organization):
wfjt1 = WorkflowJobTemplate(name='foo', organization=organization)

View File

@@ -1,19 +1,10 @@
import pytest
import mock
import json
from datetime import timedelta, datetime
from django.core.cache import cache
from django.utils.timezone import now as tz_now
from datetime import timedelta
from awx.main.scheduler import TaskManager
from awx.main.utils import encrypt_field
from awx.main.models import (
Job,
Instance,
WorkflowJob,
)
from awx.main.models.notifications import JobNotificationMixin
@pytest.mark.django_db
@@ -245,140 +236,3 @@ def test_shared_dependencies_launch(default_instance_group, job_template_factory
iu = [x for x in ii.inventory_updates.all()]
assert len(pu) == 1
assert len(iu) == 1
@pytest.mark.django_db
def test_cleanup_interval(mock_cache):
with mock.patch.multiple('awx.main.scheduler.task_manager.cache', get=mock_cache.get, set=mock_cache.set):
assert mock_cache.get('last_celery_task_cleanup') is None
TaskManager().cleanup_inconsistent_celery_tasks()
last_cleanup = mock_cache.get('last_celery_task_cleanup')
assert isinstance(last_cleanup, datetime)
TaskManager().cleanup_inconsistent_celery_tasks()
assert cache.get('last_celery_task_cleanup') == last_cleanup
class TestReaper():
@pytest.fixture
def all_jobs(self, mocker):
now = tz_now()
Instance.objects.create(hostname='host1', capacity=100)
Instance.objects.create(hostname='host2', capacity=100)
Instance.objects.create(hostname='host3_split', capacity=100)
Instance.objects.create(hostname='host4_offline', capacity=0)
j1 = Job.objects.create(status='pending', execution_node='host1')
j2 = Job.objects.create(status='waiting', celery_task_id='considered_j2')
j3 = Job.objects.create(status='waiting', celery_task_id='considered_j3')
j3.modified = now - timedelta(seconds=60)
j3.save(update_fields=['modified'])
j4 = Job.objects.create(status='running', celery_task_id='considered_j4', execution_node='host1')
j5 = Job.objects.create(status='waiting', celery_task_id='reapable_j5')
j5.modified = now - timedelta(seconds=60)
j5.save(update_fields=['modified'])
j6 = Job.objects.create(status='waiting', celery_task_id='considered_j6')
j6.modified = now - timedelta(seconds=60)
j6.save(update_fields=['modified'])
j7 = Job.objects.create(status='running', celery_task_id='considered_j7', execution_node='host2')
j8 = Job.objects.create(status='running', celery_task_id='reapable_j7', execution_node='host2')
j9 = Job.objects.create(status='waiting', celery_task_id='reapable_j8')
j9.modified = now - timedelta(seconds=60)
j9.save(update_fields=['modified'])
j10 = Job.objects.create(status='running', celery_task_id='host3_j10', execution_node='host3_split')
j11 = Job.objects.create(status='running', celery_task_id='host4_j11', execution_node='host4_offline')
j12 = WorkflowJob.objects.create(status='running', celery_task_id='workflow_job', execution_node='host1')
js = [j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12]
for j in js:
j.save = mocker.Mock(wraps=j.save)
j.websocket_emit_status = mocker.Mock()
return js
@pytest.fixture
def considered_jobs(self, all_jobs):
return all_jobs[2:7] + [all_jobs[10]]
@pytest.fixture
def running_tasks(self, all_jobs):
return {
'host1': [all_jobs[3]],
'host2': [all_jobs[7], all_jobs[8]],
'host3_split': [all_jobs[9]],
'host4_offline': [all_jobs[10]],
}
@pytest.fixture
def waiting_tasks(self, all_jobs):
return [all_jobs[2], all_jobs[4], all_jobs[5], all_jobs[8]]
@pytest.fixture
def reapable_jobs(self, all_jobs):
return [all_jobs[4], all_jobs[7], all_jobs[10]]
@pytest.fixture
def unconsidered_jobs(self, all_jobs):
return all_jobs[0:1] + all_jobs[5:7]
@pytest.fixture
def active_tasks(self):
return ([], {
'host1': ['considered_j2', 'considered_j3', 'considered_j4',],
'host2': ['considered_j6', 'considered_j7'],
})
@pytest.mark.django_db
@mock.patch.object(JobNotificationMixin, 'send_notification_templates')
@mock.patch.object(TaskManager, 'get_active_tasks', lambda self: ([], []))
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, waiting_tasks, mocker, settings):
settings.AWX_INCONSISTENT_TASK_INTERVAL = 0
tm = TaskManager()
tm.get_running_tasks = mocker.Mock(return_value=(running_tasks, waiting_tasks))
tm.get_active_tasks = mocker.Mock(return_value=active_tasks)
tm.cleanup_inconsistent_celery_tasks()
for j in considered_jobs:
if j not in reapable_jobs:
j.save.assert_not_called()
assert notify.call_count == 4
notify.assert_has_calls([mock.call('failed') for j in reapable_jobs], any_order=True)
for j in reapable_jobs:
j.websocket_emit_status.assert_called_once_with('failed')
assert j.status == 'failed'
assert j.job_explanation == (
'Task was marked as running in Tower but was not present in the job queue, so it has been marked as failed.'
)
@pytest.mark.django_db
def test_get_running_tasks(self, all_jobs):
tm = TaskManager()
# Ensure the query grabs the expected jobs
execution_nodes_jobs, waiting_jobs = tm.get_running_tasks()
assert 'host1' in execution_nodes_jobs
assert 'host2' in execution_nodes_jobs
assert 'host3_split' in execution_nodes_jobs
assert all_jobs[3] in execution_nodes_jobs['host1']
assert all_jobs[6] in execution_nodes_jobs['host2']
assert all_jobs[7] in execution_nodes_jobs['host2']
assert all_jobs[9] in execution_nodes_jobs['host3_split']
assert all_jobs[10] in execution_nodes_jobs['host4_offline']
assert all_jobs[11] not in execution_nodes_jobs['host1']
assert all_jobs[2] in waiting_jobs
assert all_jobs[4] in waiting_jobs
assert all_jobs[5] in waiting_jobs
assert all_jobs[8] in waiting_jobs

View File

@@ -0,0 +1,387 @@
import datetime
import multiprocessing
import random
import signal
import time
from django.utils.timezone import now as tz_now
import pytest
from awx.main.models import Job, WorkflowJob, Instance
from awx.main.dispatch import reaper
from awx.main.dispatch.pool import PoolWorker, WorkerPool, AutoscalePool
from awx.main.dispatch.publish import task
from awx.main.dispatch.worker import BaseWorker, TaskWorker
@task()
def add(a, b):
return a + b
class BaseTask(object):
def add(self, a, b):
return add(a, b)
@task()
class Adder(BaseTask):
def run(self, a, b):
return super(Adder, self).add(a, b)
@task(queue='hard-math')
def multiply(a, b):
return a * b
class SimpleWorker(BaseWorker):
def perform_work(self, body, *args):
pass
class ResultWriter(BaseWorker):
def perform_work(self, body, result_queue):
result_queue.put(body + '!!!')
class SlowResultWriter(BaseWorker):
def perform_work(self, body, result_queue):
time.sleep(3)
super(SlowResultWriter, self).perform_work(body, result_queue)
@pytest.mark.usefixtures("disable_database_settings")
class TestPoolWorker:
def setup_method(self, test_method):
self.worker = PoolWorker(1000, self.tick, tuple())
def tick(self):
self.worker.finished.put(self.worker.queue.get()['uuid'])
time.sleep(.5)
def test_qsize(self):
assert self.worker.qsize == 0
for i in range(3):
self.worker.put({'task': 'abc123'})
assert self.worker.qsize == 3
def test_put(self):
assert len(self.worker.managed_tasks) == 0
assert self.worker.messages_finished == 0
self.worker.put({'task': 'abc123'})
assert len(self.worker.managed_tasks) == 1
assert self.worker.messages_sent == 1
def test_managed_tasks(self):
self.worker.put({'task': 'abc123'})
self.worker.calculate_managed_tasks()
assert len(self.worker.managed_tasks) == 1
self.tick()
self.worker.calculate_managed_tasks()
assert len(self.worker.managed_tasks) == 0
def test_current_task(self):
self.worker.put({'task': 'abc123'})
assert self.worker.current_task['task'] == 'abc123'
def test_quit(self):
self.worker.quit()
assert self.worker.queue.get() == 'QUIT'
def test_idle_busy(self):
assert self.worker.idle is True
assert self.worker.busy is False
self.worker.put({'task': 'abc123'})
assert self.worker.busy is True
assert self.worker.idle is False
@pytest.mark.django_db
class TestWorkerPool:
def setup_method(self, test_method):
self.pool = WorkerPool(min_workers=3)
def teardown_method(self, test_method):
self.pool.stop(signal.SIGTERM)
def test_worker(self):
self.pool.init_workers(SimpleWorker().work_loop)
assert len(self.pool) == 3
for worker in self.pool.workers:
assert worker.messages_sent == 0
assert worker.alive is True
def test_single_task(self):
self.pool.init_workers(SimpleWorker().work_loop)
self.pool.write(0, 'xyz')
assert self.pool.workers[0].messages_sent == 1 # worker at index 0 handled one task
assert self.pool.workers[1].messages_sent == 0
assert self.pool.workers[2].messages_sent == 0
def test_queue_preference(self):
self.pool.init_workers(SimpleWorker().work_loop)
self.pool.write(2, 'xyz')
assert self.pool.workers[0].messages_sent == 0
assert self.pool.workers[1].messages_sent == 0
assert self.pool.workers[2].messages_sent == 1 # worker at index 2 handled one task
def test_worker_processing(self):
result_queue = multiprocessing.Queue()
self.pool.init_workers(ResultWriter().work_loop, result_queue)
for i in range(10):
self.pool.write(
random.choice(range(len(self.pool))),
'Hello, Worker {}'.format(i)
)
all_messages = [result_queue.get(timeout=1) for i in range(10)]
all_messages.sort()
assert all_messages == [
'Hello, Worker {}!!!'.format(i)
for i in range(10)
]
total_handled = sum([worker.messages_sent for worker in self.pool.workers])
assert total_handled == 10
@pytest.mark.django_db
class TestAutoScaling:
def setup_method(self, test_method):
self.pool = AutoscalePool(min_workers=2, max_workers=10)
def teardown_method(self, test_method):
self.pool.stop(signal.SIGTERM)
def test_scale_up(self):
result_queue = multiprocessing.Queue()
self.pool.init_workers(SlowResultWriter().work_loop, result_queue)
# start with two workers, write an event to each worker and make it busy
assert len(self.pool) == 2
for i, w in enumerate(self.pool.workers):
w.put('Hello, Worker {}'.format(0))
assert len(self.pool) == 2
# wait for the subprocesses to start working on their tasks and be marked busy
time.sleep(1)
assert self.pool.should_grow
# write a third message, expect a new worker to spawn because all
# workers are busy
self.pool.write(0, 'Hello, Worker {}'.format(2))
assert len(self.pool) == 3
def test_scale_down(self):
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
# start with two workers, and scale up to 10 workers
assert len(self.pool) == 2
for i in range(8):
self.pool.up()
assert len(self.pool) == 10
# cleanup should scale back down to the 2 minimum workers
self.pool.cleanup()
assert len(self.pool) == 2
def test_max_scale_up(self):
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
assert len(self.pool) == 2
for i in range(25):
self.pool.up()
assert self.pool.max_workers == 10
assert self.pool.full is True
assert len(self.pool) == 10
def test_equal_worker_distribution(self):
# if all workers are busy, spawn new workers *before* adding messages
# to an existing queue
self.pool.init_workers(SlowResultWriter().work_loop, multiprocessing.Queue)
# start with two workers, write an event to each worker and make it busy
assert len(self.pool) == 2
for i in range(10):
self.pool.write(0, 'Hello, World!')
assert len(self.pool) == 10
for w in self.pool.workers:
assert w.busy
assert len(w.managed_tasks) == 1
# the queue is full at 10, the _next_ write should put the message into
# a worker's backlog
assert len(self.pool) == 10
for w in self.pool.workers:
assert w.messages_sent == 1
self.pool.write(0, 'Hello, World!')
assert len(self.pool) == 10
assert self.pool.workers[0].messages_sent == 2
def test_lost_worker_autoscale(self):
# if a worker exits, it should be replaced automatically up to min_workers
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
# start with two workers, kill one of them
assert len(self.pool) == 2
assert not self.pool.should_grow
alive_pid = self.pool.workers[1].pid
self.pool.workers[0].process.terminate()
time.sleep(1) # wait a moment for sigterm
# clean up the dead worker
self.pool.cleanup()
assert len(self.pool) == 1
assert self.pool.workers[0].pid == alive_pid
# the next queue write should replace the lost worker
self.pool.write(0, 'Hello, Worker')
assert len(self.pool) == 2
@pytest.mark.usefixtures("disable_database_settings")
class TestTaskDispatcher:
@property
def tm(self):
return TaskWorker()
def test_function_dispatch(self):
result = self.tm.perform_work({
'task': 'awx.main.tests.functional.test_dispatch.add',
'args': [2, 2]
})
assert result == 4
def test_method_dispatch(self):
result = self.tm.perform_work({
'task': 'awx.main.tests.functional.test_dispatch.Adder',
'args': [2, 2]
})
assert result == 4
class TestTaskPublisher:
def test_function_callable(self):
assert add(2, 2) == 4
def test_method_callable(self):
assert Adder().run(2, 2) == 4
def test_function_apply_async(self):
message, queue = add.apply_async([2, 2])
assert message['args'] == [2, 2]
assert message['kwargs'] == {}
assert message['task'] == 'awx.main.tests.functional.test_dispatch.add'
assert queue == 'awx_private_queue'
def test_method_apply_async(self):
message, queue = Adder.apply_async([2, 2])
assert message['args'] == [2, 2]
assert message['kwargs'] == {}
assert message['task'] == 'awx.main.tests.functional.test_dispatch.Adder'
assert queue == 'awx_private_queue'
def test_apply_with_queue(self):
message, queue = add.apply_async([2, 2], queue='abc123')
assert queue == 'abc123'
def test_queue_defined_in_task_decorator(self):
message, queue = multiply.apply_async([2, 2])
assert queue == 'hard-math'
def test_queue_overridden_from_task_decorator(self):
message, queue = multiply.apply_async([2, 2], queue='not-so-hard')
assert queue == 'not-so-hard'
def test_apply_with_callable_queuename(self):
message, queue = add.apply_async([2, 2], queue=lambda: 'called')
assert queue == 'called'
yesterday = tz_now() - datetime.timedelta(days=1)
@pytest.mark.django_db
class TestJobReaper(object):
@pytest.mark.parametrize('status, execution_node, controller_node, modified, fail', [
('running', '', '', None, False), # running, not assigned to the instance
('running', 'awx', '', None, True), # running, has the instance as its execution_node
('running', '', 'awx', None, True), # running, has the instance as its controller_node
('waiting', '', '', None, False), # waiting, not assigned to the instance
('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago
('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago
('waiting', 'awx', '', yesterday, True), # waiting, assigned to the execution_node, stale
('waiting', '', 'awx', yesterday, True), # waiting, assigned to the controller_node, stale
])
def test_should_reap(self, status, fail, execution_node, controller_node, modified):
i = Instance(hostname='awx')
i.save()
j = Job(
status=status,
execution_node=execution_node,
controller_node=controller_node,
start_args='SENSITIVE',
)
j.save()
if modified:
# we have to edit the modification time _without_ calling save()
# (because .save() overwrites it to _now_)
Job.objects.filter(id=j.id).update(modified=modified)
reaper.reap(i)
job = Job.objects.first()
if fail:
assert job.status == 'failed'
assert 'marked as failed' in job.job_explanation
assert job.start_args == ''
else:
assert job.status == status
@pytest.mark.parametrize('excluded_uuids, fail', [
(['abc123'], False),
([], True),
])
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail):
i = Instance(hostname='awx')
i.save()
j = Job(
status='running',
execution_node='awx',
controller_node='',
start_args='SENSITIVE',
celery_task_id='abc123',
)
j.save()
# if the UUID is excluded, don't reap it
reaper.reap(i, excluded_uuids=excluded_uuids)
job = Job.objects.first()
if fail:
assert job.status == 'failed'
assert 'marked as failed' in job.job_explanation
assert job.start_args == ''
else:
assert job.status == 'running'
def test_workflow_does_not_reap(self):
i = Instance(hostname='awx')
i.save()
j = WorkflowJob(
status='running',
execution_node='awx'
)
j.save()
reaper.reap(i)
assert WorkflowJob.objects.first().status == 'running'
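The parametrisation above, plus the excluded-UUID and workflow cases, sketch out when the reaper fails a job: running jobs whose execution_node or controller_node is the reaped instance, and waiting jobs tied to that instance whose modified timestamp has gone stale, unless the job's celery_task_id is excluded (and WorkflowJobs are never reaped at all). A condensed restatement of that table, purely illustrative:

# Condensed restatement of the parametrized expectations above,
# not the reaper's actual code.
def should_reap(status, on_this_instance, stale, excluded):
    if excluded:
        return False
    if status == 'running':
        return on_this_instance
    if status == 'waiting':
        return on_this_instance and stale
    return False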

View File

@@ -3,6 +3,11 @@ import pytest
from awx.main.models.inventory import Inventory
from awx.main.models.credential import Credential
from awx.main.models.jobs import JobTemplate, Job
from awx.main.access import (
UnifiedJobAccess,
WorkflowJobAccess, WorkflowJobNodeAccess,
JobAccess
)
@pytest.mark.django_db
@@ -43,6 +48,31 @@ def test_inventory_use_access(inventory, user):
assert common_user.can_access(Inventory, 'use', inventory)
@pytest.mark.django_db
def test_slice_job(slice_job_factory, rando):
workflow_job = slice_job_factory(2, jt_kwargs={'created_by': rando}, spawn=True)
workflow_job.job_template.execute_role.members.add(rando)
# Abilities of user with execute_role for slice workflow job container
assert WorkflowJobAccess(rando).can_start(workflow_job) # relaunch allowed
for access_cls in (UnifiedJobAccess, WorkflowJobAccess):
access = access_cls(rando)
assert access.can_read(workflow_job)
assert workflow_job in access.get_queryset()
# Abilities of user with execute_role for all the slices of the job
for node in workflow_job.workflow_nodes.all():
access = WorkflowJobNodeAccess(rando)
assert access.can_read(node)
assert node in access.get_queryset()
job = node.job
assert JobAccess(rando).can_start(job) # relaunch allowed
for access_cls in (UnifiedJobAccess, JobAccess):
access = access_cls(rando)
assert access.can_read(job)
assert job in access.get_queryset()
@pytest.mark.django_db
class TestJobRelaunchAccess:
@pytest.fixture

View File

@@ -15,24 +15,6 @@ from awx.main.models.organization import Organization
from awx.main.models.schedules import Schedule
@pytest.fixture
def jt_linked(job_template_factory, credential, net_credential, vault_credential):
'''
A job template with a reasonably complete set of related objects to
test RBAC and other functionality affected by related objects
'''
objects = job_template_factory(
'testJT', organization='org1', project='proj1', inventory='inventory1',
credential='cred1')
jt = objects.job_template
jt.credentials.add(vault_credential)
jt.save()
# Add AWS cloud credential and network credential
jt.credentials.add(credential)
jt.credentials.add(net_credential)
return jt
@mock.patch.object(BaseAccess, 'check_license', return_value=None)
@pytest.mark.django_db
def test_job_template_access_superuser(check_license, user, deploy_jobtemplate):

View File

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
import mock
import pytest
import requests
@@ -293,7 +294,7 @@ class TestSurveySpecValidation:
new['spec'][0]['default'] = '$encrypted$'
new['spec'][0]['required'] = False
resp = view._validate_spec_data(new, old)
assert resp is None
assert resp is None, resp.data
assert new == {
"name": "old survey",
"description": "foobar",
@@ -357,3 +358,58 @@ class TestSurveySpecValidation:
}
]
}
@staticmethod
def spec_from_element(survey_item):
survey_item.setdefault('name', 'foo')
survey_item.setdefault('variable', 'foo')
survey_item.setdefault('required', False)
survey_item.setdefault('question_name', 'foo')
survey_item.setdefault('type', 'text')
spec = {
'name': 'test survey',
'description': 'foo',
'spec': [survey_item]
}
return spec
@pytest.mark.parametrize("survey_item, error_text", [
({'type': 'password', 'default': ['some', 'invalid', 'list']}, 'expected to be string'),
({'type': 'password', 'default': False}, 'expected to be string'),
({'type': 'integer', 'default': 'foo'}, 'expected to be int'),
({'type': 'integer', 'default': u'🐉'}, 'expected to be int'),
({'type': 'foo'}, 'allowed question types'),
({'type': u'🐉'}, 'allowed question types'),
({'type': 'multiplechoice'}, 'multiplechoice must specify choices'),
({'type': 'integer', 'min': 'foo'}, 'min limit in survey question 0 expected to be integer'),
({'question_name': 42}, "'question_name' in survey question 0 expected to be string.")
])
def test_survey_question_element_validation(self, survey_item, error_text):
spec = self.spec_from_element(survey_item)
r = JobTemplateSurveySpec._validate_spec_data(spec, {})
assert r is not None, (spec, error_text)
assert 'error' in r.data
assert error_text in r.data['error']
def test_survey_spec_non_dict_error(self):
spec = self.spec_from_element({})
spec['spec'][0] = 'foo'
r = JobTemplateSurveySpec._validate_spec_data(spec, {})
assert 'Survey question 0 is not a json object' in r.data['error']
def test_survey_spec_dual_names_error(self):
spec = self.spec_from_element({})
spec['spec'].append(spec['spec'][0].copy())
r = JobTemplateSurveySpec._validate_spec_data(spec, {})
assert "'variable' 'foo' duplicated in survey question 1." in r.data['error']
def test_survey_spec_element_missing_property(self):
spec = self.spec_from_element({})
spec['spec'][0].pop('type')
r = JobTemplateSurveySpec._validate_spec_data(spec, {})
assert "'type' missing from survey question 0" in r.data['error']

View File

@@ -73,17 +73,6 @@ def test_invalid_kind_clean_insights_credential():
class TestControlledBySCM():
@pytest.mark.parametrize('source', [
'scm',
'ec2',
'manual',
])
def test_clean_overwrite_vars_valid(self, source):
inv_src = InventorySource(overwrite_vars=True,
source=source)
inv_src.clean_overwrite_vars()
def test_clean_source_path_valid(self):
inv_src = InventorySource(source_path='/not_real/',
source='scm')

View File

@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
import pytest
import six
from django.core.exceptions import ValidationError
from rest_framework.serializers import ValidationError as DRFValidationError
@@ -123,6 +125,9 @@ def test_cred_type_input_schema_validity(input_, valid):
({'env': {'AWX_SECRET_99': '{{awx_secret}}'}}, True),
({'env': {'99': '{{awx_secret}}'}}, False),
({'env': {'AWX_SECRET=': '{{awx_secret}}'}}, False),
({'env': {'ANSIBLE_SETTING': '{{awx_secret}}'}}, False),
({'env': {'DRAGON': u'🐉'}}, False),
({'env': {u'🐉': 'DRAGON'}}, False),
({'extra_vars': 123}, False),
({'extra_vars': {}}, True),
({'extra_vars': {'hostname': '{{host}}'}}, True),
@@ -147,7 +152,8 @@ def test_cred_type_injectors_schema(injectors, valid):
)
field = CredentialType._meta.get_field('injectors')
if valid is False:
with pytest.raises(ValidationError):
with pytest.raises(ValidationError, message=six.text_type(
"Injector was supposed to throw a validation error, data: {}").format(injectors)):
field.clean(injectors, type_)
else:
field.clean(injectors, type_)

View File

@@ -1,69 +0,0 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
import mock
import pytest
from django.utils.timezone import now as tz_now
from django.db import DatabaseError
from awx.main.scheduler import TaskManager
from awx.main.models import (
Job,
Instance,
InstanceGroup,
)
from django.core.cache import cache
class TestCleanupInconsistentCeleryTasks():
@mock.patch.object(cache, 'get', return_value=None)
@mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {}))
@mock.patch.object(TaskManager, 'get_running_tasks', return_value=({'host1': [Job(id=2), Job(id=3),]}, []))
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
@mock.patch.object(Instance.objects, 'filter', return_value=mock.MagicMock(first=lambda: None))
@mock.patch('awx.main.scheduler.task_manager.logger')
def test_instance_does_not_exist(self, logger_mock, *args):
logger_mock.error = mock.MagicMock(side_effect=RuntimeError("mocked"))
tm = TaskManager()
with pytest.raises(RuntimeError) as excinfo:
tm.cleanup_inconsistent_celery_tasks()
assert "mocked" in str(excinfo.value)
logger_mock.error.assert_called_once_with("Execution node Instance host1 not found in database. "
"The node is currently executing jobs ['job 2 (new)', "
"'job 3 (new)']")
@mock.patch.object(cache, 'get', return_value=None)
@mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {'host1': []}))
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
@mock.patch.object(TaskManager, 'get_running_tasks')
@mock.patch('awx.main.scheduler.task_manager.logger')
def test_save_failed(self, logger_mock, get_running_tasks, *args):
logger_mock.error = mock.MagicMock()
job = Job(id=2, modified=tz_now(), status='running', celery_task_id='blah', execution_node='host1')
job.websocket_emit_status = mock.MagicMock()
get_running_tasks.return_value = ({'host1': [job]}, [])
tm = TaskManager()
with mock.patch.object(job, 'save', side_effect=DatabaseError):
tm.cleanup_inconsistent_celery_tasks()
job.save.assert_called_once()
logger_mock.error.assert_called_once_with("Task job 2 (failed) DB error in marking failed. Job possibly deleted.")
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
@mock.patch('awx.main.scheduler.task_manager.Inspect')
def test_multiple_active_instances_sanity_check(self, inspect_mock, *args):
class MockInspector:
pass
mock_inspector = MockInspector()
mock_inspector.active = lambda: {
'celery@host1': [],
'celery@host2': []
}
inspect_mock.return_value = mock_inspector
tm = TaskManager()
active_task_queues, queues = tm.get_active_tasks()
assert 'host1' in queues
assert 'host2' in queues

View File

@@ -67,7 +67,7 @@ def test_work_success_callback_missing_job():
task_data = {'type': 'project_update', 'id': 9999}
with mock.patch('django.db.models.query.QuerySet.get') as get_mock:
get_mock.side_effect = ProjectUpdate.DoesNotExist()
assert tasks.handle_work_success(None, task_data) is None
assert tasks.handle_work_success(task_data) is None
def test_send_notifications_list(mocker):
@@ -246,6 +246,8 @@ class TestJobExecution(object):
# If `Job.update_model` is called, we're not actually persisting
# to the database; just update the status, which is usually
# the update we care about for testing purposes
if kwargs.get('result_traceback'):
raise Exception('Task encountered error:\n{}'.format(kwargs['result_traceback']))
if 'status' in kwargs:
self.instance.status = kwargs['status']
if 'job_env' in kwargs:
@@ -891,9 +893,9 @@ class TestJobCredentials(TestJobExecution):
(k.pattern, v) for k, v in call_kwargs['expect_passwords'].items()
if 'Vault' in k.pattern
)
assert vault_passwords['Vault password \(prod\):\\s*?$'] == 'pass@prod'
assert vault_passwords['Vault password \(dev\):\\s*?$'] == 'pass@dev'
assert vault_passwords['Vault password:\\s*?$'] == ''
assert vault_passwords['Vault password \(prod\):\\s*?$'] == 'pass@prod' # noqa
assert vault_passwords['Vault password \(dev\):\\s*?$'] == 'pass@dev' # noqa
assert vault_passwords['Vault password:\\s*?$'] == '' # noqa
assert '--ask-vault-pass' not in ' '.join(args)
assert '--vault-id dev@prompt' in ' '.join(args)
assert '--vault-id prod@prompt' in ' '.join(args)
@@ -935,9 +937,9 @@ class TestJobCredentials(TestJobExecution):
(k.pattern, v) for k, v in call_kwargs['expect_passwords'].items()
if 'Vault' in k.pattern
)
assert vault_passwords['Vault password \(prod\):\\s*?$'] == 'provided-at-launch@prod'
assert vault_passwords['Vault password \(dev\):\\s*?$'] == 'provided-at-launch@dev'
assert vault_passwords['Vault password:\\s*?$'] == ''
assert vault_passwords['Vault password \(prod\):\\s*?$'] == 'provided-at-launch@prod' # noqa
assert vault_passwords['Vault password \(dev\):\\s*?$'] == 'provided-at-launch@dev' # noqa
assert vault_passwords['Vault password:\\s*?$'] == '' # noqa
assert '--ask-vault-pass' not in ' '.join(args)
assert '--vault-id dev@prompt' in ' '.join(args)
assert '--vault-id prod@prompt' in ' '.join(args)

View File

@@ -8,6 +8,7 @@ from rest_framework.generics import ListAPIView
# AWX
from awx.main.views import ApiErrorView
from awx.api.views import JobList, InventorySourceList
from awx.api.generics import ListCreateAPIView, SubListAttachDetachAPIView
HTTP_METHOD_NAMES = [
@@ -73,3 +74,30 @@ def test_views_have_search_fields(all_views):
for v in views_missing_search
]))
)
def test_global_creation_always_possible(all_views):
"""To not make life very difficult for clients, this test
asserts that all creatable resources can be created by
POSTing to the global resource list
"""
views_by_model = {}
for View in all_views:
if not getattr(View, 'deprecated', False) and issubclass(View, ListAPIView) and hasattr(View, 'model'):
views_by_model.setdefault(View.model, []).append(View)
for model, views in views_by_model.items():
creatable = False
global_view = None
creatable_view = None
for View in views:
if '{}ListView'.format(model.__name__) == View.__name__:
global_view = View
if issubclass(View, ListCreateAPIView) and not issubclass(View, SubListAttachDetachAPIView):
creatable = True
creatable_view = View
if not creatable or not global_view:
continue
assert 'POST' in global_view().allowed_methods, (
'Resource {} should be creatable in global list view {}. '
'Can be created now in {}'.format(model, global_view, creatable_view)
)

View File

@@ -8,8 +8,8 @@ def test_produce_supervisor_command(mocker):
mock_process.communicate = communicate_mock
Popen_mock = mocker.MagicMock(return_value=mock_process)
with mocker.patch.object(reload.subprocess, 'Popen', Popen_mock):
reload._supervisor_service_command(['beat', 'callback', 'fact'], "restart")
reload._supervisor_service_command("restart")
reload.subprocess.Popen.assert_called_once_with(
['supervisorctl', 'restart', 'tower-processes:receiver',],
['supervisorctl', 'restart', 'tower-processes:*',],
stderr=-1, stdin=-1, stdout=-1)

View File

@@ -1,27 +0,0 @@
from celery.utils.log import get_logger
from celery.worker.autoscale import Autoscaler, AUTOSCALE_KEEPALIVE
from django.conf import settings
import psutil
logger = get_logger('awx.main.tasks')
class DynamicAutoScaler(Autoscaler):
def __init__(self, pool, max_concurrency, min_concurrency=0, worker=None,
keepalive=AUTOSCALE_KEEPALIVE, mutex=None):
super(DynamicAutoScaler, self).__init__(pool, max_concurrency,
min_concurrency, worker,
keepalive, mutex)
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
if settings_absmem is not None:
total_memory_gb = int(settings_absmem)
else:
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
# 5 workers per GB of total memory
self.max_concurrency = min(max_concurrency, (total_memory_gb * 5))
logger.warn('celery worker dynamic --autoscale={},{}'.format(
self.max_concurrency,
self.min_concurrency
))

View File

@@ -118,10 +118,10 @@ def string_to_type(t):
elif t == u'false':
return False
if re.search('^[-+]?[0-9]+$',t):
if re.search(r'^[-+]?[0-9]+$',t):
return int(t)
if re.search('^[-+]?[0-9]+\.[0-9]+$',t):
if re.search(r'^[-+]?[0-9]+\.[0-9]+$',t):
return float(t)
return t
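For reference, the behaviour implied by the branches visible above (assuming the elided part of the function maps u'true'/u'false' to booleans, as the surviving elif suggests):

# Behaviour implied by the visible branches of string_to_type().
assert string_to_type(u'false') is False
assert string_to_type(u'-42') == -42
assert string_to_type(u'+3.14') == 3.14
assert string_to_type(u'banana') == u'banana'   # falls through unchanged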

View File

@@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
from django.conf import settings
class AWXCeleryRouter(object):
def route_for_task(self, task, args=None, kwargs=None):
tasks = [
'awx.main.tasks.cluster_node_heartbeat',
'awx.main.tasks.purge_old_stdout_files',
'awx.main.tasks.awx_isolated_heartbeat',
]
if task in tasks:
return {'queue': settings.CLUSTER_HOST_ID, 'routing_key': settings.CLUSTER_HOST_ID}

View File

@@ -11,11 +11,8 @@ from django.conf import settings
logger = logging.getLogger('awx.main.utils.reload')
def _supervisor_service_command(service_internal_names, command, communicate=True):
def _supervisor_service_command(command, communicate=True):
'''
Service internal name options:
- beat - celery - callback - channels - uwsgi - daphne
- fact - nginx
example use pattern of supervisorctl:
# supervisorctl restart tower-processes:receiver tower-processes:factcacher
'''
@@ -25,13 +22,7 @@ def _supervisor_service_command(service_internal_names, command, communicate=Tru
args = ['supervisorctl']
if settings.DEBUG:
args.extend(['-c', '/supervisor.conf'])
programs = []
name_translation_dict = settings.SERVICE_NAME_DICT
for n in service_internal_names:
if n in name_translation_dict:
programs.append('{}:{}'.format(group_name, name_translation_dict[n]))
args.extend([command])
args.extend(programs)
args.extend([command, '{}:*'.format(group_name)])
logger.debug('Issuing command to {} services, args={}'.format(command, args))
supervisor_process = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -48,6 +39,6 @@ def _supervisor_service_command(service_internal_names, command, communicate=Tru
logger.info('Submitted supervisorctl {} command, not waiting for result'.format(command))
def stop_local_services(service_internal_names, communicate=True):
logger.warn('Stopping services {} on this node in response to user action'.format(service_internal_names))
_supervisor_service_command(service_internal_names, command='stop', communicate=communicate)
def stop_local_services(communicate=True):
logger.warn('Stopping services on this node in response to user action')
_supervisor_service_command(command='stop', communicate=communicate)
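With the wildcard form, the helper no longer needs to translate internal service names into supervisor program names; a restart now reduces to the single invocation asserted in the reload test earlier in this diff:

# Mirrors the Popen call asserted in test_produce_supervisor_command above;
# with settings.DEBUG, '-c /supervisor.conf' is prepended to the args.
subprocess.Popen(['supervisorctl', 'restart', 'tower-processes:*'],
                 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)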

View File

@@ -78,10 +78,10 @@ def sanitize_jinja(arg):
if isinstance(arg, six.string_types):
# If the argument looks like it contains Jinja expressions
# {{ x }} ...
if re.search('\{\{[^}]+}}', arg) is not None:
if re.search(r'\{\{[^}]+}}', arg) is not None:
raise ValueError('Inline Jinja variables are not allowed.')
# If the argument looks like it contains Jinja statements/control flow...
# {% if x.foo() %} ...
if re.search('\{%[^%]+%}', arg) is not None:
if re.search(r'\{%[^%]+%}', arg) is not None:
raise ValueError('Inline Jinja variables are not allowed.')
return arg
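The behaviour the two raw-string patterns enforce, sketched from the visible branches:

# Expected behaviour of sanitize_jinja(), per the visible code.
assert sanitize_jinja('plain-hostname') == 'plain-hostname'
try:
    sanitize_jinja('{{ lookup("pipe", "id") }}')   # expression -> rejected
except ValueError as exc:
    assert 'not allowed' in str(exc)
try:
    sanitize_jinja('{% if true %}x{% endif %}')    # statement -> rejected
except ValueError as exc:
    assert 'not allowed' in str(exc)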

View File

@@ -24,7 +24,7 @@ Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instruction on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/
Authentication
--------------
@@ -50,6 +50,7 @@ Command line arguments:
- ad_user
- password
- cloud_environment
- adfs_authority_url
Environment variables:
- AZURE_PROFILE
@@ -60,6 +61,7 @@ Environment variables:
- AZURE_AD_USER
- AZURE_PASSWORD
- AZURE_CLOUD_ENVIRONMENT
- AZURE_ADFS_AUTHORITY_URL
Run for Specific Host
-----------------------
@@ -200,27 +202,43 @@ except ImportError:
# python3
import configparser as cp
from packaging.version import Version
from os.path import expanduser
import ansible.module_utils.six.moves.urllib.parse as urlparse
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
CLIError = None
try:
from msrestazure.azure_active_directory import AADTokenCredentials
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure import azure_cloud
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.compute import ComputeManagementClient
from adal.authentication_context import AuthenticationContext
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
try:
from ansible.release import __version__ as ansible_version
except ImportError:
ansible_version = 'unknown'
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
@@ -231,6 +249,7 @@ AZURE_CREDENTIAL_ENV_MAPPING = dict(
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
)
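The mapping above pairs credential attributes with the environment variables listed in the script's docstring; the snippet below is a hedged illustration of how such a mapping can be collapsed into a credentials dict, not the script's exact code.

```python
import os

# Subset of the mapping shown above, repeated here so the example is self-contained.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD',
    cloud_environment='AZURE_CLOUD_ENVIRONMENT',
    adfs_authority_url='AZURE_ADFS_AUTHORITY_URL',
)

# Keep only the attributes whose environment variable is actually set.
env_credentials = {
    attribute: os.environ[variable]
    for attribute, variable in AZURE_CREDENTIAL_ENV_MAPPING.items()
    if os.environ.get(variable)
}
```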
AZURE_CONFIG_SETTINGS = dict(
@@ -241,10 +260,13 @@ AZURE_CONFIG_SETTINGS = dict(
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
group_by_tag='AZURE_GROUP_BY_TAG'
group_by_tag='AZURE_GROUP_BY_TAG',
group_by_os_family='AZURE_GROUP_BY_OS_FAMILY',
use_private_ip='AZURE_USE_PRIVATE_IP'
)
AZURE_MIN_VERSION = "2.0.0"
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
def azure_id_to_dict(id):
@@ -265,6 +287,8 @@ class AzureRM(object):
self._compute_client = None
self._resource_client = None
self._network_client = None
self._adfs_authority_url = None
self._resource = None
self.debug = False
if args.debug:
@@ -300,13 +324,38 @@ class AzureRM(object):
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
if self.credentials.get('client_id') is not None and \
self.credentials.get('secret') is not None and \
self.credentials.get('tenant') is not None:
# get authentication authority
# for adfs, user could pass in authority or not.
# for others, use default authority from cloud environment
if self.credentials.get('adfs_authority_url'):
self._adfs_authority_url = self.credentials.get('adfs_authority_url')
else:
self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
# get resource from cloud environment
self._resource = self._cloud_environment.endpoints.active_directory_resource_id
if self.credentials.get('credentials'):
self.azure_credentials = self.credentials.get('credentials')
elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment)
elif self.credentials.get('ad_user') is not None and \
self.credentials.get('password') is not None and \
self.credentials.get('client_id') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = self.acquire_token_with_username_password(
self._adfs_authority_url,
self._resource,
self.credentials['ad_user'],
self.credentials['password'],
self.credentials['client_id'],
self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
@@ -315,9 +364,12 @@ class AzureRM(object):
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment)
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password.")
"Credentials must include client_id, secret and tenant or ad_user and password, or "
"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
"be logged in using AzureCLI.")
def log(self, msg):
if self.debug:
@@ -361,6 +413,32 @@ class AzureRM(object):
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_msi_credentials(self, subscription_id_param=None):
credentials = MSIAuthentication()
subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
try:
# try to get the subscription in MSI to test whether MSI is enabled
subscription_client = SubscriptionClient(credentials)
subscription = next(subscription_client.subscriptions.list())
subscription_id = str(subscription.subscription_id)
return {
'credentials': credentials,
'subscription_id': subscription_id_param or subscription_id
}
except Exception as exc:
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
@@ -397,8 +475,31 @@ class AzureRM(object):
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
if msi_credentials:
self.log('Retrieved credentials from MSI.')
return msi_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
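Taken together with the precedence comment above, the two new fallbacks slot in at the end of the lookup chain. The sketch below only summarizes that order: `_get_msi_credentials` and `_get_azure_cli_credentials` are the methods added in this diff, while the earlier helpers are paraphrased and may not match the script's exact names.

```python
def resolve_credentials(azure_rm, params):
    # Illustrative fallback chain only; in the real method, explicit command-line
    # parameters are checked before any of these sources.
    sources = (
        lambda: azure_rm._get_env_credentials(),       # assumed helper: AZURE_* variables
        lambda: azure_rm._get_profile('default'),      # assumed helper: ~/.azure/credentials
        lambda: azure_rm._get_msi_credentials(params.get('subscription_id')),
        lambda: azure_rm._get_azure_cli_credentials() if HAS_AZURE_CLI_CORE else None,
    )
    for source in sources:
        credentials = source()
        if credentials:
            return credentials
    return None
```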
def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
authority_uri = authority
if tenant is not None:
authority_uri = authority + '/' + tenant
context = AuthenticationContext(authority_uri)
token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
return AADTokenCredentials(token_response)
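A hedged usage sketch of the new helper; every value below is a placeholder, and `azure_rm` stands in for an initialized `AzureRM` instance.

```python
# Placeholder values for illustration only.
credentials = azure_rm.acquire_token_with_username_password(
    'https://adfs.example.com/adfs',          # authority: ADFS endpoint or AAD authority
    'https://management.core.windows.net/',   # resource, normally taken from the cloud environment
    'user@example.com',                       # ad_user
    'not-a-real-password',                    # password
    '00000000-0000-0000-0000-000000000000',   # client_id
    None,                                     # tenant is optional for ADFS
)
```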
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
@@ -412,16 +513,21 @@ class AzureRM(object):
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
def get_mgmt_svc_client(self, client_type, base_url, api_version):
client = client_type(self.azure_credentials,
self.subscription_id,
base_url=base_url,
api_version=api_version)
client.config.add_user_agent(ANSIBLE_USER_AGENT)
return client
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = NetworkManagementClient(
self.azure_credentials,
self.subscription_id,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-06-01'
)
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-06-01')
self._register('Microsoft.Network')
return self._network_client
@@ -429,24 +535,18 @@ class AzureRM(object):
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = ResourceManagementClient(
self.azure_credentials,
self.subscription_id,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-05-10'
)
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-05-10')
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = ComputeManagementClient(
self.azure_credentials,
self.subscription_id,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-03-30'
)
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-03-30')
self._register('Microsoft.Compute')
return self._compute_client
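With client construction consolidated into `get_mgmt_svc_client`, adding a further client becomes a few lines. The property below is purely illustrative (DNS is not used by this script, and the API version is an assumption); it would live on the `AzureRM` class.

```python
from azure.mgmt.dns import DnsManagementClient  # illustrative import, not part of this change


@property
def dns_client(self):
    self.log('Getting DNS client')
    if not getattr(self, '_dns_client', None):
        self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
                                                    self._cloud_environment.endpoints.resource_manager,
                                                    '2018-05-01')  # assumed API version
    return self._dns_client
```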
@@ -473,9 +573,11 @@ class AzureInventory(object):
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
self.group_by_os_family = True
self.group_by_security_group = True
self.group_by_tag = True
self.include_powerstate = True
self.use_private_ip = False
self._inventory = dict(
_meta=dict(
@@ -528,6 +630,8 @@ class AzureInventory(object):
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--adfs_authority_url', action='store',
help='Azure ADFS authority url')
parser.add_argument('--cloud_environment', action='store',
help='Azure Cloud Environment name or metadata discovery URL')
parser.add_argument('--resource-groups', action='store',
@@ -545,7 +649,7 @@ class AzureInventory(object):
# get VMs for requested resource groups
for resource_group in self.resource_groups:
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group)
virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower())
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
if self._args.host or self.tags:
@@ -604,7 +708,7 @@ class AzureInventory(object):
host_vars['os_disk'] = dict(
name=machine.storage_profile.os_disk.name,
operating_system_type=machine.storage_profile.os_disk.os_type.value
operating_system_type=machine.storage_profile.os_disk.os_type.value.lower()
)
if self.include_powerstate:
@@ -651,12 +755,15 @@ class AzureInventory(object):
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if self.use_private_ip:
host_vars['ansible_host'] = ip_config.private_ip_address
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
public_ip_reference['resourceGroups'],
public_ip_reference['publicIPAddresses'])
host_vars['ansible_host'] = public_ip_address.ip_address
if not self.use_private_ip:
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
@@ -706,10 +813,16 @@ class AzureInventory(object):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower())
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_os_family:
if not self._inventory.get(operating_system_type):
self._inventory[operating_system_type] = []
self._inventory[operating_system_type].append(host_name)
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
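For orientation, the grouping above roughly yields an inventory of the following shape when `group_by_os_family` and `group_by_resource_group` are enabled; the host and group names are invented for the example.

```python
example_inventory = {
    '_meta': {
        'hostvars': {
            'web01': {
                'ansible_host': '10.0.0.4',
                'os_disk': {'operating_system_type': 'linux'},
            },
        },
    },
    'linux': ['web01'],              # group added by group_by_os_family
    'my_resource_group': ['web01'],  # group added by group_by_resource_group
}
```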

View File

@@ -69,7 +69,7 @@ class ServiceScanService(BaseService):
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile('^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r","")
for line in real_stdout.split("\n"):
@@ -90,8 +90,8 @@ class ServiceScanService(BaseService):
elif chkconfig_path is not None:
#print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
@@ -99,7 +99,7 @@ class ServiceScanService(BaseService):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile('(?P<service>.*?)\s+(?P<rl0>on|off)')
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):

View File

@@ -4,7 +4,6 @@
import os
import re # noqa
import sys
import djcelery
import six
from datetime import timedelta
@@ -26,6 +25,8 @@ BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def is_testing(argv=None):
import sys
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
@@ -60,7 +61,7 @@ DATABASES = {
'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),
'ATOMIC_REQUESTS': True,
'TEST': {
# Test database cannot be :memory: for celery/inventory tests.
# Test database cannot be :memory: for inventory tests.
'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'),
},
}
@@ -261,7 +262,6 @@ MIDDLEWARE_CLASSES = ( # NOQA
'awx.sso.middleware.SocialAuthMiddleware',
'crum.CurrentRequestUserMiddleware',
'awx.main.middleware.URLModificationMiddleware',
'awx.main.middleware.DeprecatedAuthTokenMiddleware',
'awx.main.middleware.SessionTimeoutMiddleware',
)
@@ -280,7 +280,6 @@ INSTALLED_APPS = (
'oauth2_provider',
'rest_framework',
'django_extensions',
'djcelery',
'channels',
'polymorphic',
'taggit',
@@ -459,40 +458,9 @@ DEVSERVER_DEFAULT_PORT = '8013'
# Set default ports for live server tests.
os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
djcelery.setup_loader()
BROKER_POOL_LIMIT = None
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
CELERY_EVENT_QUEUE_TTL = 5
CELERY_DEFAULT_QUEUE = 'awx_private_queue'
CELERY_DEFAULT_EXCHANGE = 'awx_private_queue'
CELERY_DEFAULT_ROUTING_KEY = 'awx_private_queue'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TRACK_STARTED = True
CELERYD_TASK_TIME_LIMIT = None
CELERYD_TASK_SOFT_TIME_LIMIT = None
CELERYD_POOL_RESTARTS = True
CELERYD_AUTOSCALER = 'awx.main.utils.autoscale:DynamicAutoScaler'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_IMPORTS = ('awx.main.scheduler.tasks',)
CELERY_QUEUES = ()
CELERY_ROUTES = ('awx.main.utils.ha.AWXCeleryRouter',)
def log_celery_failure(*args):
# Import annotations lazily to avoid polluting the `awx.settings` namespace
# and causing circular imports
from awx.main.tasks import log_celery_failure
return log_celery_failure(*args)
CELERY_ANNOTATIONS = {'*': {'on_failure': log_celery_failure}}
CELERYBEAT_SCHEDULER = 'celery.beat.PersistentScheduler'
CELERYBEAT_MAX_LOOP_INTERVAL = 60
CELERYBEAT_SCHEDULE = {
'tower_scheduler': {
'task': 'awx.main.tasks.awx_periodic_scheduler',
@@ -525,9 +493,6 @@ CELERYBEAT_SCHEDULE = {
}
AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3
# Celery queues that will always be listened to by celery workers
# Note: Broadcast queues have unique, auto-generated names, with the alias
# property value of the original queue name.
AWX_CELERY_QUEUES_STATIC = [
six.text_type(CELERY_DEFAULT_QUEUE),
]
@@ -626,8 +591,8 @@ SOCIAL_AUTH_SAML_ENABLED_IDPS = {}
SOCIAL_AUTH_SAML_ORGANIZATION_ATTR = {}
SOCIAL_AUTH_SAML_TEAM_ATTR = {}
# Any ANSIBLE_* settings will be passed to the subprocess environment by the
# celery task.
# Any ANSIBLE_* settings will be passed to the task runner subprocess
# environment
# Do not want AWX to ask interactive questions and want it to be friendly with
# reprovisioning
@@ -641,8 +606,7 @@ ANSIBLE_PARAMIKO_RECORD_HOST_KEYS = False
# output
ANSIBLE_FORCE_COLOR = True
# Additional environment variables to be passed to the subprocess started by
# the celery task.
# Additional environment variables to be passed to the ansible subprocesses
AWX_TASK_ENV = {}
# Flag to enable/disable updating hosts M2M when saving job events.
@@ -1027,7 +991,10 @@ LOGGING = {
'timed_import': {
'()': 'awx.main.utils.formatters.TimeFormatter',
'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s'
}
},
'dispatcher': {
'format': '%(asctime)s %(levelname)-8s %(name)s PID:%(process)d %(message)s',
},
},
'handlers': {
'console': {
@@ -1077,6 +1044,19 @@ LOGGING = {
'backupCount': 5,
'formatter':'simple',
},
'dispatcher': {
'level': 'WARNING',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'dispatcher.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'dispatcher',
},
'celery.beat': {
'class':'logging.StreamHandler',
'level': 'ERROR'
}, # don't log every celerybeat wakeup
'inventory_import': {
'level': 'DEBUG',
'class':'logging.StreamHandler',
@@ -1158,8 +1138,13 @@ LOGGING = {
},
'awx.main': {
'handlers': ['null']
}, 'awx.main.commands.run_callback_receiver': {
},
'awx.main.commands.run_callback_receiver': {
'handlers': ['callback_receiver'],
'level': 'INFO' # in debug mode, includes full callback data
},
'awx.main.dispatch': {
'handlers': ['dispatcher'],
},
'awx.isolated.manager.playbooks': {
'handlers': ['management_playbooks'],

View File

@@ -68,13 +68,6 @@ template['OPTIONS']['loaders'] = (
'django.template.loaders.app_directories.Loader',
)
# Disable capturing all SQL queries when running celeryd in development.
if 'celery' in sys.argv:
SQL_DEBUG = False
CELERYD_HIJACK_ROOT_LOGGER = False
CELERYD_LOG_COLOR = True
CALLBACK_QUEUE = "callback_tasks"
# Enable dynamically pulling roles from a requirement.yml file
@@ -149,15 +142,6 @@ except ImportError:
CLUSTER_HOST_ID = socket.gethostname()
# Supervisor service name dictionary used for programmatic restart
SERVICE_NAME_DICT = {
"celery": "celery",
"callback": "receiver",
"runworker": "channels",
"uwsgi": "uwsgi",
"daphne": "daphne",
"nginx": "nginx"}
try:
socket.gethostbyname('docker.for.mac.host.internal')
os.environ['SDB_NOTIFY_HOST'] = 'docker.for.mac.host.internal'

View File

@@ -73,13 +73,13 @@ if "pytest" in sys.modules:
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),
'TEST': {
# Test database cannot be :memory: for celery/inventory tests.
# Test database cannot be :memory: for inventory tests.
'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'),
},
}
}
# Celery AMQP configuration.
# AMQP configuration.
BROKER_URL = "amqp://{}:{}@{}/{}".format(os.environ.get("RABBITMQ_USER"),
os.environ.get("RABBITMQ_PASS"),
os.environ.get("RABBITMQ_HOST"),
@@ -138,8 +138,7 @@ REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
# REMOTE_HOST_HEADERS will be trusted unconditionally')
PROXY_IP_WHITELIST = []
# Define additional environment variables to be passed to subprocess started by
# the celery task.
# Define additional environment variables to be passed to ansible subprocesses
#AWX_TASK_ENV['FOO'] = 'BAR'
# If set, use -vvv for project updates instead of -v for more output.
@@ -197,7 +196,7 @@ LOGGING['handlers']['syslog'] = {
LOGGING['loggers']['django.request']['handlers'] = ['console']
LOGGING['loggers']['rest_framework.request']['handlers'] = ['console']
LOGGING['loggers']['awx']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.commands.run_callback_receiver']['handlers'] = ['console']
LOGGING['loggers']['awx.main.commands.run_callback_receiver']['handlers'] = [] # propagates to awx
LOGGING['loggers']['awx.main.tasks']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.scheduler']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console']

View File

@@ -39,13 +39,13 @@ if is_testing(sys.argv):
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),
'TEST': {
# Test database cannot be :memory: for celery/inventory tests.
# Test database cannot be :memory: for tests.
'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'),
},
}
}
# Celery AMQP configuration.
# AMQP configuration.
BROKER_URL = 'amqp://guest:guest@localhost:5672'
# Set True to enable additional logging from the job_event_callback plugin
@@ -94,8 +94,7 @@ REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
# REMOTE_HOST_HEADERS will be trusted unconditionally')
PROXY_IP_WHITELIST = []
# Define additional environment variables to be passed to subprocess started by
# the celery task.
# Define additional environment variables to be passed to ansible subprocesses
#AWX_TASK_ENV['FOO'] = 'BAR'
# If set, use -vvv for project updates instead of -v for more output.

View File

@@ -54,21 +54,13 @@ AWX_ISOLATED_USERNAME = 'awx'
LOGGING['handlers']['tower_warnings']['filename'] = '/var/log/tower/tower.log'
LOGGING['handlers']['callback_receiver']['filename'] = '/var/log/tower/callback_receiver.log'
LOGGING['handlers']['dispatcher']['filename'] = '/var/log/tower/dispatcher.log'
LOGGING['handlers']['task_system']['filename'] = '/var/log/tower/task_system.log'
LOGGING['handlers']['fact_receiver']['filename'] = '/var/log/tower/fact_receiver.log'
LOGGING['handlers']['management_playbooks']['filename'] = '/var/log/tower/management_playbooks.log'
LOGGING['handlers']['system_tracking_migrations']['filename'] = '/var/log/tower/tower_system_tracking_migrations.log'
LOGGING['handlers']['rbac_migrations']['filename'] = '/var/log/tower/tower_rbac_migrations.log'
# Supervisor service name dictionary used for programmatic restart
SERVICE_NAME_DICT = {
"beat": "awx-celery-beat",
"celery": "awx-celery",
"callback": "awx-callback-receiver",
"channels": "awx-channels-worker",
"uwsgi": "awx-uwsgi",
"daphne": "awx-daphne"}
# Store a snapshot of default settings at this point before loading any
# customizable config files.
DEFAULTS_SNAPSHOT = {}

View File

@@ -42,7 +42,7 @@ class PosixUIDGroupType(LDAPGroupType):
)
search = group_search.search_with_additional_term_string(filterstr)
search.attrlist = [self.name_attr]
search.attrlist = [str(self.name_attr)]
groups = search.execute(ldap_user.connection)
except (KeyError, IndexError):
pass

View File

@@ -1,46 +1,26 @@
# AWX UI
## Requirements
- node.js 8.x LTS
- npm 5.x LTS
- bzip2, gcc-c++, git, make
### Node / NPM
## Development
The API development server will need to be running. See [CONTRIBUTING.md](../../CONTRIBUTING.md).
AWX currently requires the 6.x LTS version of Node and NPM.
```shell
# Build ui for the devel environment - reachable at https://localhost:8043
make ui-devel
macOS installer: [https://nodejs.org/dist/latest-v6.x/](https://nodejs.org/dist/latest-v6.x/)
# Alternatively, start the ui development server. While running, the ui will be reachable
# at https://localhost:3000 and updated automatically when code changes.
make ui-docker
RHEL / CentOS / Fedora:
```
$ curl --silent --location https://rpm.nodesource.com/setup_6.x | bash -
$ yum install nodejs
```
### Other Dependencies
On macOS, install the Command Line Tools:
```
$ xcode-select --install
```
RHEL / CentOS / Fedora:
```
$ yum install bzip2 gcc-c++ git make
```
## Usage
### Starting the UI
First, the AWX API will need to be running. See [CONTRIBUTING.md](../../CONTRIBUTING.md).
When using Docker for Mac or native Docker on Linux:
```
$ make ui-docker
# When using docker machine, use this command to start the ui development server instead.
DOCKER_MACHINE_NAME=default make ui-docker-machine
```
## Development with an external server
If you normally run awx on an external host/server (in this example, `awx.local`),
you'll need to reconfigure the webpack proxy slightly for `make ui-docker` to
work:
@@ -63,119 +43,32 @@ awx/ui/build/webpack.watch.js
},
```
When using Docker Machine:
## Testing
```shell
# run linters
make jshint
```
$ DOCKER_MACHINE_NAME=default make ui-docker-machine
# run unit tests
make ui-test-ci
# run e2e tests - see awx/ui/test/e2e for more information
npm --prefix awx/ui run e2e
```
### Running Tests
## Adding dependencies
```shell
# add a development or build dependency
npm install --prefix awx/ui --save-dev dev-package@1.2.3
Run unit tests locally, poll for changes to both source and test files, launch tests in supported browser engines:
# add a production dependency
npm install --prefix awx/ui --save prod-package@1.23
```
$ make ui-test
# add the updated package.json and lock files to scm
git add awx/ui/package.json awx/ui/package-lock.json
```
Run unit tests in a CI environment (Jenkins)
## Building for Production
```shell
# built files are placed in awx/ui/static
make ui-release
```
$ make ui-test-ci
```
### Adding new dependencies
#### Add / update a bundled vendor dependency
1. `npm install --prefix awx/ui --save some-frontend-package@1.2.3`
2. Add `'some-package'` to `var vendorFiles` in `./grunt-tasks/webpack.js`
3. `npm --prefix awx/ui shrinkwrap` to freeze current dependency resolution
#### Add / update a dependency in the build/test pipeline
1. `npm install --prefix awx/ui --save-dev some-toolchain-package@1.2.3`
2. `npm --prefix awx/ui shrinkwrap` to freeze current dependency resolution
### Polyfills, shims, patches
The Webpack pipeline will prefer module patterns in this order: CommonJS, AMD, UMD. For a comparison of supported patterns, refer to [Webpack's docs](https://webpack.github.io/docs/comparison.html).
Some javascript libraries do not export their contents as a module, or depend on other third-party components. If the library maintainer does not wrap their lib in a factory that provides a CommonJS or AMD module, you will need to provide dependencies with a shim.
1. Shim implicit dependencies using Webpack's [ProvidePlugin](https://github.com/webpack/webpack/blob/006d59500de0493c4096d5d4cecd64eb12db2b95/lib/ProvidePlugin.js). Example:
```js
// AWX source code depends on the lodash library being available as _
_.uniq([1,2,3,1]) // will throw error undefined
```
```js
// webpack.config.js
plugins: [
new webpack.ProvidePlugin({
'_': 'lodash',
})
]
```
```js
// the following requirement is inserted by webpack at build time
var _ = require('lodash');
_.uniq([1,2,3,1])
```
2. Use [`imports-loader`](https://webpack.github.io/docs/shimming-modules.html#importing) to inject requirements into the namespace of vendor code at import time. Use [`exports-loader`](https://webpack.github.io/docs/shimming-modules.html#exporting) to conventionally export vendor code lacking a conventional export pattern.
3. [Apply a functional patch](https://gist.github.com/leigh-johnson/070159d3fd780d6d8da6e13625234bb3). A webpack plugin is the correct choice for a functional patch if your patch needs to access events in a build's lifecycle. A webpack loader is preferable if you need to compile and export a custom pattern of library modules.
4. [Submit patches to libraries without modular exports](https://github.com/leigh-johnson/ngToast/commit/fea95bb34d27687e414619b4f72c11735d909f93) - the internet will thank you
Some javascript libraries might only get one module pattern right.
### Environment configuration - used in development / test builds
Build tasks are parameterized with environment variables.
`package.json` contains default environment configuration. When `npm run myScriptName` is executed, these variables will be exported to your environment with the prefix `npm_package_config_`. For example, `my_variable` will be exported to `npm_package_config_my_variable`.
Environment variables can be accessed in JavaScript via `process.env`.
``` json
"config": {
"django_port": "8013",
"websocket_port": "8080",
"django_host": "0.0.0.0"
}
```
Example usage in `npm run build-docker-machine`:
```bash
$ docker-machine ssh $DOCKER_MACHINE_NAME -f -N -L ${npm_package_config_websocket_port}:localhost:${npm_package_config_websocket_port}; ip=$(docker-machine ip $DOCKER_MACHINE_NAME); echo npm set awx:django_host ${ip}; grunt dev
```
Example usage in an `npm test` script target:
```
npm_package_config_websocket_port=mock_websocket_port npm_package_config_django_port=mock_api_port npm_package_config_django_host=mock_api_host npm run test:someMockIntegration
```
You'll usually want to pipe and set vars prior to running a script target:
```
$ npm set awx:websocket_host ${mock_host}; npm run script-name
```
### NPM Scripts
Examples:
```json
{
"scripts": {
"pretest": "echo I run immediately before 'npm test' executes",
"posttest": "echo I run immediately after 'npm test' exits",
"test": "karma start karma.conf.js"
}
}
```
`npm test` is an alias for `npm run test`. Refer to [script field docs](https://docs.npmjs.com/misc/scripts) for a list of other runtime events.

View File

@@ -9,6 +9,7 @@ import atFeaturesTemplates from '~features/templates';
import atFeaturesUsers from '~features/users';
import atFeaturesJobs from '~features/jobs';
import atFeaturesPortalMode from '~features/portalMode';
import atFeaturesProjects from '~features/projects';
const MODULE_NAME = 'at.features';
@@ -24,6 +25,7 @@ angular.module(MODULE_NAME, [
atFeaturesOutput,
atFeaturesTemplates,
atFeaturesPortalMode,
atFeaturesProjects
]);
export default MODULE_NAME;

View File

@@ -1,12 +1,14 @@
import JobsStrings from './jobs.strings';
import jobsRoute from './routes/jobs.route';
import { jobsSchedulesRoute, jobsSchedulesEditRoute } from '../../src/scheduler/schedules.route';
import jobsListController from './jobsList.controller';
const MODULE_NAME = 'at.features.jobs';
angular
.module(MODULE_NAME, [])
.service('JobsStrings', JobsStrings)
.controller('jobsListController', jobsListController)
.run(['$stateExtender', ($stateExtender) => {
$stateExtender.addState(jobsRoute);
$stateExtender.addState(jobsSchedulesRoute);

View File

@@ -76,6 +76,22 @@ function ListJobsController (
return { icon, link, value };
});
vm.getSliceJobDetails = (job) => {
if (!job.job_slice_count) {
return null;
}
if (job.job_slice_count === 1) {
return null;
}
if (job.job_slice_number && job.job_slice_count) {
return `Slice Job ${job.job_slice_number}/${job.job_slice_count}`;
}
return null;
};
vm.getSref = ({ type, id }) => {
let sref;

View File

@@ -23,7 +23,8 @@
status-tip="{{ vm.strings.get('list.STATUS_TOOLTIP', job.status) }}"
header-value="{{ job.id }} - {{ job.name }}"
header-state="{{ vm.getSref(job) }}"
header-tag="{{ vm.jobTypes[job.type] }}">
header-tag="{{ vm.jobTypes[job.type] }}"
secondary-tag="{{ vm.getSliceJobDetails(job) }}">
</at-row-item>
<div class="at-Row--inline">
<at-row-item

View File

@@ -1,5 +1,4 @@
import { N_ } from '../../../src/i18n';
import jobsListController from '../jobsList.controller';
import indexController from '../index.controller';
const indexTemplate = require('~features/jobs/index.view.html');
@@ -69,7 +68,7 @@ export default {
},
'jobsList@jobs': {
templateUrl: jobsListTemplate,
controller: jobsListController,
controller: 'jobsListController',
controllerAs: 'vm'
}
}

View File

@@ -126,6 +126,33 @@ function getSourceWorkflowJobDetails () {
return { link, tooltip };
}
function getSliceJobDetails () {
const count = resource.model.get('job_slice_count');
if (!count) {
return null;
}
if (count === 1) {
return null;
}
const number = resource.model.get('job_slice_number');
if (!number) {
return null;
}
const label = strings.get('labels.SLICE_JOB');
const offset = `${number}/${count}`;
const tooltip = strings.get('tooltips.SLICE_JOB_DETAILS');
if (label && offset && tooltip) {
return { label, offset, tooltip };
}
return null;
}
function getJobTemplateDetails () {
const jobTemplate = resource.model.get('summary_fields.job_template');
@@ -671,6 +698,7 @@ function JobDetailsController (
vm.jobType = getJobTypeDetails();
vm.jobTemplate = getJobTemplateDetails();
vm.sourceWorkflowJob = getSourceWorkflowJobDetails();
vm.sliceJobDetails = getSliceJobDetails();
vm.inventory = getInventoryDetails();
vm.project = getProjectDetails();
vm.projectUpdate = getProjectUpdateDetails();

View File

@@ -151,6 +151,12 @@
<div class="JobResults-resultRowText">{{ vm.jobType.value }}</div>
</div>
<!-- SLICE JOB DETAIL -->
<div class="JobResults-resultRow" ng-if="vm.sliceJobDetails">
<label class="JobResults-resultRowLabel">{{ vm.sliceJobDetails.label }}</label>
<div class="JobResults-resultRowText">{{ vm.sliceJobDetails.offset }}</div>
</div>
<!-- LAUNCHED BY DETAIL -->
<div class="JobResults-resultRow" ng-if="vm.launchedBy">
<label class="JobResults-resultRowLabel">{{ vm.launchedBy.label }}</label>

Some files were not shown because too many files have changed in this diff.