Compare commits


502 Commits
1.0.2 ... 1.0.3

Author SHA1 Message Date
Ryan Petrello
ed1bacdc08 Merge pull request #1090 from ryanpetrello/awx-ansible-overview
add documentation for how awx uses/interacts with ansible
2018-02-02 08:44:11 -05:00
Jake McDermott
f39fa35d86 Merge pull request #1103 from mabashian/1091-status-icons
Tweaked smart status icon styling to prevent overlap with action buttons
2018-02-01 23:03:37 -05:00
Matthew Jones
9266444b19 Merge pull request #1117 from ryanpetrello/fatal-celery-reload
run the celery reload in a shell so the uwsgi hook isn't fatal on fail
2018-02-01 22:39:00 -05:00
Ryan Petrello
35230eded1 run the celery reload in a shell so the uwsgi hook isn't fatal on fail 2018-02-01 22:32:08 -05:00
Jake McDermott
ecacf64c28 Merge pull request #1105 from mabashian/1023-activity-stream-inv-link
Fixed inventory links in activity stream
2018-02-01 18:23:54 -05:00
Jake McDermott
d01e6ab8b6 Merge pull request #1106 from mabashian/1014-team-link-user-permissions
Fixed team links in users permissions tab
2018-02-01 18:23:00 -05:00
Jake McDermott
5653b47aa3 Merge pull request #1112 from mabashian/994-empty-list-text
Updated empty list text
2018-02-01 18:22:11 -05:00
Ryan Petrello
7bc3d85913 Merge pull request #1114 from ryanpetrello/fix-dateutil-bug
work around a bug in dateutil that incorrectly parses Z dates
2018-02-01 16:06:13 -05:00
Ryan Petrello
0a8df7fde2 work around a bug in dateutil that incorrectly parses Z dates
related: https://github.com/dateutil/dateutil/issues/349
2018-02-01 15:51:59 -05:00
Ryan Petrello
b39269c4c2 Merge pull request #1113 from ryanpetrello/fix-schedule-related
fix a bug which can break the schedules list endpoint
2018-02-01 14:50:37 -05:00
Chris Meyers
09981c0020 Merge pull request #1107 from ansible/docs-saml2
Extend saml docs to include new fields added
2018-02-01 14:33:51 -05:00
Ryan Petrello
81bdbef785 fix a bug which can break the schedules list endpoint
see: https://github.com/ansible/ansible-tower/issues/7881
related: https://github.com/ansible/awx/pull/1095
2018-02-01 14:30:56 -05:00
Chris Meyers
3c541a4695 Merge pull request #1111 from ansible/jakemcdermott-update-ldap-docs
Updates to ldap documentation
2018-02-01 14:30:23 -05:00
Jake McDermott
5a1ae9b816 Update ldap.md 2018-02-01 13:57:07 -05:00
mabashian
8c261892ee Updated empty list text 2018-02-01 13:54:33 -05:00
Matthew Jones
b89d4349c0 Merge pull request #1080 from Xiol/feat-projects-vol
Allow AWX projects directory to be a volume
2018-02-01 13:17:34 -05:00
Jake McDermott
3e98363811 Merge pull request #1104 from ansible/doc-formatting
Update saml.md
2018-02-01 11:55:46 -05:00
Chris Meyers
f24289b2ba Extend saml docs to include new fields added 2018-02-01 11:27:01 -05:00
mabashian
9170c557a7 Fixed team links in users permissions tab 2018-02-01 11:23:51 -05:00
Chris Meyers
a47b403f8d Update saml.md 2018-02-01 11:05:08 -05:00
mabashian
83aa7bfac4 Fixed inventory links in activity stream 2018-02-01 11:04:16 -05:00
mabashian
db0b2e6cb6 Tweaked smart status icon styling to prevent overlap with action buttons 2018-02-01 10:44:08 -05:00
Ryan Petrello
f391b7ace4 Merge pull request #1095 from ryanpetrello/schedule-related-proj-inv
add related links to the inventory and project for a schedule
2018-01-31 15:53:13 -05:00
Ryan Petrello
008c9e4320 Merge pull request #1094 from ryanpetrello/leaky-mock
remove some leaky mock.patch() that were causing sporadic test failures
2018-01-31 15:52:58 -05:00
Ryan Petrello
8ddc1c61ef add related links to the inventory and project for a schedule
see: https://github.com/ansible/awx/issues/276
2018-01-31 15:18:11 -05:00
Ryan Petrello
0aa6c7b83f remove some leaky mock.patch() that were causing sporadic test failures 2018-01-31 15:12:59 -05:00
Jake McDermott
e43879d44e Merge pull request #1092 from dovshap/patch-1
Update INSTALL.md
2018-01-31 14:11:11 -05:00
Ryan Petrello
2a6f6111dc add documentation for how awx uses/interacts with ansible 2018-01-31 14:02:18 -05:00
dovshap
6b0659d63a Update INSTALL.md
fix bad link in contents
2018-01-31 11:00:03 -08:00
Shane McDonald
426e901cdf Merge pull request #1089 from KAMiKAZOW/patch-1
Fix CentOS typo in CONTRIBUTING.md
2018-01-30 21:11:10 -05:00
KAMiKAZOW
ac55f93cfb CentOS typo in CONTRIBUTING.md 2018-01-31 03:07:52 +01:00
Ryan Petrello
c32c3db35e Merge pull request #1086 from ryanpetrello/fix-enabled-sso-auth
fix a bug which causes /api/v2/auth/ to list disabled auth backends
2018-01-30 16:35:26 -05:00
Ryan Petrello
20a999f846 Merge pull request #1085 from ryanpetrello/fix-7876
fix a bug in custom venv support that breaks legacy `POST /api/v1/jobs`
2018-01-30 16:21:10 -05:00
Ryan Petrello
81af34fce3 fix a bug which causes /api/v2/auth/ to list disabled auth backends
see: https://github.com/ansible/awx/issues/1073
2018-01-30 16:20:29 -05:00
Ryan Petrello
8fed469975 fix a bug in custom venv support that breaks legacy POST /api/v1/jobs
see: https://github.com/ansible/ansible-tower/issues/7876
2018-01-30 15:29:11 -05:00
Dane Elwell
c6d4a62263 Allow AWX projects directory to be a volume
Signed-off-by: Dane Elwell <dane.elwell@ukfast.co.uk>
2018-01-30 09:49:44 +00:00
Ryan Petrello
a9b77eb706 Merge pull request #1066 from ryanpetrello/fix-schedules-without-inventories
fix a few bugs for scheduled jobs that run without inventories
2018-01-29 16:08:17 -05:00
Ryan Petrello
e642af82cc fix a few bugs for scheduled jobs that run without inventories
see: https://github.com/ansible/ansible-tower/issues/7865
see: https://github.com/ansible/ansible-tower/issues/7866
2018-01-29 15:15:57 -05:00
Ryan Petrello
b0a755d7b5 Merge pull request #1076 from ryanpetrello/rrule-until-changes
adhere to RFC5545 regarding UNTIL timezones
2018-01-29 13:31:17 -05:00
Ryan Petrello
6753f1ca35 adhere to RFC5545 regarding UNTIL timezones
If the "DTSTART" property is specified as a date with UTC time or a date with
local time and time zone reference, then the UNTIL rule part MUST be specified
as a date with UTC time.
2018-01-29 12:42:31 -05:00
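A minimal sketch of the RFC 5545 constraint quoted above (assuming python-dateutil): when DTSTART carries a UTC time (the trailing "Z"), the UNTIL value is given in UTC as well.

```python
# Illustration only (not AWX code): parse an rrule whose DTSTART is in UTC,
# so its UNTIL value is also expressed in UTC, as RFC 5545 requires.
from dateutil.rrule import rrulestr

rule = rrulestr(
    "DTSTART:20180129T120000Z\n"
    "RRULE:FREQ=DAILY;UNTIL=20180201T120000Z"
)
print(list(rule))  # the daily occurrences from DTSTART through UNTIL
```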
Ryan Petrello
f8d9d5f51a Merge pull request #1067 from ryanpetrello/fix-7869
don't allow distant DTSTART values for schedules; it's slow
2018-01-29 12:00:36 -05:00
Wayne Witzel III
bad8c65321 Merge pull request #1074 from wwitzel3/devel
Load Celery inspector manually when needed
2018-01-29 12:00:24 -05:00
Ryan Petrello
6f0c937236 don't allow distant DTSTART values for schedules; it's slow
see: https://github.com/ansible/ansible-tower/issues/7869
2018-01-29 10:16:03 -05:00
Wayne Witzel III
55a616cba6 Load Celery inspector manually when needed 2018-01-29 14:57:03 +00:00
Shane McDonald
87365e5969 Merge pull request #1071 from jakemcdermott/set-selenium-container-image-tags
use selenium hub / node container image version '3.8.1-erbium'
2018-01-28 16:37:03 -05:00
Jake McDermott
7e829e3a9d use selenium hub / node container image version '3.8.1-erbium' 2018-01-28 16:05:29 -05:00
Shane McDonald
b8cba916a5 Merge pull request #1069 from tdgroot/develop-dockerfile_rsync
Add rsync to Dockerfile
2018-01-27 10:14:36 -05:00
Timon de Groot
dc96a1730e Add rsync to Dockerfile 2018-01-27 11:54:51 +01:00
Matthew Jones
d4983ea10d Merge pull request #856 from ewjoachim/docker-compose-491
Fixes #491: Adding Docker Compose installer
2018-01-26 08:33:40 -05:00
Joachim Jablon
209bdd00a1 related #491 Backport #1007 2018-01-26 07:09:28 +01:00
Joachim Jablon
c4efbd62bc related #491 Docker Compose installer
Signed-off-by: Joachim Jablon <ewjoachim@gmail.com>
2018-01-26 07:09:28 +01:00
Joachim Jablon
287a3bc8d4 related #491 Documentation for Docker Compose
Signed-off-by: Joachim Jablon <ewjoachim@gmail.com>
2018-01-26 07:09:28 +01:00
Joachim Jablon
9fefc26528 related #491 Split local_docker docker into 2 task files
Signed-off-by: Joachim Jablon <ewjoachim@gmail.com>
2018-01-26 07:09:28 +01:00
Ryan Petrello
e2d4ef31fd Merge pull request #1061 from ryanpetrello/fix-1042
fix a unicode bug in the stdout endpoint when ?content_encoding=base64
2018-01-25 16:07:21 -05:00
Ryan Petrello
a15e257b9e fix a unicode bug in the stdout endpoint when ?content_encoding=base64
see: https://github.com/ansible/awx/issues/1042
2018-01-25 15:53:43 -05:00
Ryan Petrello
a56370fed5 Merge pull request #1059 from ryanpetrello/reload-celery
reload the entire celery worker pool when uwsgi reloads the Python app
2018-01-25 15:12:22 -05:00
Ryan Petrello
e7ed4811c1 reload the entire celery worker pool when uwsgi reloads the Python app
this is for the development environment only; when uwsgi notices a code
change, it automatically reloads the uwsgi workers; this patch includes
a hook that sends `SIGHUP` to the celery process, causing it to spawn
a new set of workers as well
2018-01-25 14:59:40 -05:00
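A hypothetical sketch of the kind of hook described above, not the actual patch: on a uwsgi reload, send `SIGHUP` to the celery master so its worker pool is respawned too. The pidfile path is an assumption for illustration.

```python
# Hypothetical reload hook: SIGHUP tells celery to spawn a fresh worker pool.
import os
import signal

CELERY_PIDFILE = "/tmp/celery.pid"  # assumed location, for illustration only

def reload_celery_workers():
    with open(CELERY_PIDFILE) as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGHUP)
```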
Ryan Petrello
9860b38438 Merge pull request #1055 from ryanpetrello/lazybaron
only import the redbaron library on-demand
2018-01-25 13:09:58 -05:00
Ryan Petrello
ef80ecd3b6 only import the redbaron library on-demand
redbaron is a library we use to facilitate parsing local settings files;
at _import_ time it generates a parse tree and caches it to disk at
`/tmp`; this process is _really_ time consuming, and only necessary if
we're actually *using* the library

right now, we're importing this library and paying the penalty
_every_ time we load the awx application
2018-01-25 10:23:44 -05:00
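A generic sketch of the on-demand import pattern the commit describes; `parse_local_settings` is a hypothetical helper, not AWX's own function.

```python
# Defer the expensive redbaron import until settings parsing is actually
# requested, instead of paying the parse-tree/caching cost on every app start.
def parse_local_settings(path):
    from redbaron import RedBaron  # imported only when needed
    with open(path) as f:
        return RedBaron(f.read())
```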
Ryan Petrello
50290a9063 Merge pull request #1024 from ryanpetrello/fix-710-schedule-timezone
support TZID= in schedule rrules
2018-01-25 10:14:57 -05:00
Shane McDonald
fefa4a8bf4 Merge pull request #1049 from ansible/jakemcdermott-fix-1045
add minimum git version to install guide
2018-01-24 15:40:10 -05:00
Jake McDermott
546f88c74d add minimum git version 2018-01-24 15:12:58 -05:00
Jake McDermott
afa1fb489c Merge pull request #1044 from jakemcdermott/fix-948
bump templates form credential_types page limit
2018-01-24 09:35:43 -05:00
Jake McDermott
3571abb42b bump templates form credential_types page limit 2018-01-23 18:28:59 -05:00
Jake McDermott
21425db889 Merge pull request #1041 from jakemcdermott/fix-multicred-bugs
Fix multicred bugs
2018-01-23 13:34:35 -05:00
Jake McDermott
cc64657749 use correct handle for modal tag deselect 2018-01-23 10:32:13 -05:00
Jake McDermott
7300c2ccc1 fix unexpected deselect when selecting no-vault-id vault credentials 2018-01-23 10:19:19 -05:00
Jake McDermott
7c596039c5 fix modal exit button close 2018-01-23 09:39:59 -05:00
Ryan Petrello
9857c8272e add more tests for weird timezone/DST boundaries in schedules
see: https://github.com/ansible/awx/pull/1024
2018-01-22 14:57:57 -05:00
Shane McDonald
797169317c Merge pull request #1037 from shanemcd/devel
Use newer version of git in dev image
2018-01-22 14:43:09 -05:00
Shane McDonald
67c6591f6f Use newer version of git in dev image
More fallout from #982
2018-01-22 13:57:44 -05:00
Ryan Petrello
15906b7e3c support TZID= in schedule rrules
this commit allows schedule `rrule` strings to include local timezone
information via TZID=NNNNN; occurrences are _generated_ in the local
time specified by the user (or UTC, if e.g., DTSTART:YYYYMMDDTHHMMSSZ)
while Schedule.next_run, Schedule.dtstart, and Schedule.dtend will be
stored in the UTC equivalent (i.e., the scheduler will still do math on
"what to run next" based on UTC datetimes).

in addition to this change, there is now a new API endpoint,
`/api/v2/schedules/preview/`, which takes an rrule and shows the next
10 occurrences in local and UTC time.

see: https://github.com/ansible/ansible-tower/issues/823
related: https://github.com/dateutil/dateutil/issues/614
2018-01-22 11:50:00 -05:00
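A hedged example of exercising the `/api/v2/schedules/preview/` endpoint named above; the `rrule` request field, host, and credentials are assumptions for illustration.

```python
# Ask /api/v2/schedules/preview/ for upcoming occurrences of a TZID= rrule.
import requests

rrule = "DTSTART;TZID=America/New_York:20180201T090000 RRULE:FREQ=DAILY;COUNT=10"
resp = requests.post(
    "https://awx.example.com/api/v2/schedules/preview/",  # placeholder host
    json={"rrule": rrule},                                 # assumed field name
    auth=("admin", "password"),                            # placeholder credentials
)
print(resp.json())  # expected: the next occurrences in local and UTC time
```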
Ryan Petrello
fdd2b84804 Merge pull request #1036 from ryanpetrello/fix-955
fix a bug that breaks workflows w/ a survey password + inventory sync
2018-01-22 10:55:51 -05:00
Ryan Petrello
ac3f7d0fac fix a bug that breaks workflows w/ a survey password + inventory sync
prior versions of awx did not raise an exception for this scenario
- they simply ignored kwargs that they couldn't accept.  this change is
a sort of middle ground - it ignores them, but gives a clue in the logs
as to why

see: https://github.com/ansible/awx/issues/955
related: https://github.com/ansible/awx/pull/803
2018-01-22 09:41:30 -05:00
Shane McDonald
09d63b4883 Merge pull request #1029 from jakemcdermott/fix-navbar
fix navbar / breadcrumb issue
2018-01-19 13:54:56 -05:00
Jake McDermott
b96e33ea50 fix navbar / breadcrumb issue 2018-01-19 13:52:11 -05:00
Matthew Jones
71d23e8c81 Merge pull request #1007 from wallnerryan/alternate-dns-servers
support dns servers: fixes https://github.com/ansible/awx/issues/1004
2018-01-19 08:58:29 -05:00
Ryan Petrello
073feb74cb Merge pull request #1015 from ryanpetrello/fix-980
fix another bug that breaks the JT callback process
2018-01-18 14:26:48 -05:00
Ryan Petrello
43f19cc94b fix another bug that breaks the JT callback process
see: https://github.com/ansible/awx/issues/980
related: 17cd0595d7
2018-01-18 13:17:06 -05:00
Ryan Petrello
ef312f0030 Merge pull request #1011 from ryanpetrello/fix-1010
don't require an IRC password in the notification UI
2018-01-18 12:55:57 -05:00
Christian Adams
d0fec0f19c Merge pull request #1013 from rooftopcellist/rdb_docs
added RDB info to docs
2018-01-18 12:33:00 -05:00
adamscmRH
1e14221625 added RDB info to docs 2018-01-18 11:48:26 -05:00
Matthew Jones
b6a901ac51 Merge pull request #1012 from wwitzel3/devel
first-parent requires git >= 1.8.4
2018-01-18 11:36:33 -05:00
Wayne Witzel III
1af0ee2f8c first-parent requires git >= 1.8.4 2018-01-18 16:12:23 +00:00
Ryan Petrello
b62ac6fbe4 Merge pull request #1001 from ryanpetrello/fix-7852
refactor credential injection for builtin types
2018-01-18 10:49:03 -05:00
Ryan Petrello
e5aaeedc43 don't require an IRC password in the notification UI
see: https://github.com/ansible/awx/issues/1010
2018-01-18 09:14:22 -05:00
Wayne Witzel III
fc5c5400cd Merge pull request #1003 from wwitzel3/devel
Fix notification_data attempting to access name property of an int
2018-01-18 08:55:50 -05:00
Wayne Witzel III
95bead2bb2 Extend notification_data test 2018-01-18 13:30:12 +00:00
Ryan Wallner
bcbda23aee support dns servers 2018-01-18 07:46:09 -05:00
Jake McDermott
5a21783013 Merge pull request #976 from jakemcdermott/multivault-templates-form
multivault select for templates form
2018-01-17 23:20:44 -05:00
Jake McDermott
e33604de71 show credential kind icon on credential tags 2018-01-17 23:07:34 -05:00
Jake McDermott
c50c63a9ff default to machine credential type 2018-01-17 23:07:23 -05:00
Jake McDermott
916d91cbc7 use updated credentials endpoint 2018-01-17 23:07:10 -05:00
Jake McDermott
79bd8b2c72 show vault id 2018-01-17 22:02:54 -05:00
Jake McDermott
5939116b0a update e2e and smoke tests for multivault select 2018-01-17 22:02:43 -05:00
Jake McDermott
6759e60428 add working multivault select for templates form 2018-01-17 22:02:30 -05:00
Jake McDermott
ef8af79700 load multiselect list when vault kind is selected 2018-01-17 22:02:16 -05:00
Ryan Petrello
dbb4d2b011 refactor credential injection for builtin types
this cleans up a _lot_ of code duplication that we have for builtin
credential types. it will allow customers to set up custom inventory
sources that utilize builtin credential types (e.g., a custom inventory
script that could use an AzureRM credential)

see: https://github.com/ansible/ansible-tower/issues/7852
2018-01-17 16:50:28 -05:00
Wayne Witzel III
4a28065dbb Fix notification_data attempting to access name property of an int 2018-01-17 21:46:49 +00:00
Ryan Petrello
5387846cbb Merge pull request #992 from ryanpetrello/optimize-output-event-filter
optimize OutputEventFilter for large stdout streams
2018-01-17 14:24:15 -05:00
Ryan Petrello
6b247f1f24 Merge pull request #1000 from ryanpetrello/fix-7853
fix a minor unicode handling bug in project names
2018-01-17 14:15:15 -05:00
Ryan Petrello
838b793704 fix a minor unicode handling bug in project names
see: https://github.com/ansible/ansible-tower/issues/7853
2018-01-17 13:37:06 -05:00
Ryan Petrello
3cb8c98a41 Merge pull request #998 from ryanpetrello/fix-980
fix a bug which broke the callback plugin launch process
2018-01-17 12:12:05 -05:00
Ryan Petrello
18f254fc28 Merge pull request #769 from rbywater/feature/cloudformssuffix
Add ability to append suffix to host names for Cloudforms Inventory
2018-01-17 11:43:10 -05:00
Michael Abashian
9c6c6ce816 Merge pull request #990 from mabashian/975-delete-template
Fixed delete on templates list
2018-01-17 11:40:42 -05:00
Chris Meyers
6699be95bf Merge pull request #995 from chrismeyersfsu/improvement-fact_cache_log_job
add job_id to fact cache log output
2018-01-17 11:30:57 -05:00
Ryan Petrello
17cd0595d7 fix a bug which broke the callback plugin launch process
see: https://github.com/ansible/awx/issues/980
2018-01-17 11:28:13 -05:00
Chris Meyers
0402064c0f expose ansible_facts_modified 2018-01-17 10:28:34 -05:00
Chris Meyers
e33265e12c add job_id to fact cache log output 2018-01-17 10:19:27 -05:00
Richard Bywater
b8c76301de Add validation to ensure leading fullstop for suffix 2018-01-17 13:20:59 +13:00
Ryan Petrello
51f7907a01 optimize OutputEventFilter for large stdout streams
update our event data search algorithm to be a bit lazier in event data
discovery; this drastically improves processing speeds for stdout >5MB

see: https://github.com/ansible/awx/issues/417
2018-01-16 14:41:35 -05:00
jlmitch5
1a98cedc0f Merge pull request #993 from ansible/jlmitch5-patch-1
update css so that scroll bar doesn't take padding from main content area
2018-01-16 14:28:14 -05:00
jlmitch5
db974d4fd4 update css so that scroll bar doesn't take padding from main content area 2018-01-16 14:22:08 -05:00
mabashian
d6e663eff0 Fixed delete on templates list 2018-01-16 08:36:38 -05:00
Christian Adams
ccb40c8c68 Merge pull request #986 from rooftopcellist/xtra_vars
extends JT xtra var error msg
2018-01-16 00:58:59 -05:00
Ryan Petrello
6eb04de1a7 Merge pull request #978 from ryanpetrello/fix-7841
fix a minor bug in the JT launch related to support for zero credentials
2018-01-15 20:35:28 -05:00
Shane McDonald
cad5c5e79a Merge pull request #987 from shanemcd/devel
Fix sdist builder image
2018-01-15 20:25:16 -05:00
Shane McDonald
97472cb91b Fix sdist builder image
Fallout from https://github.com/ansible/awx/pull/982
2018-01-15 15:39:48 -05:00
adamscmRH
0c63ea0052 extends JT xtra var error msg 2018-01-15 15:05:03 -05:00
Chris Meyers
2b1d2b2976 Merge pull request #805 from chrismeyersfsu/feature-saml_import_attr
allow for saml attributes to define team and org
2018-01-15 11:57:05 -05:00
Shane McDonald
7d51b1cb9d Merge pull request #982 from shanemcd/devel
Use first parent commit when determining version from tags
2018-01-15 11:04:23 -05:00
Shane McDonald
52e531625c Use first parent commit when determining version from tags
We were having issues where an older tag was being output from `git describe`.

From the man page:

Follow only the first parent commit upon seeing a merge commit. This is useful when you wish to not match tags on branches merged in the history of the target commit.
2018-01-15 11:01:47 -05:00
Richard Bywater
b5db652050 Clarify that leading fullstop needed 2018-01-14 14:05:34 +13:00
Jake McDermott
e699402115 Merge pull request #979 from mabashian/template-list-actions
Added old schedule/copy logic to template list
2018-01-12 20:19:31 -05:00
mabashian
d012f5cd99 Added old schedule/copy logic to template list until it can be refactored 2018-01-12 12:23:29 -05:00
Ryan Petrello
4a2ca20b60 fix a minor bug in the JT launch related to support for zero credentials
see: https://github.com/ansible/ansible-tower/issues/7841
2018-01-12 11:37:33 -05:00
Chris Meyers
e49dfd6ee2 only run saml pipeline if saml social auth
* Do not trigger saml social auth pipeline methods if the user logging
in was not created by the saml social auth backend.
2018-01-11 16:20:49 -05:00
Jake McDermott
fb414802fa Merge pull request #970 from ansible/smoketest-fixes-for-lists
update test selector for add button dropdown arrow
2018-01-11 16:13:51 -05:00
Michael Abashian
00f400e839 Merge pull request #971 from mabashian/892-delete-inv-src
Fixed a few straggling success/error promises and replaced with then/catch
2018-01-11 14:51:29 -05:00
Chris Meyers
234e33df0e Merge pull request #959 from chrismeyersfsu/feature-multiple_ldap_servers
implement multiple ldap servers
2018-01-11 14:45:38 -05:00
mabashian
f9b0a3121f Fixed a few straggling success/error promises and replaced with then/catch 2018-01-11 13:59:46 -05:00
Jake McDermott
0afdca3674 update test selector for add button dropdown arrow 2018-01-11 13:48:03 -05:00
Matthew Jones
03cef6fea3 Merge pull request #969 from matburt/default_x_forwarded_for
Add X-Forwarded-For as a default source of remote host headers
2018-01-11 12:01:27 -05:00
Matthew Jones
7dc0fce1aa Use x-forwarded-for by default in openshift and kubernetes 2018-01-11 12:00:01 -05:00
Matthew Jones
648d27f28d Merge pull request #909 from scottp-dpaw/add-openshift-hint
Add REMOTE_HOST_HEADERS override to OpenShift template
2018-01-11 11:56:05 -05:00
jlmitch5
5a5e5bc121 Merge pull request #898 from jlmitch5/newTemplateList
implementation for expanded template list
2018-01-11 11:23:56 -05:00
John Mitchell
aea37654e2 updated template list to using new components 2018-01-11 11:20:12 -05:00
Chris Meyers
2ed97aeb0c implement multiple ldap servers 2018-01-11 09:03:14 -05:00
Ryan Petrello
9431b0b6ff Merge pull request #962 from ryanpetrello/fix-7843
fix a unicode handling bug
2018-01-10 18:17:58 -05:00
Ryan Petrello
a5007ccd41 fix a unicode handling bug
see: https://github.com/ansible/ansible-tower/issues/7843
related: https://github.com/ansible/awx/pull/807
2018-01-10 15:56:31 -05:00
Michael Abashian
81fc4219ae Merge pull request #957 from mabashian/860-facts
Fixed display of host facts
2018-01-10 15:02:23 -05:00
Ryan Petrello
c3c4d79890 Merge pull request #958 from ryanpetrello/multivenv
add an example for custom virtualenv setup in containers
2018-01-10 14:24:42 -05:00
Ryan Petrello
b01b229fea add an example for custom virtualenv setup in containers 2018-01-10 13:48:55 -05:00
mabashian
984b7e066d Fixed display of host facts 2018-01-10 13:18:38 -05:00
Matthew Jones
67d927121d Merge pull request #940 from ryanpetrello/multivenv
implement support for per-playbook/project/org virtualenvs
2018-01-10 12:15:38 -05:00
Matthew Jones
ae06cff991 Merge pull request #938 from ansible/kubernetes_install_support
Kubernetes install support
2018-01-10 09:57:33 -05:00
Matthew Jones
7ea6d7bf4d Clean up documentation for kubernetes installer 2018-01-10 09:39:07 -05:00
Matthew Jones
fad4a549d0 Remove oc command usage from docker registry k8s reference 2018-01-10 09:38:00 -05:00
Matthew Jones
9365e477c5 Merge pull request #951 from ansible/remove_nodeport
Remove nodeport customization
2018-01-10 09:32:36 -05:00
Matthew Jones
d0b3cac72a Remove nodeport definition 2018-01-10 09:29:12 -05:00
Chris Meyers
de02138dfd spelling is hard 2018-01-10 09:26:11 -05:00
Matthew Jones
44f0b003fc Kubernetes install documentation 2018-01-10 09:25:59 -05:00
Matthew Jones
56aed597b2 Add initial support for kubernetes to the installer 2018-01-10 09:25:59 -05:00
Matthew Jones
f33ee03b98 Remove nodeport customization
This isn't strictly necessary for the Openshift routes and can
sometimes cause problems when the resource is already defined in openshift
2018-01-10 09:23:46 -05:00
Ryan Petrello
69a3b0def6 Merge pull request #946 from ryanpetrello/fix-7846
fix a handful of issues for playbooks that contain unicode
2018-01-10 09:16:40 -05:00
Matthew Jones
6504972d82 Merge pull request #741 from rbywater/bugfix/cloudformsinventory
Fix CloudForms enabled & id variable names - relates to #705
2018-01-10 00:22:58 -05:00
Ryan Petrello
4bb2b5768e properly compose stdout downloads that contain unicode 2018-01-09 23:52:02 -05:00
Ryan Petrello
c0a641ed52 properly handle unicode for isolated job buffers
from: https://docs.python.org/2/library/stringio.html#module-cStringIO
"Unlike the StringIO module, this module is not able to accept Unicode
strings that cannot be encoded as plain ASCII strings."

see: https://github.com/ansible/ansible-tower/issues/7846
2018-01-09 23:46:17 -05:00
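A Python 2 illustration of the cStringIO limitation quoted above (assumes a Python 2 interpreter, matching the linked docs).

```python
# cStringIO only accepts unicode that can be encoded as ASCII; the pure-Python
# StringIO module does not have that restriction.
from StringIO import StringIO
from cStringIO import StringIO as cStringIO

StringIO().write(u"caf\xe9")       # fine: StringIO accepts any unicode
try:
    cStringIO().write(u"caf\xe9")  # raises UnicodeEncodeError on non-ASCII unicode
except UnicodeEncodeError as exc:
    print(exc)
```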
Ryan Petrello
1e8c89f536 implement support for per-playbook/project/org virtualenvs
see: https://github.com/ansible/awx/issues/34
2018-01-09 22:47:01 -05:00
Shane McDonald
54d3412820 Merge pull request #942 from wwitzel3/devel
Update asgi-amqp requirement
2018-01-09 19:17:45 -05:00
Wayne Witzel III
1690938dfb Update asgi-amqp requirement 2018-01-09 23:54:30 +00:00
Chris Meyers
0a9d3d47b9 more efficiently determine saml team mapping 2018-01-09 12:16:07 -05:00
Ryan Petrello
2952b0a0fe Merge pull request #807 from AlanCoding/inv_update_name
Make inventory update name combination of inventory and source
2018-01-08 10:43:44 -05:00
Ryan Petrello
1d3e8f8b87 Merge pull request #831 from AlanCoding/field_names
Use Options models to consolidate field_names list
2018-01-08 10:36:16 -05:00
Ryan Petrello
97c040aaa1 Merge pull request #839 from AlanCoding/cache_settings_dict
Cache the global settings list, cProfile speedup
2018-01-08 10:35:00 -05:00
Ryan Petrello
818c95501a Merge pull request #920 from ryanpetrello/fix-914
add vault_id to launch endpoints default vault credentials
2018-01-08 10:31:31 -05:00
Chris Meyers
664bdec57f add documentation 2018-01-05 14:43:33 -05:00
Michael Abashian
92068930a6 Merge pull request #919 from ansible/jakemcdermott-add-dialog-slider-imports
add dialog and slider vendor imports
2018-01-05 08:48:11 -05:00
Chris Meyers
d07a946183 Merge pull request #921 from chrismeyersfsu/fix-handle_work_error-689
Fix handle_work_error()
2018-01-05 07:54:03 -05:00
Chris Meyers
9d58b15135 allow for saml attributes to define team and org
related to https://github.com/ansible/awx/issues/217

* Adds a configure tower in tower setting for users to configure a saml
attribute that tower will use to put users into teams and orgs.
2018-01-04 15:35:11 -05:00
Chris Meyers
a0038276a4 do not use a custom task exception
* Celery + json pickling do not handle custom Exceptions (and may never
do so). Note that if custom Exceptions were handled, the code would
be susceptible to the same arbitrary code execution that python pickle is
vulnerable to.
* So don't use custom Exceptions.
2018-01-04 15:30:52 -05:00
Chris Meyers
f0ff6ecb0a handle_work_error signature to work
* celery error callback signature isn't well defined. Thus, our error
callback signature is made to handle just about any call signature and
depend only on a single attribute, id, existing.

See https://github.com/celery/celery/issues/3709
2018-01-04 15:23:13 -05:00
Ryan Petrello
60743d6ba6 add the vault_id to the response payload on the JT launch endpoint
see: https://github.com/ansible/awx/issues/914
2018-01-04 15:13:48 -05:00
Ryan Petrello
4707b5e020 Merge pull request #917 from ryanpetrello/more-stdout-event-polish
more stdout event polish
2018-01-04 14:54:46 -05:00
Jake McDermott
ed7d7fcf00 add dialog and slider vendor imports 2018-01-04 14:46:42 -05:00
Aaron Tan
6c2a7f3782 Merge pull request #906 from jangsutsr/refactor_named_url_tests
Refactor named URL unit tests
2018-01-04 14:20:27 -05:00
Ryan Petrello
47875c5f9a Merge pull request #916 from ryanpetrello/memcache-fact-cache-size-warning
make the fact caching plugin fail more gracefully for large payloads
2018-01-04 13:06:29 -05:00
Ryan Petrello
f28f7c6184 refactor job event signal generation code 2018-01-04 12:50:12 -05:00
Ryan Petrello
1494c8395b update websockets docs to reflect new event groups 2018-01-04 11:40:15 -05:00
Ryan Petrello
2691e1d707 make the fact caching plugin fail more gracefully for large payloads
related: https://github.com/ansible/ansible/pull/34424
2018-01-04 11:33:47 -05:00
Ryan Petrello
6d413bd412 Merge pull request #833 from ryanpetrello/stdout-events
generalize stdout event processing to emit events for all job types
2018-01-04 11:28:52 -05:00
Aaron Tan
54bf7e13d8 Refactor named URL unit tests
The original test set no longer works after Django 1.11 due to
stricter rules against dynamic model definition. The refactored test set
targets each existing model that applies named URL rules, instead of
abstract general use cases, thus significantly improving maintainability
and readability.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2018-01-03 14:00:30 -05:00
Ryan Petrello
c6b6a3ad89 send and subscribe to summary events for all new events 2018-01-03 09:09:45 -05:00
Ryan Petrello
2bd656e61d calculate stdout download length using the ORM, not raw SQL 2018-01-03 09:09:44 -05:00
Ryan Petrello
35b8e40d3c remove deprecation from the stdout endpoint; text downloads still use it 2018-01-03 09:09:44 -05:00
Ryan Petrello
c4d901bf2c add functional API tests for deprecated job event stdout composition
see: https://github.com/ansible/awx/issues/200
2018-01-03 09:09:44 -05:00
Ryan Petrello
1369f72885 add new API endpoints and websocket emit for new job event types
see: https://github.com/ansible/awx/issues/200
2018-01-03 09:09:44 -05:00
Ryan Petrello
0b30e7907b change stdout composition to generate from job events on the fly
this approach totally removes the process of reading and writing stdout
files on the local file system at settings.JOBOUTPUT_ROOT when jobs are
run; now stdout content is only written on-demand as it's fetched for
the deprecated `stdout` endpoint

see: https://github.com/ansible/awx/issues/200
2018-01-03 09:09:43 -05:00
Ryan Petrello
fc94b3a943 generalize stdout event processing to emit events for *all* job types
* introduces three new models: `ProjectUpdateEvent`,
  `InventoryUpdateEvent`, and `SystemJobEvent`
* simplifies the stdout callback management in `tasks.py` - now _all_
  job run types capture and emit events to the callback receiver
* supports stdout reconstruction from events for stdout downloads for
  _all_ job types
* configures `ProjectUpdate` runs to configure the awx display callback
  (so we can capture real playbook events for `project_update.yml`)
* ProjectUpdate, InventoryUpdate, and SystemJob runs no longer write
  text blobs to the deprecated `main_unifiedjob.result_stdout_text` column

see: https://github.com/ansible/awx/issues/200
2018-01-03 09:09:42 -05:00
Scott Percival
fde9099198 Add REMOTE_HOST_HEADERS override to OpenShift template
Signed-off-by: Scott Percival <scott.percival@dbca.wa.gov.au>
2018-01-03 09:53:17 +08:00
Michael Abashian
815cd829e0 Merge pull request #872 from mabashian/865-jquery-ui-upgrade-bug
Fixed spinners after jquery-ui upgrade
2018-01-02 13:18:51 -05:00
Ryan Petrello
28c612ae9c Merge pull request #871 from AlanCoding/dirty_extra_data
Fix bug when creating system job schedule
2018-01-02 09:22:07 -05:00
Bill Nottingham
d6ed6a856d Merge pull request #845 from wenottingham/he-sees-you-when-you're-sleeping
Tweak collected information.
2017-12-21 13:52:11 -05:00
mabashian
706b370f7e Fixed spinners after jquery-ui upgrade 2017-12-20 16:02:53 -05:00
AlanCoding
80a2d10742 fix bug when creating system job schedule 2017-12-20 15:30:57 -05:00
Jake McDermott
f7259a1e78 Merge pull request #844 from jakemcdermott/fix-package-json-backmerge-issue
package.json updates / restore ui watch functionality
2017-12-18 17:03:03 -05:00
AlanCoding
08570fe785 make inventory update name combination of inventory and source 2017-12-18 16:21:39 -05:00
Jake McDermott
987cdc6802 Bump versions of angular-codemirror, jquery-ui, and moment 2017-12-18 16:21:29 -05:00
Alan Rominger
6e27294e2b Merge pull request #846 from AlanCoding/encrypt_on_save
Encrypt password answers on config save
2017-12-18 16:20:08 -05:00
AlanCoding
3439ba5f3b allow WFJT nodes without required variables 2017-12-18 12:03:40 -05:00
AlanCoding
c8e10adc96 fix bug saving extra_data and follow prompts rules
display_extra_vars was not taking a copy of the data before
acting on it - this causes a bug where the activity stream will
modify the existing object on the model. That leads to new data
not being accepted.

Also moved the processing of extra_data to prior to the accept
or ignore kwargs logic so that we pass the right (post-encryption)
form of the variables.
2017-12-18 10:50:22 -05:00
Christian Adams
7e261b5246 Merge pull request #847 from rooftopcellist/contributing_typos
Fixed Typos
2017-12-15 11:09:43 -05:00
AlanCoding
1e1839915d validate against unencrypted values at spawn point 2017-12-15 10:47:23 -05:00
AlanCoding
74bf058d62 encrypt password answers on config save 2017-12-15 07:48:55 -05:00
Matthew Jones
5ec537bad2 Merge pull request #843 from ansible/remove_old_tests
Removing old unused tests
2017-12-14 23:55:12 -05:00
Bill Nottingham
568901af74 Tweak collected information. 2017-12-14 19:22:18 -05:00
adamscmRH
c2e9926330 Fixed Typos 2017-12-14 16:13:04 -05:00
Jake McDermott
c4ccfa1b27 restoring ui watch functionality 2017-12-14 14:23:46 -05:00
Matthew Jones
478bcc0b07 Removing old unused tests 2017-12-14 11:34:43 -05:00
AlanCoding
0bb9c58e25 cache the global settings list, cProfile speedup 2017-12-14 11:29:30 -05:00
Alan Rominger
9c783aa0ce Merge pull request #804 from AlanCoding/active_count
simplify query for active_count
2017-12-14 10:47:12 -05:00
Alan Rominger
526391a072 Merge pull request #838 from AlanCoding/no_unicode_loop_2
Avoid slowdown generating smart_filter (alternative 2)
2017-12-14 10:23:31 -05:00
AlanCoding
98f8faa349 simplify query for active_count 2017-12-14 09:53:26 -05:00
AlanCoding
8a2a5b0fb1 avoid slowdown generating smart_filter 2017-12-14 09:39:39 -05:00
Jake McDermott
07cfa6cba5 Merge pull request #834 from AlanCoding/jump-the-start-line 2017-12-13 22:43:54 -05:00
AlanCoding
e188692acf use Options models to consolidate field_names list 2017-12-13 22:39:38 -05:00
Jake McDermott
ad70754b6a Merge pull request #832 from mabashian/linting-error-cleanup
Fixed linting/leftover merge errors
2017-12-13 19:46:02 -05:00
AlanCoding
9fb24f1a4c add hack to TimingMiddleware for Shippable tests 2017-12-13 18:49:26 -05:00
mabashian
aefa30e1e9 Fixed linting/leftover merge errors 2017-12-13 18:28:52 -05:00
Alan Rominger
7eb2d86890 Merge pull request #749 from AlanCoding/detail_opt
Apply list view optimizations to detail view
2017-12-13 18:19:00 -05:00
Matthew Jones
2fb0144914 Add libcurl-devel to official image build 2017-12-13 16:14:55 -05:00
AlanCoding
e3a731bb9e apply listview optimizations to detail view 2017-12-13 16:09:37 -05:00
Ryan Petrello
451e9a7504 Merge pull request #826 from AlanCoding/322flake
flake8 fixes from removal of re-encrypt test
2017-12-13 15:23:41 -05:00
Ryan Petrello
8311acfba2 Merge pull request #825 from AlanCoding/towervars
Add back in support of towervars lost in merge
2017-12-13 15:19:47 -05:00
AlanCoding
77a1c405a6 flake8 fixes from removal of reencrypt test 2017-12-13 14:32:34 -05:00
AlanCoding
1b0bca8229 add back in support of towervars lost in merge 2017-12-13 14:30:11 -05:00
Ryan Petrello
bd91e8eb54 Merge pull request #824 from ryanpetrello/devel
fix a few tests caused by fallout between 3.2.2 bugs and 3.3 multicred
2017-12-13 14:10:39 -05:00
Ryan Petrello
ea4cd99003 fix a few tests caused by fallout between 3.2.2 bugs and 3.3 multicred 2017-12-13 14:02:25 -05:00
Shane McDonald
00ce244716 Merge pull request #822 from jakemcdermott/fix-merge-issues
fix merge issue with inventory source service
2017-12-13 13:32:13 -05:00
Jake McDermott
3b791609cd fix merge issue with inventory source service 2017-12-13 13:23:15 -05:00
Matthew Jones
a8d4eb7c1d Merge pull request #821 from ryanpetrello/devel
more test cleanup from 3.2.2 merge
2017-12-13 13:15:59 -05:00
Ryan Petrello
d35bfafcf5 more test cleanup from 3.2.2 merge 2017-12-13 13:14:22 -05:00
Ryan Petrello
9f8ef4d1e5 Merge pull request #820 from ryanpetrello/devel
fix a number of failing unit tests related to the 3.2.2 merge
2017-12-13 13:04:55 -05:00
Ryan Petrello
a978d094b4 fix a number of failing unit tests related to the 3.2.2 merge 2017-12-13 13:03:17 -05:00
Shane McDonald
47e422ba7a Merge pull request #819 from ansible/jakemcdermott-patch-1
fix arg name clash in hosts list controller
2017-12-13 13:02:47 -05:00
Jake McDermott
4b86815275 fix arg name clash in hosts list controller 2017-12-13 13:01:11 -05:00
Alan Rominger
6c1c850c5f Merge pull request #816 from AlanCoding/ints
use credential property that returns integers
2017-12-13 12:56:07 -05:00
AlanCoding
f4f1e0fd3c use credential property that returns integers 2017-12-13 12:54:32 -05:00
Matthew Jones
ca84e1c654 Merge pull request #817 from ansible/jakemcdermott-patch-1
fix missing comma in package.json
2017-12-13 12:48:16 -05:00
Jake McDermott
6b6e898882 fix missing comma in package.json 2017-12-13 12:45:26 -05:00
Matthew Jones
9dbcc5934e Merge remote-tracking branch 'tower/release_3.2.2' into devel 2017-12-13 12:25:47 -05:00
Greg Considine
fac7fd45f8 Merge pull request #614 from gconsidine/ui/fix/toggle-button-container
Adjust style of toggle button to accommodate text that exceeds 42px
2017-12-11 16:09:44 -05:00
Matthew Jones
34c206fab0 Bump psql-container pg version to 9.6 2017-12-11 12:01:28 -05:00
gconsidine
a2f64f1053 Adjust style of toggle button to accommodate text that exceeds 42px 2017-12-11 11:47:21 -05:00
Shane McDonald
334d47f3ab Pull updated translations 2017-12-11 09:42:06 -05:00
Ryan Petrello
4724b6a3d6 Merge pull request #613 from ryanpetrello/release_3.2.2
change how we detect the current user at LDAP login to avoid a nuanced recursion error
2017-12-08 15:46:32 -05:00
Ryan Petrello
ce94ba4c83 change how we detect the current user to avoid a nuanced recursion error
see: https://github.com/ansible/ansible-tower/issues/7802
2017-12-08 15:35:47 -05:00
Ryan Petrello
0dc4fa975b Merge pull request #612 from ryanpetrello/release_3.2.2
fix a race condition in "host.last_job" when jobs are deleted
2017-12-06 13:43:45 -05:00
Ryan Petrello
1fb890f4eb fix a race condition in "host.last_job" when jobs are deleted
see: https://github.com/ansible/ansible-tower/issues/7815
2017-12-06 11:30:19 -05:00
Shane McDonald
15e8fd5eca Pull updated translations 2017-12-05 14:17:18 -05:00
Ryan Petrello
06e751fea1 Merge pull request #611 from ryanpetrello/release_3.2.2
more unit tests for survey default handling
2017-12-05 09:18:48 -05:00
Ryan Petrello
fe93ef5488 more unit tests for survey default handling 2017-12-05 09:04:57 -05:00
Richard Bywater
9b05a41eec Add ability to append suffix to host names for Cloudforms Inventory
Allows for use of a suffix that will be appended to host names returned
from Cloudforms API if that suffix is not present.

For example with a suffix of 'example.org', the following results would
be shown for a particular Cloudforms host name:
someexample -> someexample.example.org
someexample.example.org -> someexample.example.org

The main use-case for this is to ensure that the hosts in an inventory
aren't effectively duplicated when one Inventory Source returns FQDNs
whilst others return shortnames.
2017-12-05 14:47:33 +13:00
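A small sketch of the suffix behaviour described above, as an illustration rather than the inventory script itself; the related commits note the suffix is expected to include a leading fullstop.

```python
def apply_suffix(hostname, suffix=".example.org"):
    # append the configured suffix unless the host name already ends with it
    return hostname if hostname.endswith(suffix) else hostname + suffix

assert apply_suffix("someexample") == "someexample.example.org"
assert apply_suffix("someexample.example.org") == "someexample.example.org"
```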
Ryan Petrello
2c12f1b66e Merge pull request #610 from ryanpetrello/faster-result-stdout-cleanup
don't fetch stdout when purging jobs - it's slow and causes OOMs
2017-12-04 15:41:16 -05:00
Ryan Petrello
33dedc88c8 don't fetch stdout when purging jobs - it's slow and causes OOMs
see: https://github.com/ansible/ansible-tower/issues/7751
2017-12-04 15:14:55 -05:00
Alan Rominger
759867c863 Merge pull request #609 from AlanCoding/more_encryption_tests
encryption tests around the contract with survey functionality
2017-12-04 14:20:30 -05:00
Alan Rominger
d4613d448c Merge pull request #608 from AlanCoding/empty_string_defaults
allow password default reuse with empty default
2017-12-04 14:19:32 -05:00
AlanCoding
dbd68c5747 encryption tests around the contract with survey functionality 2017-12-04 11:45:07 -05:00
AlanCoding
d23d7c422d allow password default reuse with empty default 2017-12-04 10:49:36 -05:00
Ryan Petrello
4b793dc58a Merge pull request #606 from ryanpetrello/test-7806
improve validation for empty default passwords
2017-12-04 10:19:06 -05:00
Ryan Petrello
112757e202 properly handle JT launch POST for required survey fields w/ no default
see: ansible/ansible-tower#7805
2017-12-04 09:45:21 -05:00
Ryan Petrello
12380fe1b1 add more tests for survey default encryption
see: https://github.com/ansible/ansible-tower/issues/7805
see: https://github.com/ansible/ansible-tower/issues/7806
see: https://github.com/ansible/ansible-tower/issues/7807
2017-12-04 09:45:14 -05:00
Ryan Petrello
b987b7daa0 Merge pull request #605 from ryanpetrello/release_3.2.2
fix another survey encryption-related bug
2017-12-01 17:30:43 -05:00
Ryan Petrello
6c7851b51f fix another survey encryption-related bug 2017-12-01 17:11:00 -05:00
Aaron Tan
1ff0591553 Merge pull request #603 from jangsutsr/fix-7737-1
Follow up fix #542
2017-12-01 16:07:14 -05:00
Aaron Tan
58ad214dcf Follow up fix #542
Relates
https://github.com/ansible/ansible-tower/issues/7737#issuecomment-348566452

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-12-01 15:58:33 -05:00
Ryan Petrello
a71cee9300 Merge pull request #604 from ryanpetrello/survey_spec_validation_refactor
refactor survey spec validation into a separate testable function
2017-12-01 15:44:07 -05:00
Ryan Petrello
1057b93570 refactor survey spec validation into a separate testable function 2017-12-01 15:34:07 -05:00
Alan Rominger
e0edfeac7c Merge pull request #602 from AlanCoding/clean_defaults
block user from entering encrypted as bare default
2017-12-01 14:54:42 -05:00
AlanCoding
47f45bf9b3 block user from entering encrypted as bare default 2017-12-01 14:44:06 -05:00
Ryan Petrello
8d162f9044 Merge pull request #601 from ryanpetrello/flake8-fixes
backport a few fixes from awx to address busted ci
2017-12-01 12:48:06 -05:00
AlanCoding
6269b43456 update tests to new Ansible core code 2017-12-01 12:31:56 -05:00
AlanCoding
67867cf0c8 flake8: comply with new E722 rule 2017-12-01 12:16:44 -05:00
Ryan Petrello
7538b4ce15 Merge pull request #600 from ryanpetrello/fix-7800-migration
upgrade survey encryption migration to work around an old survey bug
2017-12-01 12:13:18 -05:00
Ryan Petrello
8c6a1e348d upgrade survey encryption migration to work around an old survey bug
see: https://github.com/ansible/ansible-tower/issues/7800
2017-12-01 11:34:47 -05:00
Shane McDonald
3cd80ef67a Update pot files 2017-11-30 15:29:29 -05:00
Wayne Witzel III
f3310236e4 Merge pull request #599 from wwitzel3/release_3.2.2
Fix git project sync bug.
2017-11-30 11:22:29 -05:00
Wayne Witzel III
ed28faa3db Use TMP instead of TMPDIR and only set it in RunProjectUpdate 2017-11-30 16:10:12 +00:00
Ryan Petrello
fc4b02b79f Merge pull request #597 from ryanpetrello/jenkins-no-like-unicode
removing some cruft we thought would help us catch bugs (it didn't)
2017-11-29 16:39:20 -05:00
Ryan Petrello
a3dd9eb4b7 removing some cruft we thought would help us catch bugs (it didn't) 2017-11-29 16:23:07 -05:00
Richard Bywater
079abc162f Fix CloudForms enabled & id variable names
On Cloudforms (Version 2.0 at least), the dictionary that gets passed to
the inventory_import has a top-level 'cloudforms' dictionary element
that contains the 'id' and 'power_state' rather than those elements
being at the top-level of the dictionary.

This change adds 'cloudforms' into the expected name.
2017-11-30 09:30:23 +13:00
Ryan Petrello
d773d163f7 Merge pull request #595 from ryanpetrello/fix-workflow-survey-encrypt
more survey password encryption bug squashing
2017-11-29 15:09:40 -05:00
Ryan Petrello
68ada92f3b more survey password encryption bug squashing
the nature of this latest bug is that the WorkflowJob has a *different*
implementation of _accept_or_ignore_job_kwargs, and it wasn't performing
encryption for extra vars provided at launch time; this change places the
encryption mechanism in UJT.create_unified_job so that it works the same
for _all_ UJTs

see: https://github.com/ansible/ansible-tower/issues/7798
see: https://github.com/ansible/ansible-tower/issues/7046
2017-11-29 14:40:41 -05:00
Aaron Tan
4c43afda19 Merge pull request #586 from jangsutsr/fix-7768
Suppress exception with concurrent deletion
2017-11-29 13:40:45 -05:00
Ryan Petrello
91cc4689c9 Merge pull request #594 from ryanpetrello/fix-sosreport-venv
fix incorrect virtualenv path for sosreport plugin
2017-11-29 10:07:12 -05:00
Hideki Saito
febfcf709d fix incorrect virtualenv path for sosreport plugin 2017-11-29 09:57:41 -05:00
Ryan Petrello
cf1d5a29f6 Merge pull request #593 from ryanpetrello/fix-7796
fix another encrypted survey password bug
2017-11-28 17:08:35 -05:00
Ryan Petrello
1425021106 fix another encrypted survey password bug
properly encrypt extra_vars that overlap with survey passwords when
`ask_variables_on_launch=true`

see: https://github.com/ansible/ansible-tower/issues/7796
2017-11-28 16:52:47 -05:00
Ryan Petrello
7b42316366 Merge pull request #592 from ryanpetrello/fix-7793
fix a bug which caused v1 cred backwards-compat to apply to v2 requests
2017-11-28 14:49:54 -05:00
Ryan Petrello
ce9d75c2e4 Merge pull request #591 from ryanpetrello/rename-ovirt
rename oVirt4 to Red Hat Virtualization
2017-11-28 13:10:58 -05:00
Ryan Petrello
26845642f0 fix a bug which caused v1 cred backwards-compat to apply to v2 requests
see: https://github.com/ansible/ansible-tower/issues/7793
2017-11-28 13:05:13 -05:00
Ryan Petrello
6fa0d9d4ed rename oVirt4 to Red Hat Virtualization
see: https://github.com/ansible/ansible-tower/issues/7790
2017-11-28 11:02:42 -05:00
Ryan Petrello
7accac2f63 Merge pull request #590 from ryanpetrello/fix-7784
fix a bug in survey password default validation within workflows
2017-11-27 18:04:30 -05:00
Ryan Petrello
044c047ac6 fix a bug in survey password default validation
see: https://github.com/ansible/ansible-tower/issues/7046
see: https://github.com/ansible/ansible-tower/issues/7764
see: https://github.com/ansible/ansible-tower/issues/7784
2017-11-27 17:25:45 -05:00
Chris Meyers
5a2ecd25e7 Merge pull request #589 from ansible/fix-project_update_cascade_fast
correctly cascade job cancel
2017-11-27 13:21:25 -05:00
Chris Meyers
6c89935521 correctly cascade job cancel
* Check the reason for a dependent project update failure. If it's
because of a cancel, then let the normal cancel mechanisms update the
job's status and explanation. Do not update the dependent job's status
in the run code for a project update that was canceled.
2017-11-27 12:34:55 -05:00
Aaron Tan
0641c6b0a6 Suppress exception with concurrent deletion
Relates https://github.com/ansible/ansible-tower/issues/7768

This issue, as well as
https://github.com/ansible/ansible-tower/issues/7622, is rooted in a
concurrency issue of the Django ORM:
https://code.djangoproject.com/ticket/28806

The fix here deals specifically with the related issue, but is
not a general solution. A general workaround can be found in
https://github.com/ansible/tower/pull/500.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-11-17 16:29:08 -05:00
Alan Rominger
4ea27e0d1b Merge pull request #587 from AlanCoding/computed_self
update original when updating computed fields
2017-11-17 12:23:43 -05:00
Wayne Witzel III
79c196fc08 Merge pull request #588 from wwitzel3/release_3.2.2
Include all previously run operations to satisfy Django migration planner
2017-11-17 12:12:21 -05:00
Wayne Witzel III
249a5e5e4d Include all previously run operations to satisfy Django migration planner. 2017-11-17 12:02:07 -05:00
AlanCoding
51c73cb357 update original when updating computed fields 2017-11-17 10:16:49 -05:00
Ryan Petrello
8d35b71321 Merge pull request #585 from ryanpetrello/fix-wfjt-survey-encryption
re-encrypt WFJT.survey_spec and WorkflowJob.extra_vars too
2017-11-17 08:52:54 -05:00
Ryan Petrello
a80d5b1b39 reencrypt WFJT.survey_spec too
https://github.com/ansible/ansible-tower/issues/7046
2017-11-16 23:00:22 -05:00
Wayne Witzel III
e5d86419c8 Merge pull request #582 from AlanCoding/smart_computed2
update smart inventory computed fields
2017-11-16 19:36:08 -05:00
Greg Considine
54a98ff612 Merge pull request #581 from gconsidine/ui/fix/disabled-button-opacity
Update disabled button color to match style guide
2017-11-16 16:40:06 -05:00
Ryan Petrello
e7077185bf Merge pull request #584 from ryanpetrello/release_3.2.2
make settings.AWX_ISOLATED_KEY_GENERATION readonly
2017-11-16 13:42:00 -05:00
Ryan Petrello
4187d02b8a make settings.AWX_ISOLATED_KEY_GENERATION readonly
see: https://github.com/ansible/ansible-tower/issues/7380
2017-11-16 13:35:37 -05:00
Matthew Jones
457359322f Merge pull request #583 from ansible/nicer_error_tower_inventory
Present the tower inventory sync failure in a better way
2017-11-16 13:16:22 -05:00
Matthew Jones
8a65c6e1c8 Present the tower inventory sync failure in a better way
This allows it to be handled better by ansible 2.4+
2017-11-16 12:59:12 -05:00
AlanCoding
fb29f68efc update smart inventory computed fields 2017-11-16 11:57:30 -05:00
gconsidine
1fcddba558 Update disabled button color to match style guide 2017-11-16 11:15:23 -05:00
Chris Meyers
e20599d7bb Merge pull request #580 from chrismeyersfsu/fix-project_update_cascade_tower
cascade cancel proj update when job canceled
2017-11-15 11:19:35 -05:00
Chris Meyers
9288b53015 cascade cancel proj update when job canceled
* Implicit project updates, launch_type='sync', get "associated" with a
job via project_update. When a job is canceled, this implicit
project update should be canceled as well. This change enforces that logic.
2017-11-15 11:17:52 -05:00
Ryan Petrello
82be0a8af2 Merge pull request #579 from ryanpetrello/fix-survey-encryption-migration-failure
fix a bug in the survey reencryption migration
2017-11-15 10:38:35 -05:00
Ryan Petrello
35c374fc79 fix a bug in the survey reencryption migration
see: https://github.com/ansible/ansible-tower/issues/7046
2017-11-15 10:26:46 -05:00
Alan Rominger
dbe135991b Merge pull request #575 from AlanCoding/single_cancel
do not propagate cancel of inventory sync back up to project
2017-11-15 09:46:41 -05:00
Bill Nottingham
64f89b3fce Merge pull request #578 from wenottingham/protect-our-environment
Remove some environment variables the callback plugin doesn't actually use
2017-11-14 16:10:53 -05:00
Bill Nottingham
aaaae87aa7 Remove some environment variables the callback plugin doesn't actually use. 2017-11-14 15:57:49 -05:00
Ryan Petrello
44a2d7a346 Merge pull request #577 from ryanpetrello/release_3.2.2
render survey_spec for display purposes in a safe manner
2017-11-13 15:18:09 -05:00
Ryan Petrello
be00b1ca96 render survey_spec for display purposes in a safe manner
survey_spec is a nested dict, so if we don't `deepcopy()` it, updates
to the individual fields could corrupt the original data structure;
this was causing a bug whereby activity stream updates converted
encrypted survey password defaults -> `$encrypted$`, but inadvertently
modified the originating model due to shared references

see: https://github.com/ansible/ansible-tower/issues/7769
2017-11-13 13:01:56 -05:00
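A generic sketch of the shared-reference pitfall described above, using hypothetical field names: `deepcopy()` gives the display layer its own structure to rewrite, so masking password defaults no longer mutates the originating model.

```python
import copy

def spec_for_display(survey_spec):
    display = copy.deepcopy(survey_spec)  # without this, the model's spec is mutated
    for field in display.get("spec", []):
        if field.get("type") == "password":
            field["default"] = "$encrypted$"
    return display
```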
AlanCoding
33574d70c8 do not propagate cancel of inventory sync back up to project 2017-11-13 08:44:00 -05:00
Ryan Petrello
bc705ad8ce Merge pull request #574 from ryanpetrello/fix-7764
properly perform validation on encrypted survey defaults
2017-11-10 12:07:54 -05:00
Ryan Petrello
78961c8037 properly perform validation on encrypted survey defaults
see: https://github.com/ansible/ansible-tower/issues/7764
2017-11-10 10:52:09 -05:00
Alan Rominger
e22486ada8 Merge pull request #573 from AlanCoding/7765
[3.2.2] fix bug of system auditor 404 viewing job
2017-11-08 11:16:28 -05:00
AlanCoding
0051da95c9 fix bug of system auditor 404 viewing job 2017-11-08 10:44:41 -05:00
Ryan Petrello
122142c040 Merge pull request #572 from ryanpetrello/isolated-debug-toolkit
add some useful tools for isolated connectivity debugging
2017-11-08 10:08:05 -05:00
Ryan Petrello
91ad0a9f89 add a useful tool for isolated connectivity debugging 2017-11-08 09:27:33 -05:00
Ryan Fitzpatrick
6ea3ecbb26 Merge pull request #567 from rmfitzpatrick/tower_inv_source_filter_wording
Provide more specificity to Tower inventory filter help text
2017-11-07 10:55:30 -05:00
Jared Tabor
e87dce023b Merge pull request #568 from jaredevantabor/source-deletion
When deleting a source, delete the source's groups too
2017-11-06 11:27:46 -08:00
Matthew Jones
89a05e9bbc Handle json decoder errors from tower inventory source 2017-11-06 14:15:53 -05:00
Jared Tabor
96fbc9ea27 Merge pull request #571 from jaredevantabor/6551
Band aid for rapidly deleting groups
2017-11-03 16:28:22 -07:00
Jared Tabor
e70d377a53 feedback from PR: launch both DELETE calls at the same time
for both groups and hosts.
2017-11-03 16:26:00 -07:00
Jared Tabor
f65ef9f75c delay removing the delete modal until after the list refreshes, post-delete 2017-11-03 15:25:39 -07:00
Wayne Witzel III
7149c41804 Merge pull request #570 from wwitzel3/release_3.2.2
Handle ProgrammingError in squashed helpers
2017-11-03 10:45:09 -04:00
Wayne Witzel III
1a5b5c32b8 Handle ProgrammingError in squashed helpers 2017-11-03 10:34:25 -04:00
Wayne Witzel III
1b44ca8ef4 Merge pull request #569 from wwitzel3/release_3.2.2
Handle programming error when evaluating the replaces list for 320
2017-11-03 09:02:58 -04:00
Wayne Witzel III
d7f4707044 Handle programming error when evaluating the replaces list for 320 2017-11-03 08:53:53 -04:00
Jared Tabor
9d39ac83f9 When deleting a source, delete the source's groups too 2017-11-02 15:59:09 -07:00
Wayne Witzel III
ce393da6fd Merge pull request #564 from wwitzel3/squashbillies
Squashbillies - Fixing direct upgrades from 3.0/3.1 bug fix revisions.
2017-11-02 16:19:26 -04:00
Wayne Witzel III
2f86774006 3.2.0 should not have any replacements defined unless 0005a or 0005b migrations have been run previously 2017-11-02 16:11:45 -04:00
Ryan Fitzpatrick
e2c63c41e7 Provide more specificity to Tower inventory filter help text 2017-11-02 15:01:03 -04:00
Wayne Witzel III
f9685717b8 Move post 3.0 migrations to pre 3.1 position in migration files 2017-11-02 14:06:36 -04:00
Wayne Witzel III
47a3ba9bd5 Rename squash 300 to 30 2017-11-02 14:06:36 -04:00
Wayne Witzel III
af3e6f792c Rename squash 310 to 31 2017-11-02 14:06:35 -04:00
Wayne Witzel III
fc56a1c170 Fix 3.0 to 3.2 migration paths 2017-11-02 14:06:35 -04:00
Wayne Witzel III
84fb908261 Fix 3.1 to 3.2 migration paths 2017-11-02 14:06:33 -04:00
jlmitch5
cb4a38d7a7 Merge pull request #566 from ansible/click-to-close
Add close logic to clicking outside of the bounds of a lookup modal
2017-11-02 13:33:03 -04:00
John Mitchell
9518c38bb8 add close logic to clicking outside of the bounds of a lookup modal 2017-11-02 12:20:53 -04:00
Matthew Jones
5e37d6ea7e Remove unused TOWER_HOST and AWX_HOST env vars
These were conflicting with the new Tower credential
2017-11-02 10:44:06 -04:00
jlmitch5
54e76b2534 Merge pull request #565 from ansible/no_placeholder_select2_typeahead
munge placeholder from select2 typeahead search bar
2017-11-01 12:45:29 -04:00
jlmitch5
b8ed41fa82 munge placeholder from select2 typeahead search bar 2017-10-31 14:39:25 -04:00
Michael Abashian
fbd03287ea Merge pull request #549 from mabashian/7697-smart-inv-pagination
Only pass host filter param to smart inv shortcut form
2017-10-31 13:29:52 -04:00
Michael Abashian
7919433288 Merge pull request #548 from mabashian/7752-host-list
Fixed related host list linking
2017-10-31 13:29:35 -04:00
Ryan Petrello
3568be84c8 Merge pull request #561 from ryanpetrello/idle-hands
improve the callback worker's ability to deal with idle/disconnected DB
2017-10-31 10:05:29 -04:00
Ryan Petrello
8d2ab3de42 improve the callback worker's ability to deal with idle/disconnected DB
if database connectivity is lost, callback workers currently raise an
uncaught exception and hang; this can cause the entire process to stop
handling callback events

see: https://github.com/ansible/ansible-tower/issues/7660
2017-10-31 09:51:13 -04:00
Jared Tabor
4c4cbaef9f Merge pull request #562 from jaredevantabor/timezone
Changing angular-tz-extensions branch
2017-10-30 14:21:19 -07:00
Jared Tabor
aef224732c changing angular-tz to point to a branch with a patch for UTC timezones
and also patching angular-scheduler to point to angular 1.4.14
and also patching angular-codemirror to point to angular 1.4.14,
and adding fsevents:"*" to the package.json, and regenerating
npm-shrinkwrap.json for the new dependencies and their branches.
2017-10-30 14:20:24 -07:00
Jake McDermott
b0c1be7338 Merge pull request #563 from jakemcdermott/bug-7718
append credential types documentation link to popovers
2017-10-30 16:24:19 -04:00
Jake McDermott
14a3a6073e append credential types documentation link to help popovers 2017-10-30 15:23:27 -04:00
Jake McDermott
fc7c2117e9 Merge pull request #553 from jakemcdermott/bug-5449
use abbreviated month name for dashboard chart
2017-10-27 17:01:47 -04:00
Aaron Tan
962de13965 Merge pull request #503 from jangsutsr/fix-7712
[3.2.2] Special handle host related_search_fields
2017-10-27 11:16:09 -04:00
Aaron Tan
7211ff22df Special handle host related_search_fields
Relates #7712 of ansible-tower.

The UI uses the `related_search_fields` list to populate help text for resource
search. `ansible_facts` is searchable via the UI, but the general pickup logic
would ignore it, so it is handled as a special case.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-27 10:55:43 -04:00
Alan Rominger
003d7f0915 Merge pull request #543 from AlanCoding/urlencode_host_filter
[3.2.2] urlencode unquote host_filter on save
2017-10-27 08:50:39 -04:00
Matthew Jones
f019452207 Merge pull request #551 from ansible/tower_inventory_source
Tower inventory source
2017-10-27 08:41:24 -04:00
Alan Rominger
c323a2393a Merge pull request #552 from AlanCoding/retry_cleanup
[3.2.2] retry cleanup of build artifacts for bwrap race condition
2017-10-27 08:23:28 -04:00
Matthew Jones
85be3c7692 Align inventory variables with Ansible modules 2017-10-27 08:12:14 -04:00
Matthew Jones
5f3ebc26e0 Adding license checks for Tower inventory source
* For Tower the license must match between the source and destination
* For AWX the check is disabled
* Hosts imported from another Tower don't count against your license
  in the local Tower
* Fix up some issues with enablement
* Prevent slashes from being used in the instance filter
* Add &all=1 filter to make sure we pick up all hosts
2017-10-27 08:12:14 -04:00
Matthew Jones
d282966aa1 Use towervars to enable turning on remote tracking vars on Tower src
* This allows the local Tower to track enabled state and unique
  instance id for each host imported from the remote Tower
2017-10-27 08:12:14 -04:00
Matthew Jones
71e132ce0f Show instance filter ui element with tower inventory source 2017-10-27 08:12:14 -04:00
Jake McDermott
d6d84e8f5e use abbreviated month name for dashboard chart 2017-10-26 21:44:01 -04:00
Matthew Jones
fdc7f58bb4 Support passing instance filters to tower inventory src
* Switch ignore ssl errors to default on
* Application inventory source defaults for Tower src
2017-10-26 13:51:05 -04:00
Matthew Jones
6c597ad165 Adding initial credential and invsrc for Tower
* New credential type for Tower
* Inventory source definitions and migrations for Tower
* Initial Tower inventory source script
2017-10-26 13:51:05 -04:00
AlanCoding
48ec69c4f5 retry cleanup of build artifacts for bwrap race condition 2017-10-26 13:33:21 -04:00
Aaron Tan
1ea3d55167 Merge pull request #550 from jangsutsr/fix-7737-1
Follow up fix #7737
2017-10-26 11:29:05 -04:00
Aaron Tan
7181bd1c9b Follow up fix #7737
The original fix introduced a migration failure; this PR fixes that.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-26 11:15:07 -04:00
Ryan Petrello
9e8ac3b09b Merge pull request #547 from ryanpetrello/fix-cleanup-memory-usage
[3.2.2] Backport (from awx) various memory optimizations for job cleanup
2017-10-26 10:39:58 -04:00
Aaron Tan
e24e1fc1f0 Merge pull request #542 from jangsutsr/fix-7737
[3.2.2]support `AZURE_CLOUD_ENVIRONMENT`
2017-10-26 10:20:14 -04:00
Aaron Tan
f28b48a473 support AZURE_CLOUD_ENVIRONMENT
Relates #7737 of ansible-tower.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-26 10:11:21 -04:00
Michael Abashian
4f58537949 Merge pull request #546 from mabashian/6209-license-readonly-auth-forms
Make codemirror read-only when auth form field is disabled
2017-10-26 09:58:24 -04:00
mabashian
0512f65c8f Only pass host filter param to smart inv shortcut form 2017-10-26 09:55:36 -04:00
mabashian
947bdeed3e Fixed related host list linking 2017-10-25 16:59:44 -04:00
Mike McMahon
d3a7bec674 Backport (from awx) various memory optimizations for job cleanup
see: https://github.com/ansible/ansible-tower/issues/7751

0388568 Reduces the job to only looking at objects older than the cutoff date
0234311 missing colon and missing variable usage
399e0e5 switching to iterator and adding the missed Job cleanup
0cd34c1 jobs take count of gte cutoff, process only lt cutoff
2017-10-25 12:17:46 -04:00
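A rough sketch of the pattern those backported commits describe, assuming a `Job` model with a `created` field: count the records newer than the cutoff, and stream only the older ones with `.iterator()` so the cleanup job never materializes the full queryset in memory.

```python
from datetime import timedelta

from django.utils import timezone

from awx.main.models import Job  # assumed import path for this sketch

def cleanup_old_jobs(days=30):
    cutoff = timezone.now() - timedelta(days=days)
    # Jobs newer than the cutoff are only counted, never loaded
    skipped = Job.objects.filter(created__gte=cutoff).count()
    # Older jobs are streamed one at a time instead of loaded as a list
    for job in Job.objects.filter(created__lt=cutoff).iterator():
        job.delete()
    return skipped
```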
Ryan Petrello
652facba9f Merge pull request #545 from ryanpetrello/fix-7746
work around an ansible bug that can cause project syncs to fail
2017-10-25 11:37:15 -04:00
mabashian
b1ef7506ea Make codemirror read-only when auth form field is disabled 2017-10-25 11:30:26 -04:00
Ryan Petrello
c95d7d465a work around an ansible bug that can cause project syncs to fail
https://github.com/ansible/ansible-tower/issues/7746
https://github.com/ansible/ansible/issues/30064
2017-10-25 11:09:39 -04:00
Michael Abashian
70919638ba Merge pull request #541 from mabashian/7608-adhoc-launch-modal
Made adhoc launch modal height dynamic
2017-10-24 14:07:32 -04:00
Michael Abashian
6ea48cd73e Merge pull request #540 from mabashian/6370-delete-inv-src
Delete inv source hosts before inv source
2017-10-24 14:07:08 -04:00
Michael Abashian
63ca8e4134 Merge pull request #538 from mabashian/7707-host-filter-remove-tags-v2
Fixed removing host filter search term with encoded character
2017-10-24 14:06:37 -04:00
Aaron Tan
725cc469cf Merge pull request #544 from jangsutsr/fix-7747
[3.2.2]Include vault credential check in job relaunch
2017-10-24 12:14:01 -04:00
Aaron Tan
665a4d83e3 Include vault credential check in job relaunch
Relates #7747 of ansible-tower.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-24 11:00:13 -04:00
AlanCoding
018514d657 urlencode unquote host_filter on save 2017-10-24 08:05:08 -04:00
mabashian
71d428433f Made adhoc launch modal height dynamic 2017-10-23 12:09:25 -04:00
Michael Abashian
2f689fffbe Merge pull request #531 from mabashian/4796-workflow-resize-v2
Zoom workflow graph to fit screen on initial load
2017-10-23 10:47:14 -04:00
mabashian
3119d5ed22 Delete inv source hosts before inv source 2017-10-20 20:10:36 -04:00
Ryan Petrello
aab27e9b93 Merge pull request #539 from ryanpetrello/fix-7740
fix a unicode handling bug in inventory source name migration
2017-10-20 13:35:20 -04:00
Ryan Petrello
b60a30cbd4 fix a unicode handling bug in inventory source name migration
see: https://github.com/ansible/ansible-tower/issues/7740
2017-10-20 12:13:44 -04:00
Ryan Petrello
88acd95a72 Merge pull request #534 from ryanpetrello/release_3.2.2
store cloudforms inventory cache files in the proper location on disk
2017-10-20 09:41:05 -04:00
mabashian
c3fbb07535 Fixed removing host filter search term with encoded character 2017-10-19 19:31:41 -04:00
Michael Abashian
8d043e6f85 Merge pull request #532 from mabashian/7681-disassociate-help-popover
Disassociate host/group popover text
2017-10-19 11:12:28 -04:00
Michael Abashian
31602c4b28 Merge pull request #533 from mabashian/7720-adhoc-launch-error
Fixed error message when launching adhoc command
2017-10-19 11:11:44 -04:00
Ryan Petrello
57cd8adc2d Merge pull request #537 from ryanpetrello/ovirt4-auth-module
properly support authentication for ovirt4 ansible modules
2017-10-19 09:57:16 -04:00
Ryan Petrello
c1e20fe7a0 properly support authentication for ovirt4 ansible modules
see: https://github.com/ansible/ansible-tower/issues/6522
see: https://github.com/ansible/ansible-tower/issues/6522#issuecomment-337909863
2017-10-19 09:47:25 -04:00
Ryan Petrello
b1f5529aa4 Merge pull request #536 from ryanpetrello/fix-7741
properly follow symlinks for bwrap'd working directories
2017-10-19 08:58:16 -04:00
Alan Rominger
350699eda8 Merge pull request #504 from AlanCoding/fk_error_msg
[3.2.2] tweak of error message for ForeignKey filters
2017-10-18 19:06:12 -04:00
Ryan Petrello
10a7544d68 properly follow symlinks for bwrap'd working directories
see: https://github.com/ansible/ansible-tower/issues/7741
2017-10-18 17:03:10 -04:00
Jared Tabor
d3eea5e694 generalizing class which is ignored when trying to drag the host-event-modal
it was only applied to .CodeMirror, which is only used by the JSON tab
2017-10-18 10:55:53 -07:00
Marliana Lara
8fd9fea113 Merge pull request #530 from marshmalien/fix/7702-job-stdout-wordwrap
Fix job standard out word-wrap
2017-10-18 13:51:36 -04:00
Wayne Witzel III
470a4b7746 Merge pull request #535 from wwitzel3/release_3.2.2
use getattr in social auth django strategy
2017-10-18 11:02:11 -04:00
Wayne Witzel III
38c2ea7025 use getattr in social auth django strategy 2017-10-18 10:20:44 -04:00
Ryan Petrello
5895654538 store cloudforms inventory cache files in the proper location on disk
with process isolation enabled (which is the awx default), cloudforms
caches inventory script results on disk; awx should direct cloudforms to
store these cache files in a location that's exposed to the isolated
environment

see: ansible/ansible#31760
2017-10-17 17:07:21 -04:00
mabashian
b402d9ba6d Fixed error message when launching adhoc command 2017-10-17 14:42:24 -04:00
mabashian
5db478a4a0 Zoom workflow graph to fit screen on initial load 2017-10-17 12:59:44 -04:00
mabashian
059347eec3 Made disassociate host/group titles more descriptive 2017-10-17 10:23:17 -04:00
mabashian
e8dbfa42cf Fixed disassociate host from group help text 2017-10-17 10:10:34 -04:00
Michael Abashian
3d12e040ed Merge pull request #528 from mabashian/5129-jt-spinner-v2
Moved wait stop calls on jt form so that they fire right before reloading state
2017-10-16 16:20:02 -04:00
Michael Abashian
fceca3bcae Merge pull request #527 from mabashian/7697-smart-inventory-shortcut
Fixed smart inv button bug navigating to page 2 of hosts.
2017-10-16 16:19:40 -04:00
Marliana Lara
fcd03fb1c2 Fix job standard out error message word-wrap 2017-10-13 14:59:57 -04:00
mabashian
2cab6982c1 Moved wait stop calls on jt form so that they fire right before reloading state 2017-10-12 17:01:32 -04:00
mabashian
3ede367df4 Fixed smart inv button bug navigating to page 2 of hosts. Added tooltip when button is disabled. 2017-10-12 16:56:21 -04:00
Alan Rominger
f6bf0ad21f Merge pull request #521 from AlanCoding/update_isolated
[3.2.2] update isolated container requirements
2017-10-12 14:24:38 -04:00
Greg Considine
817b397d20 Merge pull request #524 from gconsidine/ui/fix/remove-unsupported-query-tokens
Remove unsupported tokens from search generated queries
2017-10-11 17:54:26 -04:00
jlmitch5
b61fdaf721 Merge pull request #526 from jlmitch5/hideWorkflowAndSurveyButtons
hide workflow and survey buttons from non-detail tabs
2017-10-11 11:35:35 -04:00
John Mitchell
1603106cb4 include workflow editor when showing buttons 2017-10-11 10:58:38 -04:00
Aaron Tan
1454000b91 Merge pull request #522 from jangsutsr/ldap_docs
Add LDAP deploy instructions
2017-10-11 10:07:29 -04:00
Alan Rominger
b2e63d5e47 Merge pull request #510 from AlanCoding/event_accounting
add logger statement for number of events
2017-10-10 22:12:11 -04:00
Alan Rominger
e7ede6af4a Merge pull request #525 from AlanCoding/update_isolated_version
[3.2.2] update fallback isolated version to 3.2.2
2017-10-10 22:06:09 -04:00
Alan Rominger
5503d4efb4 Merge pull request #523 from AlanCoding/fix_isolated_capacity
[3.2.2] fix equation for isolated instance capacity
2017-10-10 22:04:03 -04:00
John Mitchell
54640dbca0 hide workflow and survey buttons from non-detail tabs
since the two are basically sub-states of the edit form (detail tab), they should only show up when that tab is selected
2017-10-10 17:08:04 -04:00
AlanCoding
eab82f3efa updated fallback isolated version to 3.2.2 2017-10-10 15:45:20 -04:00
gconsidine
9e3d90896b Remove unsupported tokens from search generated queries 2017-10-10 15:22:17 -04:00
AlanCoding
e66a1002ee fix equation for isolated instance capacity 2017-10-10 14:58:09 -04:00
Aaron Tan
82160e2072 Add LDAP deploy instructions
Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-10 14:54:53 -04:00
AlanCoding
e814f28039 add logger statement for number of events 2017-10-10 14:48:00 -04:00
AlanCoding
03e58523b2 tweak of error message for ForeignKey filters 2017-10-10 14:47:37 -04:00
AlanCoding
341ef411a4 update isolated container requirements 2017-10-10 14:46:41 -04:00
Aaron Tan
8d19555cf1 Merge pull request #519 from jangsutsr/fix-7726
Disable inventory var overwrite in inv import
2017-10-10 14:38:50 -04:00
Greg Considine
d23fd0515d Merge pull request #518 from gconsidine/ui/fix/credential-kind-list-display
Use credential_type to fetch associated types in list view
2017-10-10 14:38:50 -04:00
Aaron Tan
b9483c28b0 Disable inventory var overwrite in inv import
Relates #7726 of ansible-tower.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-10 14:38:50 -04:00
Alan Rominger
6f9fc0c3f8 Merge pull request #514 from AlanCoding/no_ordereddit
[3.2.2] prevent OrderedDict syntax in error message
2017-10-10 14:38:49 -04:00
gconsidine
766a088749 Use credential_type to fetch associated types in list view 2017-10-10 14:38:49 -04:00
Chris Church
2b539cab85 Merge pull request #511 from cchurch/ldap-filter-dash-support
[3.2.2] Support dash in LDAP attribute names in filters.
2017-10-10 14:38:49 -04:00
AlanCoding
2fb67a3648 prevent OrderedDict syntax in error message 2017-10-10 14:38:49 -04:00
Greg Considine
64c5e3994e Merge pull request #513 from gconsidine/ui/fix/lookup-component-empty-input
Set lookup value changed from something to nothing to be null
2017-10-10 14:38:49 -04:00
Alan Rominger
7b792926eb Merge pull request #509 from AlanCoding/max_ui_events
[3.2.2] add CTiT setting for max UI job events
2017-10-10 14:38:49 -04:00
Chris Church
c067788428 Support dash in LDAP attribute names in filters. 2017-10-10 14:38:49 -04:00
gconsidine
b7071a48c2 Set lookup value changed from something to nothing to be null 2017-10-10 14:38:49 -04:00
AlanCoding
dee4b72303 add CTiT setting for max UI job events 2017-10-10 14:38:49 -04:00
Alan Rominger
5994a77b84 Merge pull request #508 from AlanCoding/password_handholding
[3.2.2] reword error message about encrypted user input
2017-10-10 14:38:48 -04:00
Marliana Lara
f93506fe2c Merge pull request #491 from marshmalien/fix/7661-host-config-key-border
[3.2.2] Fix missing right border of lookup buttons
2017-10-10 14:38:48 -04:00
Alan Rominger
7c86e38b81 Merge pull request #506 from AlanCoding/active_job_period
[3.2.2] add period to active job conflict error
2017-10-10 14:38:48 -04:00
AlanCoding
1c374fba7d reword error message about encrypted user input 2017-10-10 14:38:48 -04:00
Marliana Lara
2cc9e2ca0b Fix hidden right border of form input lookup buttons 2017-10-10 14:38:48 -04:00
Alan Rominger
335dfd564a Merge pull request #505 from AlanCoding/v1_jt_vc_ct_sf
[3.2.2] Exclude credential type content from v1
2017-10-10 14:38:48 -04:00
AlanCoding
5380d57ce8 add period to active job conflict error
Rename StateConflict to ActiveJobConflict and use a shared
message inside that exception class.
2017-10-10 14:38:48 -04:00
AlanCoding
a01f80db5b Exclude credential type content from v1
credential_type_id was showing up in vault_credential
summary_fields in API v1
2017-10-10 14:38:48 -04:00
Aaron Tan
d7eba47adb Merge pull request #456 from jangsutsr/fix-7656
[3.2.2]Remove search term separators
2017-10-10 14:38:47 -04:00
Alan Rominger
5fffdec69d Merge pull request #490 from AlanCoding/many_deleter_320
[3.2.2] Delete all hosts or groups from inventory source
2017-10-10 14:38:47 -04:00
Aaron Tan
358ef76529 Remove search term separators
Relates #7656 in ansible-tower.

We have been using comma `,` and space ` ` to separate search terms in the
query string `<field_name>__search=<search terms>`; however, in general we
can always use `&` to achieve separation, as in
`<field_name>__search=<search term 1>&<field_name>__search=<search term 2>&...`.
Using specific delimiters makes it impossible for search terms to contain
those delimiters, so they are better off removed.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-10 14:38:47 -04:00
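To illustrate the point, repeated query parameters keep each term intact even when it contains a comma or space (the URL and field name below are hypothetical):

```python
import requests

# Two search terms; the second contains a comma and would previously have
# been split into separate terms by the delimiter handling.
params = [('name__search', 'web'), ('name__search', 'prod,east')]
resp = requests.get('https://awx.example.com/api/v2/hosts/',
                    params=params, auth=('admin', 'password'))
print(resp.url)  # ...?name__search=web&name__search=prod%2Ceast
```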
Aaron Tan
bb628c52ad Merge pull request #487 from jangsutsr/fix-7586
[3.2.2]Fix SAML auth behind load balancer issue.
2017-10-10 14:38:47 -04:00
AlanCoding
d2e0b26287 allow deleting hosts and groups from inv src sublists 2017-10-10 14:38:47 -04:00
Ryan Petrello
f2d46baf09 Merge pull request #496 from ryanpetrello/fix-6683
fix a bug when Tower is integrated with ipsilon SAML server
2017-10-10 14:38:47 -04:00
Ryan Petrello
c6fdadd7f2 Merge pull request #497 from ryanpetrello/fix-7259
properly sanitize encrypted default passwords in JT.survey_spec
2017-10-10 14:38:47 -04:00
Aaron Tan
cc8b115c6a Fix SAML auth behind load balancer issue.
Relates to #7586 of ansible-tower as a follow-up of fix #420 of tower.

The original fix works for Django 1.9 and above; this PR extends the solution
to Django 1.8 and below.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-10 14:38:47 -04:00
Ryan Petrello
82d05e0a10 properly sanitize encrypted default passwords in JT.survey_spec
see: https://github.com/ansible/ansible-tower/issues/7259
2017-10-10 14:38:47 -04:00
Ryan Petrello
9978b3f9ad Merge pull request #489 from ryanpetrello/release_3.2.2
fix busted 3.2.2 activity stream migration
2017-10-10 14:38:46 -04:00
Alan Rominger
4f4af058b3 Merge pull request #480 from AlanCoding/committed_cap
[3.2.2] add IG committed capacity to serializer
2017-10-10 14:38:46 -04:00
Ryan Petrello
b372cebf8d fix a bug when Tower is integrated with ipsilon SAML server
https://github.com/ansible/ansible-tower/issues/6683
2017-10-10 14:38:46 -04:00
Alan Rominger
3df8e2beb1 Merge pull request #494 from AlanCoding/wfjt_perm_fix2
[3.2.2] fix bug checking WFJT node for prompted resources
2017-10-10 14:38:46 -04:00
AlanCoding
c45fbcf2ee add IG committed capacity to serializer 2017-10-10 14:38:46 -04:00
Ryan Petrello
5efa50788f Merge pull request #481 from ryanpetrello/fix-7046
[3.2.2] encrypt job survey data
2017-10-10 14:38:46 -04:00
AlanCoding
3abbe87e10 fix bug checking WFJT node for prompted resources 2017-10-10 14:38:46 -04:00
Ryan Petrello
f26bdb3e96 migrate existing survey passwords to be encrypted
see: https://github.com/ansible/ansible-tower/issues/7046
2017-10-10 14:38:46 -04:00
Ryan Petrello
4be4e3db7f encrypt job survey data
see: https://github.com/ansible/ansible-tower/issues/7046
2017-10-10 14:38:46 -04:00
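A simplified sketch of the idea (not the shipped serializer or migration code): survey answers flagged as passwords are encrypted before save, and a `survey_passwords` map records which keys should be shown redacted. The `encrypt` callable stands in for AWX's field-encryption helper.

```python
REPLACE_STR = '$encrypted$'

def protect_survey_passwords(extra_vars, password_keys, encrypt):
    """Encrypt password-type survey answers in place; return the redaction map."""
    survey_passwords = {}
    for key in password_keys:
        if key in extra_vars:
            survey_passwords[key] = REPLACE_STR       # what the API will display
            extra_vars[key] = encrypt(extra_vars[key])  # what gets stored
    return survey_passwords
```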
Alan Rominger
4ea92f0dcb Merge pull request #306 from AlanCoding/new_perf_logging
[3.2.2] new method of performance logging
2017-10-10 14:38:45 -04:00
Ryan Petrello
a0cfbb93e9 fix busted 3.2.2 activity stream migration
see: ansible/ansible-tower#7704
2017-10-10 14:38:45 -04:00
Aaron Tan
08a784d50c Merge pull request #474 from jangsutsr/fix-7386
Include Tower configurations into activity stream
2017-10-10 14:38:45 -04:00
AlanCoding
9ee18d02c8 new method of performance logging 2017-10-10 14:38:45 -04:00
Ryan Petrello
4fd190e4c8 Merge pull request #468 from ryanpetrello/smarter-credtype-migrations
[3.2.2] add new credential types in a more stable way in migrations
2017-10-10 14:38:45 -04:00
Aaron Tan
a11e33458f Include Tower configurations into activity stream
Relates #7386 of ansible-tower.

Due to the uniqueness of the Tower configuration datastore model, it is not
fully compatible with the activity stream workflow. This PR introduces a
setting field on the activity stream model, along with other changes that make
Tower configuration a special case for activity streams.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-10 14:38:45 -04:00
Aaron Tan
84fdfbb898 Merge pull request #469 from jangsutsr/fix-7684
[3.2.2] Prevent slugify username from social sso backends
2017-10-10 14:38:45 -04:00
Ryan Petrello
f4a252a331 add new credential types in a more stable way in migrations
instead of writing individual migrations for new built-in credential
types, this change makes the "setup_tower_managed_defaults" function
idempotent so that it only adds the credential types you're missing
2017-10-10 14:38:45 -04:00
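A minimal sketch of what an idempotent defaults loader can look like; the model and field names are assumptions, but the key property matches the description above: re-running it only creates the credential types that are missing.

```python
def setup_tower_managed_defaults(CredentialType, defaults):
    """defaults maps a credential kind to its inputs/injectors definition."""
    for kind, attrs in defaults.items():
        # get_or_create makes repeated migration runs a no-op for existing rows
        CredentialType.objects.get_or_create(
            kind=kind, managed_by_tower=True, defaults=attrs,
        )
```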
Ryan Petrello
d4fe60756b Merge pull request #466 from ryanpetrello/ovirt4-inv-source
don't install pycurl from pypi; use a system package instead
2017-10-10 14:38:45 -04:00
Aaron Tan
f4ab979b59 Prevent slugify username from social sso backends
Relates #7684 of ansible-tower.

Slugifying usernames in python-social-auth disallows any non-alphanumeric
characters, which is overkill for awx/tower, so it is disabled.

Signed-off-by: Aaron Tan <jangsutsr@gmail.com>
2017-10-10 14:38:45 -04:00
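For context, python-social-auth exposes a setting for this behavior; a settings.py sketch of turning it off (an illustration of the intent, not the verbatim patch):

```python
# settings.py
SOCIAL_AUTH_SLUGIFY_USERNAMES = False  # keep usernames as provided by the identity provider
```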
Ryan Petrello
3d3d79b6b3 Merge pull request #464 from ryanpetrello/ovirt4-inv-source
add ovirt sdk dependency for ovirt4 support
2017-10-10 14:38:44 -04:00
Ryan Petrello
e06d4d7734 don't install pycurl from pypi; use a system package instead
the ovirt4 sdk relies on pycurl, which is complicated to install w/ pip;
rely on pycurl to be provided by a system package instead
2017-10-10 14:38:44 -04:00
Ryan Petrello
ab18a4a440 Merge pull request #454 from ryanpetrello/ovirt4-inv-source
support ovirt4 as a built-in inventory source
2017-10-10 14:38:44 -04:00
Ryan Petrello
7438062b97 add ovirt sdk dependency for ovirt4 support 2017-10-10 14:38:44 -04:00
Ryan Petrello
4510cd11db Merge pull request #452 from ryanpetrello/fix-7609
disable GCE inventory caching w/ a .ini file
2017-10-10 14:38:44 -04:00
Ryan Petrello
74f2509482 support ovirt4 as a built-in inventory source
see: https://github.com/ansible/ansible-tower/issues/6522
2017-10-10 14:38:44 -04:00
Ryan Petrello
f84e42ed15 Merge pull request #451 from ryanpetrello/fix-7609
disable GCE inventory source cache
2017-10-10 14:38:44 -04:00
Ryan Petrello
94b4dabee2 disable GCE inventory caching w/ a .ini file
see: https://github.com/ansible/ansible-tower/issues/7609
see: https://github.com/ansible/tower/pull/451#pullrequestreview-64454393
2017-10-10 14:38:44 -04:00
Ryan Petrello
94d44e8791 disable GCE inventory source cache
by default, the GCE inventory script caches results on disk for
5 minutes; disable this behavior

see: https://github.com/ansible/ansible-tower/issues/7609
2017-10-10 14:38:44 -04:00
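A hedged sketch of how an installer-side ini file could switch the cache off; the `[cache]` section and `cache_max_age` option names are assumptions about the GCE inventory script's configuration, not confirmed option names.

```python
import configparser
import os

def write_gce_ini(private_data_dir):
    cp = configparser.ConfigParser()
    # assumed option: a max age of 0 means cached results are never reused
    cp['cache'] = {'cache_max_age': '0'}
    path = os.path.join(private_data_dir, 'gce.ini')
    with open(path, 'w') as ini:
        cp.write(ini)
    return path
```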
Ryan Petrello
d24166bd68 Merge pull request #442 from ryanpetrello/fix-7554
properly encode LDAP DN values on validation
2017-10-10 14:38:43 -04:00
Ryan Petrello
62f82e7a7e Merge pull request #441 from ryanpetrello/fix-7607
allow the credential type to be changed for unused credentials
2017-10-10 14:38:43 -04:00
Ryan Petrello
7a21a45781 properly encode LDAP DN values on validation
see: https://github.com/ansible/ansible-tower/issues/7554
2017-10-10 14:38:43 -04:00
Ryan Petrello
91ec0a4482 Merge pull request #430 from ryanpetrello/fix-7620
don't show polymorphic_ctype in unique validation error messaging
2017-10-10 14:38:43 -04:00
Ryan Petrello
c8f4320b58 allow the credential type to be changed for unused credentials
see: https://github.com/ansible/ansible-tower/issues/7607
2017-10-10 14:38:43 -04:00
Ryan Petrello
71a725c5f8 Merge pull request #432 from ryanpetrello/fix-7513
add awx meta variables to adhoc command extra_vars
2017-10-10 14:38:43 -04:00
Ryan Petrello
96572fe3d4 don't show polymorphic_ctype in unique validation error messaging
see: https://github.com/ansible/ansible-tower/issues/7620
2017-10-10 14:38:43 -04:00
Ryan Petrello
554a9586c6 add awx meta variables to adhoc command extra_vars
see: https://github.com/ansible/ansible-tower/issues/7513
2017-10-10 14:38:37 -04:00
Ryan Petrello
f41c8cf4f2 Merge pull request #426 from ryanpetrello/fix-7655
don't append to the activity stream on LDAP group disassociate
2017-10-10 14:38:18 -04:00
Ryan Petrello
f2f42c2c8a don't append to the activity stream on LDAP group disassociate
for organizations w/ a large number of ldap orgs/teams, this results in
a _huge_ number of extraneous activity stream entries

see: https://github.com/ansible/ansible-tower/issues/7655
2017-10-10 14:38:18 -04:00
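A sketch of the general approach, assuming AWX's `disable_activity_stream` context manager (in `awx.main.signals`) suppresses the activity-stream signal handlers while bulk membership changes run:

```python
from awx.main.signals import disable_activity_stream

def prune_ldap_members(team, stale_users):
    # Bulk disassociation without generating an activity stream entry per user
    with disable_activity_stream():
        for user in stale_users:
            team.member_role.members.remove(user)
```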
316 changed files with 17371 additions and 23124 deletions

View File

@@ -57,7 +57,7 @@ For Linux platforms, refer to the following from Docker:
> https://docs.docker.com/engine/installation/linux/docker-ce/fedora/
**Centos**
**CentOS**
> https://docs.docker.com/engine/installation/linux/docker-ce/centos/
@@ -217,7 +217,7 @@ If you want to start and use the development environment, you'll first need to b
(container)# /bootstrap_development.sh
```
The above will do all the setup tasks, including running database migrations, so it amy take a couple minutes.
The above will do all the setup tasks, including running database migrations, so it may take a couple minutes.
Now you can start each service individually, or start all services in a pre-configured tmux session like so:
@@ -281,7 +281,7 @@ For feature work, take a look at the current [Enhancements](https://github.com/a
If it has someone assigned to it then that person is the person responsible for working the enhancement. If you feel like you could contribute then reach out to that person.
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](https://github.com/ansible/awx/blob/devel/docs/debugging.md).
**NOTE**
@@ -293,7 +293,7 @@ Fixing bugs, adding translations, and updating the documentation are always appr
## Submitting Pull Requests
Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) agains the `devel` branch.
Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) against the `devel` branch.
Here are a few things you can do to help the visibility of your change, and increase the likelihood that it will be accepted:
@@ -312,7 +312,7 @@ It's generally a good idea to discuss features with us first by engaging us in t
We like to keep our commit history clean, and will require resubmission of pull requests that contain merge commits. Use `git pull --rebase`, rather than
`git pull`, and `git rebase`, rather than `git merge`.
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefuly. Please be patient.
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
All submitted PRs will have the linter and unit tests run against them, and the status reported in the PR.

View File

@@ -13,24 +13,30 @@ This document provides a guide for installing AWX.
- [Choose a deployment platform](#choose-a-deployment-platform)
- [Official vs Building Images](#official-vs-building-images)
- [OpenShift](#openshift)
- [Prerequisites](#prerequisites)
- [Prerequisites](#prerequisites-1)
- [Deploying to Minishift](#deploying-to-minishift)
- [Pre-build steps](#pre-build-steps)
- [PostgreSQL](#postgresql)
- [Start the build](#start-the-build)
- [Post build](#post-build)
- [Accessing AWX](#accessing-awx)
- [Docker](#docker)
- [Kubernetes](#kubernetes)
- [Prerequisites](#prerequisites-2)
- [Pre-build steps](#pre-build-steps-1)
- [Start the build](#start-the-build-1)
- [Accessing AWX](#accessing-awx-1)
- [SSL Termination](#ssl-termination)
- [Docker or Docker Compose](#docker-or-docker-compose)
- [Prerequisites](#prerequisites-3)
- [Pre-build steps](#pre-build-steps-2)
- [Deploying to a remote host](#deploying-to-a-remote-host)
- [Inventory variables](#inventory-variables)
- [Docker registry](#docker-registry)
- [PostgreSQL](#postgresql-1)
- [Proxy settings](#proxy-settings)
- [Start the build](#start-the-build-1)
- [Start the build](#start-the-build-2)
- [Post build](#post-build-1)
- [Accessing AWX](#accessing-awx-1)
- [Accessing AWX](#accessing-awx-2)
## Getting started
@@ -54,7 +60,7 @@ Before you can run a deployment, you'll need the following installed in your loc
- [Docker](https://docs.docker.com/engine/installation/)
- [docker-py](https://github.com/docker/docker-py) Python module
- [GNU Make](https://www.gnu.org/software/make/)
- [Git](https://git-scm.com/)
- [Git](https://git-scm.com/) Requires Version 1.8.4+
### System Requirements
@@ -63,7 +69,7 @@ The system that runs the AWX service will need to satisfy the following requirem
- At leasts 4GB of memory
- At least 2 cpu cores
- At least 20GB of space
- Running Docker or Openshift
- Running Docker, Openshift, or Kubernetes
### AWX Tunables
@@ -71,11 +77,14 @@ The system that runs the AWX service will need to satisfy the following requirem
### Choose a deployment platform
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, docker-compose or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
The [installer](./installer) directory contains an [inventory](./installer/inventory) file, and a playbook, [install.yml](./installer/install.yml). You'll begin by setting variables in the inventory file according to the platform you wish to use, and then you'll start the image build and deployment process by running the playbook.
In the sections below, you'll find deployment details and instructions for each platform. To deploy to Docker, view the [Docker section](#docker), and for OpenShift, view the [OpenShift section](#openshift).
In the sections below, you'll find deployment details and instructions for each platform:
- [Docker and Docker Compose](#docker-and-docker-compose)
- [OpenShift](#openshift)
- [Kubernetes](#kubernetes).
### Official vs Building Images
@@ -133,10 +142,6 @@ Before starting the build process, review the [inventory](./installer/inventory)
> Name of the OpenShift project that will be created, and used as the namespace for the AWX app. Defaults to *awx*.
*awx_node_port*
> The web server port running inside the AWX pod. Defaults to *30083*.
*openshift_user*
> Username of the OpenShift user that will create the project, and deploy the application. Defaults to *developer*.
@@ -144,7 +149,7 @@ Before starting the build process, review the [inventory](./installer/inventory)
*docker_registry*
> IP address and port, or URL, for accessing a registry that the OpenShift cluster can access. Defaults to *172.30.1.1:5000*, the internal registry delivered with Minishift. This is not needed if you are using official hosted images.
n
*docker_registry_repository*
> Namespace to use when pushing and pulling images to and from the registry. Generally this will match the project name. It defaults to *awx*. This is not needed if you are using official hosted images.
@@ -271,16 +276,88 @@ The above example is taken from a Minishift instance. From a web browser, use `h
Once you access the AWX server, you will be prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
## Docker
## Kubernetes
### Prerequisites
You will need the following installed on the host where AWX will be deployed:
A Kubernetes deployment will require you to have access to a Kubernetes cluster as well as the following tools:
- [Docker](https://docs.docker.com/engine/installation/)
- [docker-py](https://github.com/docker/docker-py) Python module
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
- [helm](https://docs.helm.sh/using_helm/#quickstart-guide)
Note: After installing Docker, the Docker service must be started.
The installation program will reference `kubectl` directly. `helm` is only necessary if you are letting the installer configure PostgreSQL for you.
### Pre-build steps
Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section uncommenting when necessary. Make sure the openshift and standalone docker sections are commented out:
*kubernetes_context*
> Prior to running the installer, make sure you've configured the context for the cluster you'll be installing to. This is how the installer knows which cluster to connect to and what authentication to use
*awx_kubernetes_namespace*
> Name of the Kubernetes namespace where the AWX resources will be installed. This will be created if it doesn't exist
*docker_registry_*
> These settings should be used if building your own base images. You'll need access to an external registry and are responsible for making sure your kube cluster can talk to it and use it. If these are undefined and the dockerhub_ configuration settings are uncommented then the images will be pulled from dockerhub instead
### Start the build
After making changes to the `inventory` file use `ansible-playbook` to begin the install
```bash
$ ansible-playbook -i inventory install.yml
```
### Post build
After the playbook run completes, check the status of the deployment by running `kubectl get pods --namespace awx` (replace awx with the namespace you used):
```bash
# View the running pods, it may take a few minutes for everything to be marked in the Running state
$ kubectl get pods --namespace awx
NAME READY STATUS RESTARTS AGE
awx-2558692395-2r8ss 4/4 Running 0 29s
awx-postgresql-355348841-kltkn 1/1 Running 0 1m
```
### Accessing AWX
The AWX web interface is running in the AWX pod behind the `awx-web-svc` service:
```bash
# View available services
$ kubectl get svc --namespace awx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
awx-postgresql ClusterIP 10.7.250.208 <none> 5432/TCP 2m
awx-web-svc NodePort 10.7.241.35 <none> 80:30177/TCP 1m
```
The deployment process creates an `Ingress` named `awx-web-svc` also. Some kubernetes cloud providers will automatically handle routing configuration when an Ingress is created others may require that you more explicitly configure it. You can see what kubernetes knows about things with:
```bash
kubectl get ing --namespace awx
NAME HOSTS ADDRESS PORTS AGE
awx-web-svc * 35.227.x.y 80 3m
```
If your provider is able to allocate an IP Address from the Ingress controller then you can navigate to the address and access the AWX interface. For some providers it can take a few minutes to allocate and make this accessible. For other providers it may require you to manually intervene.
### SSL Termination
Unlike Openshift's `Route` the Kubernetes `Ingress` doesn't yet handle SSL termination. As such the default configuration will only expose AWX through HTTP on port 80. You are responsible for configuring SSL support until support is added (either to Kubernetes or AWX itself).
## Docker or Docker-Compose
### Prerequisites
- [Docker](https://docs.docker.com/engine/installation/) on the host where AWX will be deployed. After installing Docker, the Docker service must be started (depending on your OS, you may have to add the local user that uses Docker to the ``docker`` group, refer to the documentation for details)
- [docker-py](https://github.com/docker/docker-py) Python module.
If you're installing using Docker Compose, you'll need [Docker Compose](https://docs.docker.com/compose/install/).
### Pre-build steps
@@ -323,6 +400,13 @@ Before starting the build process, review the [inventory](./installer/inventory)
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.
*use_docker_compose*
> Switch to ``true`` to use Docker Compose instead of the standalone Docker install.
*docker_compose_dir*
When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
#### Docker registry
@@ -404,6 +488,8 @@ e240ed8209cd awx_task:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago
97e196120ab3 postgres:9.6 "docker-entrypoint..." 2 minutes ago Up 2 minutes 5432/tcp postgres
```
If you're deploying using Docker Compose, container names will be prefixed by the name of the folder where the docker-compose.yml file is created (by default, `awx`).
Immediately after the containers start, the *awx_task* container will perform required setup tasks, including database migrations. These tasks need to complete before the web interface can be accessed. To monitor the progress, you can follow the container's STDOUT by running the following:
```bash
@@ -466,3 +552,14 @@ Added instance awx to tower
The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).
You will prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
### Maintenance using docker-compose
After the installation, maintenance operations with docker-compose can be done by using the `docker-compose.yml` file created at the location pointed by `docker_compose_dir`.
Among the possible operations, you may:
- Stop AWX : `docker-compose stop`
- Upgrade AWX : `docker-compose pull && docker-compose up --force-recreate`
See the [docker-compose documentation](https://docs.docker.com/compose/) for details.

View File

@@ -12,10 +12,10 @@ MANAGEMENT_COMMAND ?= awx-manage
IMAGE_REPOSITORY_AUTH ?=
IMAGE_REPOSITORY_BASE ?= https://gcr.io
VERSION=$(shell git describe --long)
VERSION3=$(shell git describe --long | sed 's/\-g.*//')
VERSION3DOT=$(shell git describe --long | sed 's/\-g.*//' | sed 's/\-/\./')
RELEASE_VERSION=$(shell git describe --long | sed 's@\([0-9.]\{1,\}\).*@\1@')
VERSION=$(shell git describe --long --first-parent)
VERSION3=$(shell git describe --long --first-parent | sed 's/\-g.*//')
VERSION3DOT=$(shell git describe --long --first-parent | sed 's/\-g.*//' | sed 's/\-/\./')
RELEASE_VERSION=$(shell git describe --long --first-parent | sed 's@\([0-9.]\{1,\}\).*@\1@')
# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -299,7 +299,7 @@ uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --master-fifo=/awxfifo --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)"
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --master-fifo=/awxfifo --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:/bin/sh -c '[ -f /tmp/celery_pid ] && kill -1 `cat /tmp/celery_pid`'"
daphne:
@if [ "$(VENV_BASE)" ]; then \
@@ -322,10 +322,11 @@ runserver:
# Run to start the background celery worker for development.
celeryd:
rm -f /tmp/celery_pid
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -Q tower_scheduler,tower_broadcast_all,$(COMPOSE_HOST),$(AWX_GROUP_QUEUES) -n celery@$(COMPOSE_HOST)
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -Q tower_scheduler,tower_broadcast_all,$(COMPOSE_HOST),$(AWX_GROUP_QUEUES) -n celery@$(COMPOSE_HOST) --pidfile /tmp/celery_pid
# Run to start the zeromq callback receiver
receiver:
@@ -607,7 +608,7 @@ clean-elk:
docker rm tools_kibana_1
psql-container:
docker run -it --net tools_default --rm postgres:9.4.1 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
docker run -it --net tools_default --rm postgres:9.6 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
VERSION:
@echo $(VERSION_TARGET) > $@

View File

@@ -166,7 +166,13 @@ class FieldLookupBackend(BaseFilterBackend):
elif isinstance(field, models.BooleanField):
return to_python_boolean(value)
elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
return self.to_python_related(value)
try:
return self.to_python_related(value)
except ValueError:
raise ParseError(_('Invalid {field_name} id: {field_id}').format(
field_name=getattr(field, 'name', 'related field'),
field_id=value)
)
else:
return field.to_python(value)
@@ -243,11 +249,10 @@ class FieldLookupBackend(BaseFilterBackend):
# Search across related objects.
if key.endswith('__search'):
for value in values:
for search_term in force_text(value).replace(',', ' ').split():
search_value, new_keys = self.value_to_python(queryset.model, key, search_term)
assert isinstance(new_keys, list)
for new_key in new_keys:
search_filters.append((new_key, search_value))
search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
assert isinstance(new_keys, list)
for new_key in new_keys:
search_filters.append((new_key, search_value))
continue
# Custom chain__ and or__ filters, mutually exclusive (both can

View File

@@ -21,7 +21,7 @@ from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework.authentication import get_authorization_header
from rest_framework.exceptions import PermissionDenied
from rest_framework.exceptions import PermissionDenied, AuthenticationFailed
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
@@ -30,6 +30,7 @@ from rest_framework import views
# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import * # noqa
from awx.main.access import access_registry
from awx.main.utils import * # noqa
from awx.main.utils.db import get_all_field_names
from awx.api.serializers import ResourceAccessListElementSerializer
@@ -38,9 +39,10 @@ from awx.api.metadata import SublistAttachDetatchMetadata
__all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
'ListCreateAPIView', 'SubListAPIView', 'SubListCreateAPIView',
'SubListDestroyAPIView',
'SubListCreateAttachDetachAPIView', 'RetrieveAPIView',
'RetrieveUpdateAPIView', 'RetrieveDestroyAPIView',
'RetrieveUpdateDestroyAPIView', 'DestroyAPIView',
'RetrieveUpdateDestroyAPIView',
'SubDetailAPIView',
'ResourceAccessList',
'ParentMixin',
@@ -115,6 +117,10 @@ class APIView(views.APIView):
drf_request = super(APIView, self).initialize_request(request, *args, **kwargs)
request.drf_request = drf_request
try:
request.drf_request_user = getattr(drf_request, 'user', False)
except AuthenticationFailed:
request.drf_request_user = None
return drf_request
def finalize_response(self, request, response, *args, **kwargs):
@@ -140,7 +146,6 @@ class APIView(views.APIView):
response['X-API-Query-Count'] = len(q_times)
response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
analytics_logger.info("api response", extra=dict(python_objects=dict(request=request, response=response)))
return response
def get_authenticate_header(self, request):
@@ -269,12 +274,17 @@ class GenericAPIView(generics.GenericAPIView, APIView):
return serializer
def get_queryset(self):
#if hasattr(self.request.user, 'get_queryset'):
# return self.request.user.get_queryset(self.model)
if self.queryset is not None:
return self.queryset._clone()
elif self.model is not None:
return self.model._default_manager.all()
qs = self.model._default_manager
if self.model in access_registry:
access_class = access_registry[self.model]
if access_class.select_related:
qs = qs.select_related(*access_class.select_related)
if access_class.prefetch_related:
qs = qs.prefetch_related(*access_class.prefetch_related)
return qs
else:
return super(GenericAPIView, self).get_queryset()
@@ -442,6 +452,41 @@ class SubListAPIView(ParentMixin, ListAPIView):
return qs & sublist_qs
class DestroyAPIView(generics.DestroyAPIView):
def has_delete_permission(self, obj):
return self.request.user.can_access(self.model, 'delete', obj)
def perform_destroy(self, instance, check_permission=True):
if check_permission and not self.has_delete_permission(instance):
raise PermissionDenied()
super(DestroyAPIView, self).perform_destroy(instance)
class SubListDestroyAPIView(DestroyAPIView, SubListAPIView):
"""
Concrete view for deleting everything related by `relationship`.
"""
check_sub_obj_permission = True
def destroy(self, request, *args, **kwargs):
instance_list = self.get_queryset()
if (not self.check_sub_obj_permission and
not request.user.can_access(self.parent_model, 'delete', self.get_parent_object())):
raise PermissionDenied()
self.perform_list_destroy(instance_list)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_list_destroy(self, instance_list):
if self.check_sub_obj_permission:
# Check permissions for all before deleting, avoiding half-deleted lists
for instance in instance_list:
if self.has_delete_permission(instance):
raise PermissionDenied()
for instance in instance_list:
self.perform_destroy(instance, check_permission=False)
class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
# Base class for a sublist view that allows for creating subobjects
# associated with the parent object.
@@ -680,22 +725,11 @@ class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView):
pass
class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView):
def destroy(self, request, *args, **kwargs):
# somewhat lame that delete has to call it's own permissions check
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, RetrieveDestroyAPIView):
class RetrieveDestroyAPIView(RetrieveAPIView, DestroyAPIView):
pass
class DestroyAPIView(GenericAPIView, generics.DestroyAPIView):
class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
pass

View File

@@ -9,7 +9,6 @@ import re
import six
import urllib
from collections import OrderedDict
from dateutil import rrule
# Django
from django.conf import settings
@@ -44,7 +43,7 @@ from awx.main.fields import ImplicitRoleField
from awx.main.utils import (
get_type_for_model, get_model_for_type, timestamp_apiformat,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars)
has_model_field_prefetched, extract_ansible_vars, encrypt_dict)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import REPLACE_STR
@@ -345,7 +344,9 @@ class BaseSerializer(serializers.ModelSerializer):
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
if field == 'credential_type_id' and fk == 'credential' and self.version < 2: # TODO: remove version check in 3.3
if (
self.version < 2 and field == 'credential_type_id' and
fk in ['credential', 'vault_credential']): # TODO: remove version check in 3.3
continue
fval = getattr(fkval, field, None)
@@ -612,14 +613,12 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
result_stdout = serializers.SerializerMethodField()
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation', 'result_stdout',
'execution_node', 'result_traceback')
'job_cwd', 'job_env', 'job_explanation', 'execution_node',
'result_traceback')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
@@ -700,25 +699,17 @@ class UnifiedJobSerializer(BaseSerializer):
return ret
def get_result_stdout(self, obj):
obj_size = obj.result_stdout_size
if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
return _("Standard Output too large to display (%(text_size)d bytes), "
"only download supported for sizes over %(supported_size)d bytes") % {
'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
return obj.result_stdout
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-result_stdout')
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'result_stdout'))
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
@@ -758,14 +749,6 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
class Meta:
fields = ('result_stdout',)
def get_result_stdout(self, obj):
obj_size = obj.result_stdout_size
if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
return _("Standard Output too large to display (%(text_size)d bytes), "
"only download supported for sizes over %(supported_size)d bytes") % {
'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
return obj.result_stdout
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
@@ -912,7 +895,7 @@ class OrganizationSerializer(BaseSerializer):
class Meta:
model = Organization
fields = ('*',)
fields = ('*', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
@@ -1000,7 +983,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
class Meta:
model = Project
fields = ('*', 'organization', 'scm_delete_on_next_update', 'scm_update_on_launch',
'scm_update_cache_timeout', 'scm_revision',) + \
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
read_only_fields = ('scm_delete_on_next_update',)
@@ -1111,11 +1094,17 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
@@ -1234,8 +1223,9 @@ class HostSerializer(BaseSerializerWithVariables):
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id')
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',)
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
@@ -1726,10 +1716,18 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
inventory_source = self.reverse('api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}),
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
@@ -2125,7 +2123,7 @@ class CredentialSerializer(BaseSerializer):
def to_internal_value(self, data):
# TODO: remove when API v1 is removed
if 'credential_type' not in data:
if 'credential_type' not in data and self.version == 1:
# If `credential_type` is not provided, assume the payload is a
# v1 credential payload that specifies a `kind` and a flat list
# of field values
@@ -2162,10 +2160,22 @@ class CredentialSerializer(BaseSerializer):
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
for rel in (
'ad_hoc_commands',
'insights_inventories',
'inventorysources',
'inventoryupdates',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, rel).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
@@ -2346,14 +2356,30 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
if obj.credential:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential})
if obj.vault_credential:
res['vault_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.vault_credential})
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
try:
if obj.credential:
res['credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.credential}
)
except ObjectDoesNotExist:
setattr(obj, 'credential', None)
try:
if obj.vault_credential:
res['vault_credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.vault_credential}
)
except ObjectDoesNotExist:
setattr(obj, 'vault_credential', None)
if self.version > 1:
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
@@ -2504,7 +2530,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous')
'allow_simultaneous', 'custom_virtualenv')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
@@ -2608,15 +2634,23 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
))
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
if obj.project_update:
res['project_update'] = self.reverse('api:project_update_detail', kwargs={'pk': obj.project_update.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
res['create_schedule'] = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
@@ -2756,8 +2790,10 @@ class JobRelaunchSerializer(BaseSerializer):
def validate(self, attrs):
obj = self.context.get('obj')
if not obj.credential:
raise serializers.ValidationError(dict(credential=[_("Credential not found or deleted.")]))
if not obj.credential and not obj.vault_credential:
raise serializers.ValidationError(
dict(credential=[_("Neither credential nor vault credential provided.")])
)
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
@@ -2914,9 +2950,11 @@ class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars')
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout')
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
@@ -2926,8 +2964,12 @@ class SystemJobSerializer(UnifiedJobSerializer):
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
return obj.result_stdout
class SystemJobCancelSerializer(SystemJobSerializer):
@@ -3068,12 +3110,38 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
def validate(self, attrs):
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
# Build unsaved version of this config, use it to detect prompts errors
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
# Insert survey_passwords to track redacted variables
if 'extra_data' in attrs:
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
password_dict = {}
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict
if not isinstance(attrs['extra_data'], dict):
attrs['extra_data'] = parse_yaml_or_json(attrs['extra_data'])
encrypt_dict(attrs['extra_data'], password_dict.keys())
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
else:
db_extra_data = {}
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
raise serializers.ValidationError(
_('Provided variable {} has no database value to replace with.').format(key))
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
@@ -3085,19 +3153,9 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
raise serializers.ValidationError(errors)
# Model `.save` needs the container dict, not the pseudo fields
attrs['char_prompts'] = mock_obj.char_prompts
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
# Insert survey_passwords to track redacted variables
# TODO: perform encryption on save
if 'extra_data' in attrs:
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
password_dict = {}
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict
return attrs
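As a plain-Python illustration of the round trip described above (redact password answers on read, substitute the stored value when the placeholder comes back), with REPLACE_STR and the dict shapes standing in for the real model fields:

REPLACE_STR = '$encrypted$'

def redact(extra_data, password_vars):
    # Record which survey answers are passwords so they are never echoed back.
    return {key: REPLACE_STR for key in password_vars if key in extra_data}

def restore(submitted, db_extra_data, password_vars):
    # If the client sends the placeholder back, substitute the stored DB value.
    restored = dict(submitted)
    for key in password_vars:
        if restored.get(key) == REPLACE_STR:
            if key not in db_extra_data:
                raise ValueError('Provided variable {} has no database value to replace with.'.format(key))
            restored[key] = db_extra_data[key]
    return restored

print(redact({'token': 'hunter2'}, ['token']))                           # {'token': '$encrypted$'}
print(restore({'token': REPLACE_STR}, {'token': 'hunter2'}, ['token']))  # {'token': 'hunter2'}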
@@ -3108,7 +3166,7 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required') # required variables may be provided by WFJT or on launch
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
@@ -3360,6 +3418,41 @@ class JobEventWebSocketSerializer(JobEventSerializer):
return 'job_events'
class ProjectUpdateEventSerializer(JobEventSerializer):
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'project_update_events'
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
@@ -3419,6 +3512,76 @@ class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
return 'ad_hoc_command_events'
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = InventoryUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'inventory_update_events'
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = SystemJobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'system_job_events'
class JobLaunchSerializer(BaseSerializer):
# Representational fields
@@ -3483,15 +3646,16 @@ class JobLaunchSerializer(BaseSerializer):
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
if self.version > 1:
defaults_dict[field_name] = [
dict(
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
for cred in obj.credentials.all()
]
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.inputs.get('vault_id') or None
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
@@ -3506,7 +3670,7 @@ class JobLaunchSerializer(BaseSerializer):
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts', 'required'], # make several error types non-blocking
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
@@ -3708,32 +3872,11 @@ class LabelSerializer(BaseSerializer):
return res
class ScheduleSerializer(LaunchConfigurationBaseSerializer):
show_capabilities = ['edit', 'delete']
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run',)
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
return res
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
return value
fields = ('rrule',)
# We reject rrules if:
# - DTSTART is not included
@@ -3751,20 +3894,21 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer):
multi_by_month = ".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = ".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(".*?(DTSTART\:[0-9]+T[0-9]+Z)", rrule_value)
match_multiple_dtstart = re.findall(".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ'))
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE require in rrule.'))
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'tzid' in rrule_value.lower():
raise serializers.ValidationError(_('TZID is not supported.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
@@ -3782,9 +3926,46 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer):
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
rrule.rrulestr(rrule_value)
except Exception:
raise serializers.ValidationError(_("rrule parsing failed validation."))
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
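For reference, a value that passes the checks above (single DTSTART, single RRULE, explicit INTERVAL, bounded COUNT), parsed here with plain dateutil rather than the Schedule.rrulestr helper; assumes a recent python-dateutil is installed:

from dateutil import rrule

value = 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5'
# Five daily occurrences starting 2018-06-01 12:00:00 UTC.
for occurrence in rrule.rrulestr(value):
    print(occurrence)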
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run',)
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
six.text_type('Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.').format(value.source_project.name)))
return value
@@ -3820,6 +4001,7 @@ class InstanceSerializer(BaseSerializer):
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.SerializerMethodField()
@@ -3827,7 +4009,8 @@ class InstanceGroupSerializer(BaseSerializer):
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified", "capacity", "consumed_capacity",
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "instances", "controller")
def get_related(self, obj):
@@ -3856,7 +4039,10 @@ class InstanceGroupSerializer(BaseSerializer):
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['consumed_capacity']
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
@@ -3954,6 +4140,11 @@ class ActivityStreamSerializer(BaseSerializer):
if fk == 'schedule':
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
rel['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
return rel
def _get_rel(self, obj, fk):
@@ -4005,6 +4196,8 @@ class ActivityStreamSerializer(BaseSerializer):
username = obj.actor.username,
first_name = obj.actor.first_name,
last_name = obj.actor.last_name)
if obj.setting:
summary_fields['setting'] = [obj.setting]
return summary_fields

View File

@@ -1,9 +1,9 @@
The resulting data structure contains:
{
"count": 99,
"next": null,
"previous": null,
"count": 99,
"next": null,
"previous": null,
"results": [
...
]
@@ -60,6 +60,10 @@ _Added in AWX 1.4_
?related__search=findme
Note: If you want to provide more than one search term, use multiple
search fields with the same key, like `?related__search=foo&related__search=bar`.
All search terms with the same key will be ORed together.
## Filtering
Any additional query string parameters may be used to filter the list of
@@ -70,7 +74,7 @@ in the specified value should be url-encoded. For example:
?field=value%20xyz
Fields may also span relations, only for fields and relationships defined in
the database:
the database:
?other__field=value
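A hypothetical request combining the search and filter syntax above; the host, credentials, and field names are placeholders:

import requests

resp = requests.get(
    'https://awx.example.com/api/v2/hosts/',
    params=[
        ('related__search', 'foo'),             # repeated keys are ORed together
        ('related__search', 'bar'),
        ('inventory__name', 'Demo Inventory'),  # a filter spanning a relation
    ],
    auth=('admin', 'password'),
)
print(resp.json()['count'])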

View File

@@ -0,0 +1,6 @@
{% include "api/sub_list_create_api_view.md" %}
# Delete all {{ model_verbose_name_plural }} of this {{ parent_model_verbose_name|title }}:
Make a DELETE request to this resource to delete all {{ model_verbose_name_plural }} shown in the list.
The {{ parent_model_verbose_name|title }} will not be deleted by this request.
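A hypothetical use of the bulk delete described above, against one of the sub-lists that gains it in this change set (inventory source hosts); the URL and credentials are placeholders:

import requests

# Deletes every host shown in the inventory source's hosts sub-list;
# the inventory source itself is left in place.
requests.delete('https://awx.example.com/api/v2/inventory_sources/42/hosts/',
                auth=('admin', 'password'))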

View File

@@ -9,6 +9,7 @@ from awx.api.views import (
InventoryUpdateCancel,
InventoryUpdateStdout,
InventoryUpdateNotificationsList,
InventoryUpdateEventsList,
)
@@ -18,6 +19,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/cancel/$', InventoryUpdateCancel.as_view(), name='inventory_update_cancel'),
url(r'^(?P<pk>[0-9]+)/stdout/$', InventoryUpdateStdout.as_view(), name='inventory_update_stdout'),
url(r'^(?P<pk>[0-9]+)/notifications/$', InventoryUpdateNotificationsList.as_view(), name='inventory_update_notifications_list'),
url(r'^(?P<pk>[0-9]+)/events/$', InventoryUpdateEventsList.as_view(), name='inventory_update_events_list'),
]
__all__ = ['urls']

View File

@@ -10,6 +10,7 @@ from awx.api.views import (
ProjectUpdateStdout,
ProjectUpdateScmInventoryUpdates,
ProjectUpdateNotificationsList,
ProjectUpdateEventsList,
)
@@ -20,6 +21,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/stdout/$', ProjectUpdateStdout.as_view(), name='project_update_stdout'),
url(r'^(?P<pk>[0-9]+)/scm_inventory_updates/$', ProjectUpdateScmInventoryUpdates.as_view(), name='project_update_scm_inventory_updates'),
url(r'^(?P<pk>[0-9]+)/notifications/$', ProjectUpdateNotificationsList.as_view(), name='project_update_notifications_list'),
url(r'^(?P<pk>[0-9]+)/events/$', ProjectUpdateEventsList.as_view(), name='project_update_events_list'),
]
__all__ = ['urls']

View File

@@ -8,6 +8,7 @@ from awx.api.views import (
SystemJobDetail,
SystemJobCancel,
SystemJobNotificationsList,
SystemJobEventsList
)
@@ -16,6 +17,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/$', SystemJobDetail.as_view(), name='system_job_detail'),
url(r'^(?P<pk>[0-9]+)/cancel/$', SystemJobCancel.as_view(), name='system_job_cancel'),
url(r'^(?P<pk>[0-9]+)/notifications/$', SystemJobNotificationsList.as_view(), name='system_job_notifications_list'),
url(r'^(?P<pk>[0-9]+)/events/$', SystemJobEventsList.as_view(), name='system_job_events_list'),
]
__all__ = ['urls']
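A hypothetical read of the event sub-lists wired up by the three URL additions above; the IDs, host, and credentials are placeholders:

import requests

base = 'https://awx.example.com/api/v2'
for url in (base + '/project_updates/10/events/',
            base + '/inventory_updates/11/events/',
            base + '/system_jobs/12/events/'):
    page = requests.get(url, auth=('admin', 'password')).json()
    print(url, page['count'])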

View File

@@ -22,6 +22,8 @@ from awx.api.views import (
JobExtraCredentialsList,
JobTemplateCredentialsList,
JobTemplateExtraCredentialsList,
SchedulePreview,
ScheduleZoneInfo,
)
from .organization import urls as organization_urls
@@ -113,6 +115,8 @@ v2_urls = [
url(r'^jobs/(?P<pk>[0-9]+)/credentials/$', JobCredentialsList.as_view(), name='job_credentials_list'),
url(r'^job_templates/(?P<pk>[0-9]+)/extra_credentials/$', JobTemplateExtraCredentialsList.as_view(), name='job_template_extra_credentials_list'),
url(r'^job_templates/(?P<pk>[0-9]+)/credentials/$', JobTemplateCredentialsList.as_view(), name='job_template_credentials_list'),
url(r'^schedules/preview/$', SchedulePreview.as_view(), name='schedule_rrule'),
url(r'^schedules/zoneinfo/$', ScheduleZoneInfo.as_view(), name='schedule_zoneinfo'),
]
app_name = 'api'

View File

@@ -2,24 +2,23 @@
# All Rights Reserved.
# Python
import os
import re
import cgi
import dateutil
import time
import socket
import subprocess
import sys
import logging
import requests
from base64 import b64encode
from collections import OrderedDict, Iterable
import six
# Django
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.models import Q, Count, F
from django.db import IntegrityError, transaction, connection
from django.db import IntegrityError, transaction
from django.shortcuts import get_object_or_404
from django.utils.encoding import smart_text, force_text
from django.utils.safestring import mark_safe
@@ -54,6 +53,7 @@ import ansiconv
# Python Social Auth
from social_core.backends.utils import load_backends
import pytz
from wsgiref.util import FileWrapper
# AWX
@@ -72,6 +72,7 @@ from awx.main.utils import (
extract_ansible_vars,
decrypt_field,
)
from awx.main.utils.encryption import encrypt_value
from awx.main.utils.filters import SmartFilter
from awx.main.utils.insights import filter_insights_api_response
@@ -315,6 +316,7 @@ class ApiV1ConfigView(APIView):
data.update(dict(
project_base_dir = settings.PROJECTS_ROOT,
project_local_paths = Project.get_local_path_choices(),
custom_virtualenvs = get_custom_venv_choices(),
))
return Response(data)
@@ -607,6 +609,43 @@ class ScheduleDetail(RetrieveUpdateDestroyAPIView):
new_in_148 = True
class SchedulePreview(GenericAPIView):
model = Schedule
view_name = _('Schedule Recurrence Rule Preview')
serializer_class = SchedulePreviewSerializer
new_in_api_v2 = True
def post(self, request):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
next_stamp = now()
schedule = []
gen = Schedule.rrulestr(serializer.validated_data['rrule']).xafter(next_stamp, count=20)
# loop across the entire generator and grab the first 10 events
for event in gen:
if len(schedule) >= 10:
break
if not dateutil.tz.datetime_exists(event):
# skip imaginary dates, like 2:30 on DST boundaries
continue
schedule.append(event)
return Response({
'local': schedule,
'utc': [s.astimezone(pytz.utc) for s in schedule]
})
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ScheduleZoneInfo(APIView):
def get(self, request):
from dateutil.zoneinfo import get_zonefile_instance
return Response(sorted(get_zonefile_instance().zones.keys()))
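A hypothetical call against the preview endpoint defined above: it returns the next occurrences (local and UTC) of a candidate rrule without saving a schedule. The host and credentials are placeholders:

import requests

resp = requests.post(
    'https://awx.example.com/api/v2/schedules/preview/',
    json={'rrule': 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1'},
    auth=('admin', 'password'),
)
print(resp.json()['utc'])   # up to ten upcoming run times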
class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
model = Credential
@@ -1366,6 +1405,45 @@ class ProjectUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
new_in_13 = True
class ProjectUpdateEventsList(SubListAPIView):
model = ProjectUpdateEvent
serializer_class = ProjectUpdateEventSerializer
parent_model = ProjectUpdate
relationship = 'project_update_events'
view_name = _('Project Update Events List')
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
class SystemJobEventsList(SubListAPIView):
model = SystemJobEvent
serializer_class = SystemJobEventSerializer
parent_model = SystemJob
relationship = 'system_job_events'
view_name = _('System Job Events List')
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)
class InventoryUpdateEventsList(SubListAPIView):
model = InventoryUpdateEvent
serializer_class = InventoryUpdateEventSerializer
parent_model = InventoryUpdate
relationship = 'inventory_update_events'
view_name = _('Inventory Update Events List')
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(InventoryUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
class ProjectUpdateCancel(RetrieveAPIView):
model = ProjectUpdate
@@ -1967,7 +2045,17 @@ class InventoryJobTemplateList(SubListAPIView):
return qs.filter(inventory=parent)
class HostList(ListCreateAPIView):
class HostRelatedSearchMixin(object):
@property
def related_search_fields(self):
# Edge-case handle: https://github.com/ansible/ansible-tower/issues/7712
ret = super(HostRelatedSearchMixin, self).related_search_fields
ret.append('ansible_facts')
return ret
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
always_allow_superuser = False
model = Host
@@ -2004,7 +2092,7 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
new_in_api_v2 = True
class InventoryHostsList(SubListCreateAttachDetachAPIView):
class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
model = Host
serializer_class = HostSerializer
@@ -2274,7 +2362,9 @@ class GroupPotentialChildrenList(SubListAPIView):
return qs.exclude(pk__in=except_pks)
class GroupHostsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
class GroupHostsList(HostRelatedSearchMixin,
ControlledByScmMixin,
SubListCreateAttachDetachAPIView):
''' the list of hosts directly below a group '''
model = Host
@@ -2301,7 +2391,7 @@ class GroupHostsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
return super(GroupHostsList, self).create(request, *args, **kwargs)
class GroupAllHostsList(SubListAPIView):
class GroupAllHostsList(HostRelatedSearchMixin, SubListAPIView):
''' the list of all hosts below a group, even including subgroups '''
model = Host
@@ -2419,6 +2509,8 @@ class InventoryScriptView(RetrieveAPIView):
def retrieve(self, request, *args, **kwargs):
obj = self.get_object()
hostname = request.query_params.get('host', '')
hostvars = bool(request.query_params.get('hostvars', ''))
towervars = bool(request.query_params.get('towervars', ''))
show_all = bool(request.query_params.get('all', ''))
if hostname:
hosts_q = dict(name=hostname)
@@ -2427,7 +2519,8 @@ class InventoryScriptView(RetrieveAPIView):
host = get_object_or_404(obj.hosts, **hosts_q)
return Response(host.variables_dict)
return Response(obj.get_script_data(
hostvars=bool(request.query_params.get('hostvars', '')),
hostvars=hostvars,
towervars=towervars,
show_all=show_all
))
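A hypothetical call showing the query parameters handled above; the host, inventory id, and credentials are placeholders:

import requests

resp = requests.get(
    'https://awx.example.com/api/v2/inventories/5/script/',
    params={'hostvars': 1, 'towervars': 1, 'all': 1},
    auth=('admin', 'password'),
)
print(sorted(resp.json().keys()))  # group names plus _meta when hostvars is requested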
@@ -2607,23 +2700,25 @@ class InventorySourceNotificationTemplatesSuccessList(InventorySourceNotificatio
relationship = 'notification_templates_success'
class InventorySourceHostsList(SubListAPIView):
class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
model = Host
serializer_class = HostSerializer
parent_model = InventorySource
relationship = 'hosts'
new_in_148 = True
check_sub_obj_permission = False
capabilities_prefetch = ['inventory.admin']
class InventorySourceGroupsList(SubListAPIView):
class InventorySourceGroupsList(SubListDestroyAPIView):
model = Group
serializer_class = GroupSerializer
parent_model = InventorySource
relationship = 'groups'
new_in_148 = True
check_sub_obj_permission = False
class InventorySourceUpdatesList(SubListAPIView):
@@ -2918,13 +3013,8 @@ class JobTemplateSurveySpec(GenericAPIView):
if not feature_enabled('surveys'):
raise LicenseForbids(_('Your license does not allow '
'adding surveys.'))
survey_spec = obj.survey_spec
for pos, field in enumerate(survey_spec.get('spec', [])):
if field.get('type') == 'password':
if 'default' in field and field['default']:
field['default'] = '$encrypted$'
return Response(survey_spec)
return Response(obj.display_survey_spec())
def post(self, request, *args, **kwargs):
obj = self.get_object()
@@ -2937,7 +3027,14 @@ class JobTemplateSurveySpec(GenericAPIView):
if not request.user.can_access(self.model, 'change', obj, None):
raise PermissionDenied()
new_spec = request.data
response = self._validate_spec_data(request.data, obj.survey_spec)
if response:
return response
obj.survey_spec = request.data
obj.save(update_fields=['survey_spec'])
return Response()
def _validate_spec_data(self, new_spec, old_spec):
if "name" not in new_spec:
return Response(dict(error=_("'name' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST)
if "description" not in new_spec:
@@ -2949,9 +3046,9 @@ class JobTemplateSurveySpec(GenericAPIView):
if len(new_spec["spec"]) < 1:
return Response(dict(error=_("'spec' doesn't contain any items.")), status=status.HTTP_400_BAD_REQUEST)
idx = 0
variable_set = set()
for survey_item in new_spec["spec"]:
old_spec_dict = JobTemplate.pivot_spec(old_spec)
for idx, survey_item in enumerate(new_spec["spec"]):
if not isinstance(survey_item, dict):
return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
if "type" not in survey_item:
@@ -2968,21 +3065,41 @@ class JobTemplateSurveySpec(GenericAPIView):
if "required" not in survey_item:
return Response(dict(error=_("'required' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
if survey_item["type"] == "password":
if survey_item.get("default") and survey_item["default"].startswith('$encrypted$'):
if not obj.survey_spec:
return Response(dict(error=_("$encrypted$ is reserved keyword and may not be used as a default for password {}.".format(str(idx)))),
status=status.HTTP_400_BAD_REQUEST)
else:
old_spec = obj.survey_spec
for old_item in old_spec['spec']:
if old_item['variable'] == survey_item['variable']:
survey_item['default'] = old_item['default']
idx += 1
if survey_item["type"] == "password" and "default" in survey_item:
if not isinstance(survey_item['default'], six.string_types):
return Response(dict(error=_(
"Value {question_default} for '{variable_name}' expected to be a string."
).format(
question_default=survey_item["default"], variable_name=survey_item["variable"])
), status=status.HTTP_400_BAD_REQUEST)
obj.survey_spec = new_spec
obj.save(update_fields=['survey_spec'])
return Response()
if ("default" in survey_item and isinstance(survey_item['default'], six.string_types) and
survey_item['default'].startswith('$encrypted$')):
# Submission expects the existence of encrypted DB value to replace given default
if survey_item["type"] != "password":
return Response(dict(error=_(
"$encrypted$ is a reserved keyword for password question defaults, "
"survey question {question_position} is type {question_type}."
).format(
question_position=str(idx), question_type=survey_item["type"])
), status=status.HTTP_400_BAD_REQUEST)
old_element = old_spec_dict.get(survey_item['variable'], {})
encryptedish_default_exists = False
if 'default' in old_element:
old_default = old_element['default']
if isinstance(old_default, six.string_types):
if old_default.startswith('$encrypted$'):
encryptedish_default_exists = True
elif old_default == "": # unencrypted blank string is allowed as DB value as special case
encryptedish_default_exists = True
if not encryptedish_default_exists:
return Response(dict(error=_(
"$encrypted$ is a reserved keyword, may not be used for new default in position {question_position}."
).format(question_position=str(idx))), status=status.HTTP_400_BAD_REQUEST)
survey_item['default'] = old_element['default']
elif survey_item["type"] == "password" and 'default' in survey_item:
# Submission provides new encrypted default
survey_item['default'] = encrypt_value(survey_item['default'])
def delete(self, request, *args, **kwargs):
obj = self.get_object()
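A hypothetical survey spec submission illustrating the password rules above: sending the reserved '$encrypted$' default keeps the previously stored secret, while a plain-text default would be encrypted server-side before saving. The endpoint, names, and credentials are placeholders:

import requests

spec = {
    'name': 'demo survey',
    'description': 'demo',
    'spec': [{
        'question_name': 'API token',
        'variable': 'api_token',
        'type': 'password',
        'required': False,
        'default': '$encrypted$',   # keep the existing stored default
    }],
}
requests.post('https://awx.example.com/api/v2/job_templates/7/survey_spec/',
              json=spec, auth=('admin', 'password'))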
@@ -3206,7 +3323,9 @@ class JobTemplateCallback(GenericAPIView):
for inventory_source in inventory_sources:
if inventory_source.needs_update_on_launch:
# FIXME: Doesn't check for any existing updates.
inventory_update = inventory_source.create_inventory_update(launch_type='callback')
inventory_update = inventory_source.create_inventory_update(
**{'_eager_fields': {'launch_type': 'callback'}}
)
inventory_update.signal_start()
inventory_update_pks.add(inventory_update.pk)
inventory_update_qs = InventoryUpdate.objects.filter(pk__in=inventory_update_pks, status__in=('pending', 'waiting', 'running'))
@@ -3240,7 +3359,8 @@ class JobTemplateCallback(GenericAPIView):
return Response(data, status=status.HTTP_400_BAD_REQUEST)
# Everything is fine; actually create the job.
kv = {"limit": limit, "launch_type": 'callback'}
kv = {"limit": limit}
kv.setdefault('_eager_fields', {})['launch_type'] = 'callback'
if extra_vars is not None and job_template.ask_variables_on_launch:
extra_vars_redacted, removed = extract_ansible_vars(extra_vars)
kv['extra_vars'] = extra_vars_redacted
@@ -4121,7 +4241,7 @@ class JobEventChildrenList(SubListAPIView):
view_name = _('Job Event Children List')
class JobEventHostsList(SubListAPIView):
class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
model = Host
serializer_class = HostSerializer
@@ -4141,7 +4261,7 @@ class BaseJobEventsList(SubListAPIView):
search_fields = ('stdout',)
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(BaseJobEventsList, self).finalize_response(request, response, *args, **kwargs)
@@ -4457,7 +4577,7 @@ class StdoutANSIFilter(object):
def __init__(self, fileobj):
self.fileobj = fileobj
self.extra_data = ''
if hasattr(fileobj,'close'):
if hasattr(fileobj, 'close'):
self.close = fileobj.close
def read(self, size=-1):
@@ -4491,97 +4611,69 @@ class UnifiedJobStdout(RetrieveAPIView):
def retrieve(self, request, *args, **kwargs):
unified_job = self.get_object()
obj_size = unified_job.result_stdout_size
if request.accepted_renderer.format not in {'txt_download', 'ansi_download'} and obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
response_message = _("Standard Output too large to display (%(text_size)d bytes), "
"only download supported for sizes over %(supported_size)d bytes") % {
'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
try:
target_format = request.accepted_renderer.format
if target_format in ('html', 'api', 'json'):
content_format = request.query_params.get('content_format', 'html')
content_encoding = request.query_params.get('content_encoding', None)
start_line = request.query_params.get('start_line', 0)
end_line = request.query_params.get('end_line', None)
dark_val = request.query_params.get('dark', '')
dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
content_only = bool(target_format in ('api', 'json'))
dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val))
content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line)
# Remove any ANSI escape sequences containing job event data.
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
body = ansiconv.to_html(cgi.escape(content))
context = {
'title': get_view_name(self.__class__),
'body': mark_safe(body),
'dark': dark_bg,
'content_only': content_only,
}
data = render_to_string('api/stdout.html', context).strip()
if target_format == 'api':
return Response(mark_safe(data))
if target_format == 'json':
if content_encoding == 'base64' and content_format == 'ansi':
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': b64encode(content.encode('utf-8'))})
elif content_format == 'html':
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': body})
return Response(data)
elif target_format == 'txt':
return Response(unified_job.result_stdout)
elif target_format == 'ansi':
return Response(unified_job.result_stdout_raw)
elif target_format in {'txt_download', 'ansi_download'}:
filename = '{type}_{pk}{suffix}.txt'.format(
type=camelcase_to_underscore(unified_job.__class__.__name__),
pk=unified_job.id,
suffix='.ansi' if target_format == 'ansi_download' else ''
)
content_fd = unified_job.result_stdout_raw_handle(enforce_max_bytes=False)
if target_format == 'txt_download':
content_fd = StdoutANSIFilter(content_fd)
response = HttpResponse(FileWrapper(content_fd), content_type='text/plain')
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
else:
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
except StdoutMaxBytesExceeded as e:
response_message = _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes").format(
text_size=e.total, supported_size=e.supported
)
if request.accepted_renderer.format == 'json':
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
else:
return Response(response_message)
if request.accepted_renderer.format in ('html', 'api', 'json'):
content_format = request.query_params.get('content_format', 'html')
content_encoding = request.query_params.get('content_encoding', None)
start_line = request.query_params.get('start_line', 0)
end_line = request.query_params.get('end_line', None)
dark_val = request.query_params.get('dark', '')
dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
content_only = bool(request.accepted_renderer.format in ('api', 'json'))
dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val))
content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line)
# Remove any ANSI escape sequences containing job event data.
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
body = ansiconv.to_html(cgi.escape(content))
context = {
'title': get_view_name(self.__class__),
'body': mark_safe(body),
'dark': dark_bg,
'content_only': content_only,
}
data = render_to_string('api/stdout.html', context).strip()
if request.accepted_renderer.format == 'api':
return Response(mark_safe(data))
if request.accepted_renderer.format == 'json':
if content_encoding == 'base64' and content_format == 'ansi':
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': b64encode(content)})
elif content_format == 'html':
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': body})
return Response(data)
elif request.accepted_renderer.format == 'txt':
return Response(unified_job.result_stdout)
elif request.accepted_renderer.format == 'ansi':
return Response(unified_job.result_stdout_raw)
elif request.accepted_renderer.format in {'txt_download', 'ansi_download'}:
if not os.path.exists(unified_job.result_stdout_file):
write_fd = open(unified_job.result_stdout_file, 'w')
with connection.cursor() as cursor:
try:
tablename, related_name = {
Job: ('main_jobevent', 'job_id'),
AdHocCommand: ('main_adhoccommandevent', 'ad_hoc_command_id'),
}.get(unified_job.__class__, (None, None))
if tablename is None:
# stdout job event reconstruction isn't supported
# for certain job types (such as inventory syncs),
# so just grab the raw stdout from the DB
write_fd.write(unified_job.result_stdout_text)
write_fd.close()
else:
cursor.copy_expert(
"copy (select stdout from {} where {}={} order by start_line) to stdout".format(
tablename,
related_name,
unified_job.id
),
write_fd
)
write_fd.close()
subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(unified_job.result_stdout_file),
shell=True).wait()
except Exception as e:
return Response({"error": _("Error generating stdout download file: {}".format(e))})
try:
content_fd = open(unified_job.result_stdout_file, 'r')
if request.accepted_renderer.format == 'txt_download':
# For txt downloads, filter out ANSI escape sequences.
content_fd = StdoutANSIFilter(content_fd)
suffix = ''
else:
suffix = '_ansi'
response = HttpResponse(FileWrapper(content_fd), content_type='text/plain')
response["Content-Disposition"] = 'attachment; filename="job_%s%s.txt"' % (str(unified_job.id), suffix)
return response
except Exception as e:
return Response({"error": _("Error generating stdout download file: %s") % str(e)}, status=status.HTTP_400_BAD_REQUEST)
else:
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
class ProjectUpdateStdout(UnifiedJobStdout):
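A hypothetical download of a job's standard output using the txt_download path handled above; the host, job id, and credentials are placeholders:

import requests

resp = requests.get(
    'https://awx.example.com/api/v2/jobs/123/stdout/',
    params={'format': 'txt_download'},
    auth=('admin', 'password'),
    stream=True,
)
with open('job_123.txt', 'wb') as f:
    for chunk in resp.iter_content(chunk_size=8192):
        f.write(chunk)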

View File

@@ -1,6 +1,7 @@
# Python
import logging
import urlparse
from collections import OrderedDict
# Django
from django.core.validators import URLValidator
@@ -139,6 +140,8 @@ class KeyValueField(DictField):
ret = super(KeyValueField, self).to_internal_value(data)
for value in data.values():
if not isinstance(value, six.string_types + six.integer_types + (float,)):
if isinstance(value, OrderedDict):
value = dict(value)
self.fail('invalid_child', input=value)
return ret

View File

@@ -120,6 +120,9 @@ class SettingsRegistry(object):
def is_setting_read_only(self, setting):
return bool(self._registry.get(setting, {}).get('read_only', False))
def get_setting_category(self, setting):
return self._registry.get(setting, {}).get('category_slug', None)
def get_setting_field(self, setting, mixin_class=None, for_user=False, **kwargs):
from rest_framework.fields import empty
field_kwargs = {}

View File

@@ -87,8 +87,10 @@ class SettingSingletonSerializer(serializers.Serializer):
if self.instance and not hasattr(self.instance, key):
continue
extra_kwargs = {}
# Make LICENSE read-only here; update via /api/v1/config/ only.
if key == 'LICENSE':
# Make LICENSE and AWX_ISOLATED_KEY_GENERATION read-only here;
# LICENSE is only updated via /api/v1/config/
# AWX_ISOLATED_KEY_GENERATION is only set/unset via the setup playbook
if key in ('LICENSE', 'AWX_ISOLATED_KEY_GENERATION'):
extra_kwargs['read_only'] = True
field = settings_registry.get_setting_field(key, mixin_class=SettingFieldMixin, for_user=bool(category_slug == 'user'), **extra_kwargs)
fields[key] = field

View File

@@ -14,6 +14,7 @@ from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError, OperationalError
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.fields import empty, SkipField
@@ -230,7 +231,8 @@ class SettingsWrapper(UserSettingsHolder):
self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
self.__dict__['registry'] = registry
def _get_supported_settings(self):
@cached_property
def all_supported_settings(self):
return self.registry.get_registered_settings()
def _preload_cache(self):
@@ -382,7 +384,7 @@ class SettingsWrapper(UserSettingsHolder):
def __getattr__(self, name):
value = empty
if name in self._get_supported_settings():
if name in self.all_supported_settings:
with _log_database_error():
value = self._get_local(name)
if value is not empty:
@@ -414,7 +416,7 @@ class SettingsWrapper(UserSettingsHolder):
# post_save handler will delete from cache when changed.
def __setattr__(self, name, value):
if name in self._get_supported_settings():
if name in self.all_supported_settings:
with _log_database_error():
self._set_local(name, value)
else:
@@ -430,7 +432,7 @@ class SettingsWrapper(UserSettingsHolder):
# pre_delete handler will delete from cache.
def __delattr__(self, name):
if name in self._get_supported_settings():
if name in self.all_supported_settings:
with _log_database_error():
self._del_local(name)
else:
@@ -440,7 +442,7 @@ class SettingsWrapper(UserSettingsHolder):
keys = []
with _log_database_error():
for setting in Setting.objects.filter(
key__in=self._get_supported_settings(), user__isnull=True):
key__in=self.all_supported_settings, user__isnull=True):
# Skip returning settings that have been overridden but are
# considered to be "not set".
if setting.value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
@@ -454,7 +456,7 @@ class SettingsWrapper(UserSettingsHolder):
def is_overridden(self, setting):
set_locally = False
if setting in self._get_supported_settings():
if setting in self.all_supported_settings:
with _log_database_error():
set_locally = Setting.objects.filter(key=setting, user__isnull=True).exists()
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
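A minimal sketch (names are illustrative) of the memoization pattern adopted above with Django's cached_property: the property body runs once per instance and the result is then served from the instance's __dict__:

from django.utils.functional import cached_property

class FakeRegistry(object):
    def get_registered_settings(self):
        print('computing registered settings...')
        return ['LICENSE', 'AWX_ISOLATED_KEY_GENERATION']

class Wrapper(object):
    def __init__(self, registry):
        self.registry = registry

    @cached_property
    def all_supported_settings(self):
        return self.registry.get_registered_settings()

w = Wrapper(FakeRegistry())
w.all_supported_settings   # prints once and computes the list
w.all_supported_settings   # served from the instance cache; no recomputation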

View File

@@ -6,10 +6,10 @@ import glob
import os
import shutil
# RedBaron
from redbaron import RedBaron, indent
# AWX
from awx.conf.registry import settings_registry
__all__ = ['comment_assignments']
__all__ = ['comment_assignments', 'conf_to_dict']
def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
@@ -30,6 +30,8 @@ def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix=
def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
from redbaron import RedBaron, indent
if isinstance(assignment_names, basestring):
assignment_names = [assignment_names]
else:
@@ -103,6 +105,13 @@ def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup
return '\n'.join(diff_lines)
def conf_to_dict(obj):
return {
'category': settings_registry.get_setting_category(obj.key),
'name': obj.key,
}
if __name__ == '__main__':
pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py')
diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP'])

View File

@@ -123,6 +123,8 @@ class EventContext(object):
event_data['job_id'] = int(os.getenv('JOB_ID', '0'))
if os.getenv('AD_HOC_COMMAND_ID', ''):
event_data['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
if os.getenv('PROJECT_UPDATE_ID', ''):
event_data['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
event_data.setdefault('pid', os.getpid())
event_data.setdefault('uuid', str(uuid.uuid4()))
event_data.setdefault('created', datetime.datetime.utcnow().isoformat())
@@ -145,7 +147,7 @@ class EventContext(object):
event_data['res'] = {}
event_dict = dict(event=event, event_data=event_data)
for key in event_data.keys():
if key in ('job_id', 'ad_hoc_command_id', 'uuid', 'parent_uuid', 'created',):
if key in ('job_id', 'ad_hoc_command_id', 'project_update_id', 'uuid', 'parent_uuid', 'created',):
event_dict[key] = event_data.pop(key)
elif key in ('verbosity', 'pid'):
event_dict[key] = event_data[key]

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -12,6 +12,7 @@ from django.db.models import Q, Prefetch
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
# Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied, ValidationError
@@ -31,7 +32,7 @@ from awx.conf.license import LicenseForbids, feature_enabled
__all__ = ['get_user_queryset', 'check_user_access', 'check_user_access_with_errors',
'user_accessible_objects', 'consumer_access',
'user_admin_role', 'StateConflict',]
'user_admin_role', 'ActiveJobConflict',]
logger = logging.getLogger('awx.main.access')
@@ -71,9 +72,15 @@ def get_object_from_data(field, Model, data, obj=None):
raise ParseError(_("Bad data found in related field %s." % field))
class StateConflict(ValidationError):
class ActiveJobConflict(ValidationError):
status_code = 409
def __init__(self, active_jobs):
super(ActiveJobConflict, self).__init__({
"conflict": _("Resource is being used by running jobs."),
"active_jobs": active_jobs
})
def register_access(model_class, access_class):
access_registry[model_class] = access_class
@@ -568,8 +575,7 @@ class OrganizationAccess(BaseAccess):
active_jobs.extend([dict(type="inventory_update", id=o.id)
for o in InventoryUpdate.objects.filter(inventory_source__inventory__organization=obj, status__in=ACTIVE_STATES)])
if len(active_jobs) > 0:
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": active_jobs})
raise ActiveJobConflict(active_jobs)
return True
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
@@ -662,8 +668,7 @@ class InventoryAccess(BaseAccess):
active_jobs.extend([dict(type="ad_hoc_command", id=o.id)
for o in AdHocCommand.objects.filter(inventory=obj, status__in=ACTIVE_STATES)])
if len(active_jobs) > 0:
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": active_jobs})
raise ActiveJobConflict(active_jobs)
return True
def can_run_ad_hoc_commands(self, obj):
@@ -788,8 +793,7 @@ class GroupAccess(BaseAccess):
active_jobs.extend([dict(type="inventory_update", id=o.id)
for o in InventoryUpdate.objects.filter(inventory_source__in=obj.inventory_sources.all(), status__in=ACTIVE_STATES)])
if len(active_jobs) > 0:
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": active_jobs})
raise ActiveJobConflict(active_jobs)
return True
def can_start(self, obj, validate_license=True):
@@ -839,8 +843,7 @@ class InventorySourceAccess(BaseAccess):
return False
active_jobs_qs = InventoryUpdate.objects.filter(inventory_source=obj, status__in=ACTIVE_STATES)
if active_jobs_qs.exists():
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": [dict(type="inventory_update", id=o.id) for o in active_jobs_qs.all()]})
raise ActiveJobConflict([dict(type="inventory_update", id=o.id) for o in active_jobs_qs.all()])
return True
@check_superuser
@@ -943,7 +946,8 @@ class CredentialAccess(BaseAccess):
model = Credential
select_related = ('created_by', 'modified_by',)
prefetch_related = ('admin_role', 'use_role', 'read_role',
'admin_role__parents', 'admin_role__members',)
'admin_role__parents', 'admin_role__members',
'credential_type', 'organization')
def filtered_queryset(self):
return self.model.accessible_objects(self.user, 'read_role')
@@ -1090,8 +1094,7 @@ class ProjectAccess(BaseAccess):
active_jobs.extend([dict(type="project_update", id=o.id)
for o in ProjectUpdate.objects.filter(project=obj, status__in=ACTIVE_STATES)])
if len(active_jobs) > 0:
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": active_jobs})
raise ActiveJobConflict(active_jobs)
return True
@check_superuser
@@ -1124,8 +1127,11 @@ class ProjectUpdateAccess(BaseAccess):
def can_start(self, obj, validate_license=True):
# for relaunching
if obj and obj.project:
return self.user in obj.project.update_role
try:
if obj and obj.project:
return self.user in obj.project.update_role
except ObjectDoesNotExist:
pass
return False
@check_superuser
@@ -1142,7 +1148,11 @@ class JobTemplateAccess(BaseAccess):
model = JobTemplate
select_related = ('created_by', 'modified_by', 'inventory', 'project',
'next_schedule',)
prefetch_related = ('credentials__credential_type',)
prefetch_related = (
'instance_groups',
'credentials__credential_type',
Prefetch('labels', queryset=Label.objects.all().order_by('name')),
)
def filtered_queryset(self):
return self.model.accessible_objects(self.user, 'read_role')
@@ -1265,8 +1275,7 @@ class JobTemplateAccess(BaseAccess):
active_jobs = [dict(type="job", id=o.id)
for o in obj.jobs.filter(status__in=ACTIVE_STATES)]
if len(active_jobs) > 0:
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": active_jobs})
raise ActiveJobConflict(active_jobs)
return True
@check_superuser
@@ -1305,7 +1314,7 @@ class JobAccess(BaseAccess):
model = Job
select_related = ('created_by', 'modified_by', 'job_template', 'inventory',
'project', 'job_template',)
'project', 'project_update',)
prefetch_related = (
'unified_job_template',
'instance_group',
@@ -1771,8 +1780,7 @@ class WorkflowJobTemplateAccess(BaseAccess):
active_jobs = [dict(type="workflow_job", id=o.id)
for o in obj.workflow_jobs.filter(status__in=ACTIVE_STATES)]
if len(active_jobs) > 0:
raise StateConflict({"conflict": _("Resource is being used by running jobs"),
"active_jobs": active_jobs})
raise ActiveJobConflict(active_jobs)
return True
@@ -1979,6 +1987,64 @@ class JobEventAccess(BaseAccess):
return False
class ProjectUpdateEventAccess(BaseAccess):
'''
I can see project update event records whenever I can access the project update
'''
model = ProjectUpdateEvent
def filtered_queryset(self):
return self.model.objects.filter(
Q(project_update__in=ProjectUpdate.accessible_pk_qs(self.user, 'read_role')))
def can_add(self, data):
return False
def can_change(self, obj, data):
return False
def can_delete(self, obj):
return False
class InventoryUpdateEventAccess(BaseAccess):
'''
I can see inventory update event records whenever I can access the inventory update
'''
model = InventoryUpdateEvent
def filtered_queryset(self):
return self.model.objects.filter(
Q(inventory_update__in=InventoryUpdate.accessible_pk_qs(self.user, 'read_role')))
def can_add(self, data):
return False
def can_change(self, obj, data):
return False
def can_delete(self, obj):
return False
class SystemJobEventAccess(BaseAccess):
'''
I can only see or manage System Job events if I'm a super user
'''
model = SystemJobEvent
def can_add(self, data):
return False
def can_change(self, obj, data):
return False
def can_delete(self, obj):
return False
class UnifiedJobTemplateAccess(BaseAccess):
'''
I can see a unified job template whenever I can see the same project,

View File

@@ -5,7 +5,7 @@ import re
from django.utils.translation import ugettext_lazy as _
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'satellite6', 'cloudforms')
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'cloudforms', 'tower')
SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
PRIVILEGE_ESCALATION_METHODS = [
('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')),

View File

@@ -1,24 +1,36 @@
class AwxTaskError(Exception):
"""Base exception for errors in unified job runs"""
def __init__(self, task, message=None):
# Copyright (c) 2018 Ansible by Red Hat
# All Rights Reserved.
# Celery does not respect exception type when using a serializer different than pickle;
# and awx uses the json serializer
# https://github.com/celery/celery/issues/3586
class _AwxTaskError():
def build_exception(self, task, message=None):
if message is None:
message = "Execution error running {}".format(task.log_format)
super(AwxTaskError, self).__init__(message)
self.task = task
class TaskCancel(AwxTaskError):
"""Canceled flag caused run_pexpect to kill the job run"""
def __init__(self, task, rc):
super(TaskCancel, self).__init__(
task, message="{} was canceled (rc={})".format(task.log_format, rc))
self.rc = rc
e = Exception(message)
e.task = task
e.is_awx_task_error = True
return e
def TaskCancel(self, task, rc):
"""Canceled flag caused run_pexpect to kill the job run"""
message="{} was canceled (rc={})".format(task.log_format, rc)
e = self.build_exception(task, message)
e.rc = rc
e.awx_task_error_type = "TaskCancel"
return e
def TaskError(self, task, rc):
"""Userspace error (non-zero exit code) in run_pexpect subprocess"""
message = "{} encountered an error (rc={}), please see task stdout for details.".format(task.log_format, rc)
e = self.build_exception(task, message)
e.rc = rc
e.awx_task_error_type = "TaskError"
return e
class TaskError(AwxTaskError):
"""Userspace error (non-zero exit code) in run_pexpect subprocess"""
def __init__(self, task, rc):
super(TaskError, self).__init__(
task, message="%s encountered an error (rc=%s), please see task stdout for details.".format(task.log_format, rc))
self.rc = rc
AwxTaskError = _AwxTaskError()
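A hypothetical use of the factory object defined above (FakeTask is a stand-in for a unified job): the raised object is a plain Exception, so it survives Celery's json serialization, and callers inspect its attributes rather than its class:

class FakeTask(object):
    log_format = 'job 42 (running)'

try:
    raise AwxTaskError.TaskError(FakeTask(), rc=1)
except Exception as exc:
    if getattr(exc, 'is_awx_task_error', False):
        print(exc.awx_task_error_type, exc.rc, exc)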

View File

@@ -1,5 +1,4 @@
import base64
import cStringIO
import codecs
import StringIO
import json
@@ -143,7 +142,7 @@ class IsolatedManager(object):
# if an ssh private key fifo exists, read its contents and delete it
if self.ssh_key_path:
buff = cStringIO.StringIO()
buff = StringIO.StringIO()
with open(self.ssh_key_path, 'r') as fifo:
for line in fifo:
buff.write(line)
@@ -183,7 +182,7 @@ class IsolatedManager(object):
job_timeout=settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
pexpect_timeout=5
)
output = buff.getvalue()
output = buff.getvalue().encode('utf-8')
playbook_logger.info('Isolated job {} dispatch:\n{}'.format(self.instance.id, output))
if status != 'successful':
self.stdout_handle.write(output)
@@ -283,7 +282,7 @@ class IsolatedManager(object):
status = 'failed'
output = ''
rc = None
buff = cStringIO.StringIO()
buff = StringIO.StringIO()
last_check = time.time()
seek = 0
job_timeout = remaining = self.job_timeout
@@ -304,7 +303,7 @@ class IsolatedManager(object):
time.sleep(1)
continue
buff = cStringIO.StringIO()
buff = StringIO.StringIO()
logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
status, rc = IsolatedManager.run_pexpect(
args, self.awx_playbook_path(), self.management_env, buff,
@@ -314,7 +313,7 @@ class IsolatedManager(object):
pexpect_timeout=5,
proot_cmd=self.proot_cmd
)
output = buff.getvalue()
output = buff.getvalue().encode('utf-8')
playbook_logger.info('Isolated job {} check:\n{}'.format(self.instance.id, output))
path = self.path_to('artifacts', 'stdout')
@@ -356,14 +355,14 @@ class IsolatedManager(object):
}
args = self._build_args('clean_isolated.yml', '%s,' % self.host, extra_vars)
logger.debug('Cleaning up job {} on isolated host with `clean_isolated.yml` playbook.'.format(self.instance.id))
buff = cStringIO.StringIO()
buff = StringIO.StringIO()
timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
status, rc = IsolatedManager.run_pexpect(
args, self.awx_playbook_path(), self.management_env, buff,
idle_timeout=timeout, job_timeout=timeout,
pexpect_timeout=5
)
output = buff.getvalue()
output = buff.getvalue().encode('utf-8')
playbook_logger.info('Isolated job {} cleanup:\n{}'.format(self.instance.id, output))
if status != 'successful':
@@ -406,14 +405,14 @@ class IsolatedManager(object):
env = cls._base_management_env()
env['ANSIBLE_STDOUT_CALLBACK'] = 'json'
buff = cStringIO.StringIO()
buff = StringIO.StringIO()
timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
status, rc = IsolatedManager.run_pexpect(
args, cls.awx_playbook_path(), env, buff,
idle_timeout=timeout, job_timeout=timeout,
pexpect_timeout=5
)
output = buff.getvalue()
output = buff.getvalue().encode('utf-8')
buff.close()
try:
@@ -445,7 +444,7 @@ class IsolatedManager(object):
instance.hostname, instance.modified))
@staticmethod
def wrap_stdout_handle(instance, private_data_dir, stdout_handle, event_data_key='job_id'):
def get_stdout_handle(instance, private_data_dir, event_data_key='job_id'):
dispatcher = CallbackQueueDispatcher()
def job_event_callback(event_data):
@@ -463,7 +462,7 @@ class IsolatedManager(object):
event_data.get('event', ''), event_data['uuid'], instance.id, event_data))
dispatcher.dispatch(event_data)
return OutputEventFilter(stdout_handle, job_event_callback)
return OutputEventFilter(job_event_callback)
def run(self, instance, host, private_data_dir, proot_temp_dir):
"""


@@ -99,7 +99,6 @@ def run_pexpect(args, cwd, env, logfile,
password_patterns = expect_passwords.keys()
password_values = expect_passwords.values()
logfile_pos = logfile.tell()
child = pexpect.spawn(
args[0], args[1:], cwd=cwd, env=env, ignore_sighup=True,
encoding='utf-8', echo=False,
@@ -116,8 +115,6 @@ def run_pexpect(args, cwd, env, logfile,
password = password_values[result_id]
if password is not None:
child.sendline(password)
if logfile_pos != logfile.tell():
logfile_pos = logfile.tell()
last_stdout_update = time.time()
if cancelled_callback:
try:


@@ -6,6 +6,7 @@ import copy
import json
import re
import six
import urllib
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError
@@ -352,6 +353,7 @@ class SmartFilterField(models.TextField):
# https://docs.python.org/2/library/stdtypes.html#truth-value-testing
if not value:
return None
value = urllib.unquote(value)
try:
SmartFilter().query_from_string(value)
except RuntimeError, e:
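As a quick illustration of the decode step added above, urllib.unquote turns a URL-encoded host_filter back into the raw query string before it is validated; the encoded value below is made up:

import urllib

encoded = 'name__icontains%3Dweb%20and%20groups__name%3Dprod'
print(urllib.unquote(encoded))  # name__icontains=web and groups__name=prod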


@@ -173,6 +173,7 @@ class AnsibleInventoryLoader(object):
def load(self):
base_args = self.get_base_args()
logger.info('Reading Ansible inventory source: %s', self.source)
data = self.command_to_json(base_args + ['--list'])
# TODO: remove after we run custom scripts through ansible-inventory
@@ -225,6 +226,7 @@ def load_inventory_source(source, group_filter_re=None,
'''
# Sanity check: We sanitize these module names for our API but Ansible proper doesn't follow
# good naming conventions
source = source.replace('rhv.py', 'ovirt4.py')
source = source.replace('satellite6.py', 'foreman.py')
source = source.replace('vmware.py', 'vmware_inventory.py')
if not os.path.exists(source):
@@ -600,27 +602,20 @@ class Command(BaseCommand):
def _update_inventory(self):
'''
Update/overwrite variables from "all" group. If importing from a
cloud source attached to a specific group, variables will be set on
the base group, otherwise they will be set on the whole inventory.
Update inventory variables from "all" group.
'''
# FIXME: figure out how "all" variables are handled in the new inventory source system
# TODO: We disable variable overwrite here in case user-defined inventory variables get
# mangled. But we still need to figure out a better way of processing multiple inventory
# update variables mixing with each other.
all_obj = self.inventory
all_name = 'inventory'
db_variables = all_obj.variables_dict
if self.overwrite_vars:
db_variables = self.all_group.variables
else:
db_variables.update(self.all_group.variables)
db_variables.update(self.all_group.variables)
if db_variables != all_obj.variables_dict:
all_obj.variables = json.dumps(db_variables)
all_obj.save(update_fields=['variables'])
if self.overwrite_vars:
logger.info('%s variables replaced from "all" group', all_name.capitalize())
else:
logger.info('%s variables updated from "all" group', all_name.capitalize())
logger.info('Inventory variables updated from "all" group')
else:
logger.info('%s variables unmodified', all_name.capitalize())
logger.info('Inventory variables unmodified')
def _create_update_groups(self):
'''


@@ -12,11 +12,17 @@ from awx.main.models import (
UnifiedJob,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob
)
from awx.main.consumers import emit_channel_notification
from awx.api.serializers import (
JobEventWebSocketSerializer,
AdHocCommandEventWebSocketSerializer,
ProjectUpdateEventWebSocketSerializer,
InventoryUpdateEventWebSocketSerializer,
SystemJobEventWebSocketSerializer
)
@@ -60,7 +66,16 @@ class ReplayJobEvents():
return self.replay_elapsed().total_seconds() - (self.recording_elapsed(created).total_seconds() * (1.0 / speed))
def get_job_events(self, job):
job_events = job.job_events.order_by('created')
if type(job) is Job:
job_events = job.job_events.order_by('created')
elif type(job) is AdHocCommand:
job_events = job.ad_hoc_command_events.order_by('created')
elif type(job) is ProjectUpdate:
job_events = job.project_update_events.order_by('created')
elif type(job) is InventoryUpdate:
job_events = job.inventory_update_events.order_by('created')
elif type(job) is SystemJob:
job_events = job.system_job_events.order_by('created')
if job_events.count() == 0:
raise RuntimeError("No events for job id {}".format(job.id))
return job_events
@@ -70,6 +85,12 @@ class ReplayJobEvents():
return JobEventWebSocketSerializer
elif type(job) is AdHocCommand:
return AdHocCommandEventWebSocketSerializer
elif type(job) is ProjectUpdate:
return ProjectUpdateEventWebSocketSerializer
elif type(job) is InventoryUpdate:
return InventoryUpdateEventWebSocketSerializer
elif type(job) is SystemJob:
return SystemJobEventWebSocketSerializer
else:
raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
sys.exit(1)


@@ -3,13 +3,14 @@
# Python
import logging
import os
import signal
import time
from uuid import UUID
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from Queue import Empty as QueueEmpty
from Queue import Full as QueueFull
import os
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
@@ -18,11 +19,13 @@ from kombu.mixins import ConsumerMixin
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from django.db import DatabaseError
from django.db import DatabaseError, OperationalError
from django.db.utils import InterfaceError, InternalError
from django.core.cache import cache as django_cache
# AWX
from awx.main.models import * # noqa
from awx.main.consumers import emit_channel_notification
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -39,6 +42,9 @@ class WorkerSignalHandler:
class CallbackBrokerWorker(ConsumerMixin):
MAX_RETRIES = 2
def __init__(self, connection, use_workers=True):
self.connection = connection
self.worker_queues = []
@@ -123,8 +129,17 @@ class CallbackBrokerWorker(ConsumerMixin):
logger.error("Exception on worker thread, restarting: " + str(e))
continue
try:
if 'job_id' not in body and 'ad_hoc_command_id' not in body:
raise Exception('Payload does not have a job_id or ad_hoc_command_id')
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
if not any([key in body for key in event_map]):
raise Exception('Payload does not have a job identifier')
if settings.DEBUG:
from pygments import highlight
from pygments.lexers import PythonLexer
@@ -132,14 +147,51 @@ class CallbackBrokerWorker(ConsumerMixin):
from pprint import pformat
logger.info('Body: {}'.format(
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
))
try:
if 'job_id' in body:
JobEvent.create_from_data(**body)
elif 'ad_hoc_command_id' in body:
AdHocCommandEvent.create_from_data(**body)
except DatabaseError as e:
logger.error('Database Error Saving Job Event: {}'.format(e))
)[:1024 * 4])
def _save_event_data():
for key, cls in event_map.items():
if key in body:
cls.create_from_data(**body)
job_identifier = 'unknown job'
for key in event_map.keys():
if key in body:
job_identifier = body[key]
break
if body.get('event') == 'EOF':
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier)
)
continue
retries = 0
while retries <= self.MAX_RETRIES:
try:
_save_event_data()
break
except (OperationalError, InterfaceError, InternalError) as e:
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
os.kill(os.getppid(), signal.SIGINT)
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError as e:
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
break
except Exception as exc:
import traceback
tb = traceback.format_exc()
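To make the routing above concrete, each callback payload is a dict whose identifier key selects the event model that persists it. A standalone sketch with a made-up payload; model classes are represented by name only so the snippet runs on its own:

# Hypothetical payload, for illustration; real payloads arrive via the callback queue.
body = {
    'job_id': 42,
    'event': 'runner_on_ok',
    'counter': 7,
    'stdout': 'ok: [localhost]',
}

event_map = {
    'job_id': 'JobEvent',
    'ad_hoc_command_id': 'AdHocCommandEvent',
    'project_update_id': 'ProjectUpdateEvent',
    'inventory_update_id': 'InventoryUpdateEvent',
    'system_job_id': 'SystemJobEvent',
}

for key, cls_name in event_map.items():
    if key in body:
        # The worker calls the matching model's create_from_data(**body);
        # here we only report which model would handle the payload.
        print('{} -> {}'.format(body[key], cls_name))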


@@ -0,0 +1,50 @@
import os
import shutil
import subprocess
import sys
import tempfile
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from awx.main.expect import run
class Command(BaseCommand):
"""Tests SSH connectivity between a controller and target isolated node"""
help = 'Tests SSH connectivity between a controller and target isolated node'
option_list = BaseCommand.option_list + (
make_option('--hostname', dest='hostname', type='string',
help='Hostname of an isolated node'),
)
def handle(self, *args, **options):
hostname = options.get('hostname')
if not hostname:
raise CommandError("--hostname is a required argument")
try:
path = tempfile.mkdtemp(prefix='awx_isolated_ssh', dir=settings.AWX_PROOT_BASE_PATH)
args = [
'ansible', 'all', '-i', '{},'.format(hostname), '-u',
settings.AWX_ISOLATED_USERNAME, '-T5', '-m', 'shell',
'-a', 'hostname', '-vvv'
]
if all([
getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)
]):
ssh_key_path = os.path.join(path, '.isolated')
ssh_auth_sock = os.path.join(path, 'ssh_auth.sock')
run.open_fifo_write(ssh_key_path, settings.AWX_ISOLATED_PRIVATE_KEY)
args = run.wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
try:
print ' '.join(args)
subprocess.check_call(args)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
finally:
shutil.rmtree(path)


@@ -21,11 +21,15 @@ class HostManager(models.Manager):
"""Custom manager class for Hosts model."""
def active_count(self):
"""Return count of active, unique hosts for licensing."""
try:
return self.order_by('name').distinct('name').count()
except NotImplementedError: # For unit tests only, SQLite doesn't support distinct('name')
return len(set(self.values_list('name', flat=True)))
"""Return count of active, unique hosts for licensing.
Construction of query involves:
- remove any ordering specified in model's Meta
- Exclude hosts sourced from another Tower
- Restrict the query to only return the name column
- Only consider results that are unique
- Return the count of this query
"""
return self.order_by().exclude(inventory_sources__source='tower').values('name').distinct().count()
def get_queryset(self):
"""When the parent instance of the host query set has a `kind=smart` and a `host_filter`


@@ -5,6 +5,10 @@ import logging
import threading
import uuid
import six
import time
import cProfile
import pstats
import os
from django.conf import settings
from django.contrib.auth.models import User
@@ -25,6 +29,40 @@ from awx.conf import fields, register
logger = logging.getLogger('awx.main.middleware')
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
perf_logger = logging.getLogger('awx.analytics.performance')
class TimingMiddleware(threading.local):
dest = '/var/lib/awx/profile'
def process_request(self, request):
self.start_time = time.time()
if settings.AWX_REQUEST_PROFILE:
self.prof = cProfile.Profile()
self.prof.enable()
def process_response(self, request, response):
if not hasattr(self, 'start_time'): # some tools may not invoke process_request
return response
total_time = time.time() - self.start_time
response['X-API-Total-Time'] = '%0.3fs' % total_time
if settings.AWX_REQUEST_PROFILE:
self.prof.disable()
cprofile_file = self.save_profile_file(request)
response['cprofile_file'] = cprofile_file
perf_logger.info('api response times', extra=dict(python_objects=dict(request=request, response=response)))
return response
def save_profile_file(self, request):
if not os.path.isdir(self.dest):
os.makedirs(self.dest)
filename = '%.3fs-%s' % (pstats.Stats(self.prof).total_tt, uuid.uuid4())
filepath = os.path.join(self.dest, filename)
with open(filepath, 'w') as f:
f.write('%s %s\n' % (request.method, request.get_full_path()))
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
return filepath
class ActivityStreamMiddleware(threading.local):
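For reference, the timing middleware above annotates every response it handles with an X-API-Total-Time header, and adds a cprofile_file header when AWX_REQUEST_PROFILE is enabled. A minimal client-side sketch; the URL, the ping endpoint, and the use of the requests library are assumptions about the local environment:

import requests  # assumed to be available on the client side

# Hypothetical local AWX endpoint, for illustration only.
resp = requests.get('http://localhost:8013/api/v2/ping/')
print(resp.headers.get('X-API-Total-Time'))  # e.g. '0.042s'
print(resp.headers.get('cprofile_file'))     # server-side path, only when profiling is on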


@@ -8,14 +8,9 @@ from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import awx.main.fields
import jsonfield.fields
def update_dashed_host_variables(apps, schema_editor):
Host = apps.get_model('main', 'Host')
for host in Host.objects.filter(variables='---'):
host.variables = ''
host.save()
import _squashed
from _squashed_30 import SQUASHED_30
class Migration(migrations.Migration):
@@ -27,13 +22,7 @@ class Migration(migrations.Migration):
(b'main', '0025_v300_update_rbac_parents'),
(b'main', '0026_v300_credential_unique'),
(b'main', '0027_v300_team_migrations'),
(b'main', '0028_v300_org_team_cascade'),
(b'main', '0029_v302_add_ask_skip_tags'),
(b'main', '0030_v302_job_survey_passwords'),
(b'main', '0031_v302_migrate_survey_passwords'),
(b'main', '0032_v302_credential_permissions_update'),
(b'main', '0033_v303_v245_host_variable_fix'),]
(b'main', '0028_v300_org_team_cascade')] + _squashed.replaces(SQUASHED_30, applied=True)
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
@@ -130,27 +119,4 @@ class Migration(migrations.Migration):
field=models.ForeignKey(related_name='teams', to='main.Organization'),
preserve_default=False,
),
# add ask skip tags
migrations.AddField(
model_name='jobtemplate',
name='ask_skip_tags_on_launch',
field=models.BooleanField(default=False),
),
# job survey passwords
migrations.AddField(
model_name='job',
name='survey_passwords',
field=jsonfield.fields.JSONField(default={}, editable=False, blank=True),
),
# RBAC credential permission updates
migrations.AlterField(
model_name='credential',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator', b'organization.admin_role'], to='main.Role', null=b'True'),
),
migrations.AlterField(
model_name='credential',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'),
),
]
] + _squashed.operations(SQUASHED_30, applied=True)


@@ -8,6 +8,9 @@ import django.db.models.deletion
import awx.main.models.workflow
import awx.main.fields
import _squashed
from _squashed_30 import SQUASHED_30
class Migration(migrations.Migration):
@@ -15,11 +18,11 @@ class Migration(migrations.Migration):
('main', '0003_squashed_v300_v303_updates'),
]
replaces = [
replaces = _squashed.replaces(SQUASHED_30) + [
(b'main', '0034_v310_release'),
]
operations = [
operations = _squashed.operations(SQUASHED_30) + [
# Create ChannelGroup table
migrations.CreateModel(
name='ChannelGroup',


@@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db import migrations, models
import _squashed
from _squashed_31 import SQUASHED_31
class Migration(migrations.Migration):
@@ -10,28 +12,5 @@ class Migration(migrations.Migration):
('main', '0004_squashed_v310_release'),
]
replaces = [
(b'main', '0035_v310_remove_tower_settings'),
]
operations = [
# Remove Tower settings, these settings are now in separate awx.conf app.
migrations.RemoveField(
model_name='towersettings',
name='user',
),
migrations.DeleteModel(
name='TowerSettings',
),
migrations.AlterField(
model_name='project',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
migrations.AlterField(
model_name='projectupdate',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
]
replaces = _squashed.replaces(SQUASHED_31)
operations = _squashed.operations(SQUASHED_31)


@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005_squashed_v310_v313_updates'),
]
replaces = [
(b'main', '0036_v311_insights'),
]
operations = [
migrations.AlterField(
model_name='project',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
migrations.AlterField(
model_name='projectupdate',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
]


@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005a_squashed_v310_v313_updates'),
]
replaces = [
(b'main', '0037_v313_instance_version'),
]
operations = [
# Remove Tower settings, these settings are now in separate awx.conf app.
migrations.AddField(
model_name='instance',
name='version',
field=models.CharField(max_length=24, blank=True),
),
]


@@ -6,7 +6,13 @@ from __future__ import unicode_literals
from psycopg2.extensions import AsIs
# Django
from django.db import migrations, models
from django.db import (
connection,
migrations,
models,
OperationalError,
ProgrammingError
)
from django.conf import settings
import taggit.managers
@@ -15,12 +21,24 @@ import awx.main.fields
from awx.main.models import Host
def replaces():
squashed = ['0005a_squashed_v310_v313_updates', '0005b_squashed_v310_v313_updates']
try:
recorder = migrations.recorder.MigrationRecorder(connection)
result = recorder.migration_qs.filter(app='main').filter(name__in=squashed).all()
return [('main', m.name) for m in result]
except (OperationalError, ProgrammingError):
return []
class Migration(migrations.Migration):
dependencies = [
('main', '0005b_squashed_v310_v313_updates'),
('main', '0005_squashed_v310_v313_updates'),
]
replaces = replaces()
operations = [
# Release UJT unique_together constraint
migrations.AlterUniqueTogether(


@@ -6,6 +6,7 @@ from __future__ import unicode_literals
from django.db import migrations, models
# AWX
from awx.main.migrations import ActivityStreamDisabledMigration
from awx.main.migrations import _inventory_source as invsrc
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _reencrypt as reencrypt
@@ -15,7 +16,7 @@ from awx.main.migrations import _azure_credentials as azurecreds
import awx.main.fields
class Migration(migrations.Migration):
class Migration(ActivityStreamDisabledMigration):
dependencies = [
('main', '0006_v320_release'),


@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import awx.main.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0008_v320_drop_v1_credential_fields'),
]
operations = [
migrations.AddField(
model_name='activitystream',
name='setting',
field=awx.main.fields.JSONField(default=dict, blank=True),
),
]


@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# AWX
from awx.main.migrations import _credentialtypes as credentialtypes
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0009_v322_add_setting_field_for_activity_stream'),
]
operations = [
migrations.RunPython(credentialtypes.create_rhv_tower_credtype),
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'rhv', 'Red Hat Virtualization'), (b'tower', 'Ansible Tower'), (b'custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'rhv', 'Red Hat Virtualization'), (b'tower', 'Ansible Tower'), (b'custom', 'Custom Script')]),
),
]


@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from awx.main.migrations import ActivityStreamDisabledMigration
from awx.main.migrations import _reencrypt as reencrypt
from awx.main.migrations import _migration_utils as migration_utils
class Migration(ActivityStreamDisabledMigration):
dependencies = [
('main', '0010_v322_add_ovirt4_tower_inventory'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(reencrypt.encrypt_survey_passwords),
]


@@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# AWX
from awx.main.migrations import _credentialtypes as credentialtypes
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0011_v322_encrypt_survey_passwords'),
]
operations = [
migrations.RunPython(credentialtypes.add_azure_cloud_environment_field),
]


@@ -11,7 +11,7 @@ from awx.main.migrations._multi_cred import migrate_to_multi_cred
class Migration(migrations.Migration):
dependencies = [
('main', '0008_v320_drop_v1_credential_fields'),
('main', '0012_v322_update_cred_types'),
]
operations = [


@@ -13,7 +13,7 @@ from awx.main.migrations._scan_jobs import remove_scan_type_nodes
class Migration(migrations.Migration):
dependencies = [
('main', '0009_v330_multi_credential'),
('main', '0013_v330_multi_credential'),
]
operations = [


@@ -13,7 +13,7 @@ from awx.main.migrations._reencrypt import blank_old_start_args
class Migration(migrations.Migration):
dependencies = [
('main', '0010_saved_launchtime_configs'),
('main', '0014_v330_saved_launchtime_configs'),
]
operations = [


@@ -10,7 +10,7 @@ import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0011_blank_start_args'),
('main', '0015_v330_blank_start_args'),
]
operations = [


@@ -8,7 +8,7 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0012_non_blank_workflow'),
('main', '0016_v330_non_blank_workflow'),
]
operations = [


@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-14 15:13
from __future__ import unicode_literals
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0017_v330_move_deprecated_stdout'),
]
operations = [
migrations.CreateModel(
name='InventoryUpdateEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('event_data', awx.main.fields.JSONField(blank=True, default={})),
('uuid', models.CharField(default=b'', editable=False, max_length=1024)),
('counter', models.PositiveIntegerField(default=0, editable=False)),
('stdout', models.TextField(default=b'', editable=False)),
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
('start_line', models.PositiveIntegerField(default=0, editable=False)),
('end_line', models.PositiveIntegerField(default=0, editable=False)),
('inventory_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.InventoryUpdate')),
],
options={
'ordering': ('-pk',),
},
),
migrations.CreateModel(
name='ProjectUpdateEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('event', models.CharField(choices=[(b'runner_on_failed', 'Host Failed'), (b'runner_on_ok', 'Host OK'), (b'runner_on_error', 'Host Failure'), (b'runner_on_skipped', 'Host Skipped'), (b'runner_on_unreachable', 'Host Unreachable'), (b'runner_on_no_hosts', 'No Hosts Remaining'), (b'runner_on_async_poll', 'Host Polling'), (b'runner_on_async_ok', 'Host Async OK'), (b'runner_on_async_failed', 'Host Async Failure'), (b'runner_item_on_ok', 'Item OK'), (b'runner_item_on_failed', 'Item Failed'), (b'runner_item_on_skipped', 'Item Skipped'), (b'runner_retry', 'Host Retry'), (b'runner_on_file_diff', 'File Difference'), (b'playbook_on_start', 'Playbook Started'), (b'playbook_on_notify', 'Running Handlers'), (b'playbook_on_include', 'Including File'), (b'playbook_on_no_hosts_matched', 'No Hosts Matched'), (b'playbook_on_no_hosts_remaining', 'No Hosts Remaining'), (b'playbook_on_task_start', 'Task Started'), (b'playbook_on_vars_prompt', 'Variables Prompted'), (b'playbook_on_setup', 'Gathering Facts'), (b'playbook_on_import_for_host', 'internal: on Import for Host'), (b'playbook_on_not_import_for_host', 'internal: on Not Import for Host'), (b'playbook_on_play_start', 'Play Started'), (b'playbook_on_stats', 'Playbook Complete'), (b'debug', 'Debug'), (b'verbose', 'Verbose'), (b'deprecated', 'Deprecated'), (b'warning', 'Warning'), (b'system_warning', 'System Warning'), (b'error', 'Error')], max_length=100)),
('event_data', awx.main.fields.JSONField(blank=True, default={})),
('failed', models.BooleanField(default=False, editable=False)),
('changed', models.BooleanField(default=False, editable=False)),
('uuid', models.CharField(default=b'', editable=False, max_length=1024)),
('playbook', models.CharField(default=b'', editable=False, max_length=1024)),
('play', models.CharField(default=b'', editable=False, max_length=1024)),
('role', models.CharField(default=b'', editable=False, max_length=1024)),
('task', models.CharField(default=b'', editable=False, max_length=1024)),
('counter', models.PositiveIntegerField(default=0, editable=False)),
('stdout', models.TextField(default=b'', editable=False)),
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
('start_line', models.PositiveIntegerField(default=0, editable=False)),
('end_line', models.PositiveIntegerField(default=0, editable=False)),
('project_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.ProjectUpdate')),
],
options={
'ordering': ('pk',),
},
),
migrations.CreateModel(
name='SystemJobEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('event_data', awx.main.fields.JSONField(blank=True, default={})),
('uuid', models.CharField(default=b'', editable=False, max_length=1024)),
('counter', models.PositiveIntegerField(default=0, editable=False)),
('stdout', models.TextField(default=b'', editable=False)),
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
('start_line', models.PositiveIntegerField(default=0, editable=False)),
('end_line', models.PositiveIntegerField(default=0, editable=False)),
('system_job', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.SystemJob')),
],
options={
'ordering': ('-pk',),
},
),
migrations.RemoveField(
model_name='unifiedjob',
name='result_stdout_file',
),
]


@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-09 21:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0018_v330_add_additional_stdout_events'),
]
operations = [
migrations.AddField(
model_name='jobtemplate',
name='custom_virtualenv',
field=models.CharField(blank=True, default=None, max_length=100, null=True),
),
migrations.AddField(
model_name='organization',
name='custom_virtualenv',
field=models.CharField(blank=True, default=None, max_length=100, null=True),
),
migrations.AddField(
model_name='project',
name='custom_virtualenv',
field=models.CharField(blank=True, default=None, max_length=100, null=True),
),
]


@@ -1,2 +1,12 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
from django.db.migrations import Migration
class ActivityStreamDisabledMigration(Migration):
def apply(self, project_state, schema_editor, collect_sql=False):
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
return Migration.apply(self, project_state, schema_editor, collect_sql)


@@ -178,3 +178,14 @@ def add_vault_id_field(apps, schema_editor):
vault_credtype = CredentialType.objects.get(kind='vault')
vault_credtype.inputs = CredentialType.defaults.get('vault')().inputs
vault_credtype.save()
def create_rhv_tower_credtype(apps, schema_editor):
CredentialType.setup_tower_managed_defaults()
def add_azure_cloud_environment_field(apps, schema_editor):
azure_rm_credtype = CredentialType.objects.get(kind='cloud',
name='Microsoft Azure Resource Manager')
azure_rm_credtype.inputs = CredentialType.defaults.get('azure_rm')().inputs
azure_rm_credtype.save()


@@ -1,6 +1,7 @@
import logging
from django.db.models import Q
import six
logger = logging.getLogger('awx.main.migrations')
@@ -38,8 +39,10 @@ def rename_inventory_sources(apps, schema_editor):
Q(deprecated_group__inventory__organization=org)).distinct().all()):
inventory = invsrc.deprecated_group.inventory if invsrc.deprecated_group else invsrc.inventory
name = '{0} - {1} - {2}'.format(invsrc.name, inventory.name, i)
logger.debug("Renaming InventorySource({0}) {1} -> {2}".format(invsrc.pk, invsrc.name, name))
name = six.text_type('{0} - {1} - {2}').format(invsrc.name, inventory.name, i)
logger.debug(six.text_type("Renaming InventorySource({0}) {1} -> {2}").format(
invsrc.pk, invsrc.name, name
))
invsrc.name = name
invsrc.save()


@@ -1,5 +1,7 @@
import logging
import json
from django.utils.translation import ugettext_lazy as _
import six
from awx.conf.migrations._reencrypt import (
decrypt_field,
@@ -65,7 +67,6 @@ def _credentials(apps):
credential.save()
def _unified_jobs(apps):
UnifiedJob = apps.get_model('main', 'UnifiedJob')
for uj in UnifiedJob.objects.all():
@@ -91,3 +92,53 @@ def blank_old_start_args(apps, schema_editor):
logger.debug('Blanking job args for %s', uj.pk)
uj.start_args = ''
uj.save()
def encrypt_survey_passwords(apps, schema_editor):
_encrypt_survey_passwords(
apps.get_model('main', 'Job'),
apps.get_model('main', 'JobTemplate'),
apps.get_model('main', 'WorkflowJob'),
apps.get_model('main', 'WorkflowJobTemplate'),
)
def _encrypt_survey_passwords(Job, JobTemplate, WorkflowJob, WorkflowJobTemplate):
from awx.main.utils.encryption import encrypt_value
for _type in (JobTemplate, WorkflowJobTemplate):
for jt in _type.objects.exclude(survey_spec={}):
changed = False
if jt.survey_spec.get('spec', []):
for field in jt.survey_spec['spec']:
if field.get('type') == 'password' and field.get('default', ''):
default = field['default']
if default.startswith('$encrypted$'):
if default == '$encrypted$':
# If you have a survey_spec with a literal
# '$encrypted$' as the default, you have
# encountered a known bug in awx/Tower
# https://github.com/ansible/ansible-tower/issues/7800
logger.error(
'{}.pk={} survey_spec has ambiguous $encrypted$ default for {}, needs attention...'.format(jt, jt.pk, field['variable'])
)
field['default'] = ''
changed = True
continue
field['default'] = encrypt_value(field['default'], pk=None)
changed = True
if changed:
jt.save()
for _type in (Job, WorkflowJob):
for job in _type.objects.defer('result_stdout_text').exclude(survey_passwords={}).iterator():
changed = False
for key in job.survey_passwords:
if key in job.extra_vars:
extra_vars = json.loads(job.extra_vars)
if not extra_vars.get(key, '') or extra_vars[key].startswith('$encrypted$'):
continue
extra_vars[key] = encrypt_value(extra_vars[key], pk=None)
job.extra_vars = json.dumps(extra_vars)
changed = True
if changed:
job.save()


@@ -0,0 +1,63 @@
from itertools import chain
from django.db import (
connection,
migrations,
OperationalError,
ProgrammingError,
)
def squash_data(squashed):
'''Returns a tuple of the squashed_keys and the key position to begin
processing replace and operation lists'''
cm = current_migration()
squashed_keys = sorted(squashed.keys())
if cm is None:
return squashed_keys, 0
try:
key_index = squashed_keys.index(cm.name) + 1
except ValueError:
key_index = 0
return squashed_keys, key_index
def current_migration(exclude_squashed=True):
'''Get the latest non-squashed migration'''
try:
recorder = migrations.recorder.MigrationRecorder(connection)
migration_qs = recorder.migration_qs.filter(app='main')
if exclude_squashed:
migration_qs = migration_qs.exclude(name__contains='squashed')
return migration_qs.latest('id')
except (recorder.Migration.DoesNotExist, OperationalError, ProgrammingError):
return None
def replaces(squashed, applied=False):
'''Build a list of replacement migrations based on the most recent non-squashed migration
and the provided list of SQUASHED migrations. If the most recent non-squashed migration
is not present anywhere in the SQUASHED dictionary, assume they have all been applied.
If applied is True, this will return a list of all the migrations that have already
been applied.
'''
squashed_keys, key_index = squash_data(squashed)
if applied:
return [(b'main', key) for key in squashed_keys[:key_index]]
return [(b'main', key) for key in squashed_keys[key_index:]]
def operations(squashed, applied=False):
'''Build a list of migration operations based on the most recent non-squashed migration
and the provided list of squashed migrations. If the most recent non-squashed migration
is not present anywhere in the `squashed` dictionary, assume they have all been applied.
If applied is True, this will return a list of all the operations that have
already been applied.
'''
squashed_keys, key_index = squash_data(squashed)
op_keys = squashed_keys[:key_index] if applied else squashed_keys[key_index:]
ops = [squashed[op_key] for op_key in op_keys]
return [op for op in chain.from_iterable(ops)]
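A small worked illustration of the slicing these helpers perform; the squashed dict, operation placeholders, and migration names are invented, and the real helpers look up the latest applied migration in the database instead of hard-coding it:

squashed = {
    '0029_add_field': ['<AddField op>'],
    '0030_example': ['<RunPython op>'],
    '0031_alter_field': ['<AlterField op>'],
}
current = '0030_example'  # pretend this is the latest applied non-squashed migration

keys = sorted(squashed.keys())
idx = keys.index(current) + 1
print(keys[:idx])  # already applied: what replaces(squashed, applied=True) reports
print(keys[idx:])  # still pending:   what replaces(squashed) / operations(squashed) cover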


@@ -0,0 +1,60 @@
from django.db import (
migrations,
models,
)
import jsonfield.fields
import awx.main.fields
from awx.main.migrations import _save_password_keys
from awx.main.migrations import _migration_utils as migration_utils
def update_dashed_host_variables(apps, schema_editor):
Host = apps.get_model('main', 'Host')
for host in Host.objects.filter(variables='---'):
host.variables = ''
host.save()
SQUASHED_30 = {
'0029_v302_add_ask_skip_tags': [
# add ask skip tags
migrations.AddField(
model_name='jobtemplate',
name='ask_skip_tags_on_launch',
field=models.BooleanField(default=False),
),
],
'0030_v302_job_survey_passwords': [
# job survey passwords
migrations.AddField(
model_name='job',
name='survey_passwords',
field=jsonfield.fields.JSONField(default={}, editable=False, blank=True),
),
],
'0031_v302_migrate_survey_passwords': [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(_save_password_keys.migrate_survey_passwords),
],
'0032_v302_credential_permissions_update': [
# RBAC credential permission updates
migrations.AlterField(
model_name='credential',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator', b'organization.admin_role'], to='main.Role', null=b'True'),
),
migrations.AlterField(
model_name='credential',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'),
),
],
'0033_v303_v245_host_variable_fix': [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(update_dashed_host_variables),
],
}
__all__ = ['SQUASHED_30']


@@ -0,0 +1,50 @@
from django.db import (
migrations,
models,
)
SQUASHED_31 = {
'0035_v310_remove_tower_settings': [
# Remove Tower settings, these settings are now in separate awx.conf app.
migrations.RemoveField(
model_name='towersettings',
name='user',
),
migrations.DeleteModel(
name='TowerSettings',
),
migrations.AlterField(
model_name='project',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
migrations.AlterField(
model_name='projectupdate',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
],
'0036_v311_insights': [
migrations.AlterField(
model_name='project',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
migrations.AlterField(
model_name='projectupdate',
name='scm_type',
field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'),
),
],
'0037_v313_instance_version': [
# Remove Tower settings, these settings are now in separate awx.conf app.
migrations.AddField(
model_name='instance',
name='version',
field=models.CharField(max_length=24, blank=True),
),
],
}
__all__ = ['SQUASHED_31']


@@ -12,6 +12,7 @@ from awx.main.models.credential import * # noqa
from awx.main.models.projects import * # noqa
from awx.main.models.inventory import * # noqa
from awx.main.models.jobs import * # noqa
from awx.main.models.events import * # noqa
from awx.main.models.ad_hoc_commands import * # noqa
from awx.main.models.schedules import * # noqa
from awx.main.models.activity_stream import * # noqa


@@ -3,6 +3,7 @@
# Tower
from awx.api.versioning import reverse
from awx.main.fields import JSONField
# Django
from django.db import models
@@ -66,6 +67,8 @@ class ActivityStream(models.Model):
role = models.ManyToManyField("Role", blank=True)
instance_group = models.ManyToManyField("InstanceGroup", blank=True)
setting = JSONField(blank=True)
def get_absolute_url(self, request=None):
return reverse('api:activity_stream_detail', kwargs={'pk': self.pk}, request=request)


@@ -2,29 +2,26 @@
# All Rights Reserved.
# Python
import datetime
import logging
from urlparse import urljoin
# Django
from django.conf import settings
from django.db import models
from django.utils.dateparse import parse_datetime
from django.utils.text import Truncator
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.events import AdHocCommandEvent
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
from awx.main.fields import JSONField
logger = logging.getLogger('awx.main.models.ad_hoc_commands')
__all__ = ['AdHocCommand', 'AdHocCommandEvent']
__all__ = ['AdHocCommand']
class AdHocCommand(UnifiedJob, JobNotificationMixin):
@@ -127,6 +124,10 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
raise ValidationError(_('No argument passed to %s module.') % self.module_name)
return module_args
@property
def event_class(self):
return AdHocCommandEvent
@property
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
@@ -224,169 +225,3 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
def get_notification_friendly_name(self):
return "AdHoc Command"
class AdHocCommandEvent(CreatedModifiedModel):
'''
An event/message logged from the ad hoc event callback for each host.
'''
EVENT_TYPES = [
# (event, verbose name, failed)
('runner_on_failed', _('Host Failed'), True),
('runner_on_ok', _('Host OK'), False),
('runner_on_unreachable', _('Host Unreachable'), True),
# Tower won't see no_hosts (check is done earlier without callback).
# ('runner_on_no_hosts', _('No Hosts Matched'), False),
# Tower will see skipped (when running in check mode for a module that
# does not support check mode).
('runner_on_skipped', _('Host Skipped'), False),
# Tower does not support async for ad hoc commands (not used in v2).
# ('runner_on_async_poll', _('Host Polling'), False),
# ('runner_on_async_ok', _('Host Async OK'), False),
# ('runner_on_async_failed', _('Host Async Failure'), True),
# Tower does not yet support --diff mode.
# ('runner_on_file_diff', _('File Difference'), False),
# Additional event types for captured stdout not directly related to
# runner events.
('debug', _('Debug'), False),
('verbose', _('Verbose'), False),
('deprecated', _('Deprecated'), False),
('warning', _('Warning'), False),
('system_warning', _('System Warning'), False),
('error', _('Error'), False),
]
FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]]
EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES]
class Meta:
app_label = 'main'
ordering = ('-pk',)
index_together = [
('ad_hoc_command', 'event'),
('ad_hoc_command', 'uuid'),
('ad_hoc_command', 'start_line'),
('ad_hoc_command', 'end_line'),
]
ad_hoc_command = models.ForeignKey(
'AdHocCommand',
related_name='ad_hoc_command_events',
on_delete=models.CASCADE,
editable=False,
)
host = models.ForeignKey(
'Host',
related_name='ad_hoc_command_events',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
)
host_name = models.CharField(
max_length=1024,
default='',
editable=False,
)
event = models.CharField(
max_length=100,
choices=EVENT_CHOICES,
)
event_data = JSONField(
blank=True,
default={},
)
failed = models.BooleanField(
default=False,
editable=False,
)
changed = models.BooleanField(
default=False,
editable=False,
)
uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
counter = models.PositiveIntegerField(
default=0,
editable=False,
)
stdout = models.TextField(
default='',
editable=False,
)
verbosity = models.PositiveIntegerField(
default=0,
editable=False,
)
start_line = models.PositiveIntegerField(
default=0,
editable=False,
)
end_line = models.PositiveIntegerField(
default=0,
editable=False,
)
def get_absolute_url(self, request=None):
return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
def __unicode__(self):
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
res = self.event_data.get('res', None)
if self.event in self.FAILED_EVENTS:
if not self.event_data.get('ignore_errors', False):
self.failed = True
if 'failed' not in update_fields:
update_fields.append('failed')
if isinstance(res, dict) and res.get('changed', False):
self.changed = True
if 'changed' not in update_fields:
update_fields.append('changed')
self.host_name = self.event_data.get('host', '').strip()
if 'host_name' not in update_fields:
update_fields.append('host_name')
if not self.host_id and self.host_name:
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
try:
host_id = host_qs.only('id').values_list('id', flat=True)
if host_id.exists():
self.host_id = host_id[0]
if 'host_id' not in update_fields:
update_fields.append('host_id')
except (IndexError, AttributeError):
pass
super(AdHocCommandEvent, self).save(*args, **kwargs)
@classmethod
def create_from_data(self, **kwargs):
# Convert the datetime for the ad hoc command event's creation
# appropriately, and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(kwargs['created'], datetime.datetime):
kwargs['created'] = parse_datetime(kwargs['created'])
if not kwargs['created'].tzinfo:
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
except (KeyError, ValueError):
kwargs.pop('created', None)
# Sanity check: Don't honor keys that we don't recognize.
valid_keys = {'ad_hoc_command_id', 'event', 'event_data', 'created',
'counter', 'uuid', 'stdout', 'start_line', 'end_line',
'verbosity'}
for key in kwargs.keys():
if key not in valid_keys:
kwargs.pop(key)
return AdHocCommandEvent.objects.create(**kwargs)


@@ -50,7 +50,7 @@ PROJECT_UPDATE_JOB_TYPE_CHOICES = [
(PERM_INVENTORY_CHECK, _('Check')),
]
CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'custom', 'satellite6', 'cloudforms', 'scm',]
CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'rhv', 'custom', 'satellite6', 'cloudforms', 'scm', 'tower',]
VERBOSITY_CHOICES = [
(0, '0 (Normal)'),
@@ -288,7 +288,10 @@ class PrimordialModel(CreatedModifiedModel):
continue
if not (self.pk and self.pk == obj.pk):
errors.append(
'%s with this (%s) combination already exists.' % (model.__name__, ', '.join(ut))
'%s with this (%s) combination already exists.' % (
model.__name__,
', '.join(set(ut) - {'polymorphic_ctype'})
)
)
if errors:
raise ValidationError(errors)


@@ -3,8 +3,10 @@
from collections import OrderedDict
import functools
import json
import logging
import operator
import os
import re
import stat
import tempfile
@@ -32,8 +34,33 @@ from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.utils import encrypt_field
from . import injectors as builtin_injectors
__all__ = ['Credential', 'CredentialType', 'V1Credential']
__all__ = ['Credential', 'CredentialType', 'V1Credential', 'build_safe_env']
logger = logging.getLogger('awx.main.models.credential')
HIDDEN_PASSWORD = '**********'
def build_safe_env(env):
'''
Build environment dictionary, hiding potentially sensitive information
such as passwords or keys.
'''
hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I)
urlpass_re = re.compile(r'^.*?://[^:]+:(.*?)@.*?$')
safe_env = dict(env)
for k, v in safe_env.items():
if k == 'AWS_ACCESS_KEY_ID':
continue
elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'):
continue
elif hidden_re.search(k):
safe_env[k] = HIDDEN_PASSWORD
elif type(v) == str and urlpass_re.match(v):
safe_env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v)
return safe_env
class V1Credential(object):
@@ -59,7 +86,9 @@ class V1Credential(object):
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('insights', 'Insights'),
('tower', 'Ansible Tower'),
]
FIELDS = {
'kind': models.CharField(
@@ -413,8 +442,8 @@ class CredentialType(CommonModelNameNotUnique):
ENV_BLACKLIST = set((
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'TOWER_HOST',
'AWX_HOST', 'MAX_EVENT_RES', 'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
))
@@ -498,6 +527,11 @@ class CredentialType(CommonModelNameNotUnique):
for default in cls.defaults.values():
default_ = default()
if persisted:
if CredentialType.objects.filter(name=default_.name, kind=default_.kind).count():
continue
logger.debug(_(
"adding %s credential type" % default_.name
))
default_.save()
@classmethod
@@ -552,6 +586,11 @@ class CredentialType(CommonModelNameNotUnique):
files)
"""
if not self.injectors:
if self.managed_by_tower and credential.kind in dir(builtin_injectors):
injected_env = {}
getattr(builtin_injectors, credential.kind)(credential, injected_env)
env.update(injected_env)
safe_env.update(build_safe_env(injected_env))
return
class TowerNamespace:
@@ -1009,6 +1048,12 @@ def azure_rm(cls):
'id': 'tenant',
'label': 'Tenant ID',
'type': 'string'
}, {
'id': 'cloud_environment',
'label': 'Azure Cloud Environment',
'type': 'string',
'help_text': ('Environment variable AZURE_CLOUD_ENVIRONMENT when'
' using Azure GovCloud or Azure stack.')
}],
'required': ['subscription'],
}
@@ -1041,3 +1086,89 @@ def insights(cls):
},
},
)
@CredentialType.default
def rhv(cls):
return cls(
kind='cloud',
name='Red Hat Virtualization',
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': 'Host (Authentication URL)',
'type': 'string',
'help_text': ('The host to authenticate with.')
}, {
'id': 'username',
'label': 'Username',
'type': 'string'
}, {
'id': 'password',
'label': 'Password',
'type': 'string',
'secret': True,
}, {
'id': 'ca_file',
'label': 'CA File',
'type': 'string',
'help_text': ('Absolute file path to the CA file to use (optional)')
}],
'required': ['host', 'username', 'password'],
},
injectors={
# The duplication here is intentional; the ovirt4 inventory plugin
# writes a .ini file for authentication, while the ansible modules for
# ovirt4 use a separate authentication process that supports
# environment variables; by injecting both, we support both
'file': {
'template': '\n'.join([
'[ovirt]',
'ovirt_url={{host}}',
'ovirt_username={{username}}',
'ovirt_password={{password}}',
'{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}'])
},
'env': {
'OVIRT_INI_PATH': '{{tower.filename}}',
'OVIRT_URL': '{{host}}',
'OVIRT_USERNAME': '{{username}}',
'OVIRT_PASSWORD': '{{password}}'
}
},
)
@CredentialType.default
def tower(cls):
return cls(
kind='cloud',
name='Ansible Tower',
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': 'Ansible Tower Hostname',
'type': 'string',
'help_text': ('The Ansible Tower base URL to authenticate with.')
}, {
'id': 'username',
'label': 'Username',
'type': 'string'
}, {
'id': 'password',
'label': 'Password',
'type': 'string',
'secret': True,
}],
'required': ['host', 'username', 'password'],
},
injectors={
'env': {
'TOWER_HOST': '{{host}}',
'TOWER_USERNAME': '{{username}}',
'TOWER_PASSWORD': '{{password}}',
}
},
)
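To illustrate what the rhv injectors above produce, the 'file' template renders an ovirt.ini-style blob while the 'env' block maps the same fields onto environment variables (OVIRT_INI_PATH is filled in with the written file's path at runtime, so it is omitted here). A quick render with made-up credential values; jinja2 is already imported elsewhere in the codebase:

from jinja2 import Template

# Hypothetical credential inputs, for illustration only.
inputs = {
    'host': 'https://rhv.example.com/ovirt-engine/api',
    'username': 'admin@internal',
    'password': 'not-a-real-password',
    'ca_file': '',
}

file_template = '\n'.join([
    '[ovirt]',
    'ovirt_url={{host}}',
    'ovirt_username={{username}}',
    'ovirt_password={{password}}',
    '{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}',
])
print(Template(file_template).render(**inputs))

env = dict((key, Template(value).render(**inputs)) for key, value in {
    'OVIRT_URL': '{{host}}',
    'OVIRT_USERNAME': '{{username}}',
    'OVIRT_PASSWORD': '{{password}}',
}.items())
print(env)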


@@ -0,0 +1,35 @@
from awx.main.utils import decrypt_field
from django.conf import settings
def aws(cred, env):
env['AWS_ACCESS_KEY_ID'] = cred.username
env['AWS_SECRET_ACCESS_KEY'] = decrypt_field(cred, 'password')
if len(cred.security_token) > 0:
env['AWS_SECURITY_TOKEN'] = decrypt_field(cred, 'security_token')
def gce(cred, env):
env['GCE_EMAIL'] = cred.username
env['GCE_PROJECT'] = cred.project
def azure_rm(cred, env):
if len(cred.client) and len(cred.tenant):
env['AZURE_CLIENT_ID'] = cred.client
env['AZURE_SECRET'] = decrypt_field(cred, 'secret')
env['AZURE_TENANT'] = cred.tenant
env['AZURE_SUBSCRIPTION_ID'] = cred.subscription
else:
env['AZURE_SUBSCRIPTION_ID'] = cred.subscription
env['AZURE_AD_USER'] = cred.username
env['AZURE_PASSWORD'] = decrypt_field(cred, 'password')
if cred.inputs.get('cloud_environment', None):
env['AZURE_CLOUD_ENVIRONMENT'] = cred.inputs['cloud_environment']
def vmware(cred, env):
env['VMWARE_USER'] = cred.username
env['VMWARE_PASSWORD'] = decrypt_field(cred, 'password')
env['VMWARE_HOST'] = cred.host
env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS)

awx/main/models/events.py (new file, 774 lines)

@@ -0,0 +1,774 @@
import datetime
import logging
from django.conf import settings
from django.db import models
from django.utils.dateparse import parse_datetime
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from awx.api.versioning import reverse
from awx.main.fields import JSONField
from awx.main.models.base import CreatedModifiedModel
from awx.main.utils import ignore_inventory_computed_fields
analytics_logger = logging.getLogger('awx.analytics.job_events')
__all__ = ['JobEvent', 'ProjectUpdateEvent', 'AdHocCommandEvent',
'InventoryUpdateEvent', 'SystemJobEvent']
class BasePlaybookEvent(CreatedModifiedModel):
'''
An event/message logged from a playbook callback for each host.
'''
VALID_KEYS = [
'event', 'event_data', 'playbook', 'play', 'role', 'task', 'created',
'counter', 'uuid', 'stdout', 'parent_uuid', 'start_line', 'end_line',
'verbosity'
]
class Meta:
abstract = True
# Playbook events will be structured to form the following hierarchy:
# - playbook_on_start (once for each playbook file)
# - playbook_on_vars_prompt (for each play, but before play starts, we
# currently don't handle responding to these prompts)
# - playbook_on_play_start (once for each play)
# - playbook_on_import_for_host (not logged, not used for v2)
# - playbook_on_not_import_for_host (not logged, not used for v2)
# - playbook_on_no_hosts_matched
# - playbook_on_no_hosts_remaining
# - playbook_on_include (only v2 - only used for handlers?)
# - playbook_on_setup (not used for v2)
# - runner_on*
# - playbook_on_task_start (once for each task within a play)
# - runner_on_failed
# - runner_on_ok
# - runner_on_error (not used for v2)
# - runner_on_skipped
# - runner_on_unreachable
# - runner_on_no_hosts (not used for v2)
# - runner_on_async_poll (not used for v2)
# - runner_on_async_ok (not used for v2)
# - runner_on_async_failed (not used for v2)
# - runner_on_file_diff (v2 event is v2_on_file_diff)
# - runner_item_on_ok (v2 only)
# - runner_item_on_failed (v2 only)
# - runner_item_on_skipped (v2 only)
# - runner_retry (v2 only)
# - playbook_on_notify (once for each notification from the play, not used for v2)
# - playbook_on_stats
EVENT_TYPES = [
# (level, event, verbose name, failed)
(3, 'runner_on_failed', _('Host Failed'), True),
(3, 'runner_on_ok', _('Host OK'), False),
(3, 'runner_on_error', _('Host Failure'), True),
(3, 'runner_on_skipped', _('Host Skipped'), False),
(3, 'runner_on_unreachable', _('Host Unreachable'), True),
(3, 'runner_on_no_hosts', _('No Hosts Remaining'), False),
(3, 'runner_on_async_poll', _('Host Polling'), False),
(3, 'runner_on_async_ok', _('Host Async OK'), False),
(3, 'runner_on_async_failed', _('Host Async Failure'), True),
(3, 'runner_item_on_ok', _('Item OK'), False),
(3, 'runner_item_on_failed', _('Item Failed'), True),
(3, 'runner_item_on_skipped', _('Item Skipped'), False),
(3, 'runner_retry', _('Host Retry'), False),
# Tower does not yet support --diff mode.
(3, 'runner_on_file_diff', _('File Difference'), False),
(0, 'playbook_on_start', _('Playbook Started'), False),
(2, 'playbook_on_notify', _('Running Handlers'), False),
(2, 'playbook_on_include', _('Including File'), False),
(2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False),
(2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False),
(2, 'playbook_on_task_start', _('Task Started'), False),
# Tower does not yet support vars_prompt (and will probably hang :)
(1, 'playbook_on_vars_prompt', _('Variables Prompted'), False),
(2, 'playbook_on_setup', _('Gathering Facts'), False),
(2, 'playbook_on_import_for_host', _('internal: on Import for Host'), False),
(2, 'playbook_on_not_import_for_host', _('internal: on Not Import for Host'), False),
(1, 'playbook_on_play_start', _('Play Started'), False),
(1, 'playbook_on_stats', _('Playbook Complete'), False),
# Additional event types for captured stdout not directly related to
# playbook or runner events.
(0, 'debug', _('Debug'), False),
(0, 'verbose', _('Verbose'), False),
(0, 'deprecated', _('Deprecated'), False),
(0, 'warning', _('Warning'), False),
(0, 'system_warning', _('System Warning'), False),
(0, 'error', _('Error'), True),
]
FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])
event = models.CharField(
max_length=100,
choices=EVENT_CHOICES,
)
event_data = JSONField(
blank=True,
default={},
)
failed = models.BooleanField(
default=False,
editable=False,
)
changed = models.BooleanField(
default=False,
editable=False,
)
uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
playbook = models.CharField(
max_length=1024,
default='',
editable=False,
)
play = models.CharField(
max_length=1024,
default='',
editable=False,
)
role = models.CharField(
max_length=1024,
default='',
editable=False,
)
task = models.CharField(
max_length=1024,
default='',
editable=False,
)
counter = models.PositiveIntegerField(
default=0,
editable=False,
)
stdout = models.TextField(
default='',
editable=False,
)
verbosity = models.PositiveIntegerField(
default=0,
editable=False,
)
start_line = models.PositiveIntegerField(
default=0,
editable=False,
)
end_line = models.PositiveIntegerField(
default=0,
editable=False,
)
@property
def event_level(self):
return self.LEVEL_FOR_EVENT.get(self.event, 0)
def get_event_display2(self):
msg = self.get_event_display()
if self.event == 'playbook_on_play_start':
if self.play:
msg = "%s (%s)" % (msg, self.play)
elif self.event == 'playbook_on_task_start':
if self.task:
if self.event_data.get('is_conditional', False):
msg = 'Handler Notified'
if self.role:
msg = '%s (%s | %s)' % (msg, self.role, self.task)
else:
msg = "%s (%s)" % (msg, self.task)
# Change display for runner events triggered by async polling. Some of
# these events may not show in most cases, due to filtering them out
# of the job event queryset returned to the user.
res = self.event_data.get('res', {})
# Fix for existing records from before we added the workaround on save
# to change async_ok to async_failed.
if self.event == 'runner_on_async_ok':
try:
if res.get('failed', False) or res.get('rc', 0) != 0:
msg = 'Host Async Failed'
except (AttributeError, TypeError):
pass
# Runner events with ansible_job_id are part of async starting/polling.
if self.event in ('runner_on_ok', 'runner_on_failed'):
try:
module_name = res['invocation']['module_name']
job_id = res['ansible_job_id']
except (TypeError, KeyError, AttributeError):
module_name = None
job_id = None
if module_name and job_id:
if module_name == 'async_status':
msg = 'Host Async Checking'
else:
msg = 'Host Async Started'
# Handle both 1.2 on_failed and 1.3+ on_async_failed events when an
# async task times out.
if self.event in ('runner_on_failed', 'runner_on_async_failed'):
try:
if res['msg'] == 'timed out':
msg = 'Host Async Timeout'
except (TypeError, KeyError, AttributeError):
pass
return msg
def _update_from_event_data(self):
# Update event model fields from event data.
updated_fields = set()
event_data = self.event_data
res = event_data.get('res', None)
if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False):
self.failed = True
updated_fields.add('failed')
if isinstance(res, dict):
if res.get('changed', False):
self.changed = True
updated_fields.add('changed')
# If we're not in verbose mode, wipe out any module arguments.
invocation = res.get('invocation', None)
if isinstance(invocation, dict) and self.job_verbosity == 0 and 'module_args' in invocation:
event_data['res']['invocation']['module_args'] = ''
self.event_data = event_data
updated_fields.add('event_data')
if self.event == 'playbook_on_stats':
try:
failures_dict = event_data.get('failures', {})
dark_dict = event_data.get('dark', {})
self.failed = bool(sum(failures_dict.values()) +
sum(dark_dict.values()))
updated_fields.add('failed')
changed_dict = event_data.get('changed', {})
self.changed = bool(sum(changed_dict.values()))
updated_fields.add('changed')
except (AttributeError, TypeError):
pass
for field in ('playbook', 'play', 'task', 'role'):
value = force_text(event_data.get(field, '')).strip()
if value != getattr(self, field):
setattr(self, field, value)
updated_fields.add(field)
return updated_fields
@classmethod
def create_from_data(self, **kwargs):
pk = None
for key in ('job_id', 'project_update_id'):
if key in kwargs:
pk = key
if pk is None:
# payload must contain either a job_id or a project_update_id
return
# Convert the datetime for the job event's creation appropriately,
# and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(kwargs['created'], datetime.datetime):
kwargs['created'] = parse_datetime(kwargs['created'])
if not kwargs['created'].tzinfo:
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
except (KeyError, ValueError):
kwargs.pop('created', None)
# Sanity check: Don't honor keys that we don't recognize.
for key in kwargs.keys():
if key not in self.VALID_KEYS:
kwargs.pop(key)
event_data = kwargs.get('event_data', None)
artifact_dict = None
if event_data:
artifact_dict = event_data.pop('artifact_data', None)
job_event = self.objects.create(**kwargs)
analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=job_event)))
# Save artifact data to parent job (if provided).
if artifact_dict:
if event_data and isinstance(event_data, dict):
# Note: Core has not added support for marking artifacts as
# sensitive yet. Going forward, core will not use
# _ansible_no_log to denote sensitive set_stats calls.
# Instead, they plan to add a flag outside of the traditional
# no_log mechanism. no_log will not work for this feature,
# in core, because sensitive data is scrubbed before sending
# data to the callback. The playbook_on_stats is the callback
# in which the set_stats data is used.
# Again, the sensitive artifact feature has not yet landed in
# core. The line below is how we would mark the artifacts payload as
# sensitive:
# artifact_dict['_ansible_no_log'] = True
#
parent_job = self.objects.filter(pk=pk).first()
if hasattr(parent_job, 'artifacts') and parent_job.artifacts != artifact_dict:
parent_job.artifacts = artifact_dict
parent_job.save(update_fields=['artifacts'])
return job_event
@property
def job_verbosity(self):
return 0
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
# Update model fields and related objects unless we're only updating
# failed/changed flags triggered from a child event.
from_parent_update = kwargs.pop('from_parent_update', False)
if not from_parent_update:
# Update model fields from event data.
updated_fields = self._update_from_event_data()
for field in updated_fields:
if field not in update_fields:
update_fields.append(field)
# Update host related field from host_name.
if hasattr(self, 'job') and not self.host_id and self.host_name:
host_qs = self.job.inventory.hosts.filter(name=self.host_name)
host_id = host_qs.only('id').values_list('id', flat=True).first()
if host_id != self.host_id:
self.host_id = host_id
if 'host_id' not in update_fields:
update_fields.append('host_id')
super(BasePlaybookEvent, self).save(*args, **kwargs)
# Update related objects after this event is saved.
if hasattr(self, 'job') and not from_parent_update:
if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
self._update_hosts()
if self.event == 'playbook_on_stats':
self._update_parents_failed_and_changed()
hostnames = self._hostnames()
self._update_host_summary_from_stats(hostnames)
self.job.inventory.update_computed_fields()
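The save() override above accumulates the names of fields it changed and forwards them via update_fields so only those columns are written. A rough standalone sketch of that accumulation pattern (no Django involved; the dict-based record is made up):

def apply_event(record, event_data, update_fields=None):
    # Collect the names of attributes we actually changed, then return the
    # combined update_fields list, mimicking the save() flow above.
    update_fields = list(update_fields or [])
    changed = set()
    if event_data.get('failed'):
        record['failed'] = True
        changed.add('failed')
    if event_data.get('changed'):
        record['changed'] = True
        changed.add('changed')
    for field in changed:
        if field not in update_fields:
            update_fields.append(field)
    return update_fields

print(apply_event({'failed': False, 'changed': False}, {'changed': True}))  # ['changed']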
class JobEvent(BasePlaybookEvent):
'''
An event/message logged from the callback when running a job.
'''
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id']
class Meta:
app_label = 'main'
ordering = ('pk',)
index_together = [
('job', 'event'),
('job', 'uuid'),
('job', 'start_line'),
('job', 'end_line'),
('job', 'parent_uuid'),
]
job = models.ForeignKey(
'Job',
related_name='job_events',
on_delete=models.CASCADE,
editable=False,
)
host = models.ForeignKey(
'Host',
related_name='job_events_as_primary_host',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
)
host_name = models.CharField(
max_length=1024,
default='',
editable=False,
)
hosts = models.ManyToManyField(
'Host',
related_name='job_events',
editable=False,
)
parent = models.ForeignKey(
'self',
related_name='children',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
)
parent_uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
def get_absolute_url(self, request=None):
return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
def __unicode__(self):
return u'%s @ %s' % (self.get_event_display2(), self.created.isoformat())
def _update_from_event_data(self):
# Update job event hostname
updated_fields = super(JobEvent, self)._update_from_event_data()
value = force_text(self.event_data.get('host', '')).strip()
if value != getattr(self, 'host_name'):
setattr(self, 'host_name', value)
updated_fields.add('host_name')
return updated_fields
def _update_parents_failed_and_changed(self):
# Update parent events to reflect failed, changed
runner_events = JobEvent.objects.filter(job=self.job,
event__startswith='runner_on')
changed_events = runner_events.filter(changed=True)
failed_events = runner_events.filter(failed=True)
JobEvent.objects.filter(uuid__in=changed_events.values_list('parent_uuid', flat=True)).update(changed=True)
JobEvent.objects.filter(uuid__in=failed_events.values_list('parent_uuid', flat=True)).update(failed=True)
def _update_hosts(self, extra_host_pks=None):
# Update job event hosts m2m from host_name, propagate to parent events.
extra_host_pks = set(extra_host_pks or [])
hostnames = set()
if self.host_name:
hostnames.add(self.host_name)
if self.event == 'playbook_on_stats':
try:
for v in self.event_data.values():
hostnames.update(v.keys())
except AttributeError: # In case event_data or v isn't a dict.
pass
qs = self.job.inventory.hosts.all()
qs = qs.filter(models.Q(name__in=hostnames) | models.Q(pk__in=extra_host_pks))
qs = qs.exclude(job_events__pk=self.id).only('id')
for host in qs:
self.hosts.add(host)
if self.parent_uuid:
parent = JobEvent.objects.filter(uuid=self.parent_uuid)
if parent.exists():
parent = parent[0]
parent._update_hosts(qs.values_list('id', flat=True))
def _hostnames(self):
hostnames = set()
try:
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
hostnames.update(self.event_data.get(stat, {}).keys())
except AttributeError: # In case event_data or v isn't a dict.
pass
return hostnames
def _update_host_summary_from_stats(self, hostnames):
with ignore_inventory_computed_fields():
qs = self.job.inventory.hosts.filter(name__in=hostnames)
job = self.job
for host in hostnames:
host_stats = {}
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
try:
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
except AttributeError: # in case event_data[stat] isn't a dict.
pass
if qs.filter(name=host).exists():
host_actual = qs.get(name=host)
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
else:
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
if not created:
update_fields = []
for stat, value in host_stats.items():
if getattr(host_summary, stat) != value:
setattr(host_summary, stat, value)
update_fields.append(stat)
if update_fields:
host_summary.save(update_fields=update_fields)
@property
def job_verbosity(self):
return self.job.verbosity
class ProjectUpdateEvent(BasePlaybookEvent):
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id']
class Meta:
app_label = 'main'
ordering = ('pk',)
index_together = [
('project_update', 'event'),
('project_update', 'uuid'),
('project_update', 'start_line'),
('project_update', 'end_line'),
]
project_update = models.ForeignKey(
'ProjectUpdate',
related_name='project_update_events',
on_delete=models.CASCADE,
editable=False,
)
@property
def host_name(self):
return 'localhost'
class BaseCommandEvent(CreatedModifiedModel):
'''
An event/message logged from a command for each host.
'''
VALID_KEYS = [
'event_data', 'created', 'counter', 'uuid', 'stdout', 'start_line',
'end_line', 'verbosity'
]
class Meta:
abstract = True
event_data = JSONField(
blank=True,
default={},
)
uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
counter = models.PositiveIntegerField(
default=0,
editable=False,
)
stdout = models.TextField(
default='',
editable=False,
)
verbosity = models.PositiveIntegerField(
default=0,
editable=False,
)
start_line = models.PositiveIntegerField(
default=0,
editable=False,
)
end_line = models.PositiveIntegerField(
default=0,
editable=False,
)
def __unicode__(self):
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
@classmethod
def create_from_data(self, **kwargs):
# Convert the datetime for the event's creation
# appropriately, and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(kwargs['created'], datetime.datetime):
kwargs['created'] = parse_datetime(kwargs['created'])
if not kwargs['created'].tzinfo:
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
except (KeyError, ValueError):
kwargs.pop('created', None)
# Sanity check: Don't honor keys that we don't recognize.
for key in kwargs.keys():
if key not in self.VALID_KEYS:
kwargs.pop(key)
return self.objects.create(**kwargs)
class AdHocCommandEvent(BaseCommandEvent):
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event']
class Meta:
app_label = 'main'
ordering = ('-pk',)
index_together = [
('ad_hoc_command', 'event'),
('ad_hoc_command', 'uuid'),
('ad_hoc_command', 'start_line'),
('ad_hoc_command', 'end_line'),
]
EVENT_TYPES = [
# (event, verbose name, failed)
('runner_on_failed', _('Host Failed'), True),
('runner_on_ok', _('Host OK'), False),
('runner_on_unreachable', _('Host Unreachable'), True),
# Tower won't see no_hosts (check is done earlier without callback).
# ('runner_on_no_hosts', _('No Hosts Matched'), False),
# Tower will see skipped (when running in check mode for a module that
# does not support check mode).
('runner_on_skipped', _('Host Skipped'), False),
# Tower does not support async for ad hoc commands (not used in v2).
# ('runner_on_async_poll', _('Host Polling'), False),
# ('runner_on_async_ok', _('Host Async OK'), False),
# ('runner_on_async_failed', _('Host Async Failure'), True),
# Tower does not yet support --diff mode.
# ('runner_on_file_diff', _('File Difference'), False),
# Additional event types for captured stdout not directly related to
# runner events.
('debug', _('Debug'), False),
('verbose', _('Verbose'), False),
('deprecated', _('Deprecated'), False),
('warning', _('Warning'), False),
('system_warning', _('System Warning'), False),
('error', _('Error'), False),
]
FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]]
EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES]
event = models.CharField(
max_length=100,
choices=EVENT_CHOICES,
)
failed = models.BooleanField(
default=False,
editable=False,
)
changed = models.BooleanField(
default=False,
editable=False,
)
ad_hoc_command = models.ForeignKey(
'AdHocCommand',
related_name='ad_hoc_command_events',
on_delete=models.CASCADE,
editable=False,
)
host = models.ForeignKey(
'Host',
related_name='ad_hoc_command_events',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
)
host_name = models.CharField(
max_length=1024,
default='',
editable=False,
)
def get_absolute_url(self, request=None):
return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
res = self.event_data.get('res', None)
if self.event in self.FAILED_EVENTS:
if not self.event_data.get('ignore_errors', False):
self.failed = True
if 'failed' not in update_fields:
update_fields.append('failed')
if isinstance(res, dict) and res.get('changed', False):
self.changed = True
if 'changed' not in update_fields:
update_fields.append('changed')
self.host_name = self.event_data.get('host', '').strip()
if 'host_name' not in update_fields:
update_fields.append('host_name')
if not self.host_id and self.host_name:
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
try:
host_id = host_qs.only('id').values_list('id', flat=True)
if host_id.exists():
self.host_id = host_id[0]
if 'host_id' not in update_fields:
update_fields.append('host_id')
except (IndexError, AttributeError):
pass
super(AdHocCommandEvent, self).save(*args, **kwargs)
class InventoryUpdateEvent(BaseCommandEvent):
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id']
class Meta:
app_label = 'main'
ordering = ('-pk',)
index_together = [
('inventory_update', 'uuid'),
('inventory_update', 'start_line'),
('inventory_update', 'end_line'),
]
inventory_update = models.ForeignKey(
'InventoryUpdate',
related_name='inventory_update_events',
on_delete=models.CASCADE,
editable=False,
)
@property
def event(self):
return 'verbose'
@property
def failed(self):
return False
@property
def changed(self):
return False
class SystemJobEvent(BaseCommandEvent):
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id']
class Meta:
app_label = 'main'
ordering = ('-pk',)
index_together = [
('system_job', 'uuid'),
('system_job', 'start_line'),
('system_job', 'end_line'),
]
system_job = models.ForeignKey(
'SystemJob',
related_name='system_job_events',
on_delete=models.CASCADE,
editable=False,
)
@property
def event(self):
return 'verbose'
@property
def failed(self):
return False
@property
def changed(self):
return False
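Both create_from_data() implementations above normalize the incoming created timestamp: accept either a datetime or a string, force a UTC tzinfo when it is naive, and drop the key entirely if parsing fails so the current time is used instead. A stdlib-only sketch of that normalization (the ISO format string is an assumption; AWX uses Django's parse_datetime):

import datetime

def normalize_created(kwargs):
    # Parse strings, attach UTC when naive, and discard unparseable values
    # so the caller can fall back to "now", as the model code above does.
    try:
        value = kwargs['created']
        if not isinstance(value, datetime.datetime):
            value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
        if value.tzinfo is None:
            value = value.replace(tzinfo=datetime.timezone.utc)
        kwargs['created'] = value
    except (KeyError, ValueError):
        kwargs.pop('created', None)
    return kwargs

print(normalize_created({'created': '2018-02-01T12:00:00.000000'})['created'].tzinfo)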

View File

@@ -8,6 +8,7 @@ import re
import copy
from urlparse import urljoin
import os.path
import six
# Django
from django.conf import settings
@@ -29,6 +30,7 @@ from awx.main.fields import (
)
from awx.main.managers import HostManager
from awx.main.models.base import * # noqa
from awx.main.models.events import InventoryUpdateEvent
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.mixins import ResourceMixin, TaskManagerInventoryUpdateMixin
from awx.main.models.notifications import (
@@ -209,7 +211,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
group_children.add(from_group_id)
return group_children_map
def get_script_data(self, hostvars=False, show_all=False):
def get_script_data(self, hostvars=False, towervars=False, show_all=False):
if show_all:
hosts_q = dict()
else:
@@ -271,6 +273,10 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
data['_meta'].setdefault('hostvars', dict())
for host in self.hosts.filter(**hosts_q):
data['_meta']['hostvars'][host.name] = host.variables_dict
if towervars:
tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(),
remote_tower_id=host.id)
data['_meta']['hostvars'][host.name].update(tower_dict)
return data
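When towervars=True, the hunk above adds two extra host variables to each host's entry under _meta.hostvars. A simplified illustration of the resulting shape (host name, id, and the group layout are made up):

example_script_data = {
    'all': {'hosts': ['web1.example.com']},
    '_meta': {
        'hostvars': {
            'web1.example.com': {
                'ansible_host': '10.0.0.5',       # existing host variable
                'remote_tower_enabled': 'true',   # str(host.enabled).lower()
                'remote_tower_id': 42,            # host.id
            },
        },
    },
}
print(example_script_data['_meta']['hostvars']['web1.example.com']['remote_tower_id'])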
@@ -399,8 +405,13 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
active_hosts = self.hosts
failed_hosts = active_hosts.filter(has_active_failures=True)
active_groups = self.groups
if self.kind == 'smart':
active_groups = active_groups.none()
failed_groups = active_groups.filter(has_active_failures=True)
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
if self.kind == 'smart':
active_inventory_sources = self.inventory_sources.none()
else:
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
failed_inventory_sources = active_inventory_sources.filter(last_job_failed=True)
computed_fields = {
'has_active_failures': bool(failed_hosts.count()),
@@ -417,6 +428,8 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
for field, value in computed_fields.items():
if getattr(iobj, field) != value:
setattr(iobj, field, value)
# update in-memory object
setattr(self, field, value)
else:
computed_fields.pop(field)
if computed_fields:
@@ -464,6 +477,10 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin):
def save(self, *args, **kwargs):
self._update_host_smart_inventory_memeberships()
super(Inventory, self).save(*args, **kwargs)
if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
connection.vendor != 'sqlite'):
# Minimal update of host_count for smart inventory host filter changes
self.update_computed_fields(update_groups=False, update_hosts=False)
def delete(self, *args, **kwargs):
self._update_host_smart_inventory_memeberships()
@@ -937,6 +954,8 @@ class InventorySourceOptions(BaseModel):
('satellite6', _('Red Hat Satellite 6')),
('cloudforms', _('Red Hat CloudForms')),
('openstack', _('OpenStack')),
('rhv', _('Red Hat Virtualization')),
('tower', _('Ansible Tower')),
('custom', _('Custom Script')),
]
@@ -1185,6 +1204,16 @@ class InventorySourceOptions(BaseModel):
"""Red Hat CloudForms region choices (not implemented)"""
return [('all', 'All')]
@classmethod
def get_rhv_region_choices(self):
"""No region supprt"""
return [('all', 'All')]
@classmethod
def get_tower_region_choices(self):
"""No region supprt"""
return [('all', 'All')]
def clean_credential(self):
if not self.source:
return None
@@ -1256,7 +1285,7 @@ class InventorySourceOptions(BaseModel):
raise ValidationError(_('Invalid filter expression: %(filter)s') %
{'filter': ', '.join(invalid_filters)})
return instance_filters
elif self.source == 'vmware':
elif self.source in ('vmware', 'tower'):
return instance_filters
else:
return ''
@@ -1337,9 +1366,9 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
@classmethod
def _get_unified_job_field_names(cls):
return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars', 'schedule',
'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'timeout', 'verbosity', 'source_project_update',]
return set(f.name for f in InventorySourceOptions._meta.fields) | set(
['name', 'description', 'schedule']
)
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
@@ -1413,6 +1442,19 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
def create_inventory_update(self, **kwargs):
return self.create_unified_job(**kwargs)
def create_unified_job(self, **kwargs):
# Use special name, if name not already specified
if self.inventory:
if '_eager_fields' not in kwargs:
kwargs['_eager_fields'] = {}
if 'name' not in kwargs['_eager_fields']:
name = six.text_type('{} - {}').format(self.inventory.name, self.name)
name_field = self._meta.get_field('name')
if len(name) > name_field.max_length:
name = name[:name_field.max_length]
kwargs['_eager_fields']['name'] = name
return super(InventorySource, self).create_unified_job(**kwargs)
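create_unified_job() above builds the inventory update's default name as "<inventory> - <source>" and clips it to the name field's max_length. A small sketch of that logic (the 512-character limit is illustrative):

def default_update_name(inventory_name, source_name, max_length=512):
    # "<inventory> - <source>", clipped to the database column length.
    name = u'{} - {}'.format(inventory_name, source_name)
    if len(name) > max_length:
        name = name[:max_length]
    return name

print(default_update_name(u'Prod Inventory', u'ec2 us-east-1'))  # Prod Inventory - ec2 us-east-1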
@property
def cache_timeout_blocked(self):
if not self.last_job_run:
@@ -1540,15 +1582,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
websocket_data.update(dict(group_id=self.inventory_source.deprecated_group.id))
return websocket_data
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', [])
inventory_source = self.inventory_source
if inventory_source.inventory and self.name == inventory_source.name:
self.name = inventory_source.inventory.name
if 'name' not in update_fields:
update_fields.append('name')
super(InventoryUpdate, self).save(*args, **kwargs)
def get_absolute_url(self, request=None):
return reverse('api:inventory_update_detail', kwargs={'pk': self.pk}, request=request)
@@ -1563,6 +1596,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
self.inventory_source.source_project.get_project_path(check_if_exists=False),
self.source_path)
@property
def event_class(self):
return InventoryUpdateEvent
@property
def task_impact(self):
return 50

View File

@@ -14,12 +14,9 @@ from django.conf import settings
from django.db import models
#from django.core.cache import cache
import memcache
from django.db.models import Q, Count
from django.utils.dateparse import parse_datetime
from dateutil import parser
from dateutil.tz import tzutc
from django.utils.encoding import force_text, smart_str
from django.utils.timezone import utc
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError, FieldDoesNotExist
@@ -29,27 +26,23 @@ from rest_framework.exceptions import ParseError
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.events import JobEvent, SystemJobEvent
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.utils import (
ignore_inventory_computed_fields,
parse_yaml_or_json,
)
from awx.main.utils import parse_yaml_or_json
from awx.main.fields import ImplicitRoleField
from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin, TaskManagerJobMixin
from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin, TaskManagerJobMixin, CustomVirtualEnvMixin
from awx.main.fields import JSONField, AskForField
from awx.main.consumers import emit_channel_notification
logger = logging.getLogger('awx.main.models.jobs')
analytics_logger = logging.getLogger('awx.analytics.job_events')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobTemplate', 'SystemJob']
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']
class JobOptions(BaseModel):
@@ -222,7 +215,7 @@ class JobOptions(BaseModel):
return needed
class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin):
class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin, CustomVirtualEnvMixin):
'''
A job template is a reusable job definition for applying a project (with
playbook) to an inventory source with a given credential.
@@ -289,13 +282,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
@classmethod
def _get_unified_job_field_names(cls):
return ['name', 'description', 'job_type', 'inventory', 'project',
'playbook', 'credentials', 'forks', 'schedule', 'limit',
'verbosity', 'job_tags', 'extra_vars',
'force_handlers', 'skip_tags', 'start_at_task',
'become_enabled', 'labels', 'survey_passwords',
'allow_simultaneous', 'timeout', 'use_fact_cache',
'diff_mode',]
return set(f.name for f in JobOptions._meta.fields) | set(
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials']
)
@property
def validation_errors(self):
@@ -342,6 +331,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
# not block a provisioning callback from creating/launching jobs.
if callback_extra_vars is None:
for ask_field_name in set(self.get_ask_mapping().values()):
if ask_field_name == 'ask_credential_on_launch':
# if ask_credential_on_launch is True, it just means it can
# optionally be specified at launch time, not that it's *required*
# to launch
continue
if getattr(self, ask_field_name):
prompting_needed = True
break
@@ -355,7 +349,8 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
rejected_data = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(
kwargs.get('extra_vars', {}),
_exclude_errors=exclude_errors)
_exclude_errors=exclude_errors,
extra_passwords=kwargs.get('survey_passwords', {}))
if accepted_vars:
prompted_data['extra_vars'] = accepted_vars
if rejected_vars:
@@ -519,6 +514,22 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
def get_ui_url(self):
return urljoin(settings.TOWER_URL_BASE, "/#/jobs/{}".format(self.pk))
@property
def ansible_virtualenv_path(self):
# the order here enforces precedence (it matters)
for virtualenv in (
self.job_template.custom_virtualenv if self.job_template else None,
self.project.custom_virtualenv,
self.project.organization.custom_virtualenv
):
if virtualenv:
return virtualenv
return settings.ANSIBLE_VENV_PATH
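The ansible_virtualenv_path property above returns the first configured custom virtualenv, checking the job template, then the project, then the project's organization, and finally falling back to the global default. A standalone sketch of that precedence (the default path is illustrative):

def pick_virtualenv(job_template_venv, project_venv, organization_venv,
                    default='/var/lib/awx/venv/ansible'):
    # First non-empty value wins; the order encodes precedence.
    for venv in (job_template_venv, project_venv, organization_venv):
        if venv:
            return venv
    return default

print(pick_virtualenv(None, '/var/lib/awx/venv/legacy-ansible', None))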
@property
def event_class(self):
return JobEvent
@property
def ask_diff_mode_on_launch(self):
if self.job_template is not None:
@@ -664,7 +675,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
data.update(dict(inventory=self.inventory.name if self.inventory else None,
project=self.project.name if self.project else None,
playbook=self.playbook,
credential=self.credential.name if self.credential else None,
credential=getattr(self.get_deprecated_credential('ssh'), 'name', None),
limit=self.limit,
extra_vars=self.display_extra_vars(),
hosts=all_hosts))
@@ -801,7 +812,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
smart_str(host.inventory.name), smart_str(host.name)),
extra=dict(inventory_id=host.inventory.id, host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat()))
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
job_id=self.id))
# Add on aliases for the non-related-model fields
@@ -892,7 +904,7 @@ class LaunchTimeConfig(BaseModel):
Hides fields marked as passwords in survey.
'''
if self.survey_passwords:
extra_data = parse_yaml_or_json(self.extra_data)
extra_data = parse_yaml_or_json(self.extra_data).copy()
for key, value in self.survey_passwords.items():
if key in extra_data:
extra_data[key] = value
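The display_extra_vars() hunk above copies the parsed extra_data and overwrites any surveyed password keys with the stored placeholder so real values are never displayed. A rough sketch of that masking (the '$encrypted$' placeholder mirrors the convention used throughout these models):

def masked_extra_vars(extra_data, survey_passwords):
    # Work on a copy so the stored extra_data is left untouched.
    data = dict(extra_data)
    for key, placeholder in survey_passwords.items():
        if key in data:
            data[key] = placeholder
    return data

print(masked_extra_vars({'api_token': 'hunter2', 'region': 'us-east-1'},
                        {'api_token': '$encrypted$'}))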
@@ -1031,477 +1043,6 @@ class JobHostSummary(CreatedModifiedModel):
#self.host.update_computed_fields()
class JobEvent(CreatedModifiedModel):
'''
An event/message logged from the callback when running a job.
'''
# Playbook events will be structured to form the following hierarchy:
# - playbook_on_start (once for each playbook file)
# - playbook_on_vars_prompt (for each play, but before play starts, we
# currently don't handle responding to these prompts)
# - playbook_on_play_start (once for each play)
# - playbook_on_import_for_host (not logged, not used for v2)
# - playbook_on_not_import_for_host (not logged, not used for v2)
# - playbook_on_no_hosts_matched
# - playbook_on_no_hosts_remaining
# - playbook_on_include (only v2 - only used for handlers?)
# - playbook_on_setup (not used for v2)
# - runner_on*
# - playbook_on_task_start (once for each task within a play)
# - runner_on_failed
# - runner_on_ok
# - runner_on_error (not used for v2)
# - runner_on_skipped
# - runner_on_unreachable
# - runner_on_no_hosts (not used for v2)
# - runner_on_async_poll (not used for v2)
# - runner_on_async_ok (not used for v2)
# - runner_on_async_failed (not used for v2)
# - runner_on_file_diff (v2 event is v2_on_file_diff)
# - runner_item_on_ok (v2 only)
# - runner_item_on_failed (v2 only)
# - runner_item_on_skipped (v2 only)
# - runner_retry (v2 only)
# - playbook_on_notify (once for each notification from the play, not used for v2)
# - playbook_on_stats
EVENT_TYPES = [
# (level, event, verbose name, failed)
(3, 'runner_on_failed', _('Host Failed'), True),
(3, 'runner_on_ok', _('Host OK'), False),
(3, 'runner_on_error', _('Host Failure'), True),
(3, 'runner_on_skipped', _('Host Skipped'), False),
(3, 'runner_on_unreachable', _('Host Unreachable'), True),
(3, 'runner_on_no_hosts', _('No Hosts Remaining'), False),
(3, 'runner_on_async_poll', _('Host Polling'), False),
(3, 'runner_on_async_ok', _('Host Async OK'), False),
(3, 'runner_on_async_failed', _('Host Async Failure'), True),
(3, 'runner_item_on_ok', _('Item OK'), False),
(3, 'runner_item_on_failed', _('Item Failed'), True),
(3, 'runner_item_on_skipped', _('Item Skipped'), False),
(3, 'runner_retry', _('Host Retry'), False),
# Tower does not yet support --diff mode.
(3, 'runner_on_file_diff', _('File Difference'), False),
(0, 'playbook_on_start', _('Playbook Started'), False),
(2, 'playbook_on_notify', _('Running Handlers'), False),
(2, 'playbook_on_include', _('Including File'), False),
(2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False),
(2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False),
(2, 'playbook_on_task_start', _('Task Started'), False),
# Tower does not yet support vars_prompt (and will probably hang :)
(1, 'playbook_on_vars_prompt', _('Variables Prompted'), False),
(2, 'playbook_on_setup', _('Gathering Facts'), False),
(2, 'playbook_on_import_for_host', _('internal: on Import for Host'), False),
(2, 'playbook_on_not_import_for_host', _('internal: on Not Import for Host'), False),
(1, 'playbook_on_play_start', _('Play Started'), False),
(1, 'playbook_on_stats', _('Playbook Complete'), False),
# Additional event types for captured stdout not directly related to
# playbook or runner events.
(0, 'debug', _('Debug'), False),
(0, 'verbose', _('Verbose'), False),
(0, 'deprecated', _('Deprecated'), False),
(0, 'warning', _('Warning'), False),
(0, 'system_warning', _('System Warning'), False),
(0, 'error', _('Error'), True),
]
FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])
class Meta:
app_label = 'main'
ordering = ('pk',)
index_together = [
('job', 'event'),
('job', 'uuid'),
('job', 'start_line'),
('job', 'end_line'),
('job', 'parent_uuid'),
]
job = models.ForeignKey(
'Job',
related_name='job_events',
on_delete=models.CASCADE,
editable=False,
)
event = models.CharField(
max_length=100,
choices=EVENT_CHOICES,
)
event_data = JSONField(
blank=True,
default={},
)
failed = models.BooleanField(
default=False,
editable=False,
)
changed = models.BooleanField(
default=False,
editable=False,
)
uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
host = models.ForeignKey(
'Host',
related_name='job_events_as_primary_host',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
)
host_name = models.CharField(
max_length=1024,
default='',
editable=False,
)
hosts = models.ManyToManyField(
'Host',
related_name='job_events',
editable=False,
)
playbook = models.CharField(
max_length=1024,
default='',
editable=False,
)
play = models.CharField(
max_length=1024,
default='',
editable=False,
)
role = models.CharField(
max_length=1024,
default='',
editable=False,
)
task = models.CharField(
max_length=1024,
default='',
editable=False,
)
parent = models.ForeignKey(
'self',
related_name='children',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
)
parent_uuid = models.CharField(
max_length=1024,
default='',
editable=False,
)
counter = models.PositiveIntegerField(
default=0,
editable=False,
)
stdout = models.TextField(
default='',
editable=False,
)
verbosity = models.PositiveIntegerField(
default=0,
editable=False,
)
start_line = models.PositiveIntegerField(
default=0,
editable=False,
)
end_line = models.PositiveIntegerField(
default=0,
editable=False,
)
def get_absolute_url(self, request=None):
return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
def __unicode__(self):
return u'%s @ %s' % (self.get_event_display2(), self.created.isoformat())
@property
def event_level(self):
return self.LEVEL_FOR_EVENT.get(self.event, 0)
def get_event_display2(self):
msg = self.get_event_display()
if self.event == 'playbook_on_play_start':
if self.play:
msg = "%s (%s)" % (msg, self.play)
elif self.event == 'playbook_on_task_start':
if self.task:
if self.event_data.get('is_conditional', False):
msg = 'Handler Notified'
if self.role:
msg = '%s (%s | %s)' % (msg, self.role, self.task)
else:
msg = "%s (%s)" % (msg, self.task)
# Change display for runner events triggered by async polling. Some of
# these events may not show in most cases, due to filtering them out
# of the job event queryset returned to the user.
res = self.event_data.get('res', {})
# Fix for existing records from before we added the workaround on save
# to change async_ok to async_failed.
if self.event == 'runner_on_async_ok':
try:
if res.get('failed', False) or res.get('rc', 0) != 0:
msg = 'Host Async Failed'
except (AttributeError, TypeError):
pass
# Runner events with ansible_job_id are part of async starting/polling.
if self.event in ('runner_on_ok', 'runner_on_failed'):
try:
module_name = res['invocation']['module_name']
job_id = res['ansible_job_id']
except (TypeError, KeyError, AttributeError):
module_name = None
job_id = None
if module_name and job_id:
if module_name == 'async_status':
msg = 'Host Async Checking'
else:
msg = 'Host Async Started'
# Handle both 1.2 on_failed and 1.3+ on_async_failed events when an
# async task times out.
if self.event in ('runner_on_failed', 'runner_on_async_failed'):
try:
if res['msg'] == 'timed out':
msg = 'Host Async Timeout'
except (TypeError, KeyError, AttributeError):
pass
return msg
def _update_from_event_data(self):
# Update job event model fields from event data.
updated_fields = set()
job = self.job
verbosity = job.verbosity
event_data = self.event_data
res = event_data.get('res', None)
if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False):
self.failed = True
updated_fields.add('failed')
if isinstance(res, dict):
if res.get('changed', False):
self.changed = True
updated_fields.add('changed')
# If we're not in verbose mode, wipe out any module arguments.
invocation = res.get('invocation', None)
if isinstance(invocation, dict) and verbosity == 0 and 'module_args' in invocation:
event_data['res']['invocation']['module_args'] = ''
self.event_data = event_data
updated_fields.add('event_data')
if self.event == 'playbook_on_stats':
try:
failures_dict = event_data.get('failures', {})
dark_dict = event_data.get('dark', {})
self.failed = bool(sum(failures_dict.values()) +
sum(dark_dict.values()))
updated_fields.add('failed')
changed_dict = event_data.get('changed', {})
self.changed = bool(sum(changed_dict.values()))
updated_fields.add('changed')
except (AttributeError, TypeError):
pass
for field in ('playbook', 'play', 'task', 'role', 'host'):
value = force_text(event_data.get(field, '')).strip()
if field == 'host':
field = 'host_name'
if value != getattr(self, field):
setattr(self, field, value)
updated_fields.add(field)
return updated_fields
def _update_parents_failed_and_changed(self):
# Update parent events to reflect failed, changed
runner_events = JobEvent.objects.filter(job=self.job,
event__startswith='runner_on')
changed_events = runner_events.filter(changed=True)
failed_events = runner_events.filter(failed=True)
JobEvent.objects.filter(uuid__in=changed_events.values_list('parent_uuid', flat=True)).update(changed=True)
JobEvent.objects.filter(uuid__in=failed_events.values_list('parent_uuid', flat=True)).update(failed=True)
def _update_hosts(self, extra_host_pks=None):
# Update job event hosts m2m from host_name, propagate to parent events.
extra_host_pks = set(extra_host_pks or [])
hostnames = set()
if self.host_name:
hostnames.add(self.host_name)
if self.event == 'playbook_on_stats':
try:
for v in self.event_data.values():
hostnames.update(v.keys())
except AttributeError: # In case event_data or v isn't a dict.
pass
qs = self.job.inventory.hosts.all()
qs = qs.filter(Q(name__in=hostnames) | Q(pk__in=extra_host_pks))
qs = qs.exclude(job_events__pk=self.id).only('id')
for host in qs:
self.hosts.add(host)
if self.parent_uuid:
parent = JobEvent.objects.filter(uuid=self.parent_uuid)
if parent.exists():
parent = parent[0]
parent._update_hosts(qs.values_list('id', flat=True))
def _hostnames(self):
hostnames = set()
try:
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
hostnames.update(self.event_data.get(stat, {}).keys())
except AttributeError: # In case event_data or v isn't a dict.
pass
return hostnames
def _update_host_summary_from_stats(self, hostnames):
with ignore_inventory_computed_fields():
qs = self.job.inventory.hosts.filter(name__in=hostnames)
job = self.job
for host in hostnames:
host_stats = {}
for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
try:
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
except AttributeError: # in case event_data[stat] isn't a dict.
pass
if qs.filter(name=host).exists():
host_actual = qs.get(name=host)
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
else:
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
if not created:
update_fields = []
for stat, value in host_stats.items():
if getattr(host_summary, stat) != value:
setattr(host_summary, stat, value)
update_fields.append(stat)
if update_fields:
host_summary.save(update_fields=update_fields)
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
# Update model fields and related objects unless we're only updating
# failed/changed flags triggered from a child event.
from_parent_update = kwargs.pop('from_parent_update', False)
if not from_parent_update:
# Update model fields from event data.
updated_fields = self._update_from_event_data()
for field in updated_fields:
if field not in update_fields:
update_fields.append(field)
# Update host related field from host_name.
if not self.host_id and self.host_name:
host_qs = self.job.inventory.hosts.filter(name=self.host_name)
host_id = host_qs.only('id').values_list('id', flat=True).first()
if host_id != self.host_id:
self.host_id = host_id
if 'host_id' not in update_fields:
update_fields.append('host_id')
super(JobEvent, self).save(*args, **kwargs)
# Update related objects after this event is saved.
if not from_parent_update:
if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
self._update_hosts()
if self.event == 'playbook_on_stats':
self._update_parents_failed_and_changed()
hostnames = self._hostnames()
self._update_host_summary_from_stats(hostnames)
self.job.inventory.update_computed_fields()
emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=self.job.id))
@classmethod
def create_from_data(self, **kwargs):
# Must have a job_id specified.
if not kwargs.get('job_id', None):
return
# Convert the datetime for the job event's creation appropriately,
# and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(kwargs['created'], datetime.datetime):
kwargs['created'] = parse_datetime(kwargs['created'])
if not kwargs['created'].tzinfo:
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
except (KeyError, ValueError):
kwargs.pop('created', None)
# Sanity check: Don't honor keys that we don't recognize.
valid_keys = {'job_id', 'event', 'event_data', 'playbook', 'play',
'role', 'task', 'created', 'counter', 'uuid', 'stdout',
'parent_uuid', 'start_line', 'end_line', 'verbosity'}
for key in kwargs.keys():
if key not in valid_keys:
kwargs.pop(key)
event_data = kwargs.get('event_data', None)
artifact_dict = None
if event_data:
artifact_dict = event_data.pop('artifact_data', None)
job_event = JobEvent.objects.create(**kwargs)
analytics_logger.info('Job event data saved.', extra=dict(python_objects=dict(job_event=job_event)))
# Save artifact data to parent job (if provided).
if artifact_dict:
if event_data and isinstance(event_data, dict):
# Note: Core has not added support for marking artifacts as
# sensitive yet. Going forward, core will not use
# _ansible_no_log to denote sensitive set_stats calls.
# Instead, they plan to add a flag outside of the traditional
# no_log mechanism. no_log will not work for this feature,
# in core, because sensitive data is scrubbed before sending
# data to the callback. The playbook_on_stats is the callback
# in which the set_stats data is used.
# Again, the sensitive artifact feature has not yet landed in
# core. The line below is how we would mark the artifacts payload as
# sensitive:
# artifact_dict['_ansible_no_log'] = True
#
parent_job = Job.objects.filter(pk=kwargs['job_id']).first()
if parent_job and parent_job.artifacts != artifact_dict:
parent_job.artifacts = artifact_dict
parent_job.save(update_fields=['artifacts'])
return job_event
@classmethod
def get_startevent_queryset(cls, parent_task, starting_events, ordering=None):
'''
We need to pull information about each start event.
This is super tricky, because this table has a one-to-many
relationship with itself (parent-child), and we're getting
information for an arbitrary number of children. This means we
need stats on grandchildren, sorted by child.
'''
qs = (JobEvent.objects.filter(parent__parent=parent_task,
parent__event__in=starting_events)
.values('parent__id', 'event', 'changed')
.annotate(num=Count('event'))
.order_by('parent__id'))
if ordering is not None:
qs = qs.order_by(ordering)
return qs
class SystemJobOptions(BaseModel):
'''
Common fields for SystemJobTemplate and SystemJob.
@@ -1643,6 +1184,10 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):
def get_ui_url(self):
return urljoin(settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk))
@property
def event_class(self):
return SystemJobEvent
@property
def task_impact(self):
return 150

View File

@@ -1,25 +1,29 @@
# Python
import os
import json
from copy import copy
from copy import copy, deepcopy
# Django
from django.conf import settings
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User # noqa
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
# AWX
from awx.main.models.base import prevent_search
from awx.main.models.rbac import (
Role, RoleAncestorEntry, get_roles_on_resource
)
from awx.main.utils import parse_yaml_or_json
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices
from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
from awx.main.fields import JSONField, AskForField
__all__ = ['ResourceMixin', 'SurveyJobTemplateMixin', 'SurveyJobMixin',
'TaskManagerUnifiedJobMixin', 'TaskManagerJobMixin', 'TaskManagerProjectUpdateMixin',
'TaskManagerInventoryUpdateMixin',]
'TaskManagerInventoryUpdateMixin', 'CustomVirtualEnvMixin']
class ResourceMixin(models.Model):
@@ -141,21 +145,27 @@ class SurveyJobTemplateMixin(models.Model):
else:
runtime_extra_vars = {}
# Overwrite with job template extra vars with survey default vars
# Overwrite job template extra vars with survey default vars
if self.survey_enabled and 'spec' in self.survey_spec:
for survey_element in self.survey_spec.get("spec", []):
default = survey_element.get('default')
variable_key = survey_element.get('variable')
if survey_element.get('type') == 'password':
if variable_key in runtime_extra_vars and default:
if variable_key in runtime_extra_vars:
kw_value = runtime_extra_vars[variable_key]
if kw_value.startswith('$encrypted$') and kw_value != default:
runtime_extra_vars[variable_key] = default
if kw_value == '$encrypted$':
runtime_extra_vars.pop(variable_key)
if default is not None:
data = {variable_key: default}
errors = self._survey_element_validation(survey_element, data)
decrypted_default = default
if (
survey_element['type'] == "password" and
isinstance(decrypted_default, basestring) and
decrypted_default.startswith('$encrypted$')
):
decrypted_default = decrypt_value(get_encryption_key('value', pk=None), decrypted_default)
errors = self._survey_element_validation(survey_element, {variable_key: decrypted_default})
if not errors:
survey_defaults[variable_key] = default
extra_vars.update(survey_defaults)
@@ -166,10 +176,25 @@ class SurveyJobTemplateMixin(models.Model):
create_kwargs['extra_vars'] = json.dumps(extra_vars)
return create_kwargs
def _survey_element_validation(self, survey_element, data):
def _survey_element_validation(self, survey_element, data, validate_required=True):
# Don't apply validation to the `$encrypted$` placeholder; the decrypted
# default (if any) will be validated against instead
errors = []
if (survey_element['type'] == "password"):
password_value = data.get(survey_element['variable'])
if (
isinstance(password_value, basestring) and
password_value == '$encrypted$'
):
if survey_element.get('default') is None and survey_element['required']:
if validate_required:
errors.append("'%s' value missing" % survey_element['variable'])
return errors
if survey_element['variable'] not in data and survey_element['required']:
errors.append("'%s' value missing" % survey_element['variable'])
if validate_required:
errors.append("'%s' value missing" % survey_element['variable'])
elif survey_element['type'] in ["textarea", "text", "password"]:
if survey_element['variable'] in data:
if type(data[survey_element['variable']]) not in (str, unicode):
@@ -233,7 +258,7 @@ class SurveyJobTemplateMixin(models.Model):
choice_list))
return errors
def _accept_or_ignore_variables(self, data, errors=None, _exclude_errors=()):
def _accept_or_ignore_variables(self, data, errors=None, _exclude_errors=(), extra_passwords=None):
survey_is_enabled = (self.survey_enabled and self.survey_spec)
extra_vars = data.copy()
if errors is None:
@@ -245,8 +270,16 @@ class SurveyJobTemplateMixin(models.Model):
# Check for data violation of survey rules
survey_errors = []
for survey_element in self.survey_spec.get("spec", []):
element_errors = self._survey_element_validation(survey_element, data)
key = survey_element.get('variable', None)
value = data.get(key, None)
validate_required = 'required' not in _exclude_errors
if extra_passwords and key in extra_passwords and is_encrypted(value):
element_errors = self._survey_element_validation(survey_element, {
key: decrypt_value(get_encryption_key('value', pk=None), value)
}, validate_required=validate_required)
else:
element_errors = self._survey_element_validation(
survey_element, data, validate_required=validate_required)
if element_errors:
survey_errors += element_errors
@@ -267,11 +300,46 @@ class SurveyJobTemplateMixin(models.Model):
rejected.update(extra_vars)
# ignored variables does not block manual launch
if 'prompts' not in _exclude_errors:
errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch.').format(
errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch. Check the Prompt on Launch setting '+
'on the Job Template to include Extra Variables.').format(
list_of_keys=', '.join(extra_vars.keys()))]
return (accepted, rejected, errors)
@staticmethod
def pivot_spec(spec):
'''
Utility method that will return a dictionary keyed off variable names
'''
pivoted = {}
for element_data in spec.get('spec', []):
if 'variable' in element_data:
pivoted[element_data['variable']] = element_data
return pivoted
def survey_variable_validation(self, data):
errors = []
if not self.survey_enabled:
return errors
if 'name' not in self.survey_spec:
errors.append("'name' missing from survey spec.")
if 'description' not in self.survey_spec:
errors.append("'description' missing from survey spec.")
for survey_element in self.survey_spec.get("spec", []):
errors += self._survey_element_validation(survey_element, data)
return errors
def display_survey_spec(self):
'''
Hide encrypted default passwords in survey specs
'''
survey_spec = deepcopy(self.survey_spec) if self.survey_spec else {}
for field in survey_spec.get('spec', []):
if field.get('type') == 'password':
if 'default' in field and field['default']:
field['default'] = '$encrypted$'
return survey_spec
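display_survey_spec() above deep-copies the survey spec and blanks out password defaults so encrypted values never leave the API. A minimal sketch of the same idea on a plain dict:

from copy import deepcopy

def hide_password_defaults(survey_spec):
    # Replace non-empty password defaults with the placeholder marker.
    spec = deepcopy(survey_spec or {})
    for field in spec.get('spec', []):
        if field.get('type') == 'password' and field.get('default'):
            field['default'] = '$encrypted$'
    return spec

spec = {'spec': [{'variable': 'token', 'type': 'password', 'default': 'abc123'}]}
print(hide_password_defaults(spec)['spec'][0]['default'])  # $encrypted$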
class SurveyJobMixin(models.Model):
class Meta:
@@ -296,6 +364,20 @@ class SurveyJobMixin(models.Model):
else:
return self.extra_vars
def decrypted_extra_vars(self):
'''
Decrypts fields marked as passwords in survey.
'''
if self.survey_passwords:
extra_vars = json.loads(self.extra_vars)
for key in self.survey_passwords:
value = extra_vars.get(key)
if value and isinstance(value, basestring) and value.startswith('$encrypted$'):
extra_vars[key] = decrypt_value(get_encryption_key('value', pk=None), value)
return json.dumps(extra_vars)
else:
return self.extra_vars
class TaskManagerUnifiedJobMixin(models.Model):
class Meta:
@@ -312,6 +394,9 @@ class TaskManagerJobMixin(TaskManagerUnifiedJobMixin):
class Meta:
abstract = True
def get_jobs_fail_chain(self):
return [self.project_update] if self.project_update else []
def dependent_jobs_finished(self):
for j in self.dependent_jobs.all():
if j.status in ['pending', 'waiting', 'running']:
@@ -335,3 +420,23 @@ class TaskManagerProjectUpdateMixin(TaskManagerUpdateOnLaunchMixin):
class TaskManagerInventoryUpdateMixin(TaskManagerUpdateOnLaunchMixin):
class Meta:
abstract = True
class CustomVirtualEnvMixin(models.Model):
class Meta:
abstract = True
custom_virtualenv = models.CharField(
blank=True,
null=True,
default=None,
max_length=100
)
def clean_custom_virtualenv(self):
value = self.custom_virtualenv
if value and os.path.join(value, '') not in get_custom_venv_choices():
raise ValidationError(
_('{} is not a valid virtualenv in {}').format(value, settings.BASE_VENV_PATH)
)
return os.path.join(value or '', '')
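clean_custom_virtualenv() above normalizes the configured path with os.path.join(value, '') so that values with and without a trailing slash compare equally against the allowed virtualenv choices. A quick illustration (paths are made up):

import os

def normalize_venv_path(path):
    # os.path.join(path, '') appends the separator only if it is missing.
    return os.path.join(path, '')

allowed = ['/var/lib/awx/venv/example/']
print(normalize_venv_path('/var/lib/awx/venv/example') in allowed)   # True
print(normalize_venv_path('/var/lib/awx/venv/example/') in allowed)  # True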

View File

@@ -22,12 +22,12 @@ from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.models.mixins import ResourceMixin
from awx.main.models.mixins import ResourceMixin, CustomVirtualEnvMixin
__all__ = ['Organization', 'Team', 'Profile', 'AuthToken']
class Organization(CommonModel, NotificationFieldsModel, ResourceMixin):
class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVirtualEnvMixin):
'''
An organization is the basic unit of multi-tenancy divisions
'''

View File

@@ -18,12 +18,13 @@ from django.utils.timezone import now, make_aware, get_default_timezone
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.events import ProjectUpdateEvent
from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin
from awx.main.utils import update_scm_url
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
from awx.main.fields import ImplicitRoleField
@@ -222,7 +223,7 @@ class ProjectOptions(models.Model):
return proj_path + '.lock'
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEnvMixin):
'''
A project represents a playbook git repo that can access a set of inventories
'''
@@ -306,9 +307,9 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
@classmethod
def _get_unified_job_field_names(cls):
return ['name', 'description', 'local_path', 'scm_type', 'scm_url',
'scm_branch', 'scm_clean', 'scm_delete_on_update',
'credential', 'schedule', 'timeout',]
return set(f.name for f in ProjectOptions._meta.fields) | set(
['name', 'description', 'schedule']
)
def save(self, *args, **kwargs):
new_instance = not bool(self.pk)
@@ -485,6 +486,10 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
websocket_data.update(dict(project_id=self.project.id))
return websocket_data
@property
def event_class(self):
return ProjectUpdateEvent
@property
def task_impact(self):
return 0 if self.job_type == 'run' else 20

View File

@@ -5,11 +5,12 @@ import re
import logging
import datetime
import dateutil.rrule
from dateutil.tz import gettz, datetime_exists
# Django
from django.db import models
from django.db.models.query import QuerySet
from django.utils.timezone import now, make_aware, get_default_timezone
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
# AWX
@@ -19,6 +20,9 @@ from awx.main.models.jobs import LaunchTimeConfig
from awx.main.utils import ignore_inventory_computed_fields
from awx.main.consumers import emit_channel_notification
import pytz
logger = logging.getLogger('awx.main.models.schedule')
__all__ = ['Schedule']
@@ -53,6 +57,10 @@ class ScheduleManager(ScheduleFilterMethods, models.Manager):
class Schedule(CommonModel, LaunchTimeConfig):
TZID_REGEX = re.compile(
"^(DTSTART;TZID=(?P<tzid>[^:]+)(?P<stamp>\:[0-9]+T[0-9]+))(?P<rrule> .*)$"
)
class Meta:
app_label = 'main'
ordering = ['-next_run']
@@ -91,6 +99,67 @@ class Schedule(CommonModel, LaunchTimeConfig):
help_text=_("The next time that the scheduled action will run.")
)
@classmethod
def rrulestr(cls, rrule, **kwargs):
"""
Apply our own custom rrule parsing logic to support TZID=
python-dateutil doesn't _natively_ support `DTSTART;TZID=`; this
function parses out the TZID= component and uses it to produce the
`tzinfos` keyword argument to `dateutil.rrule.rrulestr()`. In this
way, we translate:
DTSTART;TZID=America/New_York:20180601T120000 RRULE:FREQ=DAILY;INTERVAL=1
...into...
DTSTART:20180601T120000TZI RRULE:FREQ=DAILY;INTERVAL=1
...and we pass a hint about the local timezone to dateutil's parser:
`dateutil.rrule.rrulestr(rrule, forceset=True, tzinfos={
'TZI': dateutil.tz.gettz('America/New_York')
})`
it's likely that we can remove the custom code that performs this
parsing if TZID= gains support in upstream dateutil:
https://github.com/dateutil/dateutil/pull/619
"""
kwargs['forceset'] = True
kwargs['tzinfos'] = {x: dateutil.tz.tzutc() for x in dateutil.parser.parserinfo().UTCZONE}
match = cls.TZID_REGEX.match(rrule)
if match is not None:
rrule = cls.TZID_REGEX.sub("DTSTART\g<stamp>TZI\g<rrule>", rrule)
timezone = gettz(match.group('tzid'))
kwargs['tzinfos']['TZI'] = timezone
x = dateutil.rrule.rrulestr(rrule, **kwargs)
for r in x._rrule:
if r._dtstart and r._until:
if all((
r._dtstart.tzinfo != dateutil.tz.tzlocal(),
r._until.tzinfo != dateutil.tz.tzutc(),
)):
# According to RFC5545 Section 3.3.10:
# https://tools.ietf.org/html/rfc5545#section-3.3.10
#
# > If the "DTSTART" property is specified as a date with UTC
# > time or a date with local time and time zone reference,
# > then the UNTIL rule part MUST be specified as a date with
# > UTC time.
raise ValueError('RRULE UNTIL values must be specified in UTC')
try:
first_event = x[0]
if first_event < now() - datetime.timedelta(days=365 * 5):
# For older DTSTART values, if there are more than 1000 recurrences...
if len(x[:1001]) > 1000:
raise ValueError('RRULE values that yield more than 1000 events are not allowed.')
except IndexError:
pass
return x
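A standalone sketch of the translation the classmethod performs, using only python-dateutil (the rule string and timezone are examples; no AWX models are required):
import dateutil.rrule
from dateutil.tz import gettz

# as stored on a Schedule:
#   DTSTART;TZID=America/New_York:20180601T120000 RRULE:FREQ=DAILY;INTERVAL=1
# TZID_REGEX.sub() rewrites the DTSTART to a 'TZI' marker and the zone moves into tzinfos
translated = 'DTSTART:20180601T120000TZI RRULE:FREQ=DAILY;INTERVAL=1'
ruleset = dateutil.rrule.rrulestr(
    translated, forceset=True, tzinfos={'TZI': gettz('America/New_York')}
)
print(ruleset[:3])  # first three occurrences, timezone-aware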
def __unicode__(self):
return u'%s_t%s_%s_%s' % (self.name, self.unified_job_template.id, self.id, self.next_run)
@@ -106,21 +175,26 @@ class Schedule(CommonModel, LaunchTimeConfig):
return job_kwargs
def update_computed_fields(self):
future_rs = dateutil.rrule.rrulestr(self.rrule, forceset=True)
future_rs = Schedule.rrulestr(self.rrule)
next_run_actual = future_rs.after(now())
if next_run_actual is not None:
if not datetime_exists(next_run_actual):
# skip imaginary dates, like 2:30 on DST boundaries
next_run_actual = future_rs.after(next_run_actual)
next_run_actual = next_run_actual.astimezone(pytz.utc)
self.next_run = next_run_actual
try:
self.dtstart = future_rs[0]
self.dtstart = future_rs[0].astimezone(pytz.utc)
except IndexError:
self.dtstart = None
self.dtend = None
if 'until' in self.rrule.lower():
match_until = re.match(".*?(UNTIL\=[0-9]+T[0-9]+Z)", self.rrule)
until_date = match_until.groups()[0].split("=")[1]
self.dtend = make_aware(datetime.datetime.strptime(until_date, "%Y%m%dT%H%M%SZ"), get_default_timezone())
if 'count' in self.rrule.lower():
self.dtend = future_rs[-1]
if 'until' in self.rrule.lower() or 'count' in self.rrule.lower():
try:
self.dtend = future_rs[-1].astimezone(pytz.utc)
except IndexError:
self.dtend = None
emit_channel_notification('schedules-changed', dict(id=self.id, group_name='schedules'))
with ignore_inventory_computed_fields():
self.unified_job_template.update_computed_fields()

View File

@@ -2,14 +2,14 @@
# All Rights Reserved.
# Python
import codecs
from StringIO import StringIO
import json
import logging
import re
import os
import os.path
import re
import subprocess
import tempfile
from collections import OrderedDict
from StringIO import StringIO
# Django
from django.conf import settings
@@ -34,7 +34,7 @@ from django_celery_results.models import TaskResult
from awx.main.models.base import * # noqa
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
from awx.main.utils import (
decrypt_field, _inventory_updates,
encrypt_dict, decrypt_field, _inventory_updates,
copy_model_by_class, copy_m2m_relationships,
get_type_for_model, parse_yaml_or_json
)
@@ -42,7 +42,7 @@ from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.consumers import emit_channel_notification
from awx.main.fields import JSONField, AskForField
__all__ = ['UnifiedJobTemplate', 'UnifiedJob']
__all__ = ['UnifiedJobTemplate', 'UnifiedJob', 'StdoutMaxBytesExceeded']
logger = logging.getLogger('awx.main.models.unified_jobs')
@@ -345,11 +345,18 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
'''
new_job_passwords = kwargs.pop('survey_passwords', {})
eager_fields = kwargs.pop('_eager_fields', None)
# automatically encrypt survey fields
if hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False):
password_list = self.survey_password_variables()
encrypt_dict(kwargs.get('extra_vars', {}), password_list)
unified_job_class = self._get_unified_job_class()
fields = self._get_unified_job_field_names()
unallowed_fields = set(kwargs.keys()) - set(fields)
if unallowed_fields:
raise Exception('Fields {} are not allowed as overrides.'.format(unallowed_fields))
logger.warn('Fields {} are not allowed as overrides.'.format(unallowed_fields))
map(kwargs.pop, unallowed_fields)
unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs)
@@ -435,7 +442,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
errors[field_name] = [_("Field is not allowed on launch.")]
return ({}, kwargs, errors)
def accept_or_ignore_variables(self, data, errors=None, _exclude_errors=()):
def accept_or_ignore_variables(self, data, errors=None, _exclude_errors=(), extra_passwords=None):
'''
If subclasses accept any `variables` or `extra_vars`, they should
define _accept_or_ignore_variables to place those variables in the accepted dict,
@@ -453,7 +460,11 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
# SurveyJobTemplateMixin cannot override any methods because of
# resolution order, forced by how metaclass processes fields,
# thus the need for hasattr check
return self._accept_or_ignore_variables(data, errors, _exclude_errors=_exclude_errors)
if extra_passwords:
return self._accept_or_ignore_variables(
data, errors, _exclude_errors=_exclude_errors, extra_passwords=extra_passwords)
else:
return self._accept_or_ignore_variables(data, errors, _exclude_errors=_exclude_errors)
elif data:
errors['extra_vars'] = [
_('Variables {list_of_keys} provided, but this template cannot accept variables.'.format(
@@ -504,6 +515,13 @@ class UnifiedJobDeprecatedStdout(models.Model):
)
class StdoutMaxBytesExceeded(Exception):
def __init__(self, total, supported):
self.total = total
self.supported = supported
class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique, UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin):
'''
Concrete base class for unified job run by the task engine.
@@ -632,11 +650,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
default='',
editable=False,
))
result_stdout_file = models.TextField( # FilePathfield?
blank=True,
default='',
editable=False,
)
result_traceback = models.TextField(
blank=True,
default='',
@@ -812,14 +825,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
# Done.
return result
def delete(self):
if self.result_stdout_file != "":
try:
os.remove(self.result_stdout_file)
except Exception:
pass
super(UnifiedJob, self).delete()
def copy_unified_job(self, limit=None):
'''
Returns saved object, including related fields.
@@ -828,7 +833,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
unified_job_class = self.__class__
unified_jt_class = self._get_unified_job_template_class()
parent_field_name = unified_job_class._get_parent_field_name()
fields = unified_jt_class._get_unified_job_field_names() + [parent_field_name]
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
create_data = {"launch_type": "relaunch"}
if limit:
@@ -889,6 +894,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
config.credentials.add(*job_creds)
return config
@property
def event_class(self):
raise NotImplementedError()
@property
def result_stdout_text(self):
related = UnifiedJobDeprecatedStdout.objects.get(pk=self.pk)
@@ -902,36 +911,100 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
related.result_stdout_text = value
related.save()
def result_stdout_raw_handle(self, attempt=0):
"""Return a file-like object containing the standard out of the
job's result.
def result_stdout_raw_handle(self, enforce_max_bytes=True):
"""
msg = {
'pending': 'Waiting for results...',
'missing': 'stdout capture is missing',
}
if self.result_stdout_text:
return StringIO(self.result_stdout_text)
else:
if not os.path.exists(self.result_stdout_file) or os.stat(self.result_stdout_file).st_size < 1:
return StringIO(msg['missing' if self.finished else 'pending'])
This method returns a file-like object ready to be read which contains
all stdout for the UnifiedJob.
# There is a potential timing issue here, because another
# process may be deleting the stdout file after it is written
# to the database.
#
# Therefore, if we get an IOError (which generally means the
# file does not exist), reload info from the database and
# try again.
try:
return codecs.open(self.result_stdout_file, "r",
encoding='utf-8')
except IOError:
if attempt < 3:
self.result_stdout_text = type(self).objects.get(id=self.id).result_stdout_text
return self.result_stdout_raw_handle(attempt=attempt + 1)
If the size of the file is greater than
`settings.STDOUT_MAX_BYTES_DISPLAY`, a StdoutMaxBytesExceeded exception
will be raised.
"""
max_supported = settings.STDOUT_MAX_BYTES_DISPLAY
if enforce_max_bytes:
# If enforce_max_bytes is True, we're not grabbing the whole file,
# just the first <settings.STDOUT_MAX_BYTES_DISPLAY> bytes;
# in this scenario, it's probably safe to use a StringIO.
fd = StringIO()
else:
# If enforce_max_bytes = False, that means they're downloading
# the entire file. To avoid ballooning memory, let's write the
# stdout content to a temporary disk location
if not os.path.exists(settings.JOBOUTPUT_ROOT):
os.makedirs(settings.JOBOUTPUT_ROOT)
fd = tempfile.NamedTemporaryFile(
prefix='{}-{}-'.format(self.model_to_str(), self.pk),
suffix='.out',
dir=settings.JOBOUTPUT_ROOT
)
# Before the addition of event-based stdout, older versions of
# awx stored stdout as raw text blobs in a certain database column
# (`main_unifiedjob.result_stdout_text`)
# For older installs, this data still exists in the database; check for
# it and use if it exists
legacy_stdout_text = self.result_stdout_text
if legacy_stdout_text:
if enforce_max_bytes and len(legacy_stdout_text) > max_supported:
raise StdoutMaxBytesExceeded(len(legacy_stdout_text), max_supported)
fd.write(legacy_stdout_text)
if hasattr(fd, 'name'):
fd.flush()
return open(fd.name, 'r')
else:
# we just wrote to this StringIO, so rewind it
fd.seek(0)
return fd
else:
# Note: the code in this block _intentionally_ does not use the
# Django ORM because of the potential size (many MB+) of
# `main_jobevent.stdout`; we *do not* want to generate queries
# here that construct model objects by fetching large gobs of
# data (and potentially ballooning memory usage); instead, we
# just want to write concatenated values of a certain column
# (`stdout`) directly to a file
with connection.cursor() as cursor:
tablename = self._meta.db_table
related_name = {
'main_job': 'job_id',
'main_adhoccommand': 'ad_hoc_command_id',
'main_projectupdate': 'project_update_id',
'main_inventoryupdate': 'inventory_update_id',
'main_systemjob': 'system_job_id',
}[tablename]
if enforce_max_bytes:
# detect the length of all stdout for this UnifiedJob, and
# if it exceeds settings.STDOUT_MAX_BYTES_DISPLAY bytes,
# don't bother actually fetching the data
total = self.event_class.objects.filter(**{related_name: self.id}).aggregate(
total=models.Sum(models.Func(models.F('stdout'), function='LENGTH'))
)['total']
if total > max_supported:
raise StdoutMaxBytesExceeded(total, max_supported)
cursor.copy_expert(
"copy (select stdout from {} where {}={} order by start_line) to stdout".format(
tablename + 'event',
related_name,
self.id
),
fd
)
if hasattr(fd, 'name'):
# If we're dealing with a physical file, use `sed` to clean
# up escaped line sequences
fd.flush()
subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(fd.name), shell=True).wait()
return open(fd.name, 'r')
else:
return StringIO(msg['missing' if self.finished else 'pending'])
# If we're dealing with an in-memory string buffer, use
# string.replace()
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
return fd
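A usage sketch for the reworked handle, assuming job is any saved UnifiedJob; this shows the intended call pattern rather than code from the changeset:
try:
    # capped read, intended for API display of stdout
    fd = job.result_stdout_raw_handle()
    content = fd.read()
except StdoutMaxBytesExceeded as exc:
    # too large to display; fall back to the disk-backed, uncapped handle
    print('stdout is %d bytes; display limit is %d' % (exc.total, exc.supported))
    fd = job.result_stdout_raw_handle(enforce_max_bytes=False)
    content = fd.read()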
def _escape_ascii(self, content):
# Remove ANSI escape sequences used to embed event data.
@@ -941,7 +1014,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
return content
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
content = self.result_stdout_raw_handle().read()
content = self.result_stdout_raw_handle().read().decode('utf-8')
if redact_sensitive:
content = UriCleaner.remove_sensitive(content)
if escape_ascii:
@@ -956,21 +1029,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
def result_stdout(self):
return self._result_stdout_raw(escape_ascii=True)
@property
def result_stdout_size(self):
try:
return os.stat(self.result_stdout_file).st_size
except Exception:
return len(self.result_stdout)
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False):
return_buffer = u""
return_buffer = StringIO()
if end_line is not None:
end_line = int(end_line)
stdout_lines = self.result_stdout_raw_handle().readlines()
absolute_end = len(stdout_lines)
for line in stdout_lines[int(start_line):end_line]:
return_buffer += line
return_buffer.write(line)
if int(start_line) < 0:
start_actual = len(stdout_lines) + int(start_line)
end_actual = len(stdout_lines)
@@ -981,6 +1047,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
else:
end_actual = len(stdout_lines)
return_buffer = return_buffer.getvalue().decode('utf-8')
if redact_sensitive:
return_buffer = UriCleaner.remove_sensitive(return_buffer)
if escape_ascii:
@@ -1052,6 +1119,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
def can_schedule(self):
if getattr(self, 'passwords_needed_to_start', None):
return False
if getattr(self, 'inventory', None) is None:
return False
JobLaunchConfig = self._meta.get_field('launch_config').related_model
try:
self.launch_config

View File

@@ -316,15 +316,16 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
@classmethod
def _get_unified_job_field_names(cls):
return ['name', 'description', 'extra_vars', 'labels', 'survey_passwords',
'schedule', 'launch_type', 'allow_simultaneous']
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
['name', 'description', 'schedule', 'survey_passwords', 'labels']
)
@classmethod
def _get_unified_jt_copy_names(cls):
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
base_list.remove('labels')
return (base_list +
['survey_spec', 'survey_enabled', 'ask_variables_on_launch', 'organization'])
return (base_list |
set(['survey_spec', 'survey_enabled', 'ask_variables_on_launch', 'organization']))
def get_absolute_url(self, request=None):
return reverse('api:workflow_job_template_detail', kwargs={'pk': self.pk}, request=request)

View File

@@ -24,7 +24,9 @@ from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_gr
from awx.main.tasks import update_inventory_computed_fields
from awx.main.fields import is_implicit_parent
from awx.main.consumers import emit_channel_notification
from awx.main import consumers
from awx.conf.utils import conf_to_dict
__all__ = []
@@ -41,20 +43,35 @@ def get_current_user_or_none():
return u
def emit_job_event_detail(sender, **kwargs):
def emit_event_detail(serializer, relation, **kwargs):
instance = kwargs['instance']
created = kwargs['created']
if created:
event_serialized = JobEventWebSocketSerializer(instance).data
emit_channel_notification('job_events-' + str(instance.job.id), event_serialized)
event_serializer = serializer(instance)
consumers.emit_channel_notification(
'-'.join([event_serializer.get_group_name(instance), str(getattr(instance, relation))]),
event_serializer.data
)
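A quick illustration of the channel group name emit_event_detail() builds; the serializer group name and event id below are hypothetical:
group_name = 'project_update_events'   # assumed result of get_group_name(instance)
relation_value = 12                    # e.g. getattr(instance, 'project_update_id')
channel = '-'.join([group_name, str(relation_value)])
print(channel)  # project_update_events-12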
def emit_job_event_detail(sender, **kwargs):
emit_event_detail(JobEventWebSocketSerializer, 'job_id', **kwargs)
def emit_ad_hoc_command_event_detail(sender, **kwargs):
instance = kwargs['instance']
created = kwargs['created']
if created:
event_serialized = AdHocCommandEventWebSocketSerializer(instance).data
emit_channel_notification('ad_hoc_command_events-' + str(instance.ad_hoc_command_id), event_serialized)
emit_event_detail(AdHocCommandEventWebSocketSerializer, 'ad_hoc_command_id', **kwargs)
def emit_project_update_event_detail(sender, **kwargs):
emit_event_detail(ProjectUpdateEventWebSocketSerializer, 'project_update_id', **kwargs)
def emit_inventory_update_event_detail(sender, **kwargs):
emit_event_detail(InventoryUpdateEventWebSocketSerializer, 'inventory_update_id', **kwargs)
def emit_system_job_event_detail(sender, **kwargs):
emit_event_detail(SystemJobEventWebSocketSerializer, 'system_job_id', **kwargs)
def emit_update_inventory_computed_fields(sender, **kwargs):
@@ -220,6 +237,9 @@ connect_computed_field_signals()
post_save.connect(emit_job_event_detail, sender=JobEvent)
post_save.connect(emit_ad_hoc_command_event_detail, sender=AdHocCommandEvent)
post_save.connect(emit_project_update_event_detail, sender=ProjectUpdateEvent)
post_save.connect(emit_inventory_update_event_detail, sender=InventoryUpdateEvent)
post_save.connect(emit_system_job_event_detail, sender=SystemJobEvent)
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
m2m_changed.connect(org_admin_edit_members, Role.members.through)
m2m_changed.connect(rbac_activity_stream, Role.members.through)
@@ -284,7 +304,12 @@ def _update_host_last_jhs(host):
except IndexError:
jhs = None
update_fields = []
last_job = jhs.job if jhs else None
try:
last_job = jhs.job if jhs else None
except Job.DoesNotExist:
# The job (and its summaries) have already been/are currently being
# deleted, so there's no need to update the host w/ a reference to it
return
if host.last_job != last_job:
host.last_job = last_job
update_fields.append('last_job')
@@ -392,12 +417,15 @@ def activity_stream_create(sender, instance, created, **kwargs):
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none())
activity_entry.save()
#TODO: Weird situation where cascade SETNULL doesn't work
# it might actually be a good idea to remove all of these FK references since
# we don't really use them anyway.
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
def activity_stream_update(sender, instance, **kwargs):
@@ -423,9 +451,12 @@ def activity_stream_update(sender, instance, **kwargs):
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none())
activity_entry.save()
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
def activity_stream_delete(sender, instance, **kwargs):
@@ -535,8 +566,8 @@ def get_current_user_from_drf_request(sender, **kwargs):
drf_request on the underlying Django Request object.
'''
request = get_current_request()
drf_request = getattr(request, 'drf_request', None)
return (getattr(drf_request, 'user', False), 0)
drf_request_user = getattr(request, 'drf_request_user', False)
return (drf_request_user, 0)
@receiver(pre_delete, sender=Organization)

View File

@@ -2,7 +2,6 @@
# All Rights Reserved.
# Python
import codecs
from collections import OrderedDict
import ConfigParser
import cStringIO
@@ -17,7 +16,6 @@ import tempfile
import time
import traceback
import urlparse
import uuid
from distutils.version import LooseVersion as Version
import yaml
import fcntl
@@ -50,7 +48,7 @@ from awx import celery_app
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS
from awx.main.models import * # noqa
from awx.main.models.unified_jobs import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, TaskCancel, TaskError
from awx.main.exceptions import AwxTaskError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.expect import run, isolated_manager
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
@@ -81,7 +79,7 @@ logger = logging.getLogger('awx.main.tasks')
class LogErrorsTask(Task):
def on_failure(self, exc, task_id, args, kwargs, einfo):
if isinstance(exc, AwxTaskError):
if getattr(exc, 'is_awx_task_error', False):
# Error caused by user / tracked in job output
logger.warning(str(exc))
elif isinstance(self, BaseTask):
@@ -363,8 +361,9 @@ def handle_work_success(self, result, task_actual):
@shared_task(queue='tower', base=LogErrorsTask)
def handle_work_error(request, exc, traceback, task_id, subtasks=None):
logger.debug('Executing error task id %s, subtasks: %s' % (request.id, str(subtasks)))
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
@@ -432,13 +431,22 @@ def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
SmartInventoryMembership.objects.all().delete()
memberships = []
changed_inventories = set([])
for smart_inventory in smart_inventories:
memberships.extend([SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id[0])
for host_id in smart_inventory.hosts.values_list('id')])
add_for_inventory = [
SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id[0])
for host_id in smart_inventory.hosts.values_list('id')
]
memberships.extend(add_for_inventory)
if add_for_inventory:
changed_inventories.add(smart_inventory)
SmartInventoryMembership.objects.bulk_create(memberships)
except IntegrityError as e:
logger.error("Update Host Smart Inventory Memberships failed due to an exception: " + str(e))
return
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
@shared_task(bind=True, queue='tower', base=LogErrorsTask, max_retries=5)
@@ -489,6 +497,7 @@ def with_path_cleanup(f):
class BaseTask(LogErrorsTask):
name = None
model = None
event_model = None
abstract = True
cleanup_paths = []
proot_show_paths = []
@@ -509,17 +518,13 @@ class BaseTask(LogErrorsTask):
if updates:
update_fields = ['modified']
for field, value in updates.items():
if field in ('result_stdout', 'result_traceback'):
if field in ('result_traceback',):
for srch, repl in output_replacements:
value = value.replace(srch, repl)
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
if 'result_stdout_text' in update_fields:
# result_stdout_text is now deprecated, and is no longer
# an actual Django field (it's a property)
update_fields.remove('result_stdout_text')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
@@ -621,10 +626,16 @@ class BaseTask(LogErrorsTask):
'': '',
}
def add_ansible_venv(self, env, add_awx_lib=True):
env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
def add_ansible_venv(self, venv_path, env, add_awx_lib=True):
env['VIRTUAL_ENV'] = venv_path
env['PATH'] = os.path.join(venv_path, "bin") + ":" + env['PATH']
venv_libdir = os.path.join(venv_path, "lib")
if not os.path.exists(venv_libdir):
raise RuntimeError(
'a valid Python virtualenv does not exist at {}'.format(venv_path)
)
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
if os.path.isdir(os.path.join(venv_libdir, "python2.7")):
env['PYTHONPATH'] = os.path.join(venv_libdir, "python2.7", "site-packages") + ":"
@@ -659,25 +670,6 @@ class BaseTask(LogErrorsTask):
env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH
return env
def build_safe_env(self, env, **kwargs):
'''
Build environment dictionary, hiding potentially sensitive information
such as passwords or keys.
'''
hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I)
urlpass_re = re.compile(r'^.*?://[^:]+:(.*?)@.*?$')
safe_env = dict(env)
for k,v in safe_env.items():
if k == 'AWS_ACCESS_KEY_ID':
continue
elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'):
continue
elif hidden_re.search(k):
safe_env[k] = HIDDEN_PASSWORD
elif type(v) == str and urlpass_re.match(v):
safe_env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v)
return safe_env
def should_use_proot(self, instance, **kwargs):
'''
Return whether this task should use proot.
@@ -729,14 +721,19 @@ class BaseTask(LogErrorsTask):
def get_stdout_handle(self, instance):
'''
Return an open file object for capturing stdout.
Return a virtual file object for capturing stdout and events.
'''
if not os.path.exists(settings.JOBOUTPUT_ROOT):
os.makedirs(settings.JOBOUTPUT_ROOT)
stdout_filename = os.path.join(settings.JOBOUTPUT_ROOT, "%d-%s.out" % (instance.pk, str(uuid.uuid1())))
stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')
assert stdout_handle.name == stdout_filename
return stdout_handle
dispatcher = CallbackQueueDispatcher()
def event_callback(event_data):
event_data.setdefault(self.event_data_key, instance.id)
if 'uuid' in event_data:
cache_event = cache.get('ev-{}'.format(event_data['uuid']), None)
if cache_event is not None:
event_data.update(cache_event)
dispatcher.dispatch(event_data)
return OutputEventFilter(event_callback)
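A rough sketch of what the event_callback above does for a single event; the cache layout and payload are illustrative only:
event_data = {'uuid': 'abc-123', 'stdout': 'ok: [localhost]'}
cache = {'ev-abc-123': {'event': 'runner_on_ok', 'counter': 5}}  # hypothetical cached metadata

event_data.setdefault('job_id', 99)  # self.event_data_key for RunJob
cached = cache.get('ev-' + event_data['uuid'])
if cached is not None:
    event_data.update(cached)
# event_data now carries stdout plus the cached event metadata and would be
# handed to CallbackQueueDispatcher().dispatch(event_data)
print(event_data)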
def pre_run_hook(self, instance, **kwargs):
'''
@@ -792,12 +789,14 @@ class BaseTask(LogErrorsTask):
kwargs['private_data_files'] = self.build_private_data_files(instance, **kwargs)
kwargs['passwords'] = self.build_passwords(instance, **kwargs)
kwargs['proot_show_paths'] = self.proot_show_paths
if getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH) != settings.ANSIBLE_VENV_PATH:
kwargs['proot_custom_virtualenv'] = instance.ansible_virtualenv_path
args = self.build_args(instance, **kwargs)
safe_args = self.build_safe_args(instance, **kwargs)
output_replacements = self.build_output_replacements(instance, **kwargs)
cwd = self.build_cwd(instance, **kwargs)
env = self.build_env(instance, **kwargs)
safe_env = self.build_safe_env(env, **kwargs)
safe_env = build_safe_env(env)
# handle custom injectors specified on the CredentialType
credentials = []
@@ -818,10 +817,8 @@ class BaseTask(LogErrorsTask):
if isolated_host is None:
stdout_handle = self.get_stdout_handle(instance)
else:
base_handle = super(self.__class__, self).get_stdout_handle(instance)
stdout_handle = isolated_manager.IsolatedManager.wrap_stdout_handle(
instance, kwargs['private_data_dir'], base_handle,
event_data_key=self.event_data_key)
stdout_handle = isolated_manager.IsolatedManager.get_stdout_handle(
instance, kwargs['private_data_dir'], event_data_key=self.event_data_key)
if self.should_use_proot(instance, **kwargs):
if not check_proot_installed():
raise RuntimeError('bubblewrap is not installed')
@@ -838,7 +835,7 @@ class BaseTask(LogErrorsTask):
args = run.wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
safe_args = run.wrap_args_with_ssh_agent(safe_args, ssh_key_path, ssh_auth_sock)
instance = self.update_model(pk, job_args=json.dumps(safe_args),
job_cwd=cwd, job_env=safe_env, result_stdout_file=stdout_handle.name)
job_cwd=cwd, job_env=safe_env)
expect_passwords = {}
for k, v in self.get_password_prompts(**kwargs).items():
@@ -874,6 +871,12 @@ class BaseTask(LogErrorsTask):
try:
stdout_handle.flush()
stdout_handle.close()
# If stdout_handle was wrapped with event filter, log data
if hasattr(stdout_handle, '_event_ct'):
logger.info('%s finished running, producing %s events.',
instance.log_format, stdout_handle._event_ct)
else:
logger.info('%s finished running', instance.log_format)
except Exception:
pass
@@ -897,9 +900,9 @@ class BaseTask(LogErrorsTask):
# Raising an exception will mark the job as 'failed' in celery
# and will stop a task chain from continuing to execute
if status == 'canceled':
raise TaskCancel(instance, rc)
raise AwxTaskError.TaskCancel(instance, rc)
else:
raise TaskError(instance, rc)
raise AwxTaskError.TaskError(instance, rc)
def get_ssh_key_path(self, instance, **kwargs):
'''
@@ -925,7 +928,8 @@ class RunJob(BaseTask):
name = 'awx.main.tasks.run_job'
model = Job
event_data_key= 'job_id'
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, **kwargs):
'''
@@ -1006,7 +1010,7 @@ class RunJob(BaseTask):
plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
plugin_path = ':'.join(plugin_dirs)
env = super(RunJob, self).build_env(job, **kwargs)
env = self.add_ansible_venv(env, add_awx_lib=kwargs.get('isolated', False))
env = self.add_ansible_venv(job.ansible_virtualenv_path, env, add_awx_lib=kwargs.get('isolated', False))
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
@@ -1026,13 +1030,7 @@ class RunJob(BaseTask):
env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
env['TOWER_HOST'] = settings.TOWER_URL_BASE
env['AWX_HOST'] = settings.TOWER_URL_BASE
env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE
env['CALLBACK_CONNECTION'] = settings.CELERY_BROKER_URL
env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else ''
if getattr(settings, 'JOB_CALLBACK_DEBUG', False):
env['JOB_CALLBACK_DEBUG'] = '2'
elif settings.DEBUG:
env['JOB_CALLBACK_DEBUG'] = '1'
# Create a directory for ControlPath sockets that is unique to each
# job and visible inside the proot environment (when enabled).
@@ -1047,31 +1045,8 @@ class RunJob(BaseTask):
# Set environment variables for cloud credentials.
cred_files = kwargs.get('private_data_files', {}).get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.kind == 'aws':
env['AWS_ACCESS_KEY_ID'] = cloud_cred.username
env['AWS_SECRET_ACCESS_KEY'] = decrypt_field(cloud_cred, 'password')
if len(cloud_cred.security_token) > 0:
env['AWS_SECURITY_TOKEN'] = decrypt_field(cloud_cred, 'security_token')
# FIXME: Add EC2_URL, maybe EC2_REGION!
elif cloud_cred and cloud_cred.kind == 'gce':
env['GCE_EMAIL'] = cloud_cred.username
env['GCE_PROJECT'] = cloud_cred.project
if cloud_cred and cloud_cred.kind == 'gce':
env['GCE_PEM_FILE_PATH'] = cred_files.get(cloud_cred, '')
elif cloud_cred and cloud_cred.kind == 'azure_rm':
if len(cloud_cred.client) and len(cloud_cred.tenant):
env['AZURE_CLIENT_ID'] = cloud_cred.client
env['AZURE_SECRET'] = decrypt_field(cloud_cred, 'secret')
env['AZURE_TENANT'] = cloud_cred.tenant
env['AZURE_SUBSCRIPTION_ID'] = cloud_cred.subscription
else:
env['AZURE_SUBSCRIPTION_ID'] = cloud_cred.subscription
env['AZURE_AD_USER'] = cloud_cred.username
env['AZURE_PASSWORD'] = decrypt_field(cloud_cred, 'password')
elif cloud_cred and cloud_cred.kind == 'vmware':
env['VMWARE_USER'] = cloud_cred.username
env['VMWARE_PASSWORD'] = decrypt_field(cloud_cred, 'password')
env['VMWARE_HOST'] = cloud_cred.host
env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS)
elif cloud_cred and cloud_cred.kind == 'openstack':
env['OS_CLIENT_CONFIG_FILE'] = cred_files.get(cloud_cred, '')
@@ -1159,7 +1134,7 @@ class RunJob(BaseTask):
if kwargs.get('display', False) and job.job_template:
extra_vars.update(json.loads(job.display_extra_vars()))
else:
extra_vars.update(job.extra_vars_dict)
extra_vars.update(json.loads(job.decrypted_extra_vars()))
args.extend(['-e', json.dumps(extra_vars)])
# Add path to playbook (relative to project.local_path).
@@ -1196,29 +1171,6 @@ class RunJob(BaseTask):
d[re.compile(r'Vault password \({}\):\s*?$'.format(vault_id), re.M)] = k
return d
def get_stdout_handle(self, instance):
'''
Wrap stdout file object to capture events.
'''
stdout_handle = super(RunJob, self).get_stdout_handle(instance)
if getattr(settings, 'USE_CALLBACK_QUEUE', False):
dispatcher = CallbackQueueDispatcher()
def job_event_callback(event_data):
event_data.setdefault(self.event_data_key, instance.id)
if 'uuid' in event_data:
cache_event = cache.get('ev-{}'.format(event_data['uuid']), None)
if cache_event is not None:
event_data.update(cache_event)
dispatcher.dispatch(event_data)
else:
def job_event_callback(event_data):
event_data.setdefault(self.event_data_key, instance.id)
JobEvent.create_from_data(**event_data)
return OutputEventFilter(stdout_handle, job_event_callback)
def should_use_proot(self, instance, **kwargs):
'''
Return whether this task should use proot.
@@ -1226,6 +1178,10 @@ class RunJob(BaseTask):
return getattr(settings, 'AWX_PROOT_ENABLED', False)
def pre_run_hook(self, job, **kwargs):
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
if job.project and job.project.scm_type:
job_request_id = '' if self.request.id is None else self.request.id
pu_ig = job.instance_group
@@ -1252,10 +1208,12 @@ class RunJob(BaseTask):
task_instance.run(local_project_sync.id)
job = self.update_model(job.pk, scm_revision=job.project.scm_revision)
except Exception:
job = self.update_model(job.pk, status='failed',
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
('project_update', local_project_sync.name, local_project_sync.id)))
raise
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(job.pk, status='failed',
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
('project_update', local_project_sync.name, local_project_sync.id)))
raise
if job.use_fact_cache and not kwargs.get('isolated'):
job.start_job_fact_cache()
@@ -1277,6 +1235,8 @@ class RunProjectUpdate(BaseTask):
name = 'awx.main.tasks.run_project_update'
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
@property
def proot_show_paths(self):
@@ -1322,11 +1282,18 @@ class RunProjectUpdate(BaseTask):
Build environment dictionary for ansible-playbook.
'''
env = super(RunProjectUpdate, self).build_env(project_update, **kwargs)
env = self.add_ansible_venv(env)
env = self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_PROOT_BASE_PATH
env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else ''
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
env['ANSIBLE_CALLBACK_PLUGINS'] = self.get_path_to('..', 'plugins', 'callback')
env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
return env
def _build_scm_url_extra_vars(self, project_update, **kwargs):
@@ -1464,16 +1431,6 @@ class RunProjectUpdate(BaseTask):
def get_idle_timeout(self):
return getattr(settings, 'PROJECT_UPDATE_IDLE_TIMEOUT', None)
def get_stdout_handle(self, instance):
stdout_handle = super(RunProjectUpdate, self).get_stdout_handle(instance)
pk = instance.pk
def raw_callback(data):
instance_actual = self.update_model(pk)
result_stdout_text = instance_actual.result_stdout_text + data
self.update_model(pk, result_stdout_text=result_stdout_text)
return OutputEventFilter(stdout_handle, raw_callback=raw_callback)
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
project_request_id = '' if self.request.id is None else self.request.id
scm_revision = project_update.project.scm_revision
@@ -1519,11 +1476,11 @@ class RunProjectUpdate(BaseTask):
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag or local_inv_update.cancel_flag:
if not project_update.cancel_flag:
self.update_model(project_update.pk, cancel_flag=True, job_explanation=_(
'Dependent inventory update {} was canceled.'.format(local_inv_update.name)))
break # Stop rest of updates if project or inventory update was canceled
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
@@ -1599,6 +1556,8 @@ class RunInventoryUpdate(BaseTask):
name = 'awx.main.tasks.run_inventory_update'
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, **kwargs):
"""
@@ -1764,7 +1723,7 @@ class RunInventoryUpdate(BaseTask):
cp.set(section, 'ssl_verify', "false")
cloudforms_opts = dict(inventory_update.source_vars_dict.items())
for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags']:
for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags', 'suffix']:
if opt in cloudforms_opts:
cp.set(section, opt, cloudforms_opts[opt])
@@ -1840,45 +1799,39 @@ class RunInventoryUpdate(BaseTask):
# The inventory modules are vendored in AWX in the
# `awx/plugins/inventory` directory; those files should be kept in
# sync with those in Ansible core at all times.
passwords = kwargs.get('passwords', {})
cred_data = kwargs.get('private_data_files', {}).get('credentials', '')
cloud_credential = cred_data.get(inventory_update.credential, '')
if inventory_update.source == 'ec2':
if passwords.get('source_username', '') and passwords.get('source_password', ''):
env['AWS_ACCESS_KEY_ID'] = passwords['source_username']
env['AWS_SECRET_ACCESS_KEY'] = passwords['source_password']
if len(passwords['source_security_token']) > 0:
env['AWS_SECURITY_TOKEN'] = passwords['source_security_token']
env['EC2_INI_PATH'] = cloud_credential
elif inventory_update.source == 'vmware':
env['VMWARE_INI_PATH'] = cloud_credential
elif inventory_update.source == 'azure_rm':
if len(passwords.get('source_client', '')) and \
len(passwords.get('source_tenant', '')):
env['AZURE_CLIENT_ID'] = passwords.get('source_client', '')
env['AZURE_SECRET'] = passwords.get('source_secret', '')
env['AZURE_TENANT'] = passwords.get('source_tenant', '')
env['AZURE_SUBSCRIPTION_ID'] = passwords.get('source_subscription', '')
else:
env['AZURE_SUBSCRIPTION_ID'] = passwords.get('source_subscription', '')
env['AZURE_AD_USER'] = passwords.get('source_username', '')
env['AZURE_PASSWORD'] = passwords.get('source_password', '')
env['AZURE_INI_PATH'] = cloud_credential
elif inventory_update.source == 'gce':
env['GCE_EMAIL'] = passwords.get('source_username', '')
env['GCE_PROJECT'] = passwords.get('source_project', '')
env['GCE_PEM_FILE_PATH'] = cloud_credential
env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else ''
elif inventory_update.source == 'openstack':
env['OS_CLIENT_CONFIG_FILE'] = cloud_credential
elif inventory_update.source == 'satellite6':
env['FOREMAN_INI_PATH'] = cloud_credential
elif inventory_update.source == 'cloudforms':
env['CLOUDFORMS_INI_PATH'] = cloud_credential
ini_mapping = {
'ec2': 'EC2_INI_PATH',
'vmware': 'VMWARE_INI_PATH',
'azure_rm': 'AZURE_INI_PATH',
'gce': 'GCE_PEM_FILE_PATH',
'openstack': 'OS_CLIENT_CONFIG_FILE',
'satellite6': 'FOREMAN_INI_PATH',
'cloudforms': 'CLOUDFORMS_INI_PATH'
}
if inventory_update.source in ini_mapping:
cred_data = kwargs.get('private_data_files', {}).get('credentials', '')
env[ini_mapping[inventory_update.source]] = cred_data.get(inventory_update.credential, '')
if inventory_update.source == 'gce':
env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else '' # noqa
# by default, the GCE inventory source caches results on disk for
# 5 minutes; disable this behavior
cp = ConfigParser.ConfigParser()
cp.add_section('cache')
cp.set('cache', 'cache_max_age', '0')
handle, path = tempfile.mkstemp(dir=kwargs.get('private_data_dir', None))
cp.write(os.fdopen(handle, 'w'))
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
env['GCE_INI_PATH'] = path
elif inventory_update.source in ['scm', 'custom']:
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST:
env[str(env_k)] = unicode(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'tower':
env['TOWER_INVENTORY'] = inventory_update.instance_filters
env['TOWER_LICENSE_TYPE'] = get_licenser().validate()['license_type']
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
# add private_data_files
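A condensed illustration of the table-driven dispatch above; the source name and credential file path are examples:
ini_mapping = {'ec2': 'EC2_INI_PATH', 'satellite6': 'FOREMAN_INI_PATH'}

env = {}
source = 'satellite6'                    # hypothetical inventory source
credential_file = '/tmp/awx_cred_12345'  # hypothetical path from build_private_data
if source in ini_mapping:
    env[ini_mapping[source]] = credential_file
print(env)  # {'FOREMAN_INI_PATH': '/tmp/awx_cred_12345'}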
@@ -1954,16 +1907,6 @@ class RunInventoryUpdate(BaseTask):
args.append('--traceback')
return args
def get_stdout_handle(self, instance):
stdout_handle = super(RunInventoryUpdate, self).get_stdout_handle(instance)
pk = instance.pk
def raw_callback(data):
instance_actual = self.update_model(pk)
result_stdout_text = instance_actual.result_stdout_text + data
self.update_model(pk, result_stdout_text=result_stdout_text)
return OutputEventFilter(stdout_handle, raw_callback=raw_callback)
def build_cwd(self, inventory_update, **kwargs):
return self.get_path_to('..', 'plugins', 'inventory')
@@ -2010,6 +1953,7 @@ class RunAdHocCommand(BaseTask):
name = 'awx.main.tasks.run_ad_hoc_command'
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, **kwargs):
@@ -2057,7 +2001,7 @@ class RunAdHocCommand(BaseTask):
'''
plugin_dir = self.get_path_to('..', 'plugins', 'callback')
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, **kwargs)
env = self.add_ansible_venv(env)
env = self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
@@ -2066,14 +2010,8 @@ class RunAdHocCommand(BaseTask):
env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_dir
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal' # Hardcoded by Ansible for ad-hoc commands (either minimal or oneline).
env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE
env['CALLBACK_CONNECTION'] = settings.CELERY_BROKER_URL
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else ''
if getattr(settings, 'JOB_CALLBACK_DEBUG', False):
env['JOB_CALLBACK_DEBUG'] = '2'
elif settings.DEBUG:
env['JOB_CALLBACK_DEBUG'] = '1'
# Specify empty SSH args (should disable ControlPersist entirely for
# ad hoc commands).
@@ -2124,14 +2062,27 @@ class RunAdHocCommand(BaseTask):
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
# Define special extra_vars for AWX, combine with ad_hoc_command.extra_vars
extra_vars = {
'tower_job_id': ad_hoc_command.pk,
'awx_job_id': ad_hoc_command.pk,
}
if ad_hoc_command.created_by:
extra_vars.update({
'tower_user_id': ad_hoc_command.created_by.pk,
'tower_user_name': ad_hoc_command.created_by.username,
'awx_user_id': ad_hoc_command.created_by.pk,
'awx_user_name': ad_hoc_command.created_by.username,
})
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(removed_vars)))
args.extend(['-e', json.dumps(ad_hoc_command.extra_vars_dict)])
extra_vars.update(ad_hoc_command.extra_vars_dict)
args.extend(['-e', json.dumps(extra_vars)])
args.extend(['-m', ad_hoc_command.module_name])
args.extend(['-a', ad_hoc_command.module_args])
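As an illustration of the merge above, the -e payload for an ad hoc command launched by a user now looks roughly like this (IDs, username, and the user-supplied variable are example values):
import json

extra_vars = {
    'tower_job_id': 42, 'awx_job_id': 42,             # always present
    'tower_user_id': 7, 'tower_user_name': 'alice',   # only when created_by is set
    'awx_user_id': 7, 'awx_user_name': 'alice',
}
extra_vars.update({'package_name': 'vim'})  # ad_hoc_command.extra_vars_dict (ansible_* keys are rejected)
print(json.dumps(extra_vars))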
@@ -2160,29 +2111,6 @@ class RunAdHocCommand(BaseTask):
d[re.compile(r'Password:\s*?$', re.M)] = 'ssh_password'
return d
def get_stdout_handle(self, instance):
'''
Wrap stdout file object to capture events.
'''
stdout_handle = super(RunAdHocCommand, self).get_stdout_handle(instance)
if getattr(settings, 'USE_CALLBACK_QUEUE', False):
dispatcher = CallbackQueueDispatcher()
def ad_hoc_command_event_callback(event_data):
event_data.setdefault(self.event_data_key, instance.id)
if 'uuid' in event_data:
cache_event = cache.get('ev-{}'.format(event_data['uuid']), None)
if cache_event is not None:
event_data.update(cache_event)
dispatcher.dispatch(event_data)
else:
def ad_hoc_command_event_callback(event_data):
event_data.setdefault(self.event_data_key, instance.id)
AdHocCommandEvent.create_from_data(**event_data)
return OutputEventFilter(stdout_handle, ad_hoc_command_event_callback)
def should_use_proot(self, instance, **kwargs):
'''
Return whether this task should use proot.
@@ -2194,6 +2122,8 @@ class RunSystemJob(BaseTask):
name = 'awx.main.tasks.run_system_job'
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_args(self, system_job, **kwargs):
args = ['awx-manage', system_job.job_type]
@@ -2220,16 +2150,6 @@ class RunSystemJob(BaseTask):
logger.exception("%s Failed to parse system job", system_job.log_format)
return args
def get_stdout_handle(self, instance):
stdout_handle = super(RunSystemJob, self).get_stdout_handle(instance)
pk = instance.pk
def raw_callback(data):
instance_actual = self.update_model(pk)
result_stdout_text = instance_actual.result_stdout_text + data
self.update_model(pk, result_stdout_text=result_stdout_text)
return OutputEventFilter(stdout_handle, raw_callback=raw_callback)
def build_env(self, instance, **kwargs):
env = super(RunSystemJob, self).build_env(instance,
**kwargs)

View File

@@ -0,0 +1,163 @@
dn: dc=ansible,dc=com
dc: ansible
description: My wonderful company as much text as you want to place
in this line up to 32K continuation data for the line above must
have <CR> or <CR><LF> i.e. ENTER work
on both Windows and *nix system - new line MUST begin with ONE SPACE
objectClass: dcObject
objectClass: organization
o: ansible.com
# groups
dn: ou=groups,dc=ansible,dc=com
objectClass: top
objectClass: organizationalUnit
ou: groups
# group: Superusers
dn: cn=superusers,ou=groups,dc=ansible,dc=com
objectClass: top
objectClass: groupOfNames
cn: superusers
member: cn=super_user1,ou=people,dc=ansible,dc=com
# group: Engineering
dn: cn=engineering,ou=groups,dc=ansible,dc=com
objectClass: top
objectClass: groupOfNames
cn: engineering
member: cn=eng_admin1,ou=people,dc=ansible,dc=com
member: cn=eng_user1,ou=people,dc=ansible,dc=com
member: cn=eng_user2,ou=people,dc=ansible,dc=com
dn: cn=engineering_admins,ou=groups,dc=ansible,dc=com
objectClass: top
objectClass: groupOfNames
cn: engineering_admins
member: cn=eng_admin1,ou=people,dc=ansible,dc=com
# group: Sales
dn: cn=sales,ou=groups,dc=ansible,dc=com
objectClass: top
objectClass: groupOfNames
cn: sales
member: cn=sales_user1,ou=people,dc=ansible,dc=com
member: cn=sales_user2,ou=people,dc=ansible,dc=com
# group: IT
dn: cn=it,ou=groups,dc=ansible,dc=com
objectClass: top
objectClass: groupOfNames
cn: it
member: cn=it_user1,ou=people,dc=ansible,dc=com
member: cn=it_user2,ou=people,dc=ansible,dc=com
# users
dn: ou=people,dc=ansible,dc=com
objectClass: top
objectClass: organizationalUnit
ou: people
# users - superusers
dn: cn=super_user1,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: super_user1
sn: User 1
givenName: Super
mail: super_user1@ansible.com
userPassword: password
# users - engineering
dn: cn=eng_user1,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: eng_user1
sn: User 1
givenName: Engineering
mail: eng_user1@ansible.com
userPassword: password
dn: cn=eng_user2,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: eng_user2
sn: User 2
givenName: Engineering
mail: eng_user2@ansible.com
userPassword: password
dn: cn=eng_admin1,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: eng_admin1
sn: Admin 1
givenName: Engineering
mail: eng_admin1@ansible.com
userPassword: password
# users - IT
dn: cn=it_user1,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: it_user1
sn: Technology User 1
givenName: Information
mail: it_user1@ansible.com
userPassword: password
dn: cn=it_user2,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: it_user2
sn: Technology User 2
givenName: Information
mail: it_user2@ansible.com
userPassword: password
# users - Sales
dn: cn=sales_user1,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: sales_user1
sn: Person 1
givenName: Sales
mail: sales_user1@ansible.com
userPassword: password
dn: cn=sales_user2,ou=people,dc=ansible,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: sales_user2
sn: Person 2
givenName: Sales
mail: sales_user2@ansible.com
userPassword: password
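A hedged sketch of how a client could check group membership against this sample directory once it is loaded into an LDAP server; the server URL and the python ldap3 dependency are assumptions, not part of the changeset:
from ldap3 import Server, Connection, ALL

server = Server('ldap://localhost:389', get_info=ALL)  # assumed test server
conn = Connection(server,
                  user='cn=eng_admin1,ou=people,dc=ansible,dc=com',
                  password='password',
                  auto_bind=True)

# which groups is eng_user1 a member of?
conn.search('ou=groups,dc=ansible,dc=com',
            '(member=cn=eng_user1,ou=people,dc=ansible,dc=com)',
            attributes=['cn'])
for entry in conn.entries:
    print(entry.cn)  # expect: engineering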

View File

@@ -0,0 +1,78 @@
dn: dc=example,dc=com
dc: example
description: My wonderful company as much text as you want to place
in this line up to 32K continuation data for the line above must
have <CR> or <CR><LF> i.e. ENTER work
on both Windows and *nix system - new line MUST begin with ONE SPACE
objectClass: dcObject
objectClass: organization
o: example.com
# groups
dn: ou=groups,dc=example,dc=com
objectClass: top
objectClass: organizationalUnit
ou: groups
# group: Superusers
dn: cn=superusers,ou=groups,dc=example,dc=com
objectClass: top
objectClass: groupOfNames
cn: superusers
member: cn=super_user1,ou=people,dc=example,dc=com
# group: Sales
dn: cn=sales,ou=groups,dc=example,dc=com
objectClass: top
objectClass: groupOfNames
cn: sales
member: cn=sales_user1,ou=people,dc=example,dc=com
member: cn=sales_user2,ou=people,dc=example,dc=com
# users
dn: ou=people,dc=example,dc=com
objectClass: top
objectClass: organizationalUnit
ou: people
# users - superusers
dn: cn=super_user1,ou=people,dc=example,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: super_user1
sn: User 1
givenName: Super
mail: super_user1@example.com
userPassword: password
# users - Sales
dn: cn=sales_user1,ou=people,dc=example,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: sales_user1
sn: Person 1
givenName: Sales
mail: sales_user1@example.com
userPassword: password
dn: cn=sales_user2,ou=people,dc=example,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: sales_user2
sn: Person 2
givenName: Sales
mail: sales_user2@example.com
userPassword: password

View File

@@ -0,0 +1,163 @@
dn: dc=redhat,dc=com
dc: redhat
description: My wonderful company as much text as you want to place
in this line up to 32K continuation data for the line above must
have <CR> or <CR><LF> i.e. ENTER work
on both Windows and *nix system - new line MUST begin with ONE SPACE
objectClass: dcObject
objectClass: organization
o: redhat.com
# groups
dn: ou=groups,dc=redhat,dc=com
objectClass: top
objectClass: organizationalUnit
ou: groups
# group: Superusers
dn: cn=superusers,ou=groups,dc=redhat,dc=com
objectClass: top
objectClass: groupOfNames
cn: superusers
member: cn=super_user1,ou=people,dc=redhat,dc=com
# group: Engineering
dn: cn=engineering,ou=groups,dc=redhat,dc=com
objectClass: top
objectClass: groupOfNames
cn: engineering
member: cn=eng_admin1,ou=people,dc=redhat,dc=com
member: cn=eng_user1,ou=people,dc=redhat,dc=com
member: cn=eng_user2,ou=people,dc=redhat,dc=com
dn: cn=engineering_admins,ou=groups,dc=redhat,dc=com
objectClass: top
objectClass: groupOfNames
cn: engineering_admins
member: cn=eng_admin1,ou=people,dc=redhat,dc=com
# group: Sales
dn: cn=sales,ou=groups,dc=redhat,dc=com
objectClass: top
objectClass: groupOfNames
cn: sales
member: cn=sales_user1,ou=people,dc=redhat,dc=com
member: cn=sales_user2,ou=people,dc=redhat,dc=com
# group: IT
dn: cn=it,ou=groups,dc=redhat,dc=com
objectClass: top
objectClass: groupOfNames
cn: it
member: cn=it_user1,ou=people,dc=redhat,dc=com
member: cn=it_user2,ou=people,dc=redhat,dc=com
# users
dn: ou=people,dc=redhat,dc=com
objectClass: top
objectClass: organizationalUnit
ou: people
# users - superusers
dn: cn=super_user1,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: super_user1
sn: User 1
givenName: Super
mail: super_user1@redhat.com
userPassword: password
# users - engineering
dn: cn=eng_user1,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: eng_user1
sn: User 1
givenName: Engineering
mail: eng_user1@redhat.com
userPassword: password
dn: cn=eng_user2,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: eng_user2
sn: User 2
givenName: Engineering
mail: eng_user2@redhat.com
userPassword: password
dn: cn=eng_admin1,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: eng_admin1
sn: Admin 1
givenName: Engineering
mail: eng_admin1@redhat.com
userPassword: password
# users - IT
dn: cn=it_user1,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: it_user1
sn: Technology User 1
givenName: Information
mail: it_user1@redhat.com
userPassword: password
dn: cn=it_user2,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: it_user2
sn: Technology User 2
givenName: Information
mail: it_user2@redhat.com
userPassword: password
# users - Sales
dn: cn=sales_user1,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: sales_user1
sn: Person 1
givenName: Sales
mail: sales_user1@redhat.com
userPassword: password
dn: cn=sales_user2,ou=people,dc=redhat,dc=com
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
cn: sales_user2
sn: Person 2
givenName: Sales
mail: sales_user2@redhat.com
userPassword: password

View File

@@ -5,6 +5,7 @@ from awx.api.versioning import reverse
from awx.main.middleware import ActivityStreamMiddleware
from awx.main.models.activity_stream import ActivityStream
from awx.main.access import ActivityStreamAccess
from awx.conf.models import Setting
def mock_feature_enabled(feature):
@@ -47,6 +48,26 @@ def test_basic_fields(monkeypatch, organization, get, user, settings):
assert response.data['summary_fields']['organization'][0]['name'] == 'test-org'
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled)
@pytest.mark.django_db
def test_ctint_activity_stream(monkeypatch, get, user, settings):
Setting.objects.create(key="FOO", value="bar")
settings.ACTIVITY_STREAM_ENABLED = True
u = user('admin', True)
activity_stream = ActivityStream.objects.filter(setting__icontains="FOO").latest('pk')
activity_stream.actor = u
activity_stream.save()
aspk = activity_stream.pk
url = reverse('api:activity_stream_detail', kwargs={'pk': aspk})
response = get(url, user('admin', True))
assert response.status_code == 200
assert 'summary_fields' in response.data
assert 'setting' in response.data['summary_fields']
assert response.data['summary_fields']['setting'][0]['name'] == 'FOO'
@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled)
@pytest.mark.django_db
def test_middleware_actor_added(monkeypatch, post, get, user, settings):

View File

@@ -4,7 +4,9 @@ import re
import mock # noqa
import pytest
from awx.main.models.credential import Credential, CredentialType
from awx.main.models import (AdHocCommand, Credential, CredentialType, Job, JobTemplate,
Inventory, InventorySource, Project,
WorkflowJobNode)
from awx.main.utils import decrypt_field
from awx.api.versioning import reverse
@@ -12,6 +14,17 @@ EXAMPLE_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nxyz==\n-----END PRIVATE KEY-
EXAMPLE_ENCRYPTED_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nxyz==\n-----END PRIVATE KEY-----'
@pytest.mark.django_db
def test_idempotent_credential_type_setup():
assert CredentialType.objects.count() == 0
CredentialType.setup_tower_managed_defaults()
total = CredentialType.objects.count()
assert total > 0
CredentialType.setup_tower_managed_defaults()
assert CredentialType.objects.count() == total
@pytest.mark.django_db
@pytest.mark.parametrize('kind, total', [
('ssh', 1), ('net', 0)
@@ -575,7 +588,7 @@ def test_create_org_credential_as_admin(post, organization, org_admin, credentia
params['name'] = 'Some name'
params['organization'] = organization.id
response = post(
reverse('api:credential_list'),
reverse('api:credential_list', kwargs={'version': version}),
params,
org_admin
)
@@ -591,7 +604,7 @@ def test_credential_detail(post, get, organization, org_admin, credentialtype_ss
params['name'] = 'Some name'
params['organization'] = organization.id
response = post(
reverse('api:credential_list'),
reverse('api:credential_list', kwargs={'version': version}),
params,
org_admin
)
@@ -1410,7 +1423,17 @@ def test_field_removal(put, organization, admin, credentialtype_ssh, version, pa
@pytest.mark.django_db
def test_credential_type_immutable_in_v2(patch, organization, admin, credentialtype_ssh, credentialtype_aws):
@pytest.mark.parametrize('relation, related_obj', [
['ad_hoc_commands', AdHocCommand()],
['insights_inventories', Inventory()],
['inventorysources', InventorySource()],
['unifiedjobs', Job()],
['unifiedjobtemplates', JobTemplate()],
['projects', Project()],
['workflowjobnodes', WorkflowJobNode()],
])
def test_credential_type_mutability(patch, organization, admin, credentialtype_ssh,
credentialtype_aws, relation, related_obj):
cred = Credential(
credential_type=credentialtype_ssh,
name='Best credential ever',
@@ -1422,19 +1445,39 @@ def test_credential_type_immutable_in_v2(patch, organization, admin, credentialt
)
cred.save()
related_obj.save()
getattr(cred, relation).add(related_obj)
def _change_credential_type():
return patch(
reverse('api:credential_detail', kwargs={'version': 'v2', 'pk': cred.pk}),
{
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': u'jim',
'password': u'pass'
}
},
admin
)
response = _change_credential_type()
assert response.status_code == 400
expected = ['You cannot change the credential type of the credential, '
'as it may break the functionality of the resources using it.']
assert response.data['credential_type'] == expected
response = patch(
reverse('api:credential_detail', kwargs={'version': 'v2', 'pk': cred.pk}),
{
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': u'jim',
'password': u'pass'
}
},
{'name': 'Worst credential ever'},
admin
)
assert response.status_code == 400
assert 'credential_type' in response.data
assert response.status_code == 200
assert Credential.objects.get(pk=cred.pk).name == 'Worst credential ever'
related_obj.delete()
response = _change_credential_type()
assert response.status_code == 200
@pytest.mark.django_db

View File

@@ -60,3 +60,34 @@ def test_proxy_ip_whitelist(get, patch, admin):
REMOTE_HOST='my.proxy.example.org',
HTTP_X_FROM_THE_LOAD_BALANCER='some-actual-ip')
assert middleware.environ['HTTP_X_FROM_THE_LOAD_BALANCER'] == 'some-actual-ip'
@pytest.mark.django_db
class TestDeleteViews:
def test_sublist_delete_permission_check(self, inventory_source, host, rando, delete):
inventory_source.hosts.add(host)
inventory_source.inventory.read_role.members.add(rando)
delete(
reverse(
'api:inventory_source_hosts_list',
kwargs={'version': 'v2', 'pk': inventory_source.pk}
), user=rando, expect=403
)
def test_sublist_delete_functionality(self, inventory_source, host, rando, delete):
inventory_source.hosts.add(host)
inventory_source.inventory.admin_role.members.add(rando)
delete(
reverse(
'api:inventory_source_hosts_list',
kwargs={'version': 'v2', 'pk': inventory_source.pk}
), user=rando, expect=204
)
assert inventory_source.hosts.count() == 0
def test_destroy_permission_check(self, job_factory, system_auditor, delete):
job = job_factory()
resp = delete(
job.get_absolute_url(), user=system_auditor
)
assert resp.status_code == 403

View File

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
import pytest
import mock
@@ -236,6 +237,51 @@ def test_create_inventory_smart_inventory_sources(post, get, inventory, admin_us
assert jdata['count'] == 0
@pytest.mark.django_db
def test_urlencode_host_filter(post, admin_user, organization):
"""
Host filters saved on the model must correspond to the same result
as when that host_filter is used in the URL as a querystring.
That means that URL-encoded patterns like %22 for quotes
must be unescaped as the string is saved to the model.
Expected host filter in this test would match a host such as:
inventory.hosts.create(
ansible_facts={"ansible_distribution_version": "7.4"}
)
"""
# Create smart inventory with host filter that corresponds to querystring
post(
reverse('api:inventory_list'),
data={
'name': 'smart inventory', 'kind': 'smart',
'organization': organization.pk,
'host_filter': 'ansible_facts__ansible_distribution_version=%227.4%22'
},
user=admin_user,
expect=201
)
# Assert that the saved version of host filter has escaped ""
si = Inventory.objects.get(name='smart inventory')
assert si.host_filter == 'ansible_facts__ansible_distribution_version="7.4"'
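The docstring above describes decoding the URL-encoded host_filter before it is stored; a minimal sketch of that decoding with the standard library's unquote, illustrative only and not part of the diff:

# Illustrative sketch: %22 in the submitted querystring value becomes a literal
# double quote in the host_filter saved on the model.
try:
    from urllib import unquote            # Python 2
except ImportError:
    from urllib.parse import unquote      # Python 3

submitted = 'ansible_facts__ansible_distribution_version=%227.4%22'
assert unquote(submitted) == 'ansible_facts__ansible_distribution_version="7.4"'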
@pytest.mark.django_db
def test_host_filter_unicode(post, admin_user, organization):
post(
reverse('api:inventory_list'),
data={
'name': 'smart inventory', 'kind': 'smart',
'organization': organization.pk,
'host_filter': u'ansible_facts__ansible_distribution=レッドハット'
},
user=admin_user,
expect=201
)
si = Inventory.objects.get(name='smart inventory')
assert si.host_filter == u'ansible_facts__ansible_distribution=レッドハット'
@pytest.mark.parametrize("role_field,expected_status_code", [
(None, 403),
('admin_role', 201),

View File

@@ -403,6 +403,22 @@ def test_job_launch_fails_with_missing_multivault_password(machine_credential, v
url = reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk})
resp = get(url, rando, expect=200)
assert {
'credential_type': vault_cred_first.credential_type_id,
'passwords_needed': ['vault_password.abc'],
'vault_id': u'abc',
'name': u'Vault #1',
'id': vault_cred_first.id
} in resp.data['defaults']['credentials']
assert {
'credential_type': vault_cred_second.credential_type_id,
'passwords_needed': ['vault_password.xyz'],
'vault_id': u'xyz',
'name': u'Vault #2',
'id': vault_cred_second.id
} in resp.data['defaults']['credentials']
assert resp.data['passwords_needed_to_start'] == ['vault_password.abc', 'vault_password.xyz']
assert sum([
cred['passwords_needed'] for cred in resp.data['defaults']['credentials']
@@ -544,10 +560,11 @@ def test_callback_accept_prompted_extra_var(mocker, survey_spec_factory, job_tem
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
admin_user, expect=201, format='json')
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'extra_vars': {'survey_var': 4,
'job_launch_var': 3},
'launch_type': 'callback',
'limit': 'single-host'},)
assert JobTemplate.create_unified_job.call_args == ({
'extra_vars': {'survey_var': 4, 'job_launch_var': 3},
'_eager_fields': {'launch_type': 'callback'},
'limit': 'single-host'},
)
mock_job.signal_start.assert_called_once()
@@ -569,8 +586,10 @@ def test_callback_ignore_unprompted_extra_var(mocker, survey_spec_factory, job_t
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
admin_user, expect=201, format='json')
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'launch_type': 'callback',
'limit': 'single-host'},)
assert JobTemplate.create_unified_job.call_args == ({
'_eager_fields': {'launch_type': 'callback'},
'limit': 'single-host'},
)
mock_job.signal_start.assert_called_once()

View File

@@ -1,3 +1,6 @@
import os
from backports.tempfile import TemporaryDirectory
import pytest
# AWX
@@ -7,6 +10,7 @@ from awx.main.models.jobs import Job, JobTemplate
from awx.main.migrations import _save_password_keys as save_password_keys
# Django
from django.conf import settings
from django.apps import apps
@@ -570,3 +574,31 @@ def test_save_survey_passwords_on_migration(job_template_with_survey_passwords):
save_password_keys.migrate_survey_passwords(apps, None)
job = job_template_with_survey_passwords.jobs.all()[0]
assert job.survey_passwords == {'SSN': '$encrypted$', 'secret_key': '$encrypted$'}
@pytest.mark.django_db
def test_job_template_custom_virtualenv(get, patch, organization_factory, job_template_factory):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
admin = objs.superusers.admin
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:job_template_detail', kwargs={'pk': jt.id})
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
@pytest.mark.django_db
def test_job_template_invalid_custom_virtualenv(get, patch, organization_factory,
job_template_factory):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
url = reverse('api:job_template_detail', kwargs={'pk': jt.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=objs.superusers.admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]

View File

@@ -2,15 +2,16 @@
# All Rights Reserved.
# Python
import os
from backports.tempfile import TemporaryDirectory
from django.conf import settings
import pytest
import mock
# Django
from awx.api.versioning import reverse
# AWX
from awx.main.models import * # noqa
from awx.api.versioning import reverse
@pytest.mark.django_db
@@ -188,3 +189,21 @@ def test_delete_organization_xfail1(delete, organization, alice):
@mock.patch('awx.main.access.BaseAccess.check_license', lambda *a, **kw: True)
def test_delete_organization_xfail2(delete, organization):
delete(reverse('api:organization_detail', kwargs={'pk': organization.id}), user=None, expect=401)
@pytest.mark.django_db
def test_organization_custom_virtualenv(get, patch, organization, admin):
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
@pytest.mark.django_db
def test_organization_invalid_custom_virtualenv(get, patch, organization, admin):
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]

View File

@@ -1,5 +1,11 @@
import os
from backports.tempfile import TemporaryDirectory
from django.conf import settings
import pytest
from awx.api.versioning import reverse
@pytest.mark.django_db
class TestInsightsCredential:
@@ -13,3 +19,20 @@ class TestInsightsCredential:
{'credential': scm_credential.id}, admin_user,
expect=400)
@pytest.mark.django_db
def test_project_custom_virtualenv(get, patch, project, admin):
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:project_detail', kwargs={'pk': project.id})
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
@pytest.mark.django_db
def test_project_invalid_custom_virtualenv(get, patch, project, admin):
url = reverse('api:project_detail', kwargs={'pk': project.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]

View File

@@ -8,6 +8,17 @@ from awx.main.models import JobTemplate
RRULE_EXAMPLE = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'
def get_rrule(tz=None):
parts = ['DTSTART']
if tz:
parts.append(';TZID={}'.format(tz))
parts.append(':20300308T050000')
if tz is None:
parts.append('Z')
parts.append(' RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5')
return ''.join(parts)
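For reference, the helper above yields strings of the following shape (worked out from the code, not copied from the diff):

# get_rrule()                    -> 'DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5'
# get_rrule('America/New_York')  -> 'DTSTART;TZID=America/New_York:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5'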
@pytest.mark.django_db
def test_non_job_extra_vars_prohibited(post, project, admin_user):
url = reverse('api:project_schedules_list', kwargs={'pk': project.id})
@@ -32,3 +43,234 @@ def test_valid_survey_answer(post, admin_user, project, inventory, survey_spec_f
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
post(url, {'name': 'test sch', 'rrule': RRULE_EXAMPLE, 'extra_data': '{"var1": 54}'},
admin_user, expect=201)
@pytest.mark.django_db
@pytest.mark.parametrize('rrule, error', [
("", "This field may not be blank"),
("DTSTART:NONSENSE", "Valid DTSTART required in rrule"),
("DTSTART:20300308T050000Z DTSTART:20310308T050000", "Multiple DTSTART is not supported"),
("DTSTART:20300308T050000Z", "RRULE required in rrule"),
("DTSTART:20300308T050000Z RRULE:NONSENSE", "INTERVAL required in rrule"),
("DTSTART:20300308T050000Z RRULE:FREQ=SECONDLY;INTERVAL=5;COUNT=6", "SECONDLY is not supported"),
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=3,4", "Multiple BYMONTHDAYs not supported"), # noqa
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=1,2", "Multiple BYMONTHs not supported"), # noqa
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO", "BYDAY with numeric prefix not supported"), # noqa
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYYEARDAY=100", "BYYEARDAY not supported"), # noqa
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYWEEKNO=20", "BYWEEKNO not supported"),
("DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "COUNT > 999 is unsupported"), # noqa
("DTSTART:20300308T050000Z RRULE:FREQ=REGULARLY;INTERVAL=1", "rrule parsing failed validation: invalid 'FREQ': REGULARLY"), # noqa
("DTSTART;TZID=America/New_York:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1", "rrule parsing failed validation"),
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
("DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1", "more than 1000 events are not allowed"), # noqa
])
def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
job_template = JobTemplate.objects.create(
name='test-jt',
project=project,
playbook='helloworld.yml',
inventory=inventory
)
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
resp = post(url, {
'name': 'Some Schedule',
'rrule': rrule,
}, admin_user, expect=400)
assert error in resp.content
@pytest.mark.django_db
def test_utc_preview(post, admin_user):
url = reverse('api:schedule_rrule')
r = post(url, {'rrule': get_rrule()}, admin_user, expect=200)
assert r.data['utc'] == r.data['local']
assert map(str, r.data['utc']) == [
'2030-03-08 05:00:00+00:00',
'2030-03-09 05:00:00+00:00',
'2030-03-10 05:00:00+00:00',
'2030-03-11 05:00:00+00:00',
'2030-03-12 05:00:00+00:00',
]
@pytest.mark.django_db
def test_nyc_with_dst(post, admin_user):
url = reverse('api:schedule_rrule')
r = post(url, {'rrule': get_rrule('America/New_York')}, admin_user, expect=200)
# March 10, 2030 is when DST takes effect in NYC
assert map(str, r.data['local']) == [
'2030-03-08 05:00:00-05:00',
'2030-03-09 05:00:00-05:00',
'2030-03-10 05:00:00-04:00',
'2030-03-11 05:00:00-04:00',
'2030-03-12 05:00:00-04:00',
]
assert map(str, r.data['utc']) == [
'2030-03-08 10:00:00+00:00',
'2030-03-09 10:00:00+00:00',
'2030-03-10 09:00:00+00:00',
'2030-03-11 09:00:00+00:00',
'2030-03-12 09:00:00+00:00',
]
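A minimal sketch of the offset change behind the expected values above, assuming pytz (which these schedule tests already rely on); illustrative only:

# Illustrative only: America/New_York moves from EST (UTC-05:00) to EDT
# (UTC-04:00) when DST begins on March 10, 2030.
from datetime import datetime, timedelta
import pytz

nyc = pytz.timezone('America/New_York')
assert nyc.localize(datetime(2030, 3, 9, 5, 0)).utcoffset() == timedelta(hours=-5)
assert nyc.localize(datetime(2030, 3, 11, 5, 0)).utcoffset() == timedelta(hours=-4)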
@pytest.mark.django_db
def test_phoenix_without_dst(post, admin_user):
# The state of Arizona (aside from a few Native American territories) does
# not observe DST
url = reverse('api:schedule_rrule')
r = post(url, {'rrule': get_rrule('America/Phoenix')}, admin_user, expect=200)
# March 10, 2030 is when DST takes effect in NYC; Phoenix stays at UTC-07:00 year-round
assert map(str, r.data['local']) == [
'2030-03-08 05:00:00-07:00',
'2030-03-09 05:00:00-07:00',
'2030-03-10 05:00:00-07:00',
'2030-03-11 05:00:00-07:00',
'2030-03-12 05:00:00-07:00',
]
assert map(str, r.data['utc']) == [
'2030-03-08 12:00:00+00:00',
'2030-03-09 12:00:00+00:00',
'2030-03-10 12:00:00+00:00',
'2030-03-11 12:00:00+00:00',
'2030-03-12 12:00:00+00:00',
]
@pytest.mark.django_db
def test_interval_by_local_day(post, admin_user):
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYSETPOS=1;COUNT=4'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
# March 10, 2030 is when DST takes effect in NYC
assert map(str, r.data['local']) == [
'2030-02-02 21:00:00-05:00',
'2030-03-02 21:00:00-05:00',
'2030-04-06 21:00:00-04:00',
'2030-05-04 21:00:00-04:00',
]
assert map(str, r.data['utc']) == [
'2030-02-03 02:00:00+00:00',
'2030-03-03 02:00:00+00:00',
'2030-04-07 01:00:00+00:00',
'2030-05-05 01:00:00+00:00',
]
@pytest.mark.django_db
def test_weekday_timezone_boundary(post, admin_user):
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20300101T210000 RRULE:FREQ=WEEKLY;BYDAY=TU;INTERVAL=1;COUNT=3'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
assert map(str, r.data['local']) == [
'2030-01-01 21:00:00-05:00',
'2030-01-08 21:00:00-05:00',
'2030-01-15 21:00:00-05:00',
]
assert map(str, r.data['utc']) == [
'2030-01-02 02:00:00+00:00',
'2030-01-09 02:00:00+00:00',
'2030-01-16 02:00:00+00:00',
]
@pytest.mark.django_db
def test_first_monthly_weekday_timezone_boundary(post, admin_user):
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20300101T210000 RRULE:FREQ=MONTHLY;BYDAY=SU;BYSETPOS=1;INTERVAL=1;COUNT=3'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
assert map(str, r.data['local']) == [
'2030-01-06 21:00:00-05:00',
'2030-02-03 21:00:00-05:00',
'2030-03-03 21:00:00-05:00',
]
assert map(str, r.data['utc']) == [
'2030-01-07 02:00:00+00:00',
'2030-02-04 02:00:00+00:00',
'2030-03-04 02:00:00+00:00',
]
@pytest.mark.django_db
def test_annual_timezone_boundary(post, admin_user):
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20301231T230000 RRULE:FREQ=YEARLY;INTERVAL=1;COUNT=3'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
assert map(str, r.data['local']) == [
'2030-12-31 23:00:00-05:00',
'2031-12-31 23:00:00-05:00',
'2032-12-31 23:00:00-05:00',
]
assert map(str, r.data['utc']) == [
'2031-01-01 04:00:00+00:00',
'2032-01-01 04:00:00+00:00',
'2033-01-01 04:00:00+00:00',
]
@pytest.mark.django_db
def test_dst_phantom_hour(post, admin_user):
# The DST period in the United States begins at 02:00 (2 am) local time, so
# the hour from 2:00:00 to 2:59:59 does not exist in the night of the
# switch.
# Three Sundays, starting 2:30AM America/New_York, starting Mar 3, 2030,
# should _not_ include Mar 10, 2030 @ 2:30AM (because it doesn't exist)
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20300303T023000 RRULE:FREQ=WEEKLY;BYDAY=SU;INTERVAL=1;COUNT=3'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
assert map(str, r.data['local']) == [
'2030-03-03 02:30:00-05:00',
'2030-03-17 02:30:00-04:00', # Skip 3/10 because 3/10 @ 2:30AM isn't a real date
]
assert map(str, r.data['utc']) == [
'2030-03-03 07:30:00+00:00',
'2030-03-17 06:30:00+00:00', # Skip 3/10 because 3/10 @ 2:30AM isn't a real date
]
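The skipped occurrence above is a non-existent local time; a minimal pytz sketch of why, illustrative only:

# Illustrative only: 2030-03-10 02:30 never happens in America/New_York because
# clocks jump from 02:00 straight to 03:00 when DST begins.
from datetime import datetime
import pytz

nyc = pytz.timezone('America/New_York')
try:
    nyc.localize(datetime(2030, 3, 10, 2, 30), is_dst=None)
except pytz.exceptions.NonExistentTimeError:
    pass  # the "phantom hour" the comments above describe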
@pytest.mark.django_db
def test_months_with_31_days(post, admin_user):
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20300101T000000 RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=31;COUNT=7'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
# 30 days have September, April, June, and November...
assert map(str, r.data['local']) == [
'2030-01-31 00:00:00-05:00',
'2030-03-31 00:00:00-04:00',
'2030-05-31 00:00:00-04:00',
'2030-07-31 00:00:00-04:00',
'2030-08-31 00:00:00-04:00',
'2030-10-31 00:00:00-04:00',
'2030-12-31 00:00:00-05:00',
]
@pytest.mark.django_db
def test_dst_rollback_duplicates(post, admin_user):
# From Nov 2 -> Nov 3, 2030, daylight savings ends and we "roll back" an hour.
# Make sure we don't "double count" duplicate times in the "rolled back"
# hour.
url = reverse('api:schedule_rrule')
rrule = 'DTSTART;TZID=America/New_York:20301102T233000 RRULE:FREQ=HOURLY;INTERVAL=1;COUNT=5'
r = post(url, {'rrule': rrule}, admin_user, expect=200)
assert map(str, r.data['local']) == [
'2030-11-02 23:30:00-04:00',
'2030-11-03 00:30:00-04:00',
'2030-11-03 01:30:00-04:00',
'2030-11-03 02:30:00-05:00',
'2030-11-03 03:30:00-05:00',
]
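Conversely, the rolled-back hour above contains local times that occur twice; a minimal pytz sketch, illustrative only:

# Illustrative only: 2030-11-03 01:30 occurs twice in America/New_York (once in
# EDT, once in EST), which is why duplicates must not be double-counted.
from datetime import datetime
import pytz

nyc = pytz.timezone('America/New_York')
try:
    nyc.localize(datetime(2030, 11, 3, 1, 30), is_dst=None)
except pytz.exceptions.AmbiguousTimeError:
    pass  # the ambiguous "rolled back" hour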

View File

@@ -304,3 +304,19 @@ def test_isolated_keys_readonly(get, patch, delete, admin, key, expected):
delete(url, user=admin)
assert getattr(settings, key) == 'secret'
@pytest.mark.django_db
def test_isolated_key_flag_readonly(get, patch, delete, admin):
settings.AWX_ISOLATED_KEY_GENERATION = True
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'jobs'})
resp = get(url, user=admin)
assert resp.data['AWX_ISOLATED_KEY_GENERATION'] is True
patch(url, user=admin, data={
'AWX_ISOLATED_KEY_GENERATION': False
})
assert settings.AWX_ISOLATED_KEY_GENERATION is True
delete(url, user=admin)
assert settings.AWX_ISOLATED_KEY_GENERATION is True

View File

@@ -111,6 +111,241 @@ def test_survey_spec_sucessful_creation(survey_spec_factory, job_template, post,
assert updated_jt.survey_spec == survey_input_data
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
@pytest.mark.parametrize('with_default', [True, False])
@pytest.mark.parametrize('value, status', [
('SUPERSECRET', 201),
(['some', 'invalid', 'list'], 400),
({'some-invalid': 'dict'}, 400),
(False, 400)
])
def test_survey_spec_passwords_are_encrypted_on_launch(job_template_factory, post, admin_user, with_default, value, status):
objects = job_template_factory('jt', organization='org1', project='prj',
inventory='inv', credential='cred')
job_template = objects.job_template
job_template.survey_enabled = True
job_template.save()
input_data = {
'description': 'A survey',
'spec': [{
'index': 0,
'question_name': 'What is your password?',
'required': True,
'variable': 'secret_value',
'type': 'password'
}],
'name': 'my survey'
}
if with_default:
input_data['spec'][0]['default'] = 'some-default'
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=200)
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
dict(extra_vars=dict(secret_value=value)), admin_user, expect=status)
if status == 201:
job = Job.objects.get(pk=resp.data['id'])
assert json.loads(job.extra_vars)['secret_value'].startswith('$encrypted$')
assert json.loads(job.decrypted_extra_vars()) == {
'secret_value': value
}
else:
assert "for 'secret_value' expected to be a string." in json.dumps(resp.data)
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
def test_survey_spec_passwords_with_empty_default(job_template_factory, post, admin_user):
objects = job_template_factory('jt', organization='org1', project='prj',
inventory='inv', credential='cred')
job_template = objects.job_template
job_template.survey_enabled = True
job_template.save()
input_data = {
'description': 'A survey',
'spec': [{
'index': 0,
'question_name': 'What is your password?',
'required': False,
'variable': 'secret_value',
'type': 'password',
'default': ''
}],
'name': 'my survey'
}
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=200)
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
{}, admin_user, expect=201)
job = Job.objects.get(pk=resp.data['id'])
assert json.loads(job.extra_vars)['secret_value'] == ''
assert json.loads(job.decrypted_extra_vars()) == {
'secret_value': ''
}
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
@pytest.mark.parametrize('default, launch_value, expected_extra_vars, status', [
['', '$encrypted$', {'secret_value': ''}, 201],
['', 'y', {'secret_value': 'y'}, 201],
['', 'y' * 100, None, 400],
[None, '$encrypted$', {}, 201],
[None, 'y', {'secret_value': 'y'}, 201],
[None, 'y' * 100, {}, 400],
['x', '$encrypted$', {'secret_value': 'x'}, 201],
['x', 'y', {'secret_value': 'y'}, 201],
['x', 'y' * 100, {}, 400],
['x' * 100, '$encrypted$', {}, 201],
['x' * 100, 'y', {'secret_value': 'y'}, 201],
['x' * 100, 'y' * 100, {}, 400],
])
def test_survey_spec_passwords_with_default_optional(job_template_factory, post, admin_user,
default, launch_value,
expected_extra_vars, status):
objects = job_template_factory('jt', organization='org1', project='prj',
inventory='inv', credential='cred')
job_template = objects.job_template
job_template.survey_enabled = True
job_template.save()
input_data = {
'description': 'A survey',
'spec': [{
'index': 0,
'question_name': 'What is your password?',
'required': False,
'variable': 'secret_value',
'type': 'password',
'max': 3
}],
'name': 'my survey'
}
if default is not None:
input_data['spec'][0]['default'] = default
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=200)
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
data={'extra_vars': {'secret_value': launch_value}}, user=admin_user, expect=status)
if status == 201:
job = Job.objects.get(pk=resp.data['job'])
assert json.loads(job.decrypted_extra_vars()) == expected_extra_vars
if default:
assert default not in json.loads(job.extra_vars).values()
assert launch_value not in json.loads(job.extra_vars).values()
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
@pytest.mark.parametrize('default, launch_value, expected_extra_vars, status', [
['', '$encrypted$', {'secret_value': ''}, 201],
[None, '$encrypted$', {}, 400],
[None, 'y', {'secret_value': 'y'}, 201],
])
def test_survey_spec_passwords_with_default_required(job_template_factory, post, admin_user,
default, launch_value,
expected_extra_vars, status):
objects = job_template_factory('jt', organization='org1', project='prj',
inventory='inv', credential='cred')
job_template = objects.job_template
job_template.survey_enabled = True
job_template.save()
input_data = {
'description': 'A survey',
'spec': [{
'index': 0,
'question_name': 'What is your password?',
'required': True,
'variable': 'secret_value',
'type': 'password',
'max': 3
}],
'name': 'my survey'
}
if default is not None:
input_data['spec'][0]['default'] = default
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=200)
resp = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
data={'extra_vars': {'secret_value': launch_value}}, user=admin_user, expect=status)
if status == 201:
job = Job.objects.get(pk=resp.data['job'])
assert json.loads(job.decrypted_extra_vars()) == expected_extra_vars
if default:
assert default not in json.loads(job.extra_vars).values()
assert launch_value not in json.loads(job.extra_vars).values()
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
@pytest.mark.parametrize('default, status', [
('SUPERSECRET', 200),
(['some', 'invalid', 'list'], 400),
({'some-invalid': 'dict'}, 400),
(False, 400)
])
def test_survey_spec_default_passwords_are_encrypted(job_template, post, admin_user, default, status):
job_template.survey_enabled = True
job_template.save()
input_data = {
'description': 'A survey',
'spec': [{
'index': 0,
'question_name': 'What is your password?',
'required': True,
'variable': 'secret_value',
'default': default,
'type': 'password'
}],
'name': 'my survey'
}
resp = post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=status)
if status == 200:
updated_jt = JobTemplate.objects.get(pk=job_template.pk)
assert updated_jt.survey_spec['spec'][0]['default'].startswith('$encrypted$')
job = updated_jt.create_unified_job()
assert json.loads(job.extra_vars)['secret_value'].startswith('$encrypted$')
assert json.loads(job.decrypted_extra_vars()) == {
'secret_value': default
}
else:
assert "for 'secret_value' expected to be a string." in str(resp.data)
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db
def test_survey_spec_default_passwords_encrypted_on_update(job_template, post, put, admin_user):
input_data = {
'description': 'A survey',
'spec': [{
'index': 0,
'question_name': 'What is your password?',
'required': True,
'variable': 'secret_value',
'default': 'SUPERSECRET',
'type': 'password'
}],
'name': 'my survey'
}
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=200)
updated_jt = JobTemplate.objects.get(pk=job_template.pk)
# simulate a survey field edit where we're not changing the default value
input_data['spec'][0]['default'] = '$encrypted$'
post(url=reverse('api:job_template_survey_spec', kwargs={'pk': job_template.id}),
data=input_data, user=admin_user, expect=200)
assert updated_jt.survey_spec == JobTemplate.objects.get(pk=job_template.pk).survey_spec
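A short note on the placeholder used above, restating what the final assertion checks:

# Posting the literal '$encrypted$' placeholder as a password default leaves the
# previously stored (encrypted) default untouched, so the survey_spec is
# unchanged by the second POST.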
# Tests related to survey content validation
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
@pytest.mark.django_db

View File

@@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-
import base64
import json
import re
import shutil
import tempfile
from django.conf import settings
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
import pytest
from awx.api.versioning import reverse
from awx.main.models import (Job, JobEvent, AdHocCommand, AdHocCommandEvent,
Project, ProjectUpdate, ProjectUpdateEvent,
InventoryUpdate, InventorySource,
InventoryUpdateEvent, SystemJob, SystemJobEvent)
def _mk_project_update():
project = Project()
project.save()
return ProjectUpdate(project=project)
def _mk_inventory_update():
source = InventorySource()
source.save()
iu = InventoryUpdate(inventory_source=source)
return iu
@pytest.fixture(scope='function')
def sqlite_copy_expert(request):
# copy_expert is postgres-specific, and SQLite doesn't support it; mock its
# behavior to test that it writes a file that contains stdout from events
path = tempfile.mkdtemp(prefix='job-event-stdout')
def write_stdout(self, sql, fd):
# simulate postgres copy_expert support with ORM code
parts = sql.split(' ')
tablename = parts[parts.index('from') + 1]
for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent):
if cls._meta.db_table == tablename:
for event in cls.objects.order_by('start_line').all():
fd.write(event.stdout.encode('utf-8'))
setattr(SQLiteCursorWrapper, 'copy_expert', write_stdout)
request.addfinalizer(lambda: shutil.rmtree(path))
request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
return path
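For context, a rough sketch of the PostgreSQL-side call this fixture stands in for; psycopg2's copy_expert is the real API, but the connection string, file path, and query below are hypothetical placeholders, not taken from AWX:

# Hypothetical sketch of the copy_expert usage being mocked above.
import psycopg2

conn = psycopg2.connect('dbname=awx')  # placeholder DSN
with conn.cursor() as cursor:
    with open('/tmp/job_stdout.txt', 'w') as fd:
        cursor.copy_expert(
            'COPY (SELECT stdout FROM main_jobevent ORDER BY start_line) TO STDOUT',
            fd,
        )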
@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
[Job, JobEvent, 'job', 'api:job_stdout'],
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
])
def test_text_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
job = Parent()
job.save()
for i in range(3):
Child(**{relation: job, 'stdout': 'Testing {}\n'.format(i), 'start_line': i}).save()
url = reverse(view, kwargs={'pk': job.pk}) + '?format=txt'
response = get(url, user=admin, expect=200)
assert response.content.splitlines() == ['Testing %d' % i for i in range(3)]
@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
[Job, JobEvent, 'job', 'api:job_stdout'],
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
])
@pytest.mark.parametrize('download', [True, False])
def test_ansi_stdout_filtering(sqlite_copy_expert, Parent, Child, relation,
view, download, get, admin):
job = Parent()
job.save()
for i in range(3):
Child(**{
relation: job,
'stdout': '\x1B[0;36mTesting {}\x1B[0m\n'.format(i),
'start_line': i
}).save()
url = reverse(view, kwargs={'pk': job.pk})
# ansi codes in ?format=txt should get filtered
fmt = "?format={}".format("txt_download" if download else "txt")
response = get(url + fmt, user=admin, expect=200)
assert response.content.splitlines() == ['Testing %d' % i for i in range(3)]
has_download_header = response.has_header('Content-Disposition')
assert has_download_header if download else not has_download_header
# ask for ansi and you'll get it
fmt = "?format={}".format("ansi_download" if download else "ansi")
response = get(url + fmt, user=admin, expect=200)
assert response.content.splitlines() == ['\x1B[0;36mTesting %d\x1B[0m' % i for i in range(3)]
has_download_header = response.has_header('Content-Disposition')
assert has_download_header if download else not has_download_header
@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
[Job, JobEvent, 'job', 'api:job_stdout'],
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
])
def test_colorized_html_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
job = Parent()
job.save()
for i in range(3):
Child(**{
relation: job,
'stdout': '\x1B[0;36mTesting {}\x1B[0m\n'.format(i),
'start_line': i
}).save()
url = reverse(view, kwargs={'pk': job.pk}) + '?format=html'
response = get(url, user=admin, expect=200)
assert '.ansi36 { color: #2dbaba; }' in response.content
for i in range(3):
assert '<span class="ansi36">Testing {}</span>'.format(i) in response.content
@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
[Job, JobEvent, 'job', 'api:job_stdout'],
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
])
def test_stdout_line_range(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
job = Parent()
job.save()
for i in range(20):
Child(**{relation: job, 'stdout': 'Testing {}\n'.format(i), 'start_line': i}).save()
url = reverse(view, kwargs={'pk': job.pk}) + '?format=html&start_line=5&end_line=10'
response = get(url, user=admin, expect=200)
assert re.findall('Testing [0-9]+', response.content) == ['Testing %d' % i for i in range(5, 10)]
@pytest.mark.django_db
def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):
job = SystemJob()
job.save()
for i in range(3):
SystemJobEvent(system_job=job, stdout='Testing {}\n'.format(i), start_line=i).save()
url = reverse('api:system_job_detail', kwargs={'pk': job.pk})
response = get(url, user=admin, expect=200)
assert response.data['result_stdout'].splitlines() == ['Testing %d' % i for i in range(3)]
@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
[Job, JobEvent, 'job', 'api:job_stdout'],
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
])
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
def test_max_bytes_display(sqlite_copy_expert, Parent, Child, relation, view, fmt, get, admin):
job = Parent()
job.save()
total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
large_stdout = 'X' * total_bytes
Child(**{relation: job, 'stdout': large_stdout, 'start_line': 0}).save()
url = reverse(view, kwargs={'pk': job.pk})
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
assert response.content == (
'Standard Output too large to display ({actual} bytes), only download '
'supported for sizes over {max} bytes'.format(
actual=total_bytes,
max=settings.STDOUT_MAX_BYTES_DISPLAY
)
)
response = get(url + '?format={}_download'.format(fmt), user=admin, expect=200)
assert response.content == large_stdout
@pytest.mark.django_db
@pytest.mark.parametrize('Cls, view', [
[_mk_project_update, 'api:project_update_stdout'],
[_mk_inventory_update, 'api:inventory_update_stdout']
])
@pytest.mark.parametrize('fmt', ['txt', 'ansi', 'txt_download', 'ansi_download'])
def test_legacy_result_stdout_text_fallback(Cls, view, fmt, get, admin):
# older versions stored raw stdout in a raw text blob at
# main_unifiedjob.result_stdout_text; this test ensures that the fallback
# works properly if no job events exist
job = Cls()
job.save()
job.result_stdout_text = 'LEGACY STDOUT!'
job.save()
url = reverse(view, kwargs={'pk': job.pk})
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
assert response.content == 'LEGACY STDOUT!'
@pytest.mark.django_db
@pytest.mark.parametrize('Cls, view', [
[_mk_project_update, 'api:project_update_stdout'],
[_mk_inventory_update, 'api:inventory_update_stdout']
])
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
def test_legacy_result_stdout_with_max_bytes(Cls, view, fmt, get, admin):
job = Cls()
job.save()
total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
large_stdout = 'X' * total_bytes
job.result_stdout_text = large_stdout
job.save()
url = reverse(view, kwargs={'pk': job.pk})
response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
assert response.content == (
'Standard Output too large to display ({actual} bytes), only download '
'supported for sizes over {max} bytes'.format(
actual=total_bytes,
max=settings.STDOUT_MAX_BYTES_DISPLAY
)
)
response = get(url + '?format={}'.format(fmt + '_download'), user=admin, expect=200)
assert response.content == large_stdout
@pytest.mark.django_db
@pytest.mark.parametrize('Parent, Child, relation, view', [
[Job, JobEvent, 'job', 'api:job_stdout'],
[AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
[_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
[_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
])
@pytest.mark.parametrize('fmt', ['txt', 'ansi', 'txt_download', 'ansi_download'])
def test_text_with_unicode_stdout(sqlite_copy_expert, Parent, Child, relation,
view, get, admin, fmt):
job = Parent()
job.save()
for i in range(3):
Child(**{relation: job, 'stdout': u'{}\n'.format(i), 'start_line': i}).save()
url = reverse(view, kwargs={'pk': job.pk}) + '?format=' + fmt
response = get(url, user=admin, expect=200)
assert response.content.splitlines() == ['%d' % i for i in range(3)]
@pytest.mark.django_db
def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
job = Job()
job.save()
for i in range(3):
JobEvent(job=job, stdout=u'{}\n'.format(i), start_line=i).save()
url = reverse(
'api:job_stdout',
kwargs={'pk': job.pk}
) + '?format=json&content_encoding=base64&content_format=ansi'
response = get(url, user=admin, expect=200)
content = base64.b64decode(json.loads(response.content)['content'])
assert content.splitlines() == ['%d' % i for i in range(3)]

View File

@@ -73,7 +73,8 @@ def user():
try:
user = User.objects.get(username=name)
except User.DoesNotExist:
user = User(username=name, is_superuser=is_superuser, password=name)
user = User(username=name, is_superuser=is_superuser)
user.set_password(name)
user.save()
return user
return u
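The fixture change above swaps a raw password kwarg for set_password(); a minimal sketch of the difference using the standard Django auth API, illustrative only:

# Illustrative only: assigning User.password stores the raw string, while
# set_password() stores a salted hash that check_password() can verify.
from django.contrib.auth.models import User

u = User(username='example')
u.set_password('example')
assert u.password != 'example'   # stored hashed, e.g. 'pbkdf2_sha256$...'
assert u.check_password('example')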
@@ -544,7 +545,8 @@ def _request(verb):
response.data = data_copy
print(response.data)
assert response.status_code == expect
response.render()
if hasattr(response, 'render'):
response.render()
return response
return rf

View File

@@ -0,0 +1,69 @@
import mock
import pytest
from awx.main.models import (Job, JobEvent, ProjectUpdate, ProjectUpdateEvent,
AdHocCommand, AdHocCommandEvent, InventoryUpdate,
InventorySource, InventoryUpdateEvent, SystemJob,
SystemJobEvent)
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_job_event_websocket_notifications(emit):
j = Job(id=123)
j.save()
JobEvent.create_from_data(job_id=j.pk)
assert len(emit.call_args_list) == 1
topic, payload = emit.call_args_list[0][0]
assert topic == 'job_events-123'
assert payload['job'] == 123
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_ad_hoc_event_websocket_notifications(emit):
ahc = AdHocCommand(id=123)
ahc.save()
AdHocCommandEvent.create_from_data(ad_hoc_command_id=ahc.pk)
assert len(emit.call_args_list) == 1
topic, payload = emit.call_args_list[0][0]
assert topic == 'ad_hoc_command_events-123'
assert payload['ad_hoc_command'] == 123
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_project_update_event_websocket_notifications(emit, project):
pu = ProjectUpdate(id=123, project=project)
pu.save()
ProjectUpdateEvent.create_from_data(project_update_id=pu.pk)
assert len(emit.call_args_list) == 1
topic, payload = emit.call_args_list[0][0]
assert topic == 'project_update_events-123'
assert payload['project_update'] == 123
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_inventory_update_event_websocket_notifications(emit, inventory):
source = InventorySource()
source.save()
iu = InventoryUpdate(id=123, inventory_source=source)
iu.save()
InventoryUpdateEvent.create_from_data(inventory_update_id=iu.pk)
assert len(emit.call_args_list) == 1
topic, payload = emit.call_args_list[0][0]
assert topic == 'inventory_update_events-123'
assert payload['inventory_update'] == 123
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_system_job_event_websocket_notifications(emit, inventory):
j = SystemJob(id=123)
j.save()
SystemJobEvent.create_from_data(system_job_id=j.pk)
assert len(emit.call_args_list) == 1
topic, payload = emit.call_args_list[0][0]
assert topic == 'system_job_events-123'
assert payload['system_job'] == 123

View File

@@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-
import pytest
import mock
import six
from django.core.exceptions import ValidationError
@@ -13,6 +16,52 @@ from awx.main.models import (
from awx.main.utils.filters import SmartFilter
@pytest.mark.django_db
class TestInventoryScript:
def test_hostvars(self, inventory):
inventory.hosts.create(name='ahost', variables={"foo": "bar"})
assert inventory.get_script_data(
hostvars=True
)['_meta']['hostvars']['ahost'] == {
'foo': 'bar'
}
def test_towervars(self, inventory):
host = inventory.hosts.create(name='ahost')
assert inventory.get_script_data(
hostvars=True,
towervars=True
)['_meta']['hostvars']['ahost'] == {
'remote_tower_enabled': 'true',
'remote_tower_id': host.id
}
@pytest.mark.django_db
class TestActiveCount:
def test_host_active_count(self, organization):
inv1 = Inventory.objects.create(name='inv1', organization=organization)
inv2 = Inventory.objects.create(name='inv2', organization=organization)
assert Host.objects.active_count() == 0
inv1.hosts.create(name='host1')
inv2.hosts.create(name='host1')
assert Host.objects.active_count() == 1
inv1.hosts.create(name='host2')
assert Host.objects.active_count() == 2
def test_active_count_minus_tower(self, inventory):
inventory.hosts.create(name='locally-managed-host')
source = inventory.inventory_sources.create(
name='tower-source', source='tower'
)
source.hosts.create(
name='remotely-managed-host', inventory=inventory
)
assert Host.objects.active_count() == 1
@pytest.mark.django_db
class TestSCMUpdateFeatures:
@@ -103,6 +152,30 @@ def setup_inventory_groups(inventory, group_factory):
groupB.save()
@pytest.mark.django_db
def test_inventory_update_name(inventory, inventory_source):
iu = inventory_source.update()
assert inventory_source.name != inventory.name
assert iu.name == inventory.name + ' - ' + inventory_source.name
@pytest.mark.django_db
def test_inventory_name_with_unicode(inventory, inventory_source):
inventory.name = six.u('オオオ')
inventory.save()
iu = inventory_source.update()
assert iu.name.startswith(inventory.name)
@pytest.mark.django_db
def test_inventory_update_excessively_long_name(inventory, inventory_source):
inventory.name = 'a' * 400 # field max length 512
inventory_source.name = 'b' * 400
iu = inventory_source.update()
assert inventory_source.name != inventory.name
assert iu.name.startswith(inventory.name)
@pytest.mark.django_db
class TestHostManager:
def test_host_filter_not_smart(self, setup_ec2_gce, organization):

View File

@@ -0,0 +1,51 @@
import pytest
from awx.main.models import JobTemplate, Job
@pytest.mark.django_db
def test_awx_virtualenv_from_settings(inventory, project, machine_credential):
jt = JobTemplate.objects.create(
name='my-jt',
inventory=inventory,
project=project,
playbook='helloworld.yml'
)
jt.credentials.add(machine_credential)
job = jt.create_unified_job()
assert job.ansible_virtualenv_path == '/venv/ansible'
@pytest.mark.django_db
def test_awx_custom_virtualenv(inventory, project, machine_credential):
jt = JobTemplate.objects.create(
name='my-jt',
inventory=inventory,
project=project,
playbook='helloworld.yml'
)
jt.credentials.add(machine_credential)
job = jt.create_unified_job()
job.project.organization.custom_virtualenv = '/venv/fancy-org'
job.project.organization.save()
assert job.ansible_virtualenv_path == '/venv/fancy-org'
job.project.custom_virtualenv = '/venv/fancy-proj'
job.project.save()
assert job.ansible_virtualenv_path == '/venv/fancy-proj'
job.job_template.custom_virtualenv = '/venv/fancy-jt'
job.job_template.save()
assert job.ansible_virtualenv_path == '/venv/fancy-jt'
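Summarizing the lookup order the assertions above exercise (derived from the test itself):

# custom_virtualenv resolution: the job template's value wins over the project's,
# which wins over the organization's; with none set, the job falls back to the
# default /venv/ansible path.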
@pytest.mark.django_db
def test_awx_custom_virtualenv_without_jt(project):
project.custom_virtualenv = '/venv/fancy-proj'
project.save()
job = Job(project=project)
job.save()
job = Job.objects.get(pk=job.id)
assert job.ansible_virtualenv_path == '/venv/fancy-proj'

View File

@@ -0,0 +1,205 @@
from datetime import datetime
import mock
import pytest
import pytz
from awx.main.models import JobTemplate, Schedule
@pytest.fixture
def job_template(inventory, project):
# need related resources set for these tests
return JobTemplate.objects.create(
name='test-job_template',
inventory=inventory,
project=project
)
@pytest.mark.django_db
def test_repeats_forever(job_template):
s = Schedule(
name='Some Schedule',
rrule='DTSTART:20300112T210000Z RRULE:FREQ=DAILY;INTERVAL=1',
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == str(s.dtstart) == '2030-01-12 21:00:00+00:00'
assert s.dtend is None
@pytest.mark.django_db
def test_no_recurrence_utc(job_template):
s = Schedule(
name='Some Schedule',
rrule='DTSTART:20300112T210000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1',
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == str(s.dtstart) == str(s.dtend) == '2030-01-12 21:00:00+00:00'
@pytest.mark.django_db
def test_no_recurrence_est(job_template):
s = Schedule(
name='Some Schedule',
rrule='DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1',
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == str(s.dtstart) == str(s.dtend) == '2030-01-13 02:00:00+00:00'
@pytest.mark.django_db
def test_next_run_utc(job_template):
s = Schedule(
name='Some Schedule',
rrule='DTSTART:20300112T210000Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYSETPOS=1;COUNT=4',
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == '2030-02-02 21:00:00+00:00'
assert str(s.next_run) == str(s.dtstart)
assert str(s.dtend) == '2030-05-04 21:00:00+00:00'
@pytest.mark.django_db
def test_next_run_est(job_template):
s = Schedule(
name='Some Schedule',
rrule='DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYSETPOS=1;COUNT=4',
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == '2030-02-03 02:00:00+00:00'
assert str(s.next_run) == str(s.dtstart)
# March 10, 2030 is when DST takes effect in NYC
assert str(s.dtend) == '2030-05-05 01:00:00+00:00'
@pytest.mark.django_db
def test_year_boundary(job_template):
rrule = 'DTSTART;TZID=America/New_York:20301231T230000 RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=12;BYMONTHDAY=31;COUNT=4' # noqa
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == '2031-01-01 04:00:00+00:00' # UTC = +5 EST
assert str(s.next_run) == str(s.dtstart)
assert str(s.dtend) == '2034-01-01 04:00:00+00:00' # UTC = +5 EST
@pytest.mark.django_db
def test_leap_year_day(job_template):
rrule = 'DTSTART;TZID=America/New_York:20320229T050000 RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=02;BYMONTHDAY=29;COUNT=2' # noqa
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == '2032-02-29 10:00:00+00:00' # UTC = +5 EST
assert str(s.next_run) == str(s.dtstart)
assert str(s.dtend) == '2036-02-29 10:00:00+00:00' # UTC = +5 EST
@pytest.mark.django_db
@pytest.mark.parametrize('until, dtend', [
['20180602T170000Z', '2018-06-02 12:00:00+00:00'],
['20180602T000000Z', '2018-06-01 12:00:00+00:00'],
])
def test_utc_until(job_template, until, dtend):
rrule = 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;UNTIL={}'.format(until)
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
s.save()
assert str(s.next_run) == '2018-06-01 12:00:00+00:00'
assert str(s.next_run) == str(s.dtstart)
assert str(s.dtend) == dtend
@pytest.mark.django_db
@pytest.mark.parametrize('dtstart, until', [
['20180601T120000Z', '20180602T170000'],
['TZID=America/New_York:20180601T120000', '20180602T170000'],
])
def test_tzinfo_naive_until(job_template, dtstart, until):
rrule = 'DTSTART;{} RRULE:FREQ=DAILY;INTERVAL=1;UNTIL={}'.format(dtstart, until) # noqa
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
with pytest.raises(ValueError):
s.save()
@pytest.mark.django_db
def test_until_must_be_utc(job_template):
rrule = 'DTSTART;TZID=America/New_York:20180601T120000 RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=20180602T000000' # noqa the Z is required
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
with pytest.raises(ValueError) as e:
s.save()
assert 'RRULE UNTIL values must be specified in UTC' in str(e)
@pytest.mark.django_db
def test_utc_until_in_the_past(job_template):
rrule = 'DTSTART:20180601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=20150101T100000Z'
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
s.save()
assert s.next_run is s.dtstart is s.dtend is None
@pytest.mark.django_db
@mock.patch('awx.main.models.schedules.now', lambda: datetime(2030, 03, 05, tzinfo=pytz.utc))
def test_dst_phantom_hour(job_template):
# The DST period in the United States begins at 02:00 (2 am) local time, so
# the hour from 2:00:00 to 2:59:59 does not exist in the night of the
# switch.
# Three Sundays, starting 2:30AM America/New_York, starting Mar 3, 2030,
# should _not_ include Mar 10, 2030 @ 2:30AM (which doesn't exist)
rrule = 'DTSTART;TZID=America/New_York:20300303T023000 RRULE:FREQ=WEEKLY;BYDAY=SU;INTERVAL=1;COUNT=3'
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
s.save()
# 3/10/30 @ 2:30AM is skipped because it _doesn't exist_ <cue twilight zone music>
assert str(s.next_run) == '2030-03-17 06:30:00+00:00'
@pytest.mark.django_db
def test_beginning_of_time(job_template):
# ensure that really large generators don't have performance issues
rrule = 'DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1'
s = Schedule(
name='Some Schedule',
rrule=rrule,
unified_job_template=job_template
)
with pytest.raises(ValueError):
s.save()

View File

@@ -25,9 +25,11 @@ def test_default_cred_types():
'insights',
'net',
'openstack',
'rhv',
'satellite6',
'scm',
'ssh',
'tower',
'vault',
'vmware',
]

View File

@@ -30,13 +30,15 @@ def test_job_capacity_and_with_inactive_node():
@pytest.mark.django_db
def test_job_notification_data(inventory):
def test_job_notification_data(inventory, machine_credential, project):
encrypted_str = "$encrypted$"
job = Job.objects.create(
job_template=None, inventory=inventory, name='hi world',
extra_vars=json.dumps({"SSN": "123-45-6789"}),
survey_passwords={"SSN": encrypted_str}
survey_passwords={"SSN": encrypted_str},
project=project,
)
job.credentials = [machine_credential]
notification_data = job.notification_data(block=0)
assert json.loads(notification_data['extra_vars'])['SSN'] == encrypted_str

Some files were not shown because too many files have changed in this diff.